From ce404e4f53ac3d164115850d6ee670f036d400f2 Mon Sep 17 00:00:00 2001 From: Auto Format Date: Mon, 11 Mar 2019 10:38:24 +0100 Subject: [PATCH] format source with scalafmt --- .../testkit/typed/CapturedLogEvent.scala | 32 +- .../akka/actor/testkit/typed/Effect.scala | 30 +- .../actor/testkit/typed/TestException.scala | 1 - .../actor/testkit/typed/TestKitSettings.scala | 9 +- .../typed/internal/ActorSystemStub.scala | 25 +- .../typed/internal/BehaviorTestKitImpl.scala | 9 +- .../testkit/typed/internal/DebugRef.scala | 4 +- .../internal/EffectfulActorContext.scala | 1 - .../typed/internal/StubbedActorContext.scala | 66 +- .../typed/internal/TestInboxImpl.scala | 4 +- .../testkit/typed/internal/TestKitUtils.scala | 14 +- .../typed/internal/TestProbeImpl.scala | 75 +- .../testkit/typed/javadsl/ActorTestKit.scala | 13 +- .../typed/javadsl/BehaviorTestKit.scala | 4 +- .../actor/testkit/typed/javadsl/Effects.scala | 24 +- .../testkit/typed/javadsl/ManualTime.scala | 6 +- .../testkit/typed/javadsl/TestInbox.scala | 5 +- .../typed/javadsl/TestKitJunitResource.scala | 15 +- .../testkit/typed/javadsl/TestProbe.scala | 5 +- .../testkit/typed/scaladsl/ActorTestKit.scala | 60 +- .../typed/scaladsl/ActorTestKitBase.scala | 7 + .../typed/scaladsl/BehaviorTestKit.scala | 2 +- .../testkit/typed/scaladsl/Effects.scala | 24 +- .../testkit/typed/scaladsl/ManualTime.scala | 10 +- .../scaladsl/ScalaTestWithActorTestKit.scala | 10 +- .../testkit/typed/scaladsl/TestInbox.scala | 3 +- .../testkit/typed/scaladsl/TestProbe.scala | 4 + .../typed/scaladsl/ActorTestKitSpec.scala | 13 +- .../typed/scaladsl/BehaviorTestKitSpec.scala | 23 +- .../typed/scaladsl/TestProbeSpec.scala | 7 +- .../ActorConfigurationVerificationSpec.scala | 11 +- .../akka/actor/ActorCreationPerfSpec.scala | 12 +- .../test/scala/akka/actor/ActorDSLSpec.scala | 38 +- .../scala/akka/actor/ActorLifeCycleSpec.scala | 21 +- .../scala/akka/actor/ActorLookupSpec.scala | 34 +- .../scala/akka/actor/ActorMailboxSpec.scala | 198 ++- 
.../test/scala/akka/actor/ActorPathSpec.scala | 6 +- .../test/scala/akka/actor/ActorRefSpec.scala | 105 +- .../scala/akka/actor/ActorSelectionSpec.scala | 37 +- .../scala/akka/actor/ActorSystemSpec.scala | 125 +- .../actor/ActorWithBoundedStashSpec.scala | 12 +- .../scala/akka/actor/ActorWithStashSpec.scala | 7 +- .../src/test/scala/akka/actor/Bench.scala | 41 +- .../scala/akka/actor/ConsistencySpec.scala | 1 - .../akka/actor/CoordinatedShutdownSpec.scala | 165 +- .../akka/actor/DeadLetterSupressionSpec.scala | 1 - .../scala/akka/actor/DeathWatchSpec.scala | 41 +- .../test/scala/akka/actor/DeployerSpec.scala | 73 +- .../test/scala/akka/actor/ExtensionSpec.scala | 16 +- .../test/scala/akka/actor/FSMActorSpec.scala | 41 +- .../test/scala/akka/actor/FSMTimingSpec.scala | 42 +- .../scala/akka/actor/FSMTransitionSpec.scala | 6 +- .../scala/akka/actor/ForwardActorSpec.scala | 12 +- .../scala/akka/actor/FunctionRefSpec.scala | 4 +- .../test/scala/akka/actor/HotSwapSpec.scala | 19 +- .../actor/LocalActorRefProviderSpec.scala | 59 +- .../akka/actor/RestartStrategySpec.scala | 14 +- .../test/scala/akka/actor/SchedulerSpec.scala | 56 +- .../akka/actor/SupervisorHierarchySpec.scala | 249 +-- .../scala/akka/actor/SupervisorMiscSpec.scala | 37 +- .../scala/akka/actor/SupervisorSpec.scala | 125 +- .../scala/akka/actor/SupervisorTreeSpec.scala | 12 +- .../test/scala/akka/actor/Ticket669Spec.scala | 8 +- .../src/test/scala/akka/actor/TimerSpec.scala | 29 +- .../scala/akka/actor/TypedActorSpec.scala | 66 +- .../test/scala/akka/actor/UidClashTest.scala | 17 +- .../akka/actor/dispatch/ActorModelSpec.scala | 261 +-- .../dispatch/BalancingDispatcherSpec.scala | 17 +- .../actor/dispatch/DispatcherActorSpec.scala | 46 +- .../akka/actor/dispatch/DispatchersSpec.scala | 26 +- .../akka/actor/dispatch/PinnedActorSpec.scala | 5 +- .../akka/actor/dungeon/DispatchSpec.scala | 3 +- .../akka/actor/routing/ListenerSpec.scala | 21 +- .../actor/setup/ActorSystemSetupSpec.scala | 12 +- 
.../scala/akka/dataflow/Future2Actor.scala | 8 +- .../dispatch/ControlAwareDispatcherSpec.scala | 2 +- .../dispatch/DispatcherShutdownSpec.scala | 12 +- .../akka/dispatch/ExecutionContextSpec.scala | 61 +- .../dispatch/ForkJoinPoolStarvationSpec.scala | 5 +- .../akka/dispatch/MailboxConfigSpec.scala | 141 +- .../dispatch/PriorityDispatcherSpec.scala | 26 +- .../StablePriorityDispatcherSpec.scala | 48 +- .../sysmsg/SystemMessageListSpec.scala | 2 +- .../test/scala/akka/event/EventBusSpec.scala | 54 +- .../scala/akka/event/EventStreamSpec.scala | 33 +- .../test/scala/akka/event/LoggerSpec.scala | 16 +- .../scala/akka/event/LoggingReceiveSpec.scala | 97 +- .../scala/akka/event/MarkerLoggingSpec.scala | 5 +- .../scala/akka/io/CapacityLimitSpec.scala | 12 +- .../akka/io/InetAddressDnsResolverSpec.scala | 21 +- .../scala/akka/io/TcpConnectionSpec.scala | 458 ++--- .../scala/akka/io/TcpIntegrationSpec.scala | 13 +- .../akka/io/TcpIntegrationSpecSupport.scala | 4 +- .../test/scala/akka/io/TcpListenerSpec.scala | 15 +- .../akka/io/UdpConnectedIntegrationSpec.scala | 6 +- .../dns/AsyncDnsResolverIntegrationSpec.scala | 53 +- .../scala/akka/io/dns/DnsSettingsSpec.scala | 37 +- .../akka/io/dns/DockerBindDnsService.scala | 27 +- .../io/dns/NameserverAddressParserSpec.scala | 8 +- .../io/dns/internal/AsyncDnsManagerSpec.scala | 5 +- .../dns/internal/AsyncDnsResolverSpec.scala | 14 +- .../akka/io/dns/internal/MessageSpec.scala | 3 +- .../dns/internal/ResolvConfParserSpec.scala | 15 +- .../io/dns/internal/TcpDnsClientSpec.scala | 2 +- .../src/test/scala/akka/pattern/AskSpec.scala | 22 +- .../BackoffOnRestartSupervisorSpec.scala | 18 +- .../akka/pattern/BackoffSupervisorSpec.scala | 65 +- .../akka/pattern/CircuitBreakerMTSpec.scala | 30 +- .../akka/pattern/CircuitBreakerSpec.scala | 9 +- .../test/scala/akka/pattern/PatternSpec.scala | 6 +- .../test/scala/akka/pattern/PipeToSpec.scala | 12 +- .../scala/akka/pattern/PromiseRefSpec.scala | 2 +- 
.../test/scala/akka/pattern/RetrySpec.scala | 39 +- .../scala/akka/routing/BalancingSpec.scala | 23 +- .../scala/akka/routing/BroadcastSpec.scala | 4 +- .../routing/ConfiguredLocalRoutingSpec.scala | 21 +- .../routing/ConsistentHashingRouterSpec.scala | 10 +- .../routing/MetricsBasedResizerSpec.scala | 52 +- .../test/scala/akka/routing/RandomSpec.scala | 21 +- .../test/scala/akka/routing/ResizerSpec.scala | 87 +- .../scala/akka/routing/RoundRobinSpec.scala | 12 +- .../akka/routing/RouteeCreationSpec.scala | 4 +- .../test/scala/akka/routing/RoutingSpec.scala | 44 +- .../ScatterGatherFirstCompletedSpec.scala | 35 +- .../akka/routing/SmallestMailboxSpec.scala | 10 +- .../scala/akka/routing/TailChoppingSpec.scala | 13 +- .../serialization/AsyncSerializeSpec.scala | 7 +- .../DisabledJavaSerializerWarningSpec.scala | 3 +- .../akka/serialization/NoVerification.scala | 3 +- .../SerializationSetupSpec.scala | 46 +- .../akka/serialization/SerializeSpec.scala | 182 +- .../CallingThreadDispatcherModelSpec.scala | 11 +- .../akka/util/BoundedBlockingQueueSpec.scala | 80 +- .../test/scala/akka/util/ByteStringSpec.scala | 591 +++++-- .../test/scala/akka/util/DurationSpec.scala | 4 +- .../scala/akka/util/IgnoreForScala212.scala | 3 +- .../scala/akka/util/ImmutableIntMapSpec.scala | 13 +- .../src/test/scala/akka/util/IndexSpec.scala | 7 +- .../scala/akka/util/PrettyDurationSpec.scala | 22 +- .../akka/actor/typed/ActorContextSpec.scala | 616 ++++--- .../test/scala/akka/actor/typed/AskSpec.scala | 7 +- .../scala/akka/actor/typed/BehaviorSpec.scala | 191 ++- .../scala/akka/actor/typed/DeferredSpec.scala | 33 +- .../akka/actor/typed/ExtensionsSpec.scala | 163 +- .../akka/actor/typed/InterceptSpec.scala | 118 +- .../akka/actor/typed/LogMessagesSpec.scala | 22 +- .../scala/akka/actor/typed/MonitorSpec.scala | 12 +- .../scala/akka/actor/typed/OrElseSpec.scala | 15 +- .../scala/akka/actor/typed/PropsSpec.scala | 3 +- .../akka/actor/typed/SpawnProtocolSpec.scala | 1 - 
.../akka/actor/typed/SupervisionSpec.scala | 402 ++--- .../scala/akka/actor/typed/TimerSpec.scala | 85 +- .../scala/akka/actor/typed/WatchSpec.scala | 216 +-- .../scala/akka/actor/typed/WidenSpec.scala | 51 +- .../internal/ActorRefSerializationSpec.scala | 3 +- .../typed/internal/ActorSystemSpec.scala | 51 +- .../receptionist/LocalReceptionistSpec.scala | 2 +- .../ServiceKeySerializationSpec.scala | 4 +- .../receptionist/ReceptionistApiSpec.scala | 1 - .../typed/scaladsl/ActorContextAskSpec.scala | 29 +- .../scaladsl/ActorContextPipeToSelfSpec.scala | 8 +- .../typed/scaladsl/ActorLoggingSpec.scala | 527 +++--- .../typed/scaladsl/MessageAdapterSpec.scala | 57 +- .../typed/scaladsl/StashBufferSpec.scala | 1 - .../akka/actor/typed/scaladsl/StashSpec.scala | 203 ++- .../akka/actor/typed/scaladsl/StopSpec.scala | 33 +- .../typed/scaladsl/adapter/AdapterSpec.scala | 23 +- .../docs/akka/typed/DispatchersDocSpec.scala | 7 +- .../akka/typed/FaultToleranceDocSpec.scala | 29 +- .../docs/akka/typed/GracefulStopDocSpec.scala | 44 +- .../akka/typed/InteractionPatternsSpec.scala | 108 +- .../scala/docs/akka/typed/IntroSpec.scala | 36 +- .../scala/docs/akka/typed/OOIntroSpec.scala | 42 +- .../scala/docs/akka/typed/RouterSpec.scala | 3 +- .../scala/docs/akka/typed/StashDocSpec.scala | 1 - .../TypedWatchingUntypedSpec.scala | 22 +- .../supervision/SupervisionCompileOnly.scala | 60 +- .../scala/akka/actor/typed/ActorRef.scala | 9 +- .../akka/actor/typed/ActorRefResolver.scala | 2 +- .../scala/akka/actor/typed/ActorSystem.scala | 50 +- .../scala/akka/actor/typed/Behavior.scala | 31 +- .../scala/akka/actor/typed/Dispatchers.scala | 1 + .../scala/akka/actor/typed/Extensions.scala | 14 +- .../main/scala/akka/actor/typed/Logger.scala | 63 +- .../akka/actor/typed/MessageAndSignals.scala | 6 +- .../main/scala/akka/actor/typed/Props.scala | 9 +- .../akka/actor/typed/SpawnProtocol.scala | 3 +- .../akka/actor/typed/SupervisorStrategy.scala | 46 +- .../akka/actor/typed/TypedActorContext.scala | 
1 - .../typed/internal/ActorContextImpl.scala | 23 +- .../actor/typed/internal/ActorRefImpl.scala | 2 +- .../actor/typed/internal/BehaviorImpl.scala | 22 +- .../actor/typed/internal/ExtensionsImpl.scala | 62 +- .../typed/internal/InterceptorImpl.scala | 22 +- .../internal/MiscMessageSerializer.scala | 4 +- .../actor/typed/internal/PoisonPill.scala | 8 +- .../typed/internal/StashBufferImpl.scala | 31 +- .../actor/typed/internal/Supervision.scala | 50 +- .../actor/typed/internal/SystemMessage.scala | 9 +- .../typed/internal/TimerSchedulerImpl.scala | 14 +- .../internal/WithMdcBehaviorInterceptor.scala | 15 +- .../typed/internal/WrappingBehavior.scala | 2 + .../typed/internal/adapter/ActorAdapter.scala | 23 +- .../adapter/ActorContextAdapter.scala | 26 +- .../internal/adapter/ActorRefAdapter.scala | 21 +- .../internal/adapter/ActorSystemAdapter.scala | 23 +- .../internal/adapter/LoggerAdapterImpl.scala | 78 +- .../typed/internal/adapter/PropsAdapter.scala | 4 +- .../receptionist/LocalReceptionist.scala | 23 +- .../receptionist/ReceptionistMessages.scala | 23 +- .../receptionist/ServiceKeySerializer.scala | 4 +- .../internal/routing/GroupRouterImpl.scala | 17 +- .../internal/routing/PoolRouterImpl.scala | 27 +- .../typed/internal/routing/RoutingLogic.scala | 5 +- .../actor/typed/javadsl/ActorContext.scala | 11 +- .../akka/actor/typed/javadsl/AskPattern.scala | 5 +- .../actor/typed/javadsl/BehaviorBuilder.scala | 97 +- .../akka/actor/typed/javadsl/Behaviors.scala | 32 +- .../actor/typed/javadsl/ReceiveBuilder.scala | 33 +- .../typed/receptionist/Receptionist.scala | 22 +- .../actor/typed/scaladsl/ActorContext.scala | 3 +- .../actor/typed/scaladsl/AskPattern.scala | 21 +- .../akka/actor/typed/scaladsl/Behaviors.scala | 12 +- .../actor/typed/scaladsl/StashBuffer.scala | 1 + .../scaladsl/adapter/AdapterExtension.scala | 1 + .../typed/scaladsl/adapter/package.scala | 4 +- .../main/scala-2.11/akka/compat/Future.scala | 9 +- .../scala-2.11/akka/util/ByteIterator.scala | 146 
+- .../main/scala-2.12/akka/compat/Future.scala | 19 +- .../scala-2.12/akka/util/ByteIterator.scala | 146 +- .../scala-2.13+/akka/util/ByteIterator.scala | 145 +- .../scala-2.13+/akka/util/ByteString.scala | 109 +- .../akka/util/ccompat/imm/package.scala | 3 +- .../scala-2.13-/akka/util/ByteString.scala | 103 +- .../akka/util/ccompat/package.scala | 7 +- .../main/scala-2.13/akka/compat/Future.scala | 18 +- .../src/main/scala/akka/AkkaVersion.scala | 8 +- akka-actor/src/main/scala/akka/Done.scala | 1 + akka-actor/src/main/scala/akka/Main.scala | 2 +- akka-actor/src/main/scala/akka/NotUsed.scala | 1 + .../main/scala/akka/actor/AbstractActor.scala | 1 + .../main/scala/akka/actor/AbstractFSM.scala | 47 +- .../main/scala/akka/actor/AbstractProps.scala | 8 +- .../src/main/scala/akka/actor/Actor.scala | 18 +- .../src/main/scala/akka/actor/ActorCell.scala | 97 +- .../src/main/scala/akka/actor/ActorDSL.scala | 12 +- .../src/main/scala/akka/actor/ActorPath.scala | 87 +- .../src/main/scala/akka/actor/ActorRef.scala | 124 +- .../scala/akka/actor/ActorRefProvider.scala | 199 ++- .../scala/akka/actor/ActorSelection.scala | 44 +- .../main/scala/akka/actor/ActorSystem.scala | 289 ++-- .../src/main/scala/akka/actor/Address.scala | 15 +- .../akka/actor/CoordinatedShutdown.scala | 108 +- .../src/main/scala/akka/actor/Deployer.scala | 82 +- .../main/scala/akka/actor/DynamicAccess.scala | 1 + .../src/main/scala/akka/actor/Extension.scala | 4 +- .../main/scala/akka/actor/FaultHandling.scala | 111 +- .../akka/actor/IndirectActorProducer.scala | 6 +- .../actor/LightArrayRevolverScheduler.scala | 94 +- .../src/main/scala/akka/actor/Props.scala | 2 +- .../akka/actor/ReflectiveDynamicAccess.scala | 15 +- .../akka/actor/RepointableActorRef.scala | 53 +- .../src/main/scala/akka/actor/Scheduler.scala | 97 +- .../src/main/scala/akka/actor/Stash.scala | 42 +- .../src/main/scala/akka/actor/Timers.scala | 1 + .../main/scala/akka/actor/TypedActor.scala | 290 ++-- 
.../main/scala/akka/actor/UntypedActor.scala | 1 - .../main/scala/akka/actor/dsl/Creators.scala | 6 +- .../src/main/scala/akka/actor/dsl/Inbox.scala | 113 +- .../scala/akka/actor/dungeon/Children.scala | 49 +- .../actor/dungeon/ChildrenContainer.scala | 30 +- .../scala/akka/actor/dungeon/DeathWatch.scala | 52 +- .../scala/akka/actor/dungeon/Dispatch.scala | 41 +- .../akka/actor/dungeon/FaultHandling.scala | 62 +- .../actor/dungeon/TimerSchedulerImpl.scala | 14 +- .../akka/dispatch/AbstractDispatcher.scala | 109 +- .../akka/dispatch/BalancingDispatcher.scala | 45 +- .../akka/dispatch/BatchingExecutor.scala | 10 +- .../scala/akka/dispatch/CachingConfig.scala | 4 +- .../main/scala/akka/dispatch/Dispatcher.scala | 35 +- .../scala/akka/dispatch/Dispatchers.scala | 102 +- .../ForkJoinExecutorConfigurator.scala | 89 +- .../src/main/scala/akka/dispatch/Future.scala | 29 +- .../main/scala/akka/dispatch/Mailbox.scala | 240 ++- .../main/scala/akka/dispatch/Mailboxes.scala | 108 +- .../akka/dispatch/PinnedDispatcher.scala | 28 +- .../akka/dispatch/ThreadPoolBuilder.scala | 71 +- .../akka/dispatch/affinity/AffinityPool.scala | 130 +- .../akka/dispatch/sysmsg/SystemMessage.scala | 24 +- .../ActorClassificationUnsubscriber.scala | 28 +- .../akka/event/AddressTerminatedTopic.scala | 2 +- .../scala/akka/event/DeadLetterListener.scala | 12 +- .../src/main/scala/akka/event/EventBus.scala | 49 +- .../main/scala/akka/event/EventStream.scala | 53 +- .../akka/event/EventStreamUnsubscriber.scala | 29 +- .../main/scala/akka/event/LoggerMailbox.scala | 8 +- .../src/main/scala/akka/event/Logging.scala | 509 ++++-- .../scala/akka/event/LoggingReceive.scala | 23 +- .../akka/event/japi/EventBusJavaAPI.scala | 6 +- .../scala/akka/event/jul/JavaLogger.scala | 3 +- .../scala/akka/io/DirectByteBufferPool.scala | 13 +- akka-actor/src/main/scala/akka/io/Dns.scala | 66 +- akka-actor/src/main/scala/akka/io/Inet.scala | 3 + .../akka/io/InetAddressDnsResolver.scala | 36 +- 
.../main/scala/akka/io/SelectionHandler.scala | 84 +- .../main/scala/akka/io/SimpleDnsCache.scala | 14 +- .../main/scala/akka/io/SimpleDnsManager.scala | 22 +- akka-actor/src/main/scala/akka/io/Tcp.scala | 127 +- .../main/scala/akka/io/TcpConnection.scala | 149 +- .../scala/akka/io/TcpIncomingConnection.scala | 15 +- .../src/main/scala/akka/io/TcpListener.scala | 26 +- .../src/main/scala/akka/io/TcpManager.scala | 3 +- .../scala/akka/io/TcpOutgoingConnection.scala | 18 +- akka-actor/src/main/scala/akka/io/Udp.scala | 21 +- .../src/main/scala/akka/io/UdpConnected.scala | 40 +- .../scala/akka/io/UdpConnectedManager.scala | 2 +- .../main/scala/akka/io/UdpConnection.scala | 27 +- .../src/main/scala/akka/io/UdpListener.scala | 25 +- .../src/main/scala/akka/io/UdpManager.scala | 3 +- .../src/main/scala/akka/io/UdpSender.scala | 17 +- .../src/main/scala/akka/io/WithUdpSend.scala | 12 +- .../main/scala/akka/io/dns/DnsProtocol.scala | 6 +- .../akka/io/dns/DnsResourceRecords.scala | 52 +- .../main/scala/akka/io/dns/DnsSettings.scala | 44 +- .../main/scala/akka/io/dns/RecordType.scala | 17 +- .../akka/io/dns/internal/AsyncDnsCache.scala | 7 +- .../io/dns/internal/AsyncDnsManager.scala | 46 +- .../io/dns/internal/AsyncDnsResolver.scala | 86 +- .../akka/io/dns/internal/DnsClient.scala | 20 +- .../akka/io/dns/internal/DnsMessage.scala | 53 +- .../scala/akka/io/dns/internal/Question.scala | 1 + .../io/dns/internal/ResolvConfParser.scala | 3 +- .../akka/io/dns/internal/TcpDnsClient.scala | 8 +- .../src/main/scala/akka/japi/JavaAPI.scala | 16 +- .../src/main/scala/akka/japi/Throwables.scala | 1 + .../scala/akka/japi/function/Function.scala | 2 +- .../scala/akka/japi/pf/CaseStatements.scala | 8 +- .../main/scala/akka/pattern/AskSupport.scala | 204 ++- .../src/main/scala/akka/pattern/Backoff.scala | 156 +- .../scala/akka/pattern/BackoffOptions.scala | 117 +- .../akka/pattern/BackoffSupervisor.scala | 183 +- .../scala/akka/pattern/CircuitBreaker.scala | 130 +- 
.../akka/pattern/FutureTimeoutSupport.scala | 22 +- .../akka/pattern/GracefulStopSupport.scala | 11 +- .../main/scala/akka/pattern/Patterns.scala | 112 +- .../scala/akka/pattern/PipeToSupport.scala | 21 +- .../main/scala/akka/pattern/PromiseRef.scala | 13 +- .../scala/akka/pattern/RetrySupport.scala | 12 +- .../scala/akka/pattern/extended/package.scala | 1 - .../internal/BackoffOnRestartSupervisor.scala | 85 +- .../internal/BackoffOnStopSupervisor.scala | 30 +- .../src/main/scala/akka/pattern/package.scala | 9 +- .../main/scala/akka/routing/Balancing.scala | 26 +- .../main/scala/akka/routing/Broadcast.scala | 27 +- .../scala/akka/routing/ConsistentHash.scala | 31 +- .../akka/routing/ConsistentHashing.scala | 87 +- .../main/scala/akka/routing/Listeners.scala | 4 +- .../main/scala/akka/routing/MurmurHash.scala | 2 - .../routing/OptimalSizeExploringResizer.scala | 105 +- .../src/main/scala/akka/routing/Random.scala | 27 +- .../src/main/scala/akka/routing/Resizer.scala | 77 +- .../main/scala/akka/routing/RoundRobin.scala | 27 +- .../scala/akka/routing/RoutedActorCell.scala | 71 +- .../scala/akka/routing/RoutedActorRef.scala | 19 +- .../src/main/scala/akka/routing/Router.scala | 2 +- .../scala/akka/routing/RouterConfig.scala | 30 +- .../routing/ScatterGatherFirstCompleted.scala | 45 +- .../scala/akka/routing/SmallestMailbox.scala | 46 +- .../scala/akka/routing/TailChopping.scala | 73 +- .../akka/serialization/AsyncSerializer.scala | 17 +- .../akka/serialization/Serialization.scala | 126 +- .../SerializationExtension.scala | 2 +- .../serialization/SerializationSetup.scala | 17 +- .../scala/akka/serialization/Serializer.scala | 37 +- .../akka/util/BoundedBlockingQueue.scala | 34 +- .../src/main/scala/akka/util/BoxedType.scala | 19 +- .../util/ClassLoaderObjectInputStream.scala | 3 +- .../main/scala/akka/util/Collections.scala | 2 +- .../src/main/scala/akka/util/HashCode.scala | 1 - .../src/main/scala/akka/util/Helpers.scala | 10 +- 
.../scala/akka/util/ImmutableIntMap.scala | 17 +- .../src/main/scala/akka/util/Index.scala | 36 +- .../akka/util/JavaDurationConverters.scala | 1 + .../main/scala/akka/util/LineNumbers.scala | 36 +- .../src/main/scala/akka/util/LockUtil.scala | 6 +- .../main/scala/akka/util/ManifestInfo.scala | 32 +- .../main/scala/akka/util/MessageBuffer.scala | 4 +- .../src/main/scala/akka/util/OptionVal.scala | 2 +- .../scala/akka/util/PrettyByteString.scala | 11 +- .../main/scala/akka/util/PrettyDuration.scala | 8 +- .../src/main/scala/akka/util/Reflect.scala | 78 +- ...erializedSuspendableExecutionContext.scala | 23 +- .../scala/akka/util/SubclassifiedIndex.scala | 46 +- .../main/scala/akka/util/WildcardIndex.scala | 55 +- .../src/main/scala/akka/agent/Agent.scala | 30 +- .../src/test/scala/akka/agent/AgentSpec.scala | 41 +- .../src/main/scala/akka/BenchRunner.scala | 3 +- .../actor/typed/TypedActorBenchmark.scala | 7 +- .../actor/typed/TypedBenchmarkActors.scala | 72 +- .../src/main/scala/akka/BenchRunner.scala | 3 +- .../scala/akka/actor/ActorBenchmark.scala | 13 +- .../akka/actor/ActorCreationBenchmark.scala | 2 +- .../AffinityPoolComparativeBenchmark.scala | 7 +- .../AffinityPoolIdleCPULevelBenchmark.scala | 10 +- ...AffinityPoolRequestResponseBenchmark.scala | 10 +- .../scala/akka/actor/BenchmarkActors.scala | 38 +- .../akka/actor/ForkJoinActorBenchmark.scala | 18 +- .../scala/akka/actor/JCToolsMailbox.scala | 13 +- .../akka/actor/ManyToOneArrayMailbox.scala | 12 +- .../akka/actor/RequestResponseActors.scala | 13 +- .../actor/RouterPoolCreationBenchmark.scala | 1 - .../scala/akka/actor/ScheduleBenchmark.scala | 14 +- .../akka/actor/StashCreationBenchmark.scala | 1 - .../scala/akka/actor/TellOnlyBenchmark.scala | 47 +- .../ddata/ORSetSerializationBenchmark.scala | 6 +- .../LevelDbBatchingBenchmark.scala | 9 +- .../PersistenceActorDeferBenchmark.scala | 25 +- .../PersistentActorBenchmark.scala | 21 +- ...ctorWithAtLeastOnceDeliveryBenchmark.scala | 39 +- 
.../akka/remote/artery/CodecBenchmark.scala | 134 +- .../remote/artery/FlightRecorderBench.scala | 3 +- .../scala/akka/remote/artery/LatchSink.scala | 2 +- .../remote/artery/SendQueueBenchmark.scala | 24 +- .../akka/stream/EmptySourceBenchmark.scala | 2 +- .../akka/stream/FlatMapConcatBenchmark.scala | 18 +- .../scala/akka/stream/FlowMapBenchmark.scala | 9 +- .../scala/akka/stream/FramingBenchmark.scala | 10 +- .../akka/stream/FusedGraphsBenchmark.scala | 206 +-- .../akka/stream/InterpreterBenchmark.scala | 30 +- .../stream/InvokeWithFeedbackBenchmark.scala | 3 +- .../akka/stream/JsonFramingBenchmark.scala | 13 +- .../scala/akka/stream/MapAsyncBenchmark.scala | 6 +- .../stream/MaterializationBenchmark.scala | 5 +- .../akka/stream/NewLayoutBenchmark.scala | 2 +- .../akka/stream/PartitionHubBenchmark.scala | 26 +- .../akka/stream/SourceRefBenchmark.scala | 6 +- .../OutputStreamSourceStageBenchmark.scala | 4 +- .../akka/stream/io/FileSourcesBenchmark.scala | 10 +- .../stream/io/FileSourcesScaleBenchmark.scala | 16 +- .../util/ByteString_indexOf_Benchmark.scala | 2 +- .../camel/ActorNotRegisteredException.scala | 3 +- .../akka/camel/ActorRouteDefinition.scala | 5 +- .../src/main/scala/akka/camel/Camel.scala | 22 +- .../main/scala/akka/camel/CamelMessage.scala | 76 +- .../src/main/scala/akka/camel/Consumer.scala | 24 +- .../scala/akka/camel/ContextProvider.scala | 1 + .../src/main/scala/akka/camel/Producer.scala | 44 +- .../camel/internal/ActivationMessage.scala | 1 + .../camel/internal/ActivationTracker.scala | 13 +- .../camel/internal/CamelExchangeAdapter.scala | 3 +- .../akka/camel/internal/CamelSupervisor.scala | 38 +- .../internal/ConsumerActorRouteBuilder.scala | 11 +- .../akka/camel/internal/DefaultCamel.scala | 43 +- .../internal/component/ActorComponent.scala | 84 +- .../camel/javaapi/UntypedProducerActor.scala | 1 + akka-camel/src/main/scala/akka/package.scala | 4 +- .../scala/akka/camel/CamelConfigSpec.scala | 1 - .../akka/camel/ConcurrentActivationTest.scala 
| 36 +- .../akka/camel/ConsumerIntegrationTest.scala | 22 +- .../scala/akka/camel/DefaultCamelTest.scala | 14 +- .../scala/akka/camel/MessageScalaTest.scala | 6 +- .../akka/camel/ProducerFeatureTest.scala | 125 +- .../test/scala/akka/camel/TestSupport.scala | 24 +- .../akka/camel/UntypedProducerTest.scala | 13 +- .../internal/ActivationTrackerTest.scala | 31 +- .../ActorComponentConfigurationTest.scala | 13 +- .../component/ActorProducerTest.scala | 53 +- .../component/DurationConverterTest.scala | 1 - .../metrics/ClusterMetricsCollector.scala | 39 +- .../metrics/ClusterMetricsExtension.scala | 11 +- .../metrics/ClusterMetricsRouting.scala | 147 +- .../metrics/ClusterMetricsSettings.scala | 6 +- .../metrics/ClusterMetricsStrategy.scala | 9 +- .../scala/akka/cluster/metrics/Metric.scala | 67 +- .../cluster/metrics/MetricsCollector.scala | 88 +- .../akka/cluster/metrics/Provision.scala | 17 +- .../metrics/protobuf/MessageSerializer.scala | 96 +- .../metrics/ClusterMetricsExtensionSpec.scala | 60 +- .../metrics/ClusterMetricsRoutingSpec.scala | 41 +- .../metrics/sample/StatsSampleSpec.scala | 16 +- .../cluster/metrics/sample/StatsService.scala | 27 +- .../metrics/ClusterMetricsExtensionSpec.scala | 32 +- .../metrics/ClusterMetricsRoutingSpec.scala | 62 +- .../scala/akka/cluster/metrics/EWMASpec.scala | 4 +- .../akka/cluster/metrics/MetricSpec.scala | 40 +- .../metrics/MetricsCollectorSpec.scala | 45 +- .../scala/akka/cluster/metrics/TestUtil.scala | 58 +- .../cluster/metrics/WeightedRouteesSpec.scala | 14 +- .../protobuf/MessageSerializerSpec.scala | 25 +- .../sharding/typed/ClusterShardingQuery.scala | 8 +- .../typed/ClusterShardingSettings.scala | 310 ++-- .../typed/ShardingMessageExtractor.scala | 15 +- .../typed/internal/ClusterShardingImpl.scala | 177 +- .../typed/internal/ShardingSerializer.scala | 3 +- .../typed/javadsl/ClusterSharding.scala | 85 +- .../typed/javadsl/EventSourcedEntity.scala | 21 +- .../typed/scaladsl/ClusterSharding.scala | 41 +- 
.../typed/scaladsl/EventSourcedEntity.scala | 11 +- .../typed/MultiDcClusterShardingSpec.scala | 27 +- ...nfigCompatCheckerClusterShardingSpec.scala | 9 +- .../ClusterShardingPersistenceSpec.scala | 102 +- .../typed/scaladsl/ClusterShardingSpec.scala | 138 +- .../scaladsl/ClusterShardingStateSpec.scala | 7 +- .../typed/scaladsl/EntityTypeKeySpec.scala | 8 +- ...loWorldEventSourcedEntityExampleSpec.scala | 11 +- .../HelloWorldPersistentEntityExample.scala | 35 +- .../typed/ShardingCompileOnlySpec.scala | 17 +- .../cluster/sharding/ClusterSharding.scala | 344 ++-- .../sharding/ClusterShardingSettings.scala | 286 ++-- .../RemoveInternalClusterShardingData.scala | 61 +- .../scala/akka/cluster/sharding/Shard.scala | 280 +-- .../cluster/sharding/ShardCoordinator.scala | 642 +++---- .../akka/cluster/sharding/ShardRegion.scala | 246 +-- .../ClusterShardingMessageSerializer.scala | 199 ++- ...terShardingCustomShardAllocationSpec.scala | 48 +- .../sharding/ClusterShardingFailureSpec.scala | 26 +- .../ClusterShardingGetStateSpec.scala | 27 +- .../ClusterShardingGetStatsSpec.scala | 27 +- .../ClusterShardingGracefulShutdownSpec.scala | 64 +- .../ClusterShardingIncorrectSetupSpec.scala | 20 +- .../sharding/ClusterShardingLeavingSpec.scala | 26 +- .../ClusterShardingMinMembersSpec.scala | 40 +- ...dingRememberEntitiesNewExtractorSpec.scala | 81 +- .../ClusterShardingRememberEntitiesSpec.scala | 48 +- ...sterShardingSingleShardPerEntitySpec.scala | 19 +- .../sharding/ClusterShardingSpec.scala | 194 +-- .../sharding/MultiDcClusterShardingSpec.scala | 46 +- .../ClusterShardingInternalsSpec.scala | 32 +- .../ConcurrentStartupShardingSpec.scala | 7 +- ...nstantRateEntityRecoveryStrategySpec.scala | 8 +- .../CoordinatedShutdownShardingSpec.scala | 21 +- .../InactiveEntityPassivationSpec.scala | 24 +- .../JoinConfigCompatCheckShardingSpec.scala | 12 +- .../LeastShardAllocationStrategySpec.scala | 11 +- .../sharding/PersistentShardSpec.scala | 16 +- 
.../cluster/sharding/ProxyShardingSpec.scala | 27 +- ...emoveInternalClusterShardingDataSpec.scala | 34 +- .../cluster/sharding/SupervisionSpec.scala | 59 +- ...ClusterShardingMessageSerializerSpec.scala | 10 +- .../akka/cluster/client/ClusterClient.scala | 215 +-- .../ClusterClientMessageSerializer.scala | 24 +- .../pubsub/DistributedPubSubMediator.scala | 183 +- .../DistributedPubSubMessageSerializer.scala | 112 +- .../singleton/ClusterSingletonManager.scala | 155 +- .../singleton/ClusterSingletonProxy.scala | 76 +- .../ClusterSingletonMessageSerializer.scala | 22 +- .../client/ClusterClientHandoverSpec.scala | 15 +- .../cluster/client/ClusterClientSpec.scala | 67 +- .../client/ClusterClientStopSpec.scala | 8 +- .../DistributedPubSubMediatorSpec.scala | 13 +- .../pubsub/DistributedPubSubRestartSpec.scala | 10 +- .../ClusterSingletonManagerChaosSpec.scala | 17 +- .../ClusterSingletonManagerDownedSpec.scala | 13 +- .../ClusterSingletonManagerLeaveSpec.scala | 24 +- .../ClusterSingletonManagerSpec.scala | 42 +- .../ClusterSingletonManagerStartupSpec.scala | 23 +- .../MultiDcSingletonManagerSpec.scala | 26 +- .../ClusterClientMessageSerializerSpec.scala | 7 +- ...ributedPubSubMediatorDeadLettersSpec.scala | 8 +- .../DistributedPubSubMediatorRouterSpec.scala | 22 +- ...stributedPubSubMessageSerializerSpec.scala | 11 +- .../ClusterSingletonLeavingSpeedSpec.scala | 18 +- .../singleton/ClusterSingletonProxySpec.scala | 17 +- .../ClusterSingletonRestart2Spec.scala | 24 +- .../ClusterSingletonRestartSpec.scala | 13 +- .../typed/internal/ReplicatorBehavior.scala | 240 +-- .../ddata/typed/javadsl/DistributedData.scala | 3 +- .../ddata/typed/javadsl/Replicator.scala | 79 +- .../typed/javadsl/ReplicatorSettings.scala | 1 + .../typed/scaladsl/DistributedData.scala | 4 +- .../ddata/typed/scaladsl/Replicator.scala | 53 +- .../typed/scaladsl/ReplicatorSettings.scala | 1 + .../ddata/typed/scaladsl/package.scala | 1 + .../scala/akka/cluster/typed/Cluster.scala | 10 +- 
.../akka/cluster/typed/ClusterSingleton.scala | 137 +- .../typed/internal/AdaptedClusterImpl.scala | 178 +- .../AdaptedClusterSingletonImpl.scala | 16 +- .../internal/AkkaClusterTypedSerializer.scala | 12 +- .../receptionist/ClusterReceptionist.scala | 76 +- ...usterReceptionistConfigCompatChecker.scala | 1 - .../ClusterReceptionistSettings.scala | 16 +- .../internal/receptionist/Registry.scala | 8 +- .../typed/MultiDcClusterSingletonSpec.scala | 34 +- .../typed/MultiNodeTypedClusterSpec.scala | 9 +- .../ddata/typed/scaladsl/ReplicatorSpec.scala | 30 +- .../akka/cluster/typed/ActorSystemSpec.scala | 54 +- .../akka/cluster/typed/ClusterApiSpec.scala | 4 +- .../typed/ClusterSingletonApiSpec.scala | 16 +- .../ClusterSingletonPersistenceSpec.scala | 30 +- .../ClusterSingletonPoisonPillSpec.scala | 7 +- .../cluster/typed/RemoteContextAskSpec.scala | 3 +- .../typed/RemoteDeployNotAllowedSpec.scala | 27 +- .../cluster/typed/RemoteMessageSpec.scala | 3 +- .../AkkaClusterTypedSerializerSpec.scala | 23 +- .../ClusterReceptionistSpec.scala | 33 +- .../typed/BasicClusterExampleSpec.scala | 24 +- .../typed/ReceptionistExampleSpec.scala | 33 +- .../typed/SingletonCompileOnlySpec.scala | 11 +- .../main/scala/akka/cluster/AutoDown.scala | 26 +- .../src/main/scala/akka/cluster/Cluster.scala | 73 +- .../cluster/ClusterActorRefProvider.scala | 58 +- .../scala/akka/cluster/ClusterDaemon.scala | 608 ++++--- .../scala/akka/cluster/ClusterEvent.scala | 217 +-- .../scala/akka/cluster/ClusterHeartbeat.scala | 65 +- .../main/scala/akka/cluster/ClusterJmx.scala | 38 +- .../scala/akka/cluster/ClusterReadView.scala | 120 +- .../akka/cluster/ClusterRemoteWatcher.scala | 62 +- .../scala/akka/cluster/ClusterSettings.scala | 62 +- .../cluster/CoordinatedShutdownLeave.scala | 5 +- .../cluster/CrossDcClusterHeartbeat.scala | 90 +- .../scala/akka/cluster/DowningProvider.scala | 12 +- .../src/main/scala/akka/cluster/Gossip.scala | 87 +- .../cluster/JoinConfigCompatChecker.scala | 18 +- 
.../src/main/scala/akka/cluster/Member.scala | 78 +- .../scala/akka/cluster/MembershipState.scala | 71 +- .../scala/akka/cluster/Reachability.scala | 41 +- .../main/scala/akka/cluster/VectorClock.scala | 21 +- .../protobuf/ClusterMessageSerializer.scala | 173 +- .../cluster/routing/ClusterRouterConfig.scala | 154 +- .../cluster/AttemptSysMsgRedeliverySpec.scala | 7 +- ...ientDowningNodeThatIsUnreachableSpec.scala | 37 +- .../ClientDowningNodeThatIsUpSpec.scala | 28 +- .../ClusterAccrualFailureDetectorSpec.scala | 87 +- .../akka/cluster/ClusterDeathWatchSpec.scala | 23 +- .../scala/akka/cluster/ConvergenceSpec.scala | 11 +- .../DeterministicOldestWhenJoiningSpec.scala | 11 +- .../DisallowJoinOfTwoClustersSpec.scala | 6 +- .../akka/cluster/InitialHeartbeatSpec.scala | 11 +- .../cluster/InitialMembersOfNewDcSpec.scala | 9 +- .../akka/cluster/JoinInProgressSpec.scala | 10 +- .../scala/akka/cluster/JoinSeedNodeSpec.scala | 7 +- .../cluster/LargeMessageClusterSpec.scala | 12 +- .../LeaderDowningAllOtherNodesSpec.scala | 13 +- ...aderDowningNodeThatIsUnreachableSpec.scala | 44 +- .../akka/cluster/LeaderElectionSpec.scala | 4 +- .../akka/cluster/LeaderLeavingSpec.scala | 13 +- .../scala/akka/cluster/MBeanSpec.scala | 11 +- .../akka/cluster/MemberWeaklyUpSpec.scala | 21 +- .../MembershipChangeListenerExitingSpec.scala | 48 +- .../MembershipChangeListenerUpSpec.scala | 8 +- .../akka/cluster/MinMembersBeforeUpSpec.scala | 38 +- .../akka/cluster/MultiDcClusterSpec.scala | 19 +- .../MultiDcHeartbeatTakingOverSpec.scala | 56 +- .../akka/cluster/MultiDcLastNodeSpec.scala | 12 +- .../akka/cluster/MultiDcSplitBrainSpec.scala | 27 +- .../cluster/MultiDcSunnyWeatherSpec.scala | 31 +- .../akka/cluster/MultiNodeClusterSpec.scala | 111 +- .../scala/akka/cluster/NodeChurnSpec.scala | 19 +- .../NodeDowningAndBeingRemovedSpec.scala | 11 +- ...LeavingAndExitingAndBeingRemovedSpec.scala | 7 +- .../cluster/NodeLeavingAndExitingSpec.scala | 7 +- .../akka/cluster/NodeMembershipSpec.scala | 4 +- 
.../scala/akka/cluster/NodeUpSpec.scala | 4 +- .../scala/akka/cluster/QuickRestartSpec.scala | 26 +- .../cluster/RestartFirstSeedNodeSpec.scala | 23 +- .../scala/akka/cluster/RestartNode2Spec.scala | 23 +- .../scala/akka/cluster/RestartNode3Spec.scala | 20 +- .../scala/akka/cluster/RestartNodeSpec.scala | 19 +- .../cluster/SharedMediaDriverSupport.scala | 5 +- .../akka/cluster/SingletonClusterSpec.scala | 16 +- .../scala/akka/cluster/SplitBrainSpec.scala | 17 +- .../scala/akka/cluster/StreamRefSpec.scala | 37 +- .../scala/akka/cluster/StressSpec.scala | 273 +-- .../scala/akka/cluster/SunnyWeatherSpec.scala | 6 +- .../SurviveNetworkInstabilitySpec.scala | 31 +- .../scala/akka/cluster/TransitionSpec.scala | 44 +- .../UnreachableNodeJoinsAgainSpec.scala | 28 +- .../ClusterConsistentHashingGroupSpec.scala | 8 +- .../ClusterConsistentHashingRouterSpec.scala | 42 +- .../routing/ClusterRoundRobinSpec.scala | 35 +- .../cluster/routing/UseRoleIgnoredSpec.scala | 99 +- .../scala/akka/cluster/AutoDownSpec.scala | 7 +- .../akka/cluster/ClusterConfigSpec.scala | 6 +- .../akka/cluster/ClusterDeployerSpec.scala | 41 +- .../ClusterDomainEventPublisherSpec.scala | 36 +- .../akka/cluster/ClusterDomainEventSpec.scala | 109 +- .../ClusterHeartbeatSenderStateSpec.scala | 39 +- .../scala/akka/cluster/ClusterLogSpec.scala | 19 +- .../test/scala/akka/cluster/ClusterSpec.scala | 64 +- .../scala/akka/cluster/ClusterTestKit.scala | 25 +- .../akka/cluster/DowningProviderSpec.scala | 23 +- .../akka/cluster/FailureDetectorPuppet.scala | 1 - .../test/scala/akka/cluster/GossipSpec.scala | 101 +- .../cluster/GossipTargetSelectorSpec.scala | 111 +- .../akka/cluster/HeartbeatNodeRingSpec.scala | 2 +- ...ConfigCompatCheckerRollingUpdateSpec.scala | 13 +- .../cluster/JoinConfigCompatCheckerSpec.scala | 87 +- ...JoinConfigCompatPreDefinedChecksSpec.scala | 73 +- .../akka/cluster/MembershipStateSpec.scala | 76 +- .../akka/cluster/ReachabilityPerfSpec.scala | 5 +- 
.../scala/akka/cluster/ReachabilitySpec.scala | 110 +- .../cluster/ResetSystemMessageSeqNrSpec.scala | 14 +- .../ShutdownAfterJoinSeedNodesSpec.scala | 3 +- .../cluster/StartupWithOneThreadSpec.scala | 19 +- .../test/scala/akka/cluster/TestMember.scala | 12 +- .../akka/cluster/VectorClockPerfSpec.scala | 4 +- .../scala/akka/cluster/VectorClockSpec.scala | 10 +- .../ClusterMessageSerializerSpec.scala | 73 +- .../routing/ClusterRouterSupervisorSpec.scala | 13 +- .../circuitbreaker/CircuitBreakerProxy.scala | 132 +- .../circuitbreaker/askExtensions.scala | 36 +- .../scala/akka/contrib/jul/JavaLogger.scala | 13 +- .../akka/contrib/mailbox/PeekMailbox.scala | 14 +- .../akka/contrib/pattern/Aggregator.scala | 8 +- .../contrib/pattern/ReceivePipeline.scala | 11 +- .../akka/contrib/pattern/ReliableProxy.scala | 70 +- .../throttle/TimerBasedThrottler.scala | 36 +- .../contrib/pattern/ReliableProxySpec.scala | 20 +- .../CircuitBreakerProxySpec.scala | 57 +- .../sample/CircuitBreaker.scala | 35 +- .../contrib/mailbox/PeekMailboxSpec.scala | 17 +- .../akka/contrib/pattern/AggregatorSpec.scala | 62 +- .../contrib/pattern/ReceivePipelineSpec.scala | 97 +- .../pattern/ReliableProxyDocSpec.scala | 6 +- .../throttle/TimerBasedThrottleTest.scala | 10 +- .../throttle/TimerBasedThrottlerSpec.scala | 44 +- .../main/scala/akka/discovery/Discovery.scala | 16 +- .../akka/discovery/ServiceDiscovery.scala | 26 +- .../config/ConfigServiceDiscovery.scala | 3 +- .../DiscoveryConfigurationSpec.scala | 21 +- .../scala/akka/discovery/LookupSpec.scala | 24 +- .../AggregateServiceDiscoverySpec.scala | 43 +- .../config/ConfigServiceDiscoverySpec.scala | 16 +- .../config/ConfigServicesParserSpec.scala | 11 +- .../akka/discovery/dns/DnsDiscoverySpec.scala | 17 +- .../dns/DnsServiceDiscoverySpec.scala | 71 +- .../doc/akka/discovery/CompileOnlySpec.scala | 3 +- .../ddata/DeltaPropagationSelector.scala | 30 +- .../akka/cluster/ddata/DistributedData.scala | 3 +- .../akka/cluster/ddata/DurableStore.scala | 
32 +- .../main/scala/akka/cluster/ddata/Flag.scala | 1 + .../scala/akka/cluster/ddata/GCounter.scala | 17 +- .../main/scala/akka/cluster/ddata/GSet.scala | 8 +- .../main/scala/akka/cluster/ddata/Key.scala | 2 +- .../scala/akka/cluster/ddata/LWWMap.scala | 11 +- .../akka/cluster/ddata/LWWRegister.scala | 11 +- .../main/scala/akka/cluster/ddata/ORMap.scala | 89 +- .../scala/akka/cluster/ddata/ORMultiMap.scala | 57 +- .../main/scala/akka/cluster/ddata/ORSet.scala | 46 +- .../scala/akka/cluster/ddata/PNCounter.scala | 28 +- .../akka/cluster/ddata/PNCounterMap.scala | 9 +- .../akka/cluster/ddata/PruningState.scala | 3 +- .../akka/cluster/ddata/ReplicatedData.scala | 6 +- .../scala/akka/cluster/ddata/Replicator.scala | 648 ++++--- .../akka/cluster/ddata/VersionVector.scala | 52 +- .../protobuf/ReplicatedDataSerializer.scala | 445 +++-- .../ReplicatorMessageSerializer.scala | 160 +- .../ddata/protobuf/SerializationSupport.scala | 39 +- .../akka/cluster/ddata/DurableDataSpec.scala | 35 +- .../cluster/ddata/DurablePruningSpec.scala | 12 +- .../ddata/JepsenInspiredInsertSpec.scala | 20 +- .../akka/cluster/ddata/PerformanceSpec.scala | 16 +- .../cluster/ddata/ReplicatorChaosSpec.scala | 30 +- .../cluster/ddata/ReplicatorDeltaSpec.scala | 20 +- .../ddata/ReplicatorMapDeltaSpec.scala | 92 +- .../ddata/ReplicatorORSetDeltaSpec.scala | 19 +- .../cluster/ddata/ReplicatorPruningSpec.scala | 30 +- .../akka/cluster/ddata/ReplicatorSpec.scala | 8 +- .../akka/cluster/ddata/STMultiNodeSpec.scala | 3 +- .../ddata/DeltaPropagationSelectorSpec.scala | 68 +- .../scala/akka/cluster/ddata/FlagSpec.scala | 4 +- .../akka/cluster/ddata/GCounterSpec.scala | 84 +- .../scala/akka/cluster/ddata/GSetSpec.scala | 23 +- .../scala/akka/cluster/ddata/LWWMapSpec.scala | 20 +- .../akka/cluster/ddata/LWWRegisterSpec.scala | 8 +- .../cluster/ddata/LocalConcurrencySpec.scala | 14 +- .../akka/cluster/ddata/LotsOfDataBot.scala | 10 +- .../scala/akka/cluster/ddata/ORMapSpec.scala | 200 ++- 
.../akka/cluster/ddata/ORMultiMapSpec.scala | 204 +-- .../scala/akka/cluster/ddata/ORSetSpec.scala | 76 +- .../akka/cluster/ddata/PNCounterMapSpec.scala | 23 +- .../akka/cluster/ddata/PNCounterSpec.scala | 100 +- .../ddata/ReplicatorSettingsSpec.scala | 3 +- .../cluster/ddata/VersionVectorSpec.scala | 15 +- .../cluster/ddata/WriteAggregatorSpec.scala | 106 +- .../ReplicatedDataSerializerSpec.scala | 123 +- .../ReplicatorMessageSerializerSpec.scala | 52 +- .../src/test/scala/docs/CompileOnlySpec.scala | 1 + .../test/scala/docs/actor/ActorDocSpec.scala | 48 +- .../docs/actor/BlockingDispatcherSample.scala | 6 +- .../test/scala/docs/actor/FSMDocSpec.scala | 19 +- .../docs/actor/FaultHandlingDocSample.scala | 25 +- .../docs/actor/FaultHandlingDocSpec.scala | 27 +- .../docs/actor/InitializationDocSpec.scala | 6 +- .../scala/docs/actor/SchedulerDocSpec.scala | 6 +- .../actor/SharedMutableStateDocSpec.scala | 7 +- .../scala/docs/actor/TypedActorDocSpec.scala | 15 +- .../scala/docs/actor/UnnestedReceives.scala | 4 +- .../actor/io/dns/DnsCompileOnlyDocSpec.scala | 8 +- .../test/scala/docs/agent/AgentDocSpec.scala | 22 +- .../src/test/scala/docs/camel/Consumers.scala | 6 +- .../test/scala/docs/camel/CustomRoute.scala | 17 +- .../test/scala/docs/camel/Introduction.scala | 12 +- .../src/test/scala/docs/camel/Producers.scala | 25 +- .../scala/docs/camel/PublishSubscribe.scala | 7 +- .../CircuitBreakerDocSpec.scala | 22 +- .../scala/docs/cluster/FactorialBackend.scala | 13 +- .../docs/cluster/FactorialFrontend.scala | 35 +- .../scala/docs/cluster/MetricsListener.scala | 2 +- .../docs/cluster/SimpleClusterListener.scala | 7 +- .../docs/cluster/SimpleClusterListener2.scala | 4 +- .../docs/cluster/TransformationBackend.scala | 11 +- .../docs/cluster/TransformationFrontend.scala | 16 +- .../ClusterSingletonSupervision.scala | 9 +- .../scala/docs/config/ConfigDocSpec.scala | 3 +- .../docs/ddata/DistributedDataDocSpec.scala | 16 +- .../test/scala/docs/ddata/ShoppingCart.scala | 15 +- 
.../test/scala/docs/ddata/TwoPhaseSet.scala | 11 +- .../protobuf/TwoPhaseSetSerializer.scala | 13 +- .../protobuf/TwoPhaseSetSerializer2.scala | 6 +- .../docs/dispatcher/DispatcherDocSpec.scala | 39 +- .../docs/dispatcher/MyUnboundedMailbox.scala | 10 +- .../scala/docs/event/EventBusDocSpec.scala | 7 +- .../scala/docs/event/LoggingDocSpec.scala | 5 +- .../docs/extension/ExtensionDocSpec.scala | 4 +- .../extension/SettingsExtensionDocSpec.scala | 4 +- akka-docs/src/test/scala/docs/faq/Faq.scala | 17 +- .../scala/docs/future/FutureDocSpec.scala | 92 +- .../src/test/scala/docs/io/EchoServer.scala | 40 +- .../src/test/scala/docs/io/IODocSpec.scala | 12 +- .../test/scala/docs/io/ReadBackPressure.scala | 6 +- .../scala/docs/io/ScalaUdpMulticastSpec.scala | 11 +- .../src/test/scala/docs/io/UdpDocSpec.scala | 2 +- .../pattern/BackoffSupervisorDocSpec.scala | 72 +- .../docs/persistence/PersistenceDocSpec.scala | 60 +- .../PersistenceEventAdapterDocSpec.scala | 10 +- .../persistence/PersistenceMultiDocSpec.scala | 24 +- .../PersistencePluginDocSpec.scala | 44 +- .../PersistenceSchemaEvolutionDocSpec.scala | 32 +- .../PersistenceSerializerDocSpec.scala | 2 +- .../LeveldbPersistenceQueryDocSpec.scala | 12 +- .../query/MyEventsByTagPublisher.scala | 15 +- .../query/PersistenceQueryDocSpec.scala | 83 +- .../remoting/RemoteDeploymentDocSpec.scala | 10 +- .../ConsistentHashingRouterDocSpec.scala | 9 +- .../docs/routing/CustomRouterDocSpec.scala | 22 +- .../scala/docs/routing/RouterDocSpec.scala | 57 +- .../serialization/SerializationDocSpec.scala | 15 +- .../docs/stream/ActorPublisherDocSpec.scala | 8 +- .../docs/stream/ActorSubscriberDocSpec.scala | 3 +- .../docs/stream/CompositionDocSpec.scala | 42 +- .../test/scala/docs/stream/FlowDocSpec.scala | 34 +- .../scala/docs/stream/FlowErrorDocSpec.scala | 38 +- .../docs/stream/FlowParallelismDocSpec.scala | 23 +- .../docs/stream/FlowStreamRefsDocSpec.scala | 11 +- .../scala/docs/stream/GraphCyclesSpec.scala | 2 +- 
.../scala/docs/stream/GraphDSLDocSpec.scala | 74 +- .../scala/docs/stream/GraphStageDocSpec.scala | 135 +- .../stream/GraphStageLoggingDocSpec.scala | 6 +- .../test/scala/docs/stream/HubsDocSpec.scala | 40 +- .../docs/stream/IntegrationDocSpec.scala | 141 +- .../scala/docs/stream/QuickStartDocSpec.scala | 10 +- .../stream/RateTransformationDocSpec.scala | 66 +- .../docs/stream/ReactiveStreamsDocSpec.scala | 7 +- .../scala/docs/stream/RestartDocSpec.scala | 14 +- .../scala/docs/stream/SinkRecipeDocSpec.scala | 3 +- .../docs/stream/StreamBuffersRateSpec.scala | 28 +- .../docs/stream/StreamTestKitDocSpec.scala | 23 +- .../scala/docs/stream/SubstreamDocSpec.scala | 27 +- .../TwitterStreamQuickstartDocSpec.scala | 60 +- .../stream/cookbook/RecipeAdhocSource.scala | 45 +- .../stream/cookbook/RecipeByteStrings.scala | 69 +- .../cookbook/RecipeCollectingMetrics.scala | 2 +- .../stream/cookbook/RecipeDecompress.scala | 6 +- .../docs/stream/cookbook/RecipeDigest.scala | 13 +- .../cookbook/RecipeDroppyBroadcast.scala | 17 +- .../cookbook/RecipeGlobalRateLimit.scala | 40 +- .../docs/stream/cookbook/RecipeHold.scala | 32 +- .../cookbook/RecipeLoggingElements.scala | 16 +- .../stream/cookbook/RecipeMissedTicks.scala | 6 +- .../stream/cookbook/RecipeMultiGroupBy.scala | 17 +- .../stream/cookbook/RecipeParseLines.scala | 23 +- .../stream/cookbook/RecipeReduceByKey.scala | 33 +- .../stream/cookbook/RecipeWorkerPool.scala | 4 +- .../docs/stream/io/StreamFileDocSpec.scala | 11 +- .../docs/stream/io/StreamTcpDocSpec.scala | 66 +- .../docs/stream/operators/SourceOrFlow.scala | 18 +- .../scala/docs/testkit/ParentChildSpec.scala | 6 +- .../scala/docs/testkit/PlainWordSpec.scala | 8 +- .../scala/docs/testkit/TestKitUsageSpec.scala | 20 +- .../scala/docs/testkit/TestkitDocSpec.scala | 14 +- .../ActorHierarchyExperiments.scala | 2 +- .../src/test/scala/tutorial_4/Device.scala | 9 +- .../test/scala/tutorial_4/DeviceGroup.scala | 9 +- .../test/scala/tutorial_4/DeviceManager.scala | 4 +- 
.../src/test/scala/tutorial_5/Device.scala | 9 +- .../test/scala/tutorial_5/DeviceGroup.scala | 18 +- .../scala/tutorial_5/DeviceGroupQuery.scala | 43 +- .../tutorial_5/DeviceGroupQuerySpec.scala | 110 +- .../scala/tutorial_5/DeviceGroupSpec.scala | 10 +- .../test/scala/tutorial_5/DeviceManager.scala | 4 +- .../ActorHierarchyExperiments.scala | 5 +- .../test/scala/typed/tutorial_3/Device.scala | 7 +- .../typed/tutorial_3/DeviceInProgress.scala | 8 +- .../test/scala/typed/tutorial_4/Device.scala | 7 +- .../scala/typed/tutorial_4/DeviceGroup.scala | 13 +- .../typed/tutorial_4/DeviceManager.scala | 8 +- .../test/scala/typed/tutorial_5/Device.scala | 7 +- .../scala/typed/tutorial_5/DeviceGroup.scala | 21 +- .../typed/tutorial_5/DeviceGroupQuery.scala | 25 +- .../tutorial_5/DeviceGroupQuerySpec.scala | 90 +- .../typed/tutorial_5/DeviceGroupSpec.scala | 10 +- .../typed/tutorial_5/DeviceManager.scala | 12 +- .../akka/remote/testconductor/Conductor.scala | 162 +- .../akka/remote/testconductor/DataTypes.scala | 37 +- .../akka/remote/testconductor/Extension.scala | 9 +- .../akka/remote/testconductor/Player.scala | 124 +- .../testconductor/RemoteConnection.scala | 28 +- .../akka/remote/testkit/MultiNodeSpec.scala | 89 +- .../remote/testkit/PerfFlamesSupport.scala | 2 +- .../akka/osgi/ActorSystemActivator.scala | 18 +- .../osgi/BundleDelegatingClassLoader.scala | 36 +- .../scala/akka/osgi/DefaultOSGiLogger.scala | 8 +- .../akka/osgi/OsgiActorSystemFactory.scala | 15 +- .../akka/osgi/ActorSystemActivatorTest.scala | 9 +- .../scala/akka/osgi/PojoSRTestSupport.scala | 48 +- .../scala/akka/osgi/test/TestActivators.scala | 2 +- .../src/test/scala/docs/osgi/Activator.scala | 2 +- .../persistence/query/EventEnvelope.scala | 6 +- .../scala/akka/persistence/query/Offset.scala | 1 + .../persistence/query/PersistenceQuery.scala | 16 +- .../query/ReadJournalProvider.scala | 1 + .../CurrentEventsByPersistenceIdQuery.scala | 3 +- .../javadsl/CurrentEventsByTagQuery.scala | 1 - 
.../javadsl/EventsByPersistenceIdQuery.scala | 3 +- .../query/javadsl/ReadJournal.scala | 1 - .../leveldb/AllPersistenceIdsPublisher.scala | 4 +- .../journal/leveldb/DeliveryBuffer.scala | 4 +- .../EventsByPersistenceIdPublisher.scala | 75 +- .../leveldb/EventsByTagPublisher.scala | 58 +- .../leveldb/javadsl/LeveldbReadJournal.scala | 19 +- .../leveldb/scaladsl/LeveldbReadJournal.scala | 61 +- .../CurrentEventsByPersistenceIdQuery.scala | 3 +- .../scaladsl/CurrentEventsByTagQuery.scala | 1 - .../scaladsl/EventsByPersistenceIdQuery.scala | 3 +- .../query/scaladsl/EventsByTagQuery.scala | 1 - .../query/scaladsl/ReadJournal.scala | 1 - .../persistence/query/DummyReadJournal.scala | 11 +- .../query/PersistenceQuerySpec.scala | 34 +- .../leveldb/AllPersistenceIdsSpec.scala | 10 +- .../query/journal/leveldb/Cleanup.scala | 8 +- .../leveldb/EventsByPersistenceIdSpec.scala | 44 +- .../journal/leveldb/EventsByTagSpec.scala | 21 +- .../leveldb/PersistencePluginProxySpec.scala | 34 +- .../leveldb/SharedLeveldbJournalSpec.scala | 13 +- .../serialization/SerializerSpec.scala | 60 +- .../akka/persistence/CapabilityFlags.scala | 5 +- .../scala/akka/persistence/PluginSpec.scala | 7 +- .../akka/persistence/TestSerializer.scala | 3 +- .../persistence/journal/JournalPerfSpec.scala | 9 +- .../persistence/journal/JournalSpec.scala | 99 +- .../akka/persistence/scalatest/MayVerb.scala | 6 +- .../persistence/scalatest/OptionalTests.scala | 9 +- .../snapshot/SnapshotStoreSpec.scala | 35 +- .../akka/persistence/PluginCleanup.scala | 6 +- .../leveldb/LeveldbJournalJavaSpec.scala | 12 +- .../LeveldbJournalNativePerfSpec.scala | 12 +- .../leveldb/LeveldbJournalNativeSpec.scala | 12 +- ...nalNoAtomicPersistMultipleEventsSpec.scala | 13 +- .../local/LocalSnapshotStoreSpec.scala | 9 +- .../akka/persistence/typed/EventAdapter.scala | 3 +- .../typed/EventRejectedException.scala | 2 +- .../akka/persistence/typed/SideEffect.scala | 10 +- .../typed/internal/BehaviorSetup.scala | 51 +- 
.../typed/internal/EffectImpl.scala | 17 +- .../internal/EventSourcedBehaviorImpl.scala | 165 +- .../typed/internal/EventSourcedSettings.scala | 34 +- .../internal/JournalFailureException.scala | 4 +- .../typed/internal/JournalInteractions.scala | 54 +- .../typed/internal/ReplayingEvents.scala | 80 +- .../typed/internal/ReplayingSnapshot.scala | 44 +- .../internal/RequestingRecoveryPermit.scala | 39 +- .../persistence/typed/internal/Running.scala | 113 +- .../typed/internal/StashManagement.scala | 24 +- .../typed/javadsl/CommandHandler.scala | 71 +- .../javadsl/CommandHandlerWithReply.scala | 87 +- .../persistence/typed/javadsl/Effect.scala | 8 +- .../typed/javadsl/EventHandler.scala | 45 +- .../typed/javadsl/EventSourcedBehavior.scala | 11 +- .../persistence/typed/scaladsl/Effect.scala | 7 +- .../typed/scaladsl/EventSourcedBehavior.scala | 28 +- .../typed/ManyRecoveriesSpec.scala | 27 +- .../internal/RecoveryPermitterSpec.scala | 65 +- .../typed/internal/StashStateSpec.scala | 37 +- .../EventSourcedBehaviorFailureSpec.scala | 65 +- .../EventSourcedBehaviorReplySpec.scala | 57 +- .../scaladsl/EventSourcedBehaviorSpec.scala | 318 ++-- .../EventSourcedBehaviorStashSpec.scala | 174 +- .../EventSourcedSequenceNumberSpec.scala | 31 +- .../typed/scaladsl/NullEmptyStateSpec.scala | 33 +- .../scaladsl/OptionalSnapshotStoreSpec.scala | 22 +- .../typed/scaladsl/PerformanceSpec.scala | 58 +- .../PersistentActorCompileOnlyTest.scala | 312 ++-- .../typed/scaladsl/PrimitiveStateSpec.scala | 33 +- .../scaladsl/SnapshotMutableStateSpec.scala | 51 +- ...untExampleWithCommandHandlersInState.scala | 27 +- ...countExampleWithEventHandlersInState.scala | 73 +- .../typed/AccountExampleWithOptionState.scala | 43 +- .../BasicPersistentBehaviorCompileOnly.scala | 206 +-- .../persistence/typed/BlogPostExample.scala | 75 +- .../persistence/typed/MovieWatchList.scala | 31 +- .../persistence/typed/StashingExample.scala | 14 +- .../persistence/AtLeastOnceDelivery.scala | 35 +- 
.../scala/akka/persistence/Eventsourced.scala | 330 ++-- .../akka/persistence/JournalProtocol.scala | 47 +- .../scala/akka/persistence/Persistence.scala | 76 +- .../akka/persistence/PersistencePlugin.scala | 37 +- .../scala/akka/persistence/Persistent.scala | 70 +- .../akka/persistence/PersistentActor.scala | 12 +- .../akka/persistence/RecoveryPermitter.scala | 6 +- .../akka/persistence/SnapshotProtocol.scala | 49 +- .../persistence/fsm/PersistentFSMBase.scala | 74 +- .../persistence/journal/AsyncRecovery.scala | 4 +- .../journal/AsyncWriteJournal.scala | 60 +- .../persistence/journal/AsyncWriteProxy.scala | 44 +- .../persistence/journal/EventAdapter.scala | 1 + .../persistence/journal/EventAdapters.scala | 63 +- .../journal/PersistencePluginProxy.scala | 115 +- .../persistence/journal/ReplayFilter.scala | 37 +- .../journal/WriteJournalBase.scala | 2 +- .../journal/inmem/InmemJournal.scala | 21 +- .../journal/japi/AsyncRecovery.scala | 3 +- .../journal/japi/AsyncWriteJournal.scala | 10 +- .../journal/leveldb/LeveldbCompaction.scala | 3 +- .../journal/leveldb/LeveldbIdMapping.scala | 6 +- .../journal/leveldb/LeveldbJournal.scala | 32 +- .../journal/leveldb/LeveldbKey.scala | 6 +- .../journal/leveldb/LeveldbRecovery.scala | 17 +- .../journal/leveldb/LeveldbStore.scala | 68 +- .../journal/leveldb/SharedLeveldbStore.scala | 40 +- .../serialization/MessageSerializer.scala | 94 +- .../persistence/serialization/package.scala | 5 +- .../persistence/snapshot/SnapshotStore.scala | 80 +- .../snapshot/japi/SnapshotStore.scala | 3 +- .../snapshot/local/LocalSnapshotStore.scala | 74 +- .../AtLeastOnceDeliveryCrashSpec.scala | 26 +- .../AtLeastOnceDeliveryFailureSpec.scala | 20 +- .../persistence/AtLeastOnceDeliverySpec.scala | 104 +- .../akka/persistence/AtomicWriteSpec.scala | 14 +- .../EndToEndEventAdapterSpec.scala | 79 +- .../akka/persistence/EventAdapterSpec.scala | 13 +- .../EventSourcedActorDeleteFailureSpec.scala | 12 +- .../EventSourcedActorFailureSpec.scala | 27 +- 
.../akka/persistence/LoadPluginSpec.scala | 12 +- .../akka/persistence/ManyRecoveriesSpec.scala | 11 +- .../persistence/OptimizedRecoverySpec.scala | 18 +- .../OptionalSnapshotStoreSpec.scala | 4 +- .../akka/persistence/PerformanceSpec.scala | 45 +- .../akka/persistence/PersistenceSpec.scala | 31 +- .../PersistentActorBoundedStashingSpec.scala | 40 +- .../PersistentActorJournalProtocolSpec.scala | 11 +- .../PersistentActorRecoveryTimeoutSpec.scala | 37 +- .../persistence/PersistentActorSpec.scala | 995 +++++++---- .../PersistentActorStashingSpec.scala | 92 +- .../persistence/RecoveryPermitterSpec.scala | 6 +- .../SnapshotDirectoryFailureSpec.scala | 13 +- .../SnapshotFailureRobustnessSpec.scala | 29 +- .../SnapshotRecoveryLocalStoreSpec.scala | 9 +- .../SnapshotSerializationSpec.scala | 14 +- .../scala/akka/persistence/SnapshotSpec.scala | 31 +- .../TimerPersistentActorSpec.scala | 4 +- .../persistence/fsm/PersistentFSMSpec.scala | 103 +- .../journal/InmemEventAdaptersSpec.scala | 22 +- .../journal/ReplayFilterSpec.scala | 68 +- .../journal/SteppingInmemJournal.scala | 18 +- .../journal/chaos/ChaosJournal.scala | 10 +- .../CompactionSegmentManagementSpec.scala | 18 +- .../leveldb/JournalCompactionSpec.scala | 17 +- .../src/multi-jvm/scala/akka/io/DnsSpec.scala | 7 +- .../remote/AttemptSysMsgRedeliverySpec.scala | 29 +- .../akka/remote/LookupRemoteActorSpec.scala | 12 +- .../akka/remote/NewRemoteActorSpec.scala | 5 +- .../PiercingShouldKeepQuarantineSpec.scala | 21 +- .../akka/remote/RemoteDeliverySpec.scala | 5 +- .../RemoteDeploymentDeathWatchSpec.scala | 22 +- .../akka/remote/RemoteGatePiercingSpec.scala | 17 +- .../remote/RemoteNodeDeathWatchSpec.scala | 21 +- .../RemoteNodeRestartDeathWatchSpec.scala | 16 +- .../remote/RemoteNodeRestartGateSpec.scala | 18 +- .../RemoteNodeShutdownAndComesBackSpec.scala | 16 +- .../remote/RemoteQuarantinePiercingSpec.scala | 28 +- .../akka/remote/RemoteReDeploymentSpec.scala | 24 +- .../RemoteRestartedQuarantinedSpec.scala | 23 
+- .../akka/remote/RemotingMultiNodeSpec.scala | 19 +- .../scala/akka/remote/Ticket15109Spec.scala | 10 +- .../scala/akka/remote/TransportFailSpec.scala | 3 +- .../remote/artery/BenchmarkFileReporter.scala | 30 +- .../akka/remote/artery/DirectMemorySpec.scala | 10 +- .../remote/artery/FanInThrougputSpec.scala | 85 +- .../remote/artery/FanOutThrougputSpec.scala | 77 +- .../artery/HandshakeRestartReceiverSpec.scala | 15 +- .../akka/remote/artery/LatencySpec.scala | 145 +- .../remote/artery/MaxThroughputSpec.scala | 169 +- .../scala/akka/remote/artery/PlotResult.scala | 4 +- .../RemoteRestartedQuarantinedSpec.scala | 15 +- ...amRestartWithCompressionInFlightSpec.scala | 49 +- .../artery/SurviveNetworkPartitionSpec.scala | 10 +- .../remote/artery/TaskRunnerMetrics.scala | 6 +- .../akka/remote/artery/TestMessage.scala | 29 +- .../akka/remote/artery/TestRateReporter.scala | 23 +- .../akka/remote/artery/UdpPortActor.scala | 4 +- .../aeron/AeronStreamConcistencySpec.scala | 40 +- .../artery/aeron/AeronStreamLatencySpec.scala | 104 +- .../aeron/AeronStreamMaxThroughputSpec.scala | 44 +- .../remote/routing/RemoteRandomSpec.scala | 10 +- .../remote/routing/RemoteRoundRobinSpec.scala | 18 +- .../routing/RemoteScatterGatherSpec.scala | 24 +- .../akka/remote/sample/MultiNodeSample.scala | 3 +- .../remote/RemotingFailedToBindSpec.scala | 3 +- .../artery/ArteryFailedToBindSpec.scala | 3 +- .../remote/testconductor/BarrierSpec.scala | 94 +- .../remote/testconductor/ControllerSpec.scala | 2 +- .../akka/remote/testkit/LogRoleReplace.scala | 16 +- .../akka/remote/testkit/STMultiNodeSpec.scala | 7 +- .../org/scalatest/extra/QuietReporter.scala | 2 +- .../scala/akka/remote/AckedDelivery.scala | 52 +- .../akka/remote/BoundAddressesExtension.scala | 10 +- .../akka/remote/DeadlineFailureDetector.scala | 15 +- .../DefaultFailureDetectorRegistry.scala | 5 +- .../src/main/scala/akka/remote/Endpoint.scala | 581 ++++--- .../akka/remote/FailureDetectorRegistry.scala | 17 +- 
.../scala/akka/remote/MessageSerializer.scala | 30 +- .../remote/PhiAccrualFailureDetector.scala | 87 +- .../akka/remote/RemoteActorRefProvider.scala | 275 +-- .../main/scala/akka/remote/RemoteDaemon.scala | 176 +- .../scala/akka/remote/RemoteDeployer.scala | 12 +- .../akka/remote/RemoteDeploymentWatcher.scala | 8 +- .../akka/remote/RemoteMetricsExtension.scala | 5 +- .../scala/akka/remote/RemoteSettings.scala | 58 +- .../scala/akka/remote/RemoteTransport.scala | 6 +- .../scala/akka/remote/RemoteWatcher.scala | 65 +- .../src/main/scala/akka/remote/Remoting.scala | 448 ++--- .../akka/remote/RemotingLifecycleEvent.scala | 38 +- .../akka/remote/artery/ArterySettings.scala | 154 +- .../akka/remote/artery/ArteryTransport.scala | 279 +-- .../akka/remote/artery/Association.scala | 199 ++- .../scala/akka/remote/artery/Codecs.scala | 419 ++--- .../scala/akka/remote/artery/Control.scala | 18 +- .../remote/artery/EnvelopeBufferPool.scala | 41 +- .../remote/artery/FixedSizePartitionHub.scala | 19 +- .../akka/remote/artery/FlightRecorder.scala | 47 +- .../remote/artery/FlightRecorderEvents.scala | 110 +- .../remote/artery/FlightRecorderReader.scala | 73 +- .../scala/akka/remote/artery/Handshake.scala | 107 +- .../akka/remote/artery/ImmutableLongMap.scala | 4 +- .../akka/remote/artery/InboundEnvelope.scala | 45 +- .../artery/InboundQuarantineCheck.scala | 14 +- .../akka/remote/artery/LruBoundedCache.scala | 13 +- .../remote/artery/MessageDispatcher.scala | 61 +- .../akka/remote/artery/OutboundEnvelope.scala | 17 +- .../akka/remote/artery/RemoteInstrument.scala | 85 +- .../scala/akka/remote/artery/SendQueue.scala | 2 +- .../remote/artery/SystemMessageDelivery.scala | 37 +- .../scala/akka/remote/artery/TestStage.scala | 24 +- .../akka/remote/artery/aeron/AeronSink.scala | 40 +- .../remote/artery/aeron/AeronSource.scala | 70 +- .../aeron/ArteryAeronUdpTransport.scala | 84 +- .../artery/compress/CompressionProtocol.scala | 23 +- .../artery/compress/CompressionTable.scala | 11 +- 
.../artery/compress/DecompressionTable.scala | 5 +- .../artery/compress/InboundCompressions.scala | 178 +- .../artery/compress/TopHeavyHitters.scala | 3 +- .../artery/tcp/ArteryTcpTransport.scala | 182 +- .../remote/artery/tcp/SSLEngineProvider.scala | 48 +- .../akka/remote/artery/tcp/TcpFraming.scala | 15 +- .../remote/routing/RemoteRouterConfig.scala | 14 +- .../remote/security/provider/SeedSize.scala | 1 - .../serialization/ActorRefResolveCache.scala | 12 +- .../ArteryMessageSerializer.scala | 152 +- .../DaemonMsgCreateSerializer.scala | 79 +- .../MessageContainerSerializer.scala | 36 +- .../serialization/MiscMessageSerializer.scala | 297 ++-- .../serialization/ProtobufSerializer.scala | 12 +- .../SystemMessageSerializer.scala | 45 +- .../serialization/ThrowableSupport.scala | 23 +- .../serialization/WrappedPayloadSupport.scala | 9 +- .../transport/AbstractTransportAdapter.scala | 72 +- .../akka/remote/transport/AkkaPduCodec.scala | 132 +- .../transport/AkkaProtocolTransport.scala | 406 +++-- .../FailureInjectorTransportAdapter.scala | 85 +- .../akka/remote/transport/TestTransport.scala | 101 +- .../transport/ThrottlerTransportAdapter.scala | 128 +- .../akka/remote/transport/Transport.scala | 18 +- .../remote/transport/netty/NettyHelpers.scala | 1 - .../transport/netty/NettyTransport.scala | 271 +-- .../transport/netty/SSLEngineProvider.scala | 29 +- .../remote/transport/netty/TcpSupport.scala | 33 +- .../remote/transport/netty/UdpSupport.scala | 45 +- .../remote/AccrualFailureDetectorSpec.scala | 44 +- .../scala/akka/remote/AckedDeliverySpec.scala | 21 +- .../scala/akka/remote/ActorsLeakSpec.scala | 44 +- .../test/scala/akka/remote/DaemonicSpec.scala | 13 +- .../remote/DeadlineFailureDetectorSpec.scala | 7 +- .../akka/remote/EndpointRegistrySpec.scala | 2 +- .../remote/FailureDetectorRegistrySpec.scala | 55 +- .../scala/akka/remote/LogSourceSpec.scala | 5 +- .../akka/remote/MessageLoggingSpec.scala | 4 +- .../akka/remote/NetworkFailureSpec.scala | 14 +- 
.../akka/remote/RemoteActorMailboxSpec.scala | 8 +- .../scala/akka/remote/RemoteConfigSpec.scala | 13 +- .../RemoteConsistentHashingRouterSpec.scala | 4 +- .../akka/remote/RemoteDeathWatchSpec.scala | 24 +- .../akka/remote/RemoteDeployerSpec.scala | 20 +- .../RemoteDeploymentWhitelistSpec.scala | 34 +- .../akka/remote/RemoteInitErrorSpec.scala | 5 +- .../scala/akka/remote/RemoteRouterSpec.scala | 59 +- .../akka/remote/RemoteSettingsSpec.scala | 5 +- .../scala/akka/remote/RemoteWatcherSpec.scala | 32 +- .../test/scala/akka/remote/RemotingSpec.scala | 187 +- .../remote/Ticket1978CommunicationSpec.scala | 60 +- .../TransientSerializationErrorSpec.scala | 26 +- .../remote/TypedActorRemoteDeploySpec.scala | 6 +- .../scala/akka/remote/UntrustedSpec.scala | 21 +- .../remote/artery/ArteryMultiNodeSpec.scala | 19 +- .../remote/artery/ArterySpecSupport.scala | 7 +- .../artery/BindCanonicalAddressSpec.scala | 43 +- .../artery/DuplicateHandshakeSpec.scala | 19 +- .../remote/artery/EnvelopeBufferSpec.scala | 111 +- .../remote/artery/FlightRecorderSpec.scala | 6 +- .../remote/artery/HandshakeDenySpec.scala | 14 +- .../remote/artery/HandshakeFailureSpec.scala | 5 +- .../remote/artery/HandshakeRetrySpec.scala | 6 +- .../remote/artery/ImmutableLongMapSpec.scala | 33 +- .../artery/InboundControlJunctionSpec.scala | 8 +- .../remote/artery/InboundHandshakeSpec.scala | 16 +- .../artery/LargeMessagesStreamSpec.scala | 3 +- .../akka/remote/artery/LateConnectSpec.scala | 5 +- .../remote/artery/LruBoundedCacheSpec.scala | 3 +- .../remote/artery/MetadataCarryingSpec.scala | 9 +- .../artery/OutboundControlJunctionSpec.scala | 3 +- .../remote/artery/OutboundHandshakeSpec.scala | 24 +- .../artery/OutboundIdleShutdownSpec.scala | 33 +- .../remote/artery/RemoteActorForSpec.scala | 17 +- .../artery/RemoteActorRefProviderSpec.scala | 7 +- .../artery/RemoteActorSelectionSpec.scala | 27 +- .../remote/artery/RemoteConnectionSpec.scala | 8 +- .../remote/artery/RemoteDeathWatchSpec.scala | 9 +- 
.../remote/artery/RemoteDeployerSpec.scala | 17 +- .../remote/artery/RemoteDeploymentSpec.scala | 7 +- .../remote/artery/RemoteFailureSpec.scala | 24 +- .../RemoteInstrumentsSerializationSpec.scala | 133 +- .../RemoteMessageSerializationSpec.scala | 21 +- .../akka/remote/artery/RemoteRouterSpec.scala | 65 +- .../artery/RemoteSendConsistencySpec.scala | 89 +- .../remote/artery/RemoteWatcherSpec.scala | 29 +- .../RollingEventLogSimulationSpec.scala | 6 +- .../akka/remote/artery/SendQueueSpec.scala | 22 +- .../artery/SerializationErrorSpec.scala | 14 +- ...erializationTransportInformationSpec.scala | 4 +- .../artery/SystemMessageAckerSpec.scala | 10 +- .../artery/SystemMessageDeliverySpec.scala | 58 +- .../akka/remote/artery/TestContext.scala | 35 +- .../TransientSerializationErrorSpec.scala | 3 +- .../akka/remote/artery/UntrustedSpec.scala | 24 +- .../remote/artery/aeron/AeronSinkSpec.scala | 6 +- .../compress/CompressionIntegrationSpec.scala | 42 +- ...dshakeShouldDropCompressionTableSpec.scala | 33 +- .../remote/artery/tcp/TcpFramingSpec.scala | 10 +- .../akka/remote/artery/tcp/TlsTcpSpec.scala | 49 +- .../AllowJavaSerializationOffSpec.scala | 47 +- .../ArteryMessageSerializerSpec.scala | 62 +- .../DaemonMsgCreateSerializerSpec.scala | 144 +- .../MessageContainerSerializerSpec.scala | 13 +- .../MiscMessageSerializerSpec.scala | 112 +- .../PrimitivesSerializationSpec.scala | 47 +- .../ProtobufSerializerSpec.scala | 1 - ...erializationTransportInformationSpec.scala | 17 +- .../SystemMessageSerializationSpec.scala | 39 +- .../remote/transport/AkkaProtocolSpec.scala | 300 ++-- .../transport/AkkaProtocolStressTest.scala | 37 +- .../transport/GenericTransportSpec.scala | 28 +- .../SwitchableLoggedBehaviorSpec.scala | 2 +- .../SystemMessageDeliveryStressTest.scala | 58 +- .../remote/transport/TestTransportSpec.scala | 8 +- .../ThrottlerTransportAdapterSpec.scala | 32 +- .../transport/netty/NettyTransportSpec.scala | 30 +- .../scala/akka/event/slf4j/Slf4jLogger.scala | 22 
+- .../akka/event/slf4j/Slf4jLoggerSpec.scala | 15 +- .../event/slf4j/Slf4jLoggingFilterSpec.scala | 2 +- .../akka/stream/testkit/StreamTestKit.scala | 77 +- .../akka/stream/testkit/TestGraphStage.scala | 23 +- .../testkit/scaladsl/StreamTestKit.scala | 21 +- .../stream/testkit/scaladsl/TestSource.scala | 3 +- .../stream/testkit/BaseTwoStreamsSetup.scala | 3 +- .../akka/stream/testkit/ChainSetup.scala | 21 +- .../akka/stream/testkit/ScriptedTest.scala | 83 +- .../akka/stream/testkit/StreamSpec.scala | 18 +- .../testkit/StreamTestDefaultMailbox.scala | 18 +- .../stream/testkit/StreamTestKitSpec.scala | 119 +- .../testkit/TestPublisherSubscriberSpec.scala | 11 +- .../akka/stream/testkit/TwoStreamsSetup.scala | 18 +- .../scala/akka/stream/testkit/Utils.scala | 6 +- .../akka/stream/tck/ActorPublisherTest.scala | 13 +- .../stream/tck/ActorSystemLifecycle.scala | 5 +- .../AkkaIdentityProcessorVerification.scala | 10 +- .../tck/AkkaPublisherVerification.scala | 15 +- .../tck/AkkaSubscriberVerification.scala | 11 +- .../akka/stream/tck/EmptyPublisherTest.scala | 1 - .../akka/stream/tck/FanoutPublisherTest.scala | 4 +- .../akka/stream/tck/FilePublisherTest.scala | 6 +- .../FlatMapConcatDoubleSubscriberTest.scala | 11 +- .../tck/ForeachSinkSubscriberTest.scala | 5 +- .../stream/tck/FusableProcessorTest.scala | 4 +- .../stream/tck/InputStreamSourceTest.scala | 18 +- .../akka/stream/tck/MaybeSourceTest.scala | 5 +- .../stream/tck/SingleElementSourceTest.scala | 1 - .../stream/tck/SinkholeSubscriberTest.scala | 5 +- .../stream/tck/TransformProcessorTest.scala | 15 +- .../akka/stream/ActorMaterializerSpec.scala | 31 +- .../akka/stream/DslConsistencySpec.scala | 147 +- .../stream/DslFactoriesConsistencySpec.scala | 154 +- .../test/scala/akka/stream/FusingSpec.scala | 6 +- .../stream/actor/ActorPublisherSpec.scala | 68 +- .../stream/actor/ActorSubscriberSpec.scala | 9 +- .../akka/stream/extra/FlowTimedSpec.scala | 58 +- .../stream/impl/GraphStageLogicSpec.scala | 62 +- 
.../impl/LinearTraversalBuilderSpec.scala | 220 +-- .../ResizableMultiReaderRingBufferSpec.scala | 9 +- .../akka/stream/impl/StreamLayoutSpec.scala | 1 + .../scala/akka/stream/impl/TimeoutsSpec.scala | 120 +- .../stream/impl/TraversalBuilderSpec.scala | 103 +- .../akka/stream/impl/TraversalTestUtils.scala | 20 +- .../fusing/ActorGraphInterpreterSpec.scala | 141 +- .../impl/fusing/AsyncCallbackSpec.scala | 61 +- .../impl/fusing/ChasingEventsSpec.scala | 70 +- .../GraphInterpreterFailureModesSpec.scala | 1 - .../fusing/GraphInterpreterPortsSpec.scala | 30 +- .../impl/fusing/GraphInterpreterSpec.scala | 43 +- .../impl/fusing/GraphInterpreterSpecKit.scala | 137 +- .../stream/impl/fusing/InterpreterSpec.scala | 90 +- .../impl/fusing/InterpreterStressSpec.scala | 14 +- .../fusing/InterpreterSupervisionSpec.scala | 17 +- .../impl/fusing/KeepGoingStageSpec.scala | 11 +- .../fusing/LifecycleInterpreterSpec.scala | 54 +- .../akka/stream/io/ByteStringParserSpec.scala | 17 +- .../scala/akka/stream/io/FileSinkSpec.scala | 43 +- .../scala/akka/stream/io/FileSourceSpec.scala | 76 +- .../akka/stream/io/InputStreamSinkSpec.scala | 28 +- .../stream/io/InputStreamSourceSpec.scala | 64 +- .../akka/stream/io/OutputStreamSinkSpec.scala | 32 +- .../stream/io/OutputStreamSourceSpec.scala | 51 +- .../test/scala/akka/stream/io/TcpHelper.scala | 5 +- .../test/scala/akka/stream/io/TcpSpec.scala | 197 ++- .../test/scala/akka/stream/io/TlsSpec.scala | 228 +-- .../io/compression/CodecSpecSupport.scala | 7 +- .../stream/io/compression/CoderSpec.scala | 25 +- .../compression/CompressionTestingTools.scala | 7 +- .../akka/stream/io/compression/GzipSpec.scala | 2 +- .../GzipWithCustomCompressionLevelSpec.scala | 2 +- .../ActorRefBackpressureSinkSpec.scala | 41 +- .../stream/scaladsl/ActorRefSinkSpec.scala | 5 +- .../stream/scaladsl/ActorRefSourceSpec.scala | 8 +- .../akka/stream/scaladsl/AttributesSpec.scala | 180 +- .../akka/stream/scaladsl/BidiFlowSpec.scala | 34 +- 
.../stream/scaladsl/CollectionSinkSpec.scala | 7 +- .../stream/scaladsl/CompressionSpec.scala | 8 +- .../scaladsl/CoupledTerminationFlowSpec.scala | 44 +- .../akka/stream/scaladsl/FlowAppendSpec.scala | 8 +- .../akka/stream/scaladsl/FlowAskSpec.scala | 43 +- .../akka/stream/scaladsl/FlowBatchSpec.scala | 27 +- .../scaladsl/FlowBatchWeightedSpec.scala | 9 +- .../akka/stream/scaladsl/FlowBufferSpec.scala | 49 +- .../stream/scaladsl/FlowCollectSpec.scala | 21 +- .../stream/scaladsl/FlowConcatAllSpec.scala | 33 +- .../akka/stream/scaladsl/FlowConcatSpec.scala | 7 +- .../stream/scaladsl/FlowConflateSpec.scala | 40 +- .../akka/stream/scaladsl/FlowDelaySpec.scala | 90 +- .../stream/scaladsl/FlowDetacherSpec.scala | 13 +- .../stream/scaladsl/FlowDispatcherSpec.scala | 9 +- .../akka/stream/scaladsl/FlowDropSpec.scala | 10 +- .../stream/scaladsl/FlowDropWhileSpec.scala | 25 +- .../stream/scaladsl/FlowDropWithinSpec.scala | 16 +- .../akka/stream/scaladsl/FlowExpandSpec.scala | 28 +- .../stream/scaladsl/FlowExtrapolateSpec.scala | 40 +- .../akka/stream/scaladsl/FlowFilterSpec.scala | 23 +- .../scaladsl/FlowFlattenMergeSpec.scala | 73 +- .../stream/scaladsl/FlowFoldAsyncSpec.scala | 202 ++- .../akka/stream/scaladsl/FlowFoldSpec.scala | 17 +- .../stream/scaladsl/FlowForeachSpec.scala | 12 +- .../stream/scaladsl/FlowGroupBySpec.scala | 95 +- .../stream/scaladsl/FlowGroupedSpec.scala | 19 +- .../scaladsl/FlowGroupedWithinSpec.scala | 37 +- .../stream/scaladsl/FlowIdleInjectSpec.scala | 27 +- .../scaladsl/FlowInitialDelaySpec.scala | 16 +- .../stream/scaladsl/FlowInterleaveSpec.scala | 30 +- .../stream/scaladsl/FlowIntersperseSpec.scala | 50 +- .../stream/scaladsl/FlowIteratorSpec.scala | 3 +- .../akka/stream/scaladsl/FlowJoinSpec.scala | 20 +- .../stream/scaladsl/FlowKillSwitchSpec.scala | 22 +- .../akka/stream/scaladsl/FlowLimitSpec.scala | 5 +- .../scaladsl/FlowLimitWeightedSpec.scala | 5 +- .../akka/stream/scaladsl/FlowLogSpec.scala | 49 +- 
.../stream/scaladsl/FlowMapAsyncSpec.scala | 177 +- .../scaladsl/FlowMapAsyncUnorderedSpec.scala | 126 +- .../stream/scaladsl/FlowMapConcatSpec.scala | 26 +- .../stream/scaladsl/FlowMapErrorSpec.scala | 21 +- .../akka/stream/scaladsl/FlowMapSpec.scala | 21 +- .../akka/stream/scaladsl/FlowMergeSpec.scala | 15 +- .../stream/scaladsl/FlowMonitorSpec.scala | 3 +- .../stream/scaladsl/FlowOnCompleteSpec.scala | 20 +- .../scaladsl/FlowPrefixAndTailSpec.scala | 17 +- .../stream/scaladsl/FlowRecoverSpec.scala | 16 +- .../stream/scaladsl/FlowRecoverWithSpec.scala | 74 +- .../akka/stream/scaladsl/FlowReduceSpec.scala | 14 +- .../stream/scaladsl/FlowScanAsyncSpec.scala | 95 +- .../akka/stream/scaladsl/FlowScanSpec.scala | 40 +- .../stream/scaladsl/FlowSectionSpec.scala | 13 +- .../stream/scaladsl/FlowSlidingSpec.scala | 14 +- .../scala/akka/stream/scaladsl/FlowSpec.scala | 62 +- .../stream/scaladsl/FlowSplitAfterSpec.scala | 32 +- .../stream/scaladsl/FlowSplitWhenSpec.scala | 68 +- .../scaladsl/FlowStatefulMapConcatSpec.scala | 103 +- .../stream/scaladsl/FlowSupervisionSpec.scala | 6 +- .../akka/stream/scaladsl/FlowTakeSpec.scala | 10 +- .../stream/scaladsl/FlowTakeWhileSpec.scala | 17 +- .../stream/scaladsl/FlowTakeWithinSpec.scala | 20 +- .../stream/scaladsl/FlowThrottleSpec.scala | 102 +- .../akka/stream/scaladsl/FlowWatchSpec.scala | 6 +- .../scaladsl/FlowWatchTerminationSpec.scala | 10 +- .../stream/scaladsl/FlowWireTapSpec.scala | 12 +- .../scaladsl/FlowWithContextLogSpec.scala | 20 +- .../stream/scaladsl/FlowWithContextSpec.scala | 4 +- .../scaladsl/FlowZipWithIndexSpec.scala | 7 +- .../stream/scaladsl/FlowZipWithSpec.scala | 7 +- .../akka/stream/scaladsl/FramingSpec.scala | 111 +- .../scaladsl/FutureFlattenSourceSpec.scala | 44 +- .../stream/scaladsl/GraphBackedFlowSpec.scala | 97 +- .../stream/scaladsl/GraphBalanceSpec.scala | 226 +-- .../stream/scaladsl/GraphBroadcastSpec.scala | 184 +- .../stream/scaladsl/GraphConcatSpec.scala | 41 +- 
.../stream/scaladsl/GraphDSLCompileSpec.scala | 325 ++-- .../stream/scaladsl/GraphMatValueSpec.scala | 90 +- .../scaladsl/GraphMergeLatestSpec.scala | 68 +- .../scaladsl/GraphMergePreferredSpec.scala | 40 +- .../scaladsl/GraphMergePrioritizedSpec.scala | 8 +- .../scaladsl/GraphMergeSortedSpec.scala | 3 +- .../akka/stream/scaladsl/GraphMergeSpec.scala | 78 +- .../scaladsl/GraphOpsIntegrationSpec.scala | 199 ++- .../stream/scaladsl/GraphPartialSpec.scala | 64 +- .../stream/scaladsl/GraphPartitionSpec.scala | 144 +- .../scaladsl/GraphStageTimersSpec.scala | 35 +- .../akka/stream/scaladsl/GraphUnzipSpec.scala | 121 +- .../stream/scaladsl/GraphUnzipWithSpec.scala | 171 +- .../stream/scaladsl/GraphWireTapSpec.scala | 3 +- .../stream/scaladsl/GraphZipLatestSpec.scala | 37 +- .../scaladsl/GraphZipLatestWithSpec.scala | 49 +- .../akka/stream/scaladsl/GraphZipNSpec.scala | 96 +- .../akka/stream/scaladsl/GraphZipSpec.scala | 96 +- .../stream/scaladsl/GraphZipWithNSpec.scala | 68 +- .../stream/scaladsl/GraphZipWithSpec.scala | 135 +- .../akka/stream/scaladsl/HeadSinkSpec.scala | 14 +- .../scala/akka/stream/scaladsl/HubSpec.scala | 86 +- .../stream/scaladsl/JsonFramingSpec.scala | 133 +- .../akka/stream/scaladsl/LastSinkSpec.scala | 8 +- .../stream/scaladsl/LazilyAsyncSpec.scala | 26 +- .../akka/stream/scaladsl/LazyFlowSpec.scala | 43 +- .../akka/stream/scaladsl/LazySinkSpec.scala | 26 +- .../akka/stream/scaladsl/LazySourceSpec.scala | 18 +- .../stream/scaladsl/MaybeSourceSpec.scala | 8 +- .../stream/scaladsl/PublisherSinkSpec.scala | 25 +- .../akka/stream/scaladsl/QueueSinkSpec.scala | 11 +- .../stream/scaladsl/QueueSourceSpec.scala | 66 +- .../akka/stream/scaladsl/RestartSpec.scala | 314 ++-- .../stream/scaladsl/ReverseArrowSpec.scala | 236 ++- .../stream/scaladsl/RunnableGraphSpec.scala | 3 +- .../akka/stream/scaladsl/SeqSinkSpec.scala | 3 +- .../scaladsl/SinkAsJavaStreamSpec.scala | 17 +- .../scaladsl/SinkForeachAsyncSpec.scala | 55 +- 
.../scaladsl/SinkForeachParallelSpec.scala | 44 +- .../scala/akka/stream/scaladsl/SinkSpec.scala | 130 +- .../akka/stream/scaladsl/SourceSpec.scala | 141 +- .../scaladsl/SourceWithContextSpec.scala | 10 +- .../stream/scaladsl/StageActorRefSpec.scala | 37 +- .../akka/stream/scaladsl/StreamRefsSpec.scala | 94 +- .../stream/scaladsl/SubscriberSinkSpec.scala | 3 +- .../scaladsl/SubscriberSourceSpec.scala | 4 +- .../SubstreamSubscriptionTimeoutSpec.scala | 15 +- .../stream/scaladsl/TakeLastSinkSpec.scala | 15 +- .../akka/stream/scaladsl/TickSourceSpec.scala | 20 +- .../UnfoldResourceAsyncSourceSpec.scala | 272 +-- .../scaladsl/UnfoldResourceSourceSpec.scala | 102 +- .../scaladsl/WithContextUsageSpec.scala | 81 +- .../snapshot/MaterializerStateSpec.scala | 5 +- .../akka/stream/typed/javadsl/ActorFlow.scala | 16 +- .../javadsl/ActorMaterializerFactory.scala | 8 +- .../akka/stream/typed/javadsl/ActorSink.scala | 28 +- .../stream/typed/javadsl/ActorSource.scala | 17 +- .../stream/typed/scaladsl/ActorFlow.scala | 6 +- .../typed/scaladsl/ActorMaterializer.scala | 7 +- .../stream/typed/scaladsl/ActorSink.scala | 24 +- .../stream/typed/scaladsl/ActorSource.scala | 18 +- .../stream/typed/scaladsl/ActorFlowSpec.scala | 15 +- .../typed/scaladsl/ActorSourceSinkSpec.scala | 41 +- .../CustomGuardianAndMaterializerSpec.scala | 4 +- .../stream/typed/ActorSourceSinkExample.scala | 45 +- .../scala/akka/stream/ActorMaterializer.scala | 438 +++-- .../main/scala/akka/stream/Attributes.scala | 36 +- .../main/scala/akka/stream/FanInShape.scala | 9 +- .../main/scala/akka/stream/FanInShape1N.scala | 17 +- .../main/scala/akka/stream/FanOutShape.scala | 8 +- .../src/main/scala/akka/stream/Graph.scala | 9 +- .../src/main/scala/akka/stream/IOResult.scala | 3 +- .../main/scala/akka/stream/KillSwitch.scala | 29 +- .../main/scala/akka/stream/Materializer.scala | 12 +- .../scala/akka/stream/OverflowStrategy.scala | 11 + .../src/main/scala/akka/stream/Shape.scala | 43 +- 
.../scala/akka/stream/SslTlsOptions.scala | 21 +- .../akka/stream/StreamDetachedException.scala | 4 +- .../scala/akka/stream/StreamRefSettings.scala | 15 +- .../main/scala/akka/stream/StreamRefs.scala | 27 +- .../akka/stream/StreamTcpException.scala | 1 - .../akka/stream/SubstreamCancelStrategy.scala | 3 +- .../TooManySubstreamsOpenException.scala | 5 +- .../main/scala/akka/stream/Transformer.scala | 2 +- .../scala/akka/stream/UniformFanInShape.scala | 6 +- .../akka/stream/UniformFanOutShape.scala | 6 +- .../WatchedActorTerminatedException.scala | 2 +- .../akka/stream/actor/ActorPublisher.scala | 67 +- .../akka/stream/actor/ActorSubscriber.scala | 25 +- .../scala/akka/stream/extra/Implicits.scala | 6 +- .../main/scala/akka/stream/extra/Timed.scala | 133 +- .../stream/impl/ActorMaterializerImpl.scala | 43 +- .../akka/stream/impl/ActorProcessor.scala | 24 +- .../akka/stream/impl/ActorPublisher.scala | 12 +- .../impl/ActorRefBackpressureSinkStage.scala | 17 +- .../akka/stream/impl/ActorRefSinkActor.scala | 7 +- .../stream/impl/ActorRefSourceActor.scala | 89 +- .../main/scala/akka/stream/impl/Buffers.scala | 6 +- .../stream/impl/CompletedPublishers.scala | 3 +- .../scala/akka/stream/impl/EmptySource.scala | 1 - .../stream/impl/ExposedPublisherReceive.scala | 3 +- .../scala/akka/stream/impl/FailedSource.scala | 13 +- .../main/scala/akka/stream/impl/FanIn.scala | 55 +- .../main/scala/akka/stream/impl/FanOut.scala | 100 +- .../akka/stream/impl/FanoutProcessor.scala | 18 +- .../akka/stream/impl/JavaStreamSource.scala | 5 +- .../akka/stream/impl/JsonObjectParser.scala | 3 +- .../scala/akka/stream/impl/LazySource.scala | 3 +- .../scala/akka/stream/impl/MaybeSource.scala | 11 +- .../scala/akka/stream/impl/Messages.scala | 14 +- .../main/scala/akka/stream/impl/Modules.scala | 53 +- .../impl/PhasedFusingActorMaterializer.scala | 272 +-- .../scala/akka/stream/impl/QueueSource.scala | 117 +- .../impl/ReactiveStreamsCompliance.scala | 37 +- 
.../impl/ResizableMultiReaderRingBuffer.scala | 17 +- .../main/scala/akka/stream/impl/Sinks.scala | 136 +- .../scala/akka/stream/impl/StreamLayout.scala | 107 +- .../impl/StreamSubscriptionTimeout.scala | 15 +- .../scala/akka/stream/impl/SubFlowImpl.scala | 11 +- .../stream/impl/SubscriberManagement.scala | 11 +- .../scala/akka/stream/impl/Throttle.scala | 13 +- .../main/scala/akka/stream/impl/Timers.scala | 6 +- .../scala/akka/stream/impl/Transfer.scala | 7 +- .../akka/stream/impl/TraversalBuilder.scala | 385 ++--- .../main/scala/akka/stream/impl/Unfold.scala | 6 +- .../stream/impl/UnfoldResourceSource.scala | 8 +- .../impl/UnfoldResourceSourceAsync.scala | 54 +- .../impl/fusing/ActorGraphInterpreter.scala | 140 +- .../stream/impl/fusing/GraphInterpreter.scala | 84 +- .../akka/stream/impl/fusing/GraphStages.scala | 127 +- .../scala/akka/stream/impl/fusing/Ops.scala | 1524 +++++++++-------- .../stream/impl/fusing/StreamOfStreams.scala | 401 +++-- .../stream/impl/io/ByteStringParser.scala | 16 +- .../akka/stream/impl/io/FileSubscriber.scala | 53 +- .../scala/akka/stream/impl/io/IOSinks.scala | 15 +- .../scala/akka/stream/impl/io/IOSources.scala | 21 +- .../stream/impl/io/InputStreamPublisher.scala | 41 +- .../stream/impl/io/InputStreamSinkStage.scala | 15 +- .../impl/io/OutputStreamSourceStage.scala | 15 +- .../impl/io/OutputStreamSubscriber.scala | 9 +- .../scala/akka/stream/impl/io/TLSActor.scala | 41 +- .../scala/akka/stream/impl/io/TcpStages.scala | 194 ++- .../scala/akka/stream/impl/io/TlsModule.scala | 30 +- .../io/compression/CompressionUtils.scala | 40 +- .../impl/io/compression/Compressor.scala | 2 + .../io/compression/DeflateCompressor.scala | 7 +- .../io/compression/DeflateDecompressor.scala | 3 +- .../compression/DeflateDecompressorBase.scala | 2 +- .../impl/io/compression/GzipCompressor.scala | 6 +- .../io/compression/GzipDecompressor.scala | 7 +- .../main/scala/akka/stream/impl/package.scala | 4 +- .../stream/impl/streamref/SinkRefImpl.scala | 34 +- 
.../stream/impl/streamref/SourceRefImpl.scala | 33 +- .../streamref/StreamRefSettingsImpl.scala | 16 +- .../impl/streamref/StreamRefsProtocol.scala | 9 +- .../scala/akka/stream/javadsl/BidiFlow.scala | 21 +- .../akka/stream/javadsl/Compression.scala | 1 + .../scala/akka/stream/javadsl/FileIO.scala | 6 +- .../main/scala/akka/stream/javadsl/Flow.scala | 278 +-- .../akka/stream/javadsl/FlowWithContext.scala | 57 +- .../scala/akka/stream/javadsl/Framing.scala | 47 +- .../scala/akka/stream/javadsl/Graph.scala | 57 +- .../main/scala/akka/stream/javadsl/Hub.scala | 33 +- .../main/scala/akka/stream/javadsl/Keep.scala | 8 +- .../akka/stream/javadsl/MergeLatest.scala | 4 +- .../scala/akka/stream/javadsl/Queue.scala | 3 +- .../akka/stream/javadsl/RestartFlow.scala | 60 +- .../akka/stream/javadsl/RestartSink.scala | 38 +- .../akka/stream/javadsl/RestartSource.scala | 76 +- .../main/scala/akka/stream/javadsl/Sink.scala | 94 +- .../scala/akka/stream/javadsl/Source.scala | 256 +-- .../stream/javadsl/SourceWithContext.scala | 32 +- .../stream/javadsl/StreamConverters.scala | 26 +- .../scala/akka/stream/javadsl/SubFlow.scala | 105 +- .../scala/akka/stream/javadsl/SubSource.scala | 102 +- .../main/scala/akka/stream/javadsl/TLS.scala | 57 +- .../main/scala/akka/stream/javadsl/Tcp.scala | 173 +- .../scala/akka/stream/scaladsl/BidiFlow.scala | 71 +- .../akka/stream/scaladsl/Compression.scala | 6 +- .../scala/akka/stream/scaladsl/FileIO.scala | 6 +- .../scala/akka/stream/scaladsl/Flow.scala | 245 +-- .../stream/scaladsl/FlowWithContext.scala | 18 +- .../stream/scaladsl/FlowWithContextOps.scala | 10 +- .../scala/akka/stream/scaladsl/Framing.scala | 475 ++--- .../scala/akka/stream/scaladsl/Graph.scala | 1055 ++++++------ .../main/scala/akka/stream/scaladsl/Hub.scala | 447 ++--- .../akka/stream/scaladsl/JsonFraming.scala | 49 +- .../akka/stream/scaladsl/MergeLatest.scala | 72 +- .../scala/akka/stream/scaladsl/Queue.scala | 2 + .../akka/stream/scaladsl/RestartFlow.scala | 209 ++- 
.../akka/stream/scaladsl/RestartSink.scala | 54 +- .../akka/stream/scaladsl/RestartSource.scala | 92 +- .../scala/akka/stream/scaladsl/Sink.scala | 111 +- .../scala/akka/stream/scaladsl/Source.scala | 111 +- .../stream/scaladsl/SourceWithContext.scala | 10 +- .../stream/scaladsl/StreamConverters.scala | 104 +- .../scala/akka/stream/scaladsl/SubFlow.scala | 2 +- .../main/scala/akka/stream/scaladsl/TLS.scala | 47 +- .../main/scala/akka/stream/scaladsl/Tcp.scala | 205 ++- .../serialization/StreamRefSerializer.scala | 84 +- .../stream/snapshot/MaterializerState.scala | 50 +- .../scala/akka/stream/stage/GraphStage.scala | 136 +- .../sslconfig/akka/AkkaSSLConfig.scala | 18 +- .../akka/SSLEngineConfigurator.scala | 6 +- .../akka/util/AkkaLoggerBridge.scala | 3 +- .../testkit/CallingThreadDispatcher.scala | 65 +- .../ExplicitlyTriggeredScheduler.scala | 24 +- .../main/scala/akka/testkit/SocketUtil.scala | 44 +- .../scala/akka/testkit/TestActorRef.scala | 144 +- .../main/scala/akka/testkit/TestActors.scala | 4 +- .../main/scala/akka/testkit/TestBarrier.scala | 4 +- .../akka/testkit/TestEventListener.scala | 203 ++- .../main/scala/akka/testkit/TestFSMRef.scala | 28 +- .../src/main/scala/akka/testkit/TestKit.scala | 132 +- .../scala/akka/testkit/TestKitExtension.scala | 7 +- .../main/scala/akka/testkit/TestLatch.scala | 10 +- .../akka/testkit/TestMessageSerializer.scala | 2 - .../akka/testkit/javadsl/EventFilter.scala | 6 +- .../scala/akka/testkit/javadsl/TestKit.scala | 49 +- .../src/main/scala/akka/testkit/package.scala | 10 +- .../test/scala/akka/testkit/AkkaSpec.scala | 24 +- .../scala/akka/testkit/AkkaSpecSpec.scala | 21 +- .../src/test/scala/akka/testkit/Coroner.scala | 35 +- .../test/scala/akka/testkit/CoronerSpec.scala | 10 +- .../akka/testkit/DefaultTimeoutSpec.scala | 5 +- .../akka/testkit/ImplicitSenderSpec.scala | 5 +- .../scala/akka/testkit/JavaTestKitSpec.scala | 2 +- .../scala/akka/testkit/TestActorRefSpec.scala | 24 +- 
.../akka/testkit/TestEventListenerSpec.scala | 3 +- .../scala/akka/testkit/TestFSMRefSpec.scala | 6 +- .../scala/akka/testkit/TestProbeSpec.scala | 12 +- .../akka/testkit/metrics/AveragingGauge.scala | 6 +- .../metrics/FileDescriptorMetricSet.scala | 21 +- .../akka/testkit/metrics/HdrHistogram.scala | 21 +- .../metrics/MemoryUsageSnapshotting.scala | 62 +- .../akka/testkit/metrics/MetricsKit.scala | 22 +- .../akka/testkit/metrics/MetricsKitOps.scala | 11 +- .../akka/testkit/metrics/MetricsKitSpec.scala | 3 +- .../reporter/AkkaConsoleReporter.scala | 36 +- 1669 files changed, 43208 insertions(+), 35404 deletions(-) diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala index beaa703364..aea7fa097d 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/CapturedLogEvent.scala @@ -17,17 +17,20 @@ import scala.compat.java8.OptionConverters._ /** * Representation of a Log Event issued by a [[akka.actor.typed.Behavior]] */ -final case class CapturedLogEvent( - logLevel: LogLevel, - message: String, - cause: Option[Throwable], - marker: Option[LogMarker], - mdc: Map[String, Any]) { +final case class CapturedLogEvent(logLevel: LogLevel, + message: String, + cause: Option[Throwable], + marker: Option[LogMarker], + mdc: Map[String, Any]) { /** * Constructor for Java API */ - def this(logLevel: LogLevel, message: String, errorCause: Optional[Throwable], marker: Optional[LogMarker], mdc: java.util.Map[String, Any]) { + def this(logLevel: LogLevel, + message: String, + errorCause: Optional[Throwable], + marker: Optional[LogMarker], + mdc: java.util.Map[String, Any]) { this(logLevel, message, errorCause.asScala, marker.asScala, mdc.asScala.toMap) } @@ -76,9 +79,7 @@ object CapturedLogEvent { case _ => None } - def apply( - 
logLevel: LogLevel, - message: String): CapturedLogEvent = { + def apply(logLevel: LogLevel, message: String): CapturedLogEvent = { CapturedLogEvent(logLevel, message, None, None, Map.empty[String, Any]) } @@ -87,12 +88,11 @@ object CapturedLogEvent { * INTERNAL API */ @InternalApi - private[akka] def apply( - logLevel: LogLevel, - message: String, - errorCause: OptionVal[Throwable], - logMarker: OptionVal[LogMarker], - mdc: Map[String, Any]): CapturedLogEvent = { + private[akka] def apply(logLevel: LogLevel, + message: String, + errorCause: OptionVal[Throwable], + logMarker: OptionVal[LogMarker], + mdc: Map[String, Any]): CapturedLogEvent = { new CapturedLogEvent(logLevel, message, toOption(errorCause), toOption(logMarker), mdc) } } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/Effect.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/Effect.scala index b9abde688c..8638c98266 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/Effect.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/Effect.scala @@ -25,17 +25,20 @@ import scala.concurrent.duration.FiniteDuration abstract class Effect private[akka] () object Effect { + /** * The behavior spawned a named child with the given behavior (and optionally specific props) */ final class Spawned[T](val behavior: Behavior[T], val childName: String, val props: Props, val ref: ActorRef[T]) - extends Effect with Product3[Behavior[T], String, Props] with Serializable { + extends Effect + with Product3[Behavior[T], String, Props] + with Serializable { override def equals(other: Any) = other match { case o: Spawned[_] => this.behavior == o.behavior && - this.childName == o.childName && - this.props == o.props + this.childName == o.childName && + this.props == o.props case _ => false } override def hashCode: Int = (behavior.## * 31 + childName.##) * 31 + props.## @@ -49,7 +52,8 @@ object Effect { } object Spawned { - def 
apply[T](behavior: Behavior[T], childName: String, props: Props = Props.empty): Spawned[T] = new Spawned(behavior, childName, props, null) + def apply[T](behavior: Behavior[T], childName: String, props: Props = Props.empty): Spawned[T] = + new Spawned(behavior, childName, props, null) def unapply[T](s: Spawned[T]): Option[(Behavior[T], String, Props)] = Some((s.behavior, s.childName, s.props)) } @@ -57,7 +61,9 @@ object Effect { * The behavior spawned an anonymous child with the given behavior (and optionally specific props) */ final class SpawnedAnonymous[T](val behavior: Behavior[T], val props: Props, val ref: ActorRef[T]) - extends Effect with Product2[Behavior[T], Props] with Serializable { + extends Effect + with Product2[Behavior[T], Props] + with Serializable { override def equals(other: Any) = other match { case o: SpawnedAnonymous[_] => this.behavior == o.behavior && this.props == o.props @@ -73,7 +79,8 @@ object Effect { } object SpawnedAnonymous { - def apply[T](behavior: Behavior[T], props: Props = Props.empty): SpawnedAnonymous[T] = new SpawnedAnonymous(behavior, props, null) + def apply[T](behavior: Behavior[T], props: Props = Props.empty): SpawnedAnonymous[T] = + new SpawnedAnonymous(behavior, props, null) def unapply[T](s: SpawnedAnonymous[T]): Option[(Behavior[T], Props)] = Some((s.behavior, s.props)) } @@ -83,7 +90,9 @@ object Effect { */ @InternalApi private[akka] final class SpawnedAdapter[T](val name: String, val ref: ActorRef[T]) - extends Effect with Product1[String] with Serializable { + extends Effect + with Product1[String] + with Serializable { override def equals(other: Any) = other match { case o: SpawnedAdapter[_] => this.name == o.name @@ -113,7 +122,9 @@ object Effect { */ @InternalApi private[akka] final class SpawnedAnonymousAdapter[T](val ref: ActorRef[T]) - extends Effect with Product with Serializable { + extends Effect + with Product + with Serializable { override def equals(other: Any): Boolean = other match { case _: 
SpawnedAnonymousAdapter[_] => true @@ -142,6 +153,7 @@ object Effect { * The behavior create a message adapter for the messages of type clazz */ final case class MessageAdapter[A, T](messageClass: Class[A], adapt: A => T) extends Effect { + /** * JAVA API */ @@ -167,6 +179,7 @@ object Effect { * The behavior set a new receive timeout, with `message` as timeout notification */ final case class ReceiveTimeoutSet[T](d: FiniteDuration, message: T) extends Effect { + /** * Java API */ @@ -195,4 +208,3 @@ object Effect { */ sealed abstract class NoEffects extends Effect } - diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestException.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestException.scala index d4c47f8515..a0a125e493 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestException.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestException.scala @@ -10,4 +10,3 @@ import scala.util.control.NoStackTrace * A predefined exception that can be used in tests. It doesn't include a stack trace. */ final case class TestException(message: String) extends RuntimeException(message) with NoStackTrace - diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestKitSettings.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestKitSettings.scala index 8791648598..a9f9a427ae 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestKitSettings.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/TestKitSettings.scala @@ -12,6 +12,7 @@ import akka.util.Timeout import akka.actor.typed.ActorSystem object TestKitSettings { + /** * Reads configuration settings from `akka.actor.testkit.typed` section. */ @@ -43,15 +44,19 @@ final class TestKitSettings(val config: Config) { import akka.util.Helpers._ - val TestTimeFactor = config.getDouble("timefactor"). 
- requiring(tf => !tf.isInfinite && tf > 0, "timefactor must be positive finite double") + val TestTimeFactor = config + .getDouble("timefactor") + .requiring(tf => !tf.isInfinite && tf > 0, "timefactor must be positive finite double") /** dilated with `TestTimeFactor` */ val SingleExpectDefaultTimeout: FiniteDuration = dilated(config.getMillisDuration("single-expect-default")) + /** dilated with `TestTimeFactor` */ val ExpectNoMessageDefaultTimeout: FiniteDuration = dilated(config.getMillisDuration("expect-no-message-default")) + /** dilated with `TestTimeFactor` */ val DefaultTimeout: Timeout = Timeout(dilated(config.getMillisDuration("default-timeout"))) + /** dilated with `TestTimeFactor` */ val DefaultActorSystemShutdownTimeout: FiniteDuration = dilated(config.getMillisDuration("system-shutdown-default")) diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala index b87c78d725..6170fc5782 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/ActorSystemStub.scala @@ -7,7 +7,19 @@ package akka.actor.testkit.typed.internal import java.util.concurrent.{ CompletionStage, ThreadFactory } import akka.actor.typed.internal.ActorRefImpl -import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, DispatcherSelector, Dispatchers, Extension, ExtensionId, Logger, Props, Settings, Terminated } +import akka.actor.typed.{ + ActorRef, + ActorSystem, + Behavior, + DispatcherSelector, + Dispatchers, + Extension, + ExtensionId, + Logger, + Props, + Settings, + Terminated +} import akka.annotation.InternalApi import akka.util.Timeout import akka.{ actor => untyped } @@ -22,13 +34,17 @@ import akka.actor.typed.internal.InternalRecipientRef * INTERNAL API */ @InternalApi private[akka] final class 
ActorSystemStub(val name: String) - extends ActorSystem[Nothing] with ActorRef[Nothing] with ActorRefImpl[Nothing] with InternalRecipientRef[Nothing] { + extends ActorSystem[Nothing] + with ActorRef[Nothing] + with ActorRefImpl[Nothing] + with InternalRecipientRef[Nothing] { override val path: untyped.ActorPath = untyped.RootActorPath(untyped.Address("akka", name)) / "user" override val settings: Settings = new Settings(getClass.getClassLoader, ConfigFactory.empty, name) - override def tell(message: Nothing): Unit = throw new UnsupportedOperationException("must not send message to ActorSystemStub") + override def tell(message: Nothing): Unit = + throw new UnsupportedOperationException("must not send message to ActorSystemStub") // impl ActorRefImpl override def isLocal: Boolean = true @@ -72,7 +88,8 @@ import akka.actor.typed.internal.InternalRecipientRef override def printTree: String = "no tree for ActorSystemStub" - def systemActorOf[U](behavior: Behavior[U], name: String, props: Props)(implicit timeout: Timeout): Future[ActorRef[U]] = { + def systemActorOf[U](behavior: Behavior[U], name: String, props: Props)( + implicit timeout: Timeout): Future[ActorRef[U]] = { Future.failed(new UnsupportedOperationException("ActorSystemStub cannot create system actors")) } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala index cbc0fda8aa..634167817a 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/BehaviorTestKitImpl.scala @@ -24,8 +24,8 @@ import scala.util.control.NonFatal */ @InternalApi private[akka] final class BehaviorTestKitImpl[T](_path: ActorPath, _initialBehavior: Behavior[T]) - extends akka.actor.testkit.typed.javadsl.BehaviorTestKit[T] - with 
akka.actor.testkit.typed.scaladsl.BehaviorTestKit[T] { + extends akka.actor.testkit.typed.javadsl.BehaviorTestKit[T] + with akka.actor.testkit.typed.scaladsl.BehaviorTestKit[T] { // really this should be private, make so when we port out tests that need it private[akka] val context = new EffectfulActorContext[T](_path) @@ -79,9 +79,10 @@ private[akka] final class BehaviorTestKitImpl[T](_path: ActorPath, _initialBehav def expectEffectClass[E <: Effect](effectClass: Class[E]): E = { context.effectQueue.poll() match { case null if effectClass.isAssignableFrom(NoEffects.getClass) => effectClass.cast(NoEffects) - case null => throw new AssertionError(s"expected: effect type ${effectClass.getName} but no effects were recorded") + case null => + throw new AssertionError(s"expected: effect type ${effectClass.getName} but no effects were recorded") case effect if effectClass.isAssignableFrom(effect.getClass) => effect.asInstanceOf[E] - case other => throw new AssertionError(s"expected: effect class ${effectClass.getName} but found $other") + case other => throw new AssertionError(s"expected: effect class ${effectClass.getName} but found $other") } } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/DebugRef.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/DebugRef.scala index 6dd84179bd..aff21f8608 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/DebugRef.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/DebugRef.scala @@ -19,7 +19,9 @@ import akka.actor.typed.internal.InternalRecipientRef * INTERNAL API */ @InternalApi private[akka] final class DebugRef[T](override val path: untyped.ActorPath, override val isLocal: Boolean) - extends ActorRef[T] with ActorRefImpl[T] with InternalRecipientRef[T] { + extends ActorRef[T] + with ActorRefImpl[T] + with InternalRecipientRef[T] { private val q = new 
ConcurrentLinkedQueue[Either[SystemMessage, T]] diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/EffectfulActorContext.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/EffectfulActorContext.scala index 5c49571a13..7aea630c9d 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/EffectfulActorContext.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/EffectfulActorContext.scala @@ -83,4 +83,3 @@ import scala.compat.java8.FunctionConverters._ super.scheduleOnce(delay, target, message) } } - diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala index 60ac760a3a..4f2a3f00f6 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/StubbedActorContext.scala @@ -29,10 +29,10 @@ import akka.actor.ActorRefProvider * This reference cannot watch other references. 
*/ @InternalApi -private[akka] final class FunctionRef[-T]( - override val path: ActorPath, - send: (T, FunctionRef[T]) => Unit) - extends ActorRef[T] with ActorRefImpl[T] with InternalRecipientRef[T] { +private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T, FunctionRef[T]) => Unit) + extends ActorRef[T] + with ActorRefImpl[T] + with InternalRecipientRef[T] { override def tell(message: T): Unit = { if (message == null) throw InvalidMessageException("[null] is not an allowed message") @@ -69,9 +69,13 @@ private[akka] final class FunctionRef[-T]( override def isInfoEnabled(marker: LogMarker): Boolean = true override def isDebugEnabled(marker: LogMarker): Boolean = true - override private[akka] def notifyError(message: String, cause: OptionVal[Throwable], marker: OptionVal[LogMarker]): Unit = + override private[akka] def notifyError(message: String, + cause: OptionVal[Throwable], + marker: OptionVal[LogMarker]): Unit = logBuffer = CapturedLogEvent(Logging.ErrorLevel, message, cause, marker, mdc) :: logBuffer - override private[akka] def notifyWarning(message: String, cause: OptionVal[Throwable], marker: OptionVal[LogMarker]): Unit = + override private[akka] def notifyWarning(message: String, + cause: OptionVal[Throwable], + marker: OptionVal[LogMarker]): Unit = logBuffer = CapturedLogEvent(Logging.WarningLevel, message, OptionVal.None, marker, mdc) :: logBuffer override private[akka] def notifyInfo(message: String, marker: OptionVal[LogMarker]): Unit = @@ -107,14 +111,18 @@ private[akka] final class FunctionRef[-T]( override def isInfoEnabled(marker: LogMarker): Boolean = actual.isInfoEnabled(marker) override def isDebugEnabled(marker: LogMarker): Boolean = actual.isDebugEnabled(marker) - override private[akka] def notifyError(message: String, cause: OptionVal[Throwable], marker: OptionVal[LogMarker]): Unit = { + override private[akka] def notifyError(message: String, + cause: OptionVal[Throwable], + marker: OptionVal[LogMarker]): Unit = { 
val original = actual.mdc actual.mdc = mdc actual.notifyError(message, cause, marker) actual.mdc = original } - override private[akka] def notifyWarning(message: String, cause: OptionVal[Throwable], marker: OptionVal[LogMarker]): Unit = { + override private[akka] def notifyWarning(message: String, + cause: OptionVal[Throwable], + marker: OptionVal[LogMarker]): Unit = { val original = actual.mdc actual.mdc = mdc actual.notifyWarning(message, cause, marker) @@ -147,11 +155,10 @@ private[akka] final class FunctionRef[-T]( * provides only stubs for the effects an Actor can perform and replaces * created child Actors by a synchronous Inbox (see `Inbox.sync`). */ -@InternalApi private[akka] class StubbedActorContext[T]( - val path: ActorPath) extends ActorContextImpl[T] { +@InternalApi private[akka] class StubbedActorContext[T](val path: ActorPath) extends ActorContextImpl[T] { def this(name: String) = { - this(TestInbox.address / name withUid rnd().nextInt()) + this((TestInbox.address / name).withUid(rnd().nextInt())) } /** @@ -162,24 +169,24 @@ private[akka] final class FunctionRef[-T]( override val self = selfInbox.ref override val system = new ActorSystemStub("StubbedActorContext") private var _children = TreeMap.empty[String, BehaviorTestKitImpl[_]] - private val childName = Iterator from 0 map (Helpers.base64(_)) + private val childName = Iterator.from(0).map(Helpers.base64(_)) private val loggingAdapter = new StubbedLogger - override def children: Iterable[ActorRef[Nothing]] = _children.values map (_.context.self) + override def children: Iterable[ActorRef[Nothing]] = _children.values.map(_.context.self) def childrenNames: Iterable[String] = _children.keys - override def child(name: String): Option[ActorRef[Nothing]] = _children get name map (_.context.self) + override def child(name: String): Option[ActorRef[Nothing]] = _children.get(name).map(_.context.self) override def spawnAnonymous[U](behavior: Behavior[U], props: Props = Props.empty): ActorRef[U] = { - val 
btk = new BehaviorTestKitImpl[U](path / childName.next() withUid rnd().nextInt(), behavior) + val btk = new BehaviorTestKitImpl[U]((path / childName.next()).withUid(rnd().nextInt()), behavior) _children += btk.context.self.path.name -> btk btk.context.self } override def spawn[U](behavior: Behavior[U], name: String, props: Props = Props.empty): ActorRef[U] = - _children get name match { + _children.get(name) match { case Some(_) => throw untyped.InvalidActorNameException(s"actor name $name is already taken") case None => - val btk = new BehaviorTestKitImpl[U](path / name withUid rnd().nextInt(), behavior) + val btk = new BehaviorTestKitImpl[U]((path / name).withUid(rnd().nextInt()), behavior) _children += name -> btk btk.context.self } @@ -189,8 +196,9 @@ private[akka] final class FunctionRef[-T]( * Removal is asynchronous, explicit removeInbox is needed from outside afterwards. */ override def stop[U](child: ActorRef[U]): Unit = { - if (child.path.parent != self.path) throw new IllegalArgumentException( - "Only direct children of an actor can be stopped through the actor context, " + + if (child.path.parent != self.path) + throw new IllegalArgumentException( + "Only direct children of an actor can be stopped through the actor context, " + s"but [$child] is not a child of [$self]. 
Stopping other actors has to be expressed as " + "an explicit stop message that the actor accepts.") else { @@ -203,10 +211,11 @@ private[akka] final class FunctionRef[-T]( override def setReceiveTimeout(d: FiniteDuration, message: T): Unit = () override def cancelReceiveTimeout(): Unit = () - override def scheduleOnce[U](delay: FiniteDuration, target: ActorRef[U], message: U): untyped.Cancellable = new untyped.Cancellable { - override def cancel() = false - override def isCancelled = true - } + override def scheduleOnce[U](delay: FiniteDuration, target: ActorRef[U], message: U): untyped.Cancellable = + new untyped.Cancellable { + override def cancel() = false + override def isCancelled = true + } // TODO allow overriding of this override def executionContext: ExecutionContextExecutor = system.executionContext @@ -217,13 +226,16 @@ private[akka] final class FunctionRef[-T]( @InternalApi private[akka] def internalSpawnMessageAdapter[U](f: U => T, name: String): ActorRef[U] = { val n = if (name != "") s"${childName.next()}-$name" else childName.next() - val p = path / n withUid rnd().nextInt() + val p = (path / n).withUid(rnd().nextInt()) val i = new BehaviorTestKitImpl[U](p, Behavior.ignore) _children += p.name -> i - new FunctionRef[U]( - p, - (message, _) => { val m = f(message); if (m != null) { selfInbox.ref ! m; i.selfInbox.ref ! message } }) + new FunctionRef[U](p, (message, _) => { + val m = f(message); + if (m != null) { + selfInbox.ref ! m; i.selfInbox.ref ! 
message + } + }) } /** diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestInboxImpl.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestInboxImpl.scala index ed4791ee0b..5b8850d603 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestInboxImpl.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestInboxImpl.scala @@ -18,8 +18,8 @@ import scala.collection.immutable */ @InternalApi private[akka] final class TestInboxImpl[T](path: ActorPath) - extends akka.actor.testkit.typed.javadsl.TestInbox[T] - with akka.actor.testkit.typed.scaladsl.TestInbox[T] { + extends akka.actor.testkit.typed.javadsl.TestInbox[T] + with akka.actor.testkit.typed.scaladsl.TestInbox[T] { private val q = new ConcurrentLinkedQueue[T] diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestKitUtils.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestKitUtils.scala index 8f887a3a4d..044886f1c4 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestKitUtils.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestKitUtils.scala @@ -18,8 +18,10 @@ import scala.concurrent.duration.Duration @InternalApi private[akka] object ActorTestKitGuardian { sealed trait TestKitCommand - final case class SpawnActor[T](name: String, behavior: Behavior[T], replyTo: ActorRef[ActorRef[T]], props: Props) extends TestKitCommand - final case class SpawnActorAnonymous[T](behavior: Behavior[T], replyTo: ActorRef[ActorRef[T]], props: Props) extends TestKitCommand + final case class SpawnActor[T](name: String, behavior: Behavior[T], replyTo: ActorRef[ActorRef[T]], props: Props) + extends TestKitCommand + final case class SpawnActorAnonymous[T](behavior: Behavior[T], replyTo: ActorRef[ActorRef[T]], props: Props) + extends TestKitCommand final 
case class StopActor[T](ref: ActorRef[T], replyTo: ActorRef[Ack.type]) extends TestKitCommand final case class ActorStopped[T](replyTo: ActorRef[Ack.type]) extends TestKitCommand @@ -94,12 +96,10 @@ private[akka] object TestKitUtils { .replaceAll("[^a-zA-Z_0-9]", "_") } - def shutdown( - system: ActorSystem[_], - timeout: Duration, - throwIfShutdownTimesOut: Boolean): Unit = { + def shutdown(system: ActorSystem[_], timeout: Duration, throwIfShutdownTimesOut: Boolean): Unit = { system.terminate() - try Await.ready(system.whenTerminated, timeout) catch { + try Await.ready(system.whenTerminated, timeout) + catch { case _: TimeoutException => val message = "Failed to stop [%s] within [%s] \n%s".format(system.name, timeout, system.printTree) if (throwIfShutdownTimesOut) throw new RuntimeException(message) diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala index bf764d47fe..1667fa252b 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/internal/TestProbeImpl.scala @@ -43,26 +43,30 @@ private[akka] object TestProbeImpl { private case object Stop private def testActor[M](queue: BlockingDeque[M], terminations: BlockingDeque[Terminated]): Behavior[M] = - Behaviors.receive[M] { (context, msg) => - msg match { - case WatchActor(ref) => - context.watch(ref) - Behaviors.same - case Stop => - Behaviors.stopped - case other => - queue.offerLast(other) + Behaviors + .receive[M] { (context, msg) => + msg match { + case WatchActor(ref) => + context.watch(ref) + Behaviors.same + case Stop => + Behaviors.stopped + case other => + queue.offerLast(other) + Behaviors.same + } + } + .receiveSignal { + case (_, t: Terminated) => + terminations.offerLast(t) Behaviors.same } - }.receiveSignal { - case (_, t: Terminated) 
=> - terminations.offerLast(t) - Behaviors.same - } } @InternalApi -private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) extends JavaTestProbe[M] with ScalaTestProbe[M] { +private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) + extends JavaTestProbe[M] + with ScalaTestProbe[M] { import TestProbeImpl._ protected implicit val settings: TestKitSettings = TestKitSettings(system) @@ -80,7 +84,8 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) private val testActor: ActorRef[M] = { // FIXME arbitrary timeout? implicit val timeout: Timeout = Timeout(3.seconds) - val futRef = system.systemActorOf(TestProbeImpl.testActor(queue, terminations), s"$name-${testActorId.incrementAndGet()}") + val futRef = + system.systemActorOf(TestProbeImpl.testActor(queue, terminations), s"$name-${testActorId.incrementAndGet()}") Await.result(futRef, timeout.duration + 1.second) } @@ -129,7 +134,8 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) val prevEnd = end end = start + maxDiff - val ret = try f finally end = prevEnd + val ret = try f + finally end = prevEnd val diff = now - start assert(min <= diff, s"block took ${diff.pretty}, should at least have been $min") @@ -170,8 +176,7 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) override def receiveMessage(max: FiniteDuration): M = receiveMessage_internal(max.dilated) def receiveMessage_internal(max: FiniteDuration): M = - receiveOne_internal(max). - getOrElse(assertFail(s"Timeout ($max) during receiveMessage while waiting for message.")) + receiveOne_internal(max).getOrElse(assertFail(s"Timeout ($max) during receiveMessage while waiting for message.")) /** * Receive one message from the internal queue of the TestActor. 
If the given @@ -180,13 +185,11 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) * This method does NOT automatically scale its Duration parameter! */ private def receiveOne_internal(max: FiniteDuration): Option[M] = { - val message = Option( - if (max == Duration.Zero) { - queue.pollFirst - } else { - queue.pollFirst(max.length, max.unit) - } - ) + val message = Option(if (max == Duration.Zero) { + queue.pollFirst + } else { + queue.pollFirst(max.length, max.unit) + }) lastWasNoMessage = false message } @@ -224,9 +227,9 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) val o = receiveOne_internal(max) val bt = BoxedType(c) o match { - case Some(m) if bt isInstance m => m.asInstanceOf[C] - case Some(m) => assertFail(s"Expected $c, found ${m.getClass} ($m)") - case None => assertFail(s"Timeout ($max) during expectMessageClass waiting for $c") + case Some(m) if bt.isInstance(m) => m.asInstanceOf[C] + case Some(m) => assertFail(s"Expected $c, found ${m.getClass} ($m)") + case None => assertFail(s"Timeout ($max) during expectMessageClass waiting for $c") } } @@ -263,7 +266,9 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) override def fishForMessage(max: JDuration, fisher: java.util.function.Function[M, FishingOutcome]): JList[M] = fishForMessage(max, "", fisher) - override def fishForMessage(max: JDuration, hint: String, fisher: java.util.function.Function[M, FishingOutcome]): JList[M] = + override def fishForMessage(max: JDuration, + hint: String, + fisher: java.util.function.Function[M, FishingOutcome]): JList[M] = fishForMessage_internal(max.asScala.dilated, hint, fisher.apply).asJava private def fishForMessage_internal(max: FiniteDuration, hint: String, fisher: M => FishingOutcome): List[M] = { @@ -273,10 +278,12 @@ private[akka] final class TestProbeImpl[M](name: String, system: ActorSystem[_]) maybeMsg match { case Some(message) => val outcome = - try 
fisher(message) catch { - case ex: MatchError => throw new AssertionError( - s"Unexpected message $message while fishing for messages, " + - s"seen messages ${seen.reverse}, hint: $hint", ex) + try fisher(message) + catch { + case ex: MatchError => + throw new AssertionError(s"Unexpected message $message while fishing for messages, " + + s"seen messages ${seen.reverse}, hint: $hint", + ex) } outcome match { case FishingOutcome.Complete => (message :: seen).reverse diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ActorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ActorTestKit.scala index 2c2f342624..6429e13081 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ActorTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ActorTestKit.scala @@ -89,11 +89,7 @@ object ActorTestKit { */ def shutdown(system: ActorSystem[_]): Unit = { val settings = TestKitSettings.create(system) - shutdown( - system, - settings.DefaultActorSystemShutdownTimeout.asJava, - settings.ThrowOnShutdownTimeout - ) + shutdown(system, settings.DefaultActorSystemShutdownTimeout.asJava, settings.ThrowOnShutdownTimeout) } } @@ -136,27 +132,32 @@ final class ActorTestKit private[akka] (delegate: akka.actor.testkit.typed.scala * Spawn a new auto-named actor under the testkit user guardian and return the ActorRef for the spawned actor */ def spawn[T](behavior: Behavior[T]): ActorRef[T] = delegate.spawn(behavior) + /** * Spawn a new named actor under the testkit user guardian and return the ActorRef for the spawned actor, * note that spawning actors with the same name in multiple test cases will cause failures. 
*/ def spawn[T](behavior: Behavior[T], name: String): ActorRef[T] = delegate.spawn(behavior, name) + /** * Spawn a new auto-named actor under the testkit user guardian with the given props * and return the ActorRef for the spawned actor */ def spawn[T](behavior: Behavior[T], props: Props): ActorRef[T] = delegate.spawn(behavior, props) + /** * Spawn a new named actor under the testkit user guardian with the given props and return the ActorRef * for the spawned actor, note that spawning actors with the same name in multiple test cases will cause failures. */ def spawn[T](behavior: Behavior[T], name: String, props: Props): ActorRef[T] = delegate.spawn(behavior, name, props) + /** * Stop the actor under test and wait until it terminates. * It can only be used for actors that were spawned by this `ActorTestKit`. * Other actors will not be stopped by this method. */ def stop[T](ref: ActorRef[T]): Unit = delegate.stop(ref) + /** * Stop the actor under test and wait `max` until it terminates. * It can only be used for actors that were spawned by this `ActorTestKit`. 
@@ -169,6 +170,7 @@ final class ActorTestKit private[akka] (delegate: akka.actor.testkit.typed.scala * @tparam M the type of messages the probe should accept */ def createTestProbe[M](): TestProbe[M] = TestProbe.create(system) + /** * Shortcut for creating a new test probe for the testkit actor system * @tparam M the type of messages the probe should accept @@ -180,6 +182,7 @@ final class ActorTestKit private[akka] (delegate: akka.actor.testkit.typed.scala * @tparam M the type of messages the probe should accept */ def createTestProbe[M](name: String): TestProbe[M] = TestProbe.create(name, system) + /** * Shortcut for creating a new named test probe for the testkit actor system * @tparam M the type of messages the probe should accept diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/BehaviorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/BehaviorTestKit.scala index 37c890f8a0..01141929ab 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/BehaviorTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/BehaviorTestKit.scala @@ -19,8 +19,9 @@ object BehaviorTestKit { */ def create[T](initialBehavior: Behavior[T], name: String): BehaviorTestKit[T] = { val uid = ThreadLocalRandom.current().nextInt() - new BehaviorTestKitImpl(address / name withUid (uid), initialBehavior) + new BehaviorTestKitImpl((address / name).withUid(uid), initialBehavior) } + /** * JAVA API */ @@ -39,6 +40,7 @@ object BehaviorTestKit { */ @DoNotInherit abstract class BehaviorTestKit[T] { + /** * Requests the oldest [[Effect]] or [[akka.actor.testkit.typed.javadsl.Effects.noEffects]] if no effects * have taken place. 
The effect is consumed, subsequent calls won't diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/Effects.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/Effects.scala index 4c213746d7..78c119e928 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/Effects.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/Effects.scala @@ -20,38 +20,52 @@ object Effects { * The behavior spawned a named child with the given behavior with no specific props */ def spawned[T](behavior: Behavior[T], childName: String): Spawned[T] = Spawned(behavior, childName) + /** * The behavior spawned a named child with the given behavior with no specific props */ - def spawned[T](behavior: Behavior[T], childName: String, ref: ActorRef[T]): Spawned[T] = new Spawned(behavior, childName, Props.empty, ref) + def spawned[T](behavior: Behavior[T], childName: String, ref: ActorRef[T]): Spawned[T] = + new Spawned(behavior, childName, Props.empty, ref) + /** * The behavior spawned a named child with the given behavior and specific props */ - def spawned[T](behavior: Behavior[T], childName: String, props: Props): Spawned[T] = Spawned(behavior, childName, props) + def spawned[T](behavior: Behavior[T], childName: String, props: Props): Spawned[T] = + Spawned(behavior, childName, props) + /** * The behavior spawned a named child with the given behavior and specific props */ - def spawned[T](behavior: Behavior[T], childName: String, props: Props, ref: ActorRef[T]): Spawned[T] = new Spawned(behavior, childName, props, ref) + def spawned[T](behavior: Behavior[T], childName: String, props: Props, ref: ActorRef[T]): Spawned[T] = + new Spawned(behavior, childName, props, ref) + /** * The behavior spawned an anonymous child with the given behavior with no specific props */ def spawnedAnonymous[T](behavior: Behavior[T]): SpawnedAnonymous[T] = SpawnedAnonymous(behavior) + /** * The 
behavior spawned an anonymous child with the given behavior with no specific props */ - def spawnedAnonymous[T](behavior: Behavior[T], ref: ActorRef[T]): SpawnedAnonymous[T] = new SpawnedAnonymous(behavior, Props.empty, ref) + def spawnedAnonymous[T](behavior: Behavior[T], ref: ActorRef[T]): SpawnedAnonymous[T] = + new SpawnedAnonymous(behavior, Props.empty, ref) + /** * The behavior spawned an anonymous child with the given behavior with specific props */ def spawnedAnonymous[T](behavior: Behavior[T], props: Props): SpawnedAnonymous[T] = SpawnedAnonymous(behavior, props) + /** * The behavior spawned an anonymous child with the given behavior with specific props */ - def spawnedAnonymous[T](behavior: Behavior[T], props: Props, ref: ActorRef[T]): SpawnedAnonymous[T] = new SpawnedAnonymous(behavior, props, ref) + def spawnedAnonymous[T](behavior: Behavior[T], props: Props, ref: ActorRef[T]): SpawnedAnonymous[T] = + new SpawnedAnonymous(behavior, props, ref) + /** * The behavior stopped `childName` */ def stopped(childName: String): Stopped = Stopped(childName) + /** * The behavior started watching `other`, through `context.watch(other)` */ diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ManualTime.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ManualTime.scala index bd3aab818e..c0b3fb4334 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ManualTime.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/ManualTime.scala @@ -30,8 +30,10 @@ object ManualTime { def get[A](system: ActorSystem[A]): ManualTime = system.scheduler match { case sc: akka.testkit.ExplicitlyTriggeredScheduler => new ManualTime(sc) - case _ => throw new IllegalArgumentException("ActorSystem not configured with explicitly triggered scheduler, " + - "make sure to include akka.actor.testkit.typed.javadsl.ManualTime.config() when setting up the test") + case _ => + 
throw new IllegalArgumentException( + "ActorSystem not configured with explicitly triggered scheduler, " + + "make sure to include akka.actor.testkit.typed.javadsl.ManualTime.config() when setting up the test") } } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestInbox.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestInbox.scala index ee42912c95..a139adc572 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestInbox.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestInbox.scala @@ -18,11 +18,11 @@ object TestInbox { def create[T](name: String): TestInbox[T] = { val uid = ThreadLocalRandom.current().nextInt() - new TestInboxImpl(address / name withUid (uid)) + new TestInboxImpl((address / name).withUid(uid)) } def create[T](): TestInbox[T] = { val uid = ThreadLocalRandom.current().nextInt() - new TestInboxImpl(address / "inbox" withUid (uid)) + new TestInboxImpl((address / "inbox").withUid(uid)) } } @@ -39,6 +39,7 @@ object TestInbox { */ @DoNotInherit abstract class TestInbox[T] { + /** * The actor ref of the inbox */ diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala index 95a233c5bc..9309423662 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestKitJunitResource.scala @@ -43,9 +43,9 @@ final class TestKitJunitResource(_kit: ActorTestKit) extends ExternalResource { * Use a custom config for the actor system. 
*/ def this(customConfig: String) = - this(ActorTestKit.create( - TestKitUtils.testNameFromCallStack(classOf[TestKitJunitResource]), - ConfigFactory.parseString(customConfig))) + this( + ActorTestKit.create(TestKitUtils.testNameFromCallStack(classOf[TestKitJunitResource]), + ConfigFactory.parseString(customConfig))) /** * Use a custom config for the actor system. @@ -67,14 +67,17 @@ final class TestKitJunitResource(_kit: ActorTestKit) extends ExternalResource { * See corresponding method on [[ActorTestKit]] */ def system: ActorSystem[Void] = testKit.system + /** * See corresponding method on [[ActorTestKit]] */ def testKitSettings: TestKitSettings = testKit.testKitSettings + /** * See corresponding method on [[ActorTestKit]] */ def timeout: Timeout = testKit.timeout + /** * See corresponding method on [[ActorTestKit]] */ @@ -84,14 +87,17 @@ final class TestKitJunitResource(_kit: ActorTestKit) extends ExternalResource { * See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T]): ActorRef[T] = testKit.spawn(behavior) + /** * See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T], name: String): ActorRef[T] = testKit.spawn(behavior, name) + /** * See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T], props: Props): ActorRef[T] = testKit.spawn(behavior, props) + /** * See corresponding method on [[ActorTestKit]] */ @@ -101,14 +107,17 @@ final class TestKitJunitResource(_kit: ActorTestKit) extends ExternalResource { * See corresponding method on [[ActorTestKit]] */ def createTestProbe[M](): TestProbe[M] = testKit.createTestProbe[M]() + /** * See corresponding method on [[ActorTestKit]] */ def createTestProbe[M](clazz: Class[M]): TestProbe[M] = testKit.createTestProbe(clazz) + /** * See corresponding method on [[ActorTestKit]] */ def createTestProbe[M](name: String, clazz: Class[M]): TestProbe[M] = testKit.createTestProbe(name, clazz) + /** * See corresponding method on [[ActorTestKit]] 
*/ diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala index a2377cd682..4e96a48913 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/javadsl/TestProbe.scala @@ -17,6 +17,7 @@ import akka.annotation.DoNotInherit import akka.util.unused object FishingOutcomes { + /** * Consume this message and continue with the next */ @@ -214,7 +215,9 @@ abstract class TestProbe[M] { /** * Same as the other `fishForMessage` but includes the provided hint in all error messages */ - def fishForMessage(max: Duration, hint: String, fisher: java.util.function.Function[M, FishingOutcome]): java.util.List[M] + def fishForMessage(max: Duration, + hint: String, + fisher: java.util.function.Function[M, FishingOutcome]): java.util.List[M] /** * Expect the given actor to be stopped or stop within the given timeout or diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala index c1b96cc9d1..d4f22f0a2a 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKit.scala @@ -29,11 +29,9 @@ object ActorTestKit { * the testkit with [[ActorTestKit#shutdownTestKit]]. */ def apply(): ActorTestKit = - new ActorTestKit( - name = TestKitUtils.testNameFromCallStack(classOf[ActorTestKit]), - config = noConfigSet, - settings = None - ) + new ActorTestKit(name = TestKitUtils.testNameFromCallStack(classOf[ActorTestKit]), + config = noConfigSet, + settings = None) /** * Create a named testkit. 
@@ -44,11 +42,7 @@ object ActorTestKit { * the testkit with [[ActorTestKit#shutdownTestKit]]. */ def apply(name: String): ActorTestKit = - new ActorTestKit( - name = TestKitUtils.scrubActorSystemName(name), - config = noConfigSet, - settings = None - ) + new ActorTestKit(name = TestKitUtils.scrubActorSystemName(name), config = noConfigSet, settings = None) /** * Create a named testkit, and use a custom config for the actor system. @@ -59,11 +53,7 @@ object ActorTestKit { * the testkit with [[ActorTestKit#shutdownTestKit]]. */ def apply(name: String, customConfig: Config): ActorTestKit = - new ActorTestKit( - name = TestKitUtils.scrubActorSystemName(name), - config = customConfig, - settings = None - ) + new ActorTestKit(name = TestKitUtils.scrubActorSystemName(name), config = customConfig, settings = None) /** * Create a named testkit, and use a custom config for the actor system, @@ -75,11 +65,7 @@ object ActorTestKit { * the testkit with [[ActorTestKit#shutdownTestKit]]. */ def apply(name: String, customConfig: Config, settings: TestKitSettings): ActorTestKit = - new ActorTestKit( - name = TestKitUtils.scrubActorSystemName(name), - config = customConfig, - settings = Some(settings) - ) + new ActorTestKit(name = TestKitUtils.scrubActorSystemName(name), config = customConfig, settings = Some(settings)) /** * Shutdown the given [[akka.actor.typed.ActorSystem]] and block until it shuts down, @@ -87,21 +73,14 @@ object ActorTestKit { */ def shutdown(system: ActorSystem[_]): Unit = { val settings = TestKitSettings(system) - TestKitUtils.shutdown( - system, - settings.DefaultActorSystemShutdownTimeout, - settings.ThrowOnShutdownTimeout - ) + TestKitUtils.shutdown(system, settings.DefaultActorSystemShutdownTimeout, settings.ThrowOnShutdownTimeout) } /** * Shutdown the given [[akka.actor.typed.ActorSystem]] and block until it shuts down * or the `duration` hits. 
If the timeout hits `verifySystemShutdown` decides */ - def shutdown( - system: ActorSystem[_], - timeout: Duration, - throwIfShutdownFails: Boolean = false): Unit = + def shutdown(system: ActorSystem[_], timeout: Duration, throwIfShutdownFails: Boolean = false): Unit = TestKitUtils.shutdown(system, timeout, throwIfShutdownFails) // place holder for no custom config specified to avoid the boilerplate @@ -140,11 +119,9 @@ final class ActorTestKit private[akka] (val name: String, val config: Config, se implicit val timeout: Timeout = testKitSettings.DefaultTimeout def shutdownTestKit(): Unit = { - ActorTestKit.shutdown( - system, - testKitSettings.DefaultActorSystemShutdownTimeout, - testKitSettings.ThrowOnShutdownTimeout - ) + ActorTestKit.shutdown(system, + testKitSettings.DefaultActorSystemShutdownTimeout, + testKitSettings.ThrowOnShutdownTimeout) } /** @@ -180,12 +157,15 @@ final class ActorTestKit private[akka] (val name: String, val config: Config, se * It can only be used for actors that were spawned by this `ActorTestKit`. * Other actors will not be stopped by this method. 
*/ - def stop[T](ref: ActorRef[T], max: FiniteDuration = timeout.duration): Unit = try { - Await.result(internalSystem.ask { x: ActorRef[ActorTestKitGuardian.Ack.type] => ActorTestKitGuardian.StopActor(ref, x) }, max) - } catch { - case _: TimeoutException => - assert(false, s"timeout ($max) during stop() waiting for actor [${ref.path}] to stop") - } + def stop[T](ref: ActorRef[T], max: FiniteDuration = timeout.duration): Unit = + try { + Await.result(internalSystem.ask { x: ActorRef[ActorTestKitGuardian.Ack.type] => + ActorTestKitGuardian.StopActor(ref, x) + }, max) + } catch { + case _: TimeoutException => + assert(false, s"timeout ($max) during stop() waiting for actor [${ref.path}] to stop") + } /** * Shortcut for creating a new test probe for the testkit actor system diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitBase.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitBase.scala index 7210a024b5..be34193a40 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitBase.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitBase.scala @@ -54,14 +54,17 @@ abstract class ActorTestKitBase(val testKit: ActorTestKit) { * See corresponding method on [[ActorTestKit]] */ implicit def system: ActorSystem[Nothing] = testKit.system + /** * See corresponding method on [[ActorTestKit]] */ implicit def testKitSettings: TestKitSettings = testKit.testKitSettings + /** * See corresponding method on [[ActorTestKit]] */ implicit def timeout: Timeout = testKit.timeout + /** * See corresponding method on [[ActorTestKit]] */ @@ -71,14 +74,17 @@ abstract class ActorTestKitBase(val testKit: ActorTestKit) { * See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T]): ActorRef[T] = testKit.spawn(behavior) + /** * See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: 
Behavior[T], name: String): ActorRef[T] = testKit.spawn(behavior, name) + /** * See corresponding method on [[ActorTestKit]] */ def spawn[T](behavior: Behavior[T], props: Props): ActorRef[T] = testKit.spawn(behavior, props) + /** * See corresponding method on [[ActorTestKit]] */ @@ -88,6 +94,7 @@ abstract class ActorTestKitBase(val testKit: ActorTestKit) { * See corresponding method on [[ActorTestKit]] */ def createTestProbe[M](): TestProbe[M] = testKit.createTestProbe[M]() + /** * See corresponding method on [[ActorTestKit]] */ diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKit.scala index b51c5fbec4..d5b6d21b7b 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKit.scala @@ -19,7 +19,7 @@ object BehaviorTestKit { def apply[T](initialBehavior: Behavior[T], name: String): BehaviorTestKit[T] = { val uid = ThreadLocalRandom.current().nextInt() - new BehaviorTestKitImpl(address / name withUid (uid), initialBehavior) + new BehaviorTestKitImpl((address / name).withUid(uid), initialBehavior) } def apply[T](initialBehavior: Behavior[T]): BehaviorTestKit[T] = apply(initialBehavior, "testkit") diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/Effects.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/Effects.scala index 92d48895c4..d725b852bb 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/Effects.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/Effects.scala @@ -19,38 +19,52 @@ object Effects { * The behavior spawned a named child with the given behavior with no specific props */ def spawned[T](behavior: Behavior[T], childName: String): 
Spawned[T] = Spawned(behavior, childName) + /** * The behavior spawned a named child with the given behavior with no specific props */ - def spawned[T](behavior: Behavior[T], childName: String, ref: ActorRef[T]): Spawned[T] = new Spawned(behavior, childName, Props.empty, ref) + def spawned[T](behavior: Behavior[T], childName: String, ref: ActorRef[T]): Spawned[T] = + new Spawned(behavior, childName, Props.empty, ref) + /** * The behavior spawned a named child with the given behavior and specific props */ - def spawned[T](behavior: Behavior[T], childName: String, props: Props): Spawned[T] = Spawned(behavior, childName, props) + def spawned[T](behavior: Behavior[T], childName: String, props: Props): Spawned[T] = + Spawned(behavior, childName, props) + /** * The behavior spawned a named child with the given behavior and specific props */ - def spawned[T](behavior: Behavior[T], childName: String, props: Props, ref: ActorRef[T]): Spawned[T] = new Spawned(behavior, childName, props, ref) + def spawned[T](behavior: Behavior[T], childName: String, props: Props, ref: ActorRef[T]): Spawned[T] = + new Spawned(behavior, childName, props, ref) + /** * The behavior spawned an anonymous child with the given behavior with no specific props */ def spawnedAnonymous[T](behavior: Behavior[T]): SpawnedAnonymous[T] = SpawnedAnonymous(behavior) + /** * The behavior spawned an anonymous child with the given behavior with no specific props */ - def spawnedAnonymous[T](behavior: Behavior[T], ref: ActorRef[T]): SpawnedAnonymous[T] = new SpawnedAnonymous(behavior, Props.empty, ref) + def spawnedAnonymous[T](behavior: Behavior[T], ref: ActorRef[T]): SpawnedAnonymous[T] = + new SpawnedAnonymous(behavior, Props.empty, ref) + /** * The behavior spawned an anonymous child with the given behavior with specific props */ def spawnedAnonymous[T](behavior: Behavior[T], props: Props): SpawnedAnonymous[T] = SpawnedAnonymous(behavior, props) + /** * The behavior spawned an anonymous child with the given 
behavior with specific props */ - def spawnedAnonymous[T](behavior: Behavior[T], props: Props, ref: ActorRef[T]): SpawnedAnonymous[T] = new SpawnedAnonymous(behavior, props, ref) + def spawnedAnonymous[T](behavior: Behavior[T], props: Props, ref: ActorRef[T]): SpawnedAnonymous[T] = + new SpawnedAnonymous(behavior, props, ref) + /** * The behavior stopped `childName` */ def stopped(childName: String): Stopped = Stopped(childName) + /** * The behavior started watching `other`, through `context.watch(other)` */ diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ManualTime.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ManualTime.scala index 893ed2c6b2..67866d4565 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ManualTime.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ManualTime.scala @@ -17,10 +17,12 @@ import scala.concurrent.duration.{ Duration, FiniteDuration } * scheduler control through [[ManualTime.apply()]] */ object ManualTime { + /** * Config needed to use the `ExplicitlyTriggeredScheduler` */ - val config: Config = ConfigFactory.parseString("""akka.scheduler.implementation = "akka.testkit.ExplicitlyTriggeredScheduler"""") + val config: Config = + ConfigFactory.parseString("""akka.scheduler.implementation = "akka.testkit.ExplicitlyTriggeredScheduler"""") /** * Access the manual scheduler, note that you need to setup the actor system/testkit with [[config()]] for this to @@ -29,8 +31,10 @@ object ManualTime { def apply()(implicit system: ActorSystem[_]): ManualTime = system.scheduler match { case sc: akka.testkit.ExplicitlyTriggeredScheduler => new ManualTime(sc) - case _ => throw new IllegalArgumentException("ActorSystem not configured with explicitly triggered scheduler, " + - "make sure to include akka.actor.testkit.typed.scaladsl.ManualTime.config() when setting up the test") + case _ => + throw new 
IllegalArgumentException( + "ActorSystem not configured with explicitly triggered scheduler, " + + "make sure to include akka.actor.testkit.typed.scaladsl.ManualTime.config() when setting up the test") } } diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ScalaTestWithActorTestKit.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ScalaTestWithActorTestKit.scala index caa4f9e002..8f0ec56cc5 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ScalaTestWithActorTestKit.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/ScalaTestWithActorTestKit.scala @@ -20,8 +20,13 @@ import org.scalatest.time.Span * Note that ScalaTest is not provided as a transitive dependency of the testkit module but must be added explicitly * to your project to use this. */ -abstract class ScalaTestWithActorTestKit(testKit: ActorTestKit) extends ActorTestKitBase(testKit) - with TestSuite with Matchers with BeforeAndAfterAll with ScalaFutures with Eventually { +abstract class ScalaTestWithActorTestKit(testKit: ActorTestKit) + extends ActorTestKitBase(testKit) + with TestSuite + with Matchers + with BeforeAndAfterAll + with ScalaFutures + with Eventually { def this() = this(ActorTestKit(ActorTestKitBase.testNameFromCallStack())) @@ -57,4 +62,3 @@ abstract class ScalaTestWithActorTestKit(testKit: ActorTestKit) extends ActorTes testKit.shutdownTestKit() } } - diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestInbox.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestInbox.scala index 647a92b9f0..a211c61386 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestInbox.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestInbox.scala @@ -17,7 +17,7 @@ import scala.collection.immutable object TestInbox { def apply[T](name: 
String = "inbox"): TestInbox[T] = { val uid = ThreadLocalRandom.current().nextInt() - new TestInboxImpl(address / name withUid uid) + new TestInboxImpl((address / name).withUid(uid)) } private[akka] val address = RootActorPath(Address("akka.actor.typed.inbox", "anonymous")) @@ -36,6 +36,7 @@ object TestInbox { */ @DoNotInherit trait TestInbox[T] { + /** * The actor ref of the inbox */ diff --git a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestProbe.scala b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestProbe.scala index 7f73733db5..14e5a0c28b 100644 --- a/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestProbe.scala +++ b/akka-actor-testkit-typed/src/main/scala/akka/actor/testkit/typed/scaladsl/TestProbe.scala @@ -16,18 +16,22 @@ import akka.actor.typed.ActorSystem import akka.annotation.DoNotInherit object FishingOutcomes { + /** * Complete fishing and return all messages up until this */ val complete: FishingOutcome = FishingOutcome.Complete + /** * Consume this message, collect it into the result, and continue with the next message */ val continue: FishingOutcome = FishingOutcome.Continue + /** * Consume this message, but do not collect it into the result, and continue with the next message */ val continueAndIgnore: FishingOutcome = FishingOutcome.ContinueAndIgnore + /** * Fail fishing with a custom error message */ diff --git a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala index 8c677a5659..f146135511 100644 --- a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala +++ b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/ActorTestKitSpec.scala @@ -25,8 +25,7 @@ class ActorTestKitSpec extends ScalaTestWithActorTestKit with WordSpecLike { val testkit2 = 
ActorTestKit() try { testkit2.system.name should ===("ActorTestKitSpec") - } finally - testkit2.shutdownTestKit() + } finally testkit2.shutdownTestKit() } "use name from given class name" in { @@ -34,8 +33,7 @@ class ActorTestKitSpec extends ScalaTestWithActorTestKit with WordSpecLike { try { // removing package name and such testkit2.system.name should ===("Vector") - } finally - testkit2.shutdownTestKit() + } finally testkit2.shutdownTestKit() } "spawn an actor" in { @@ -81,16 +79,14 @@ class MyConcreteDerivateSpec extends MyBaseSpec { val testkit2 = ActorTestKit() try { testkit2.system.name should ===("MyConcreteDerivateSpec") - } finally - testkit2.shutdownTestKit() + } finally testkit2.shutdownTestKit() } "use name from given class name" in { val testkit2 = ActorTestKit(classOf[Vector[_]].getName) try { testkit2.system.name should ===("Vector") - } finally - testkit2.shutdownTestKit() + } finally testkit2.shutdownTestKit() } } @@ -107,4 +103,3 @@ class CompositionSpec extends WordSpec with Matchers with BeforeAndAfterAll { testKit.system.name should ===("CompositionSpec") } } - diff --git a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala index 8895bbd396..e24c93315d 100644 --- a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala +++ b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/BehaviorTestKitSpec.scala @@ -67,13 +67,13 @@ object BehaviorTestKitSpec { context.stop(child) Behaviors.same case SpawnAdapter => - context.spawnMessageAdapter { - r: Reproduce => SpawnAnonymous(r.times) + context.spawnMessageAdapter { r: Reproduce => + SpawnAnonymous(r.times) } Behaviors.same case SpawnAdapterWithName(name) => - context.spawnMessageAdapter({ - r: Reproduce => SpawnAnonymous(r.times) + context.spawnMessageAdapter({ r: Reproduce => + 
SpawnAnonymous(r.times) }, name) Behaviors.same case SpawnAndWatchUnwatch(name) => @@ -253,7 +253,7 @@ class BehaviorTestKitSpec extends WordSpec with Matchers { } } - "BehaviorTestkit's run" can { + "BehaviorTestkit's run".can { "run behaviors with messages without canonicalization" in { val testkit = BehaviorTestKit[Father.Command](Father.init) testkit.run(SpawnAdapterWithName("adapter")) @@ -267,21 +267,16 @@ class BehaviorTestKitSpec extends WordSpec with Matchers { val testkit = BehaviorTestKit(Father.init) testkit.run(SpawnAndWatchUnwatch("hello")) val child = testkit.childInbox("hello").ref - testkit.retrieveAllEffects() should be(Seq( - Effects.spawned(Child.initial, "hello", Props.empty), - Effects.watched(child), - Effects.unwatched(child) - )) + testkit.retrieveAllEffects() should be( + Seq(Effects.spawned(Child.initial, "hello", Props.empty), Effects.watched(child), Effects.unwatched(child))) } "record effects for watchWith" in { val testkit = BehaviorTestKit(Father.init) testkit.run(SpawnAndWatchWith("hello")) val child = testkit.childInbox("hello").ref - testkit.retrieveAllEffects() should be(Seq( - Effects.spawned(Child.initial, "hello", Props.empty), - Effects.watched(child) - )) + testkit.retrieveAllEffects() should be( + Seq(Effects.spawned(Child.initial, "hello", Props.empty), Effects.watched(child))) } } diff --git a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala index 7416bd338e..3b6751ee6a 100644 --- a/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala +++ b/akka-actor-testkit-typed/src/test/scala/akka/actor/testkit/typed/scaladsl/TestProbeSpec.scala @@ -47,11 +47,8 @@ class TestProbeSpec extends ScalaTestWithActorTestKit with WordSpecLike { Behaviors.withTimers { (timer) => timer.startSingleTimer("key", Stop, 300.millis) - Behaviors.receive((context, stop) => 
- Behaviors.stopped - ) - } - )) + Behaviors.receive((context, stop) => Behaviors.stopped) + })) ref ! Stop // race, but not sure how to test in any other way probe.expectTerminated(ref, 500.millis) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala index 54c66e2537..a311a0fa69 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala @@ -34,7 +34,10 @@ object ActorConfigurationVerificationSpec { """ } -class ActorConfigurationVerificationSpec extends AkkaSpec(ActorConfigurationVerificationSpec.config) with DefaultTimeout with BeforeAndAfterEach { +class ActorConfigurationVerificationSpec + extends AkkaSpec(ActorConfigurationVerificationSpec.config) + with DefaultTimeout + with BeforeAndAfterEach { import ActorConfigurationVerificationSpec._ override def atStartup: Unit = { @@ -64,8 +67,10 @@ class ActorConfigurationVerificationSpec extends AkkaSpec(ActorConfigurationVeri } "fail verification with a ConfigurationException if also configured with a ScatterGatherFirstCompletedPool" in { intercept[ConfigurationException] { - system.actorOf(ScatterGatherFirstCompletedPool(nrOfInstances = 2, within = 2 seconds). 
- withDispatcher("balancing-dispatcher").props(Props[TestActor])) + system.actorOf( + ScatterGatherFirstCompletedPool(nrOfInstances = 2, within = 2 seconds) + .withDispatcher("balancing-dispatcher") + .props(Props[TestActor])) } } "not fail verification with a ConfigurationException also not configured with a Router" in { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorCreationPerfSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorCreationPerfSpec.scala index 0391a44a10..ed4c4e6c80 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorCreationPerfSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorCreationPerfSpec.scala @@ -6,7 +6,7 @@ package akka.actor import scala.language.postfixOps -import akka.testkit.{ PerformanceTest, ImplicitSender, AkkaSpec } +import akka.testkit.{ AkkaSpec, ImplicitSender, PerformanceTest } import scala.concurrent.duration._ import akka.testkit.metrics._ import org.scalatest.BeforeAndAfterAll @@ -58,7 +58,6 @@ object ActorCreationPerfSpec { case IsAlive => sender() ! 
Alive case Create(number, propsCreator) => - for (i <- 1 to number) { val start = System.nanoTime() context.actorOf(propsCreator.apply()) @@ -117,8 +116,11 @@ object ActorCreationPerfSpec { } } -class ActorCreationPerfSpec extends AkkaSpec(ActorCreationPerfSpec.config) with ImplicitSender - with MetricsKit with BeforeAndAfterAll { +class ActorCreationPerfSpec + extends AkkaSpec(ActorCreationPerfSpec.config) + with ImplicitSender + with MetricsKit + with BeforeAndAfterAll { import ActorCreationPerfSpec._ @@ -175,7 +177,7 @@ class ActorCreationPerfSpec extends AkkaSpec(ActorCreationPerfSpec.config) with watch(driver) expectTerminated(driver, 15.seconds) - after diff before + after.diff(before) } def registerTests(name: String, propsCreator: () => Props): Unit = { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala index 703b718c0b..4de335b595 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala @@ -49,19 +49,21 @@ class ActorDSLSpec extends AkkaSpec { actor(new Act {}) //#watch val i = inbox() - i watch target + i.watch(target) //#watch target ! 
PoisonPill - i receive 1.second should ===(Terminated(target)(true, false)) + i.receive(1.second) should ===(Terminated(target)(true, false)) } "support queueing multiple queries" in { val i = inbox() import system.dispatcher - val res = Future.sequence(Seq( - Future { i.receive() } recover { case x => x }, - Future { Thread.sleep(100); i.select() { case "world" => 1 } } recover { case x => x }, - Future { Thread.sleep(200); i.select() { case "hello" => 2 } } recover { case x => x })) + val res = + Future.sequence(Seq(Future { i.receive() }.recover { case x => x }, Future { + Thread.sleep(100); i.select() { case "world" => 1 } + }.recover { case x => x }, Future { Thread.sleep(200); i.select() { case "hello" => 2 } }.recover { + case x => x + })) Thread.sleep(1000) res.isCompleted should ===(false) i.receiver ! 42 @@ -87,14 +89,14 @@ class ActorDSLSpec extends AkkaSpec { try { for (_ <- 1 to 1000) i.receiver ! 0 expectNoMsg(1 second) - EventFilter.warning(start = "dropping message", occurrences = 1) intercept { + EventFilter.warning(start = "dropping message", occurrences = 1).intercept { i.receiver ! 42 } expectMsgType[Warning] i.receiver ! 42 expectNoMsg(1 second) val gotit = for (_ <- 1 to 1000) yield i.receive() - gotit should ===((1 to 1000) map (_ => 0)) + gotit should ===((1 to 1000).map(_ => 0)) intercept[TimeoutException] { i.receive(1 second) } @@ -165,7 +167,7 @@ class ActorDSLSpec extends AkkaSpec { }) //#simple-start-stop - system stop a + system.stop(a) expectMsg("started") expectMsg("stopped") } @@ -177,15 +179,17 @@ class ActorDSLSpec extends AkkaSpec { case "die" => throw new Exception } whenFailing { case m @ (cause, msg) => testActor ! m } - whenRestarted { cause => testActor ! cause } + whenRestarted { cause => + testActor ! cause + } }) //#failing-actor - EventFilter[Exception](occurrences = 1) intercept { + EventFilter[Exception](occurrences = 1).intercept { a ! 
"die" } expectMsgPF() { case (x: Exception, Some("die")) => } - expectMsgPF() { case _: Exception => } + expectMsgPF() { case _: Exception => } } "support superviseWith" in { @@ -198,7 +202,8 @@ class ActorDSLSpec extends AkkaSpec { }) //#supervise-with val child = actor("child")(new Act { - whenFailing { (_, _) => } + whenFailing { (_, _) => + } become { case ref: ActorRef => whenStopping(ref ! "stopped") case ex: Exception => throw ex @@ -209,11 +214,11 @@ class ActorDSLSpec extends AkkaSpec { } }) a ! testActor - EventFilter.warning("hi", occurrences = 1) intercept { + EventFilter.warning("hi", occurrences = 1).intercept { a ! new Exception("hi") } expectNoMsg(1 second) - EventFilter[Exception]("hello", occurrences = 1) intercept { + EventFilter[Exception]("hello", occurrences = 1).intercept { a ! new Exception("hello") } expectMsg("stopped") @@ -242,7 +247,8 @@ class ActorDSLSpec extends AkkaSpec { become { case 1 => stash() case 2 => - testActor ! 2; unstashAll(); becomeStacked { + testActor ! 2; unstashAll(); + becomeStacked { case 1 => testActor ! 
1; unbecome() } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala index e97cb5da3d..7118e258a8 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala @@ -29,7 +29,11 @@ object ActorLifeCycleSpec { } -class ActorLifeCycleSpec extends AkkaSpec("akka.actor.serialize-messages=off") with BeforeAndAfterEach with ImplicitSender with DefaultTimeout { +class ActorLifeCycleSpec + extends AkkaSpec("akka.actor.serialize-messages=off") + with BeforeAndAfterEach + with ImplicitSender + with DefaultTimeout { import ActorLifeCycleSpec._ "An Actor" must { @@ -37,7 +41,8 @@ class ActorLifeCycleSpec extends AkkaSpec("akka.actor.serialize-messages=off") w "invoke preRestart, preStart, postRestart when using OneForOneStrategy" in { filterException[ActorKilledException] { val id = newUuid.toString - val supervisor = system.actorOf(Props(classOf[Supervisor], OneForOneStrategy(maxNrOfRetries = 3)(List(classOf[Exception])))) + val supervisor = + system.actorOf(Props(classOf[Supervisor], OneForOneStrategy(maxNrOfRetries = 3)(List(classOf[Exception])))) val gen = new AtomicInteger(0) val restarterProps = Props(new LifeCycleTestActor(testActor, id, gen) { override def preRestart(reason: Throwable, message: Option[Any]): Unit = { report("preRestart") } @@ -71,7 +76,8 @@ class ActorLifeCycleSpec extends AkkaSpec("akka.actor.serialize-messages=off") w "default for preRestart and postRestart is to call postStop and preStart respectively" in { filterException[ActorKilledException] { val id = newUuid().toString - val supervisor = system.actorOf(Props(classOf[Supervisor], OneForOneStrategy(maxNrOfRetries = 3)(List(classOf[Exception])))) + val supervisor = + system.actorOf(Props(classOf[Supervisor], OneForOneStrategy(maxNrOfRetries = 3)(List(classOf[Exception])))) val gen = new AtomicInteger(0) val 
restarterProps = Props(classOf[LifeCycleTestActor], testActor, id, gen) val restarter = Await.result((supervisor ? restarterProps).mapTo[ActorRef], timeout.duration) @@ -101,9 +107,8 @@ class ActorLifeCycleSpec extends AkkaSpec("akka.actor.serialize-messages=off") w "not invoke preRestart and postRestart when never restarted using OneForOneStrategy" in { val id = newUuid().toString - val supervisor = system.actorOf(Props( - classOf[Supervisor], - OneForOneStrategy(maxNrOfRetries = 3)(List(classOf[Exception])))) + val supervisor = + system.actorOf(Props(classOf[Supervisor], OneForOneStrategy(maxNrOfRetries = 3)(List(classOf[Exception])))) val gen = new AtomicInteger(0) val props = Props(classOf[LifeCycleTestActor], testActor, id, gen) val a = Await.result((supervisor ? props).mapTo[ActorRef], timeout.duration) @@ -121,7 +126,7 @@ class ActorLifeCycleSpec extends AkkaSpec("akka.actor.serialize-messages=off") w def receive = Actor.emptyBehavior override def postStop: Unit = { throw new Exception("hurrah") } })) - EventFilter[Exception]("hurrah", occurrences = 1) intercept { + EventFilter[Exception]("hurrah", occurrences = 1).intercept { a ! PoisonPill } } @@ -143,7 +148,7 @@ class ActorLifeCycleSpec extends AkkaSpec("akka.actor.serialize-messages=off") w expectMsg("ok") a ! "hello" expectMsg(43) - EventFilter[RuntimeException]("buh", occurrences = 1) intercept { + EventFilter[RuntimeException]("buh", occurrences = 1).intercept { a ! "fail" } a ! 
"hello" diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala index a10edce5c8..6293a4afa0 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala @@ -62,11 +62,11 @@ class ActorLookupSpec extends AkkaSpec with DefaultTimeout { system.actorFor(system / "c1") should ===(c1) system.actorFor(system / "c2") should ===(c2) system.actorFor(system / "c2" / "c21") should ===(c21) - system.actorFor(system child "c2" child "c21") should ===(c21) // test Java API + system.actorFor(system.child("c2").child("c21")) should ===(c21) // test Java API system.actorFor(system / Seq("c2", "c21")) should ===(c21) import scala.collection.JavaConverters._ - system.actorFor(system descendant Seq("c2", "c21").asJava) // test Java API + system.actorFor(system.descendant(Seq("c2", "c21").asJava)) // test Java API } "find actors by looking up their string representation" in { @@ -201,7 +201,8 @@ class ActorLookupSpec extends AkkaSpec with DefaultTimeout { "find actors by looking up their root-anchored relative path" in { def check(looker: ActorRef, pathOf: ActorRef, result: ActorRef): Unit = { Await.result(looker ? LookupString(pathOf.path.toStringWithoutAddress), timeout.duration) should ===(result) - Await.result(looker ? LookupString(pathOf.path.elements.mkString("/", "/", "/")), timeout.duration) should ===(result) + Await.result(looker ? LookupString(pathOf.path.elements.mkString("/", "/", "/")), timeout.duration) should ===( + result) } for { looker <- all @@ -212,8 +213,8 @@ class ActorLookupSpec extends AkkaSpec with DefaultTimeout { "find actors by looking up their relative path" in { def check(looker: ActorRef, result: ActorRef, elems: String*): Unit = { Await.result(looker ? LookupElems(elems), timeout.duration) should ===(result) - Await.result(looker ? 
LookupString(elems mkString "/"), timeout.duration) should ===(result) - Await.result(looker ? LookupString(elems mkString ("", "/", "/")), timeout.duration) should ===(result) + Await.result(looker ? LookupString(elems.mkString("/")), timeout.duration) should ===(result) + Await.result(looker ? LookupString(elems.mkString("", "/", "/")), timeout.duration) should ===(result) } check(c1, user, "..") for { @@ -232,7 +233,9 @@ class ActorLookupSpec extends AkkaSpec with DefaultTimeout { Await.result(looker ? LookupString(target.path.toString), timeout.duration) should ===(target) Await.result(looker ? LookupString(target.path.toString + "/"), timeout.duration) should ===(target) Await.result(looker ? LookupString(target.path.toStringWithoutAddress), timeout.duration) should ===(target) - if (target != root) Await.result(looker ? LookupString(target.path.elements.mkString("/", "/", "/")), timeout.duration) should ===(target) + if (target != root) + Await.result(looker ? LookupString(target.path.elements.mkString("/", "/", "/")), timeout.duration) should ===( + target) } } for (target <- Seq(root, syst, user, system.deadLetters)) check(target) @@ -248,17 +251,14 @@ class ActorLookupSpec extends AkkaSpec with DefaultTimeout { } def check(looker: ActorRef): Unit = { val lookname = looker.path.elements.mkString("", "/", "/") - for ( - (l, r) <- Seq( - LookupString("a/b/c") -> empty(lookname + "a/b/c"), - LookupString("") -> system.deadLetters, - LookupString("akka://all-systems/Nobody") -> system.deadLetters, - LookupPath(system / "hallo") -> empty("user/hallo"), - LookupPath(looker.path child "hallo") -> empty(lookname + "hallo"), // test Java API - LookupPath(looker.path descendant Seq("a", "b").asJava) -> empty(lookname + "a/b"), // test Java API - LookupElems(Seq()) -> system.deadLetters, - LookupElems(Seq("a")) -> empty(lookname + "a")) - ) checkOne(looker, l, r) + for ((l, r) <- Seq(LookupString("a/b/c") -> empty(lookname + "a/b/c"), + LookupString("") -> 
system.deadLetters, + LookupString("akka://all-systems/Nobody") -> system.deadLetters, + LookupPath(system / "hallo") -> empty("user/hallo"), + LookupPath(looker.path.child("hallo")) -> empty(lookname + "hallo"), // test Java API + LookupPath(looker.path.descendant(Seq("a", "b").asJava)) -> empty(lookname + "a/b"), // test Java API + LookupElems(Seq()) -> system.deadLetters, + LookupElems(Seq("a")) -> empty(lookname + "a"))) checkOne(looker, l, r) } for (looker <- all) check(looker) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala index c350599262..3add8908d4 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorMailboxSpec.scala @@ -174,11 +174,13 @@ object ActorMailboxSpec { class BoundedQueueReportingActor extends QueueReportingActor with RequiresMessageQueue[BoundedMessageQueueSemantics] - class BoundedControlAwareQueueReportingActor extends QueueReportingActor - with RequiresMessageQueue[BoundedControlAwareMessageQueueSemantics] + class BoundedControlAwareQueueReportingActor + extends QueueReportingActor + with RequiresMessageQueue[BoundedControlAwareMessageQueueSemantics] - class UnboundedControlAwareQueueReportingActor extends QueueReportingActor - with RequiresMessageQueue[UnboundedControlAwareMessageQueueSemantics] + class UnboundedControlAwareQueueReportingActor + extends QueueReportingActor + with RequiresMessageQueue[UnboundedControlAwareMessageQueueSemantics] class StashQueueReportingActor extends QueueReportingActor with Stash @@ -187,32 +189,28 @@ object ActorMailboxSpec { val UnboundedMailboxTypes = Seq(classOf[UnboundedMessageQueueSemantics]) val BoundedMailboxTypes = Seq(classOf[BoundedMessageQueueSemantics]) - val UnboundedDeqMailboxTypes = Seq( - classOf[DequeBasedMessageQueueSemantics], - classOf[UnboundedMessageQueueSemantics], - 
classOf[UnboundedDequeBasedMessageQueueSemantics]) + val UnboundedDeqMailboxTypes = Seq(classOf[DequeBasedMessageQueueSemantics], + classOf[UnboundedMessageQueueSemantics], + classOf[UnboundedDequeBasedMessageQueueSemantics]) - val BoundedDeqMailboxTypes = Seq( - classOf[DequeBasedMessageQueueSemantics], - classOf[BoundedMessageQueueSemantics], - classOf[BoundedDequeBasedMessageQueueSemantics]) + val BoundedDeqMailboxTypes = Seq(classOf[DequeBasedMessageQueueSemantics], + classOf[BoundedMessageQueueSemantics], + classOf[BoundedDequeBasedMessageQueueSemantics]) - val BoundedControlAwareMailboxTypes = Seq( - classOf[BoundedMessageQueueSemantics], - classOf[ControlAwareMessageQueueSemantics], - classOf[BoundedControlAwareMessageQueueSemantics]) - val UnboundedControlAwareMailboxTypes = Seq( - classOf[UnboundedMessageQueueSemantics], - classOf[ControlAwareMessageQueueSemantics], - classOf[UnboundedControlAwareMessageQueueSemantics]) + val BoundedControlAwareMailboxTypes = Seq(classOf[BoundedMessageQueueSemantics], + classOf[ControlAwareMessageQueueSemantics], + classOf[BoundedControlAwareMessageQueueSemantics]) + val UnboundedControlAwareMailboxTypes = Seq(classOf[UnboundedMessageQueueSemantics], + classOf[ControlAwareMessageQueueSemantics], + classOf[UnboundedControlAwareMessageQueueSemantics]) trait MCBoundedMessageQueueSemantics extends MessageQueue with MultipleConsumerSemantics final case class MCBoundedMailbox(val capacity: Int, val pushTimeOut: FiniteDuration) - extends MailboxType with ProducesMessageQueue[MCBoundedMessageQueueSemantics] { + extends MailboxType + with ProducesMessageQueue[MCBoundedMessageQueueSemantics] { - def this(settings: ActorSystem.Settings, config: Config) = this( - config.getInt("mailbox-capacity"), - config.getNanosDuration("mailbox-push-timeout-time")) + def this(settings: ActorSystem.Settings, config: Config) = + this(config.getInt("mailbox-capacity"), config.getNanosDuration("mailbox-push-timeout-time")) final override def 
create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new BoundedMailbox.MessageQueue(capacity, pushTimeOut) @@ -231,7 +229,7 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout actor ! "ping" val q = expectMsgType[MessageQueue] - types foreach (t => assert(t isInstance q, s"Type [${q.getClass.getName}] is not assignable to [${t.getName}]")) + types.foreach(t => assert(t.isInstance(q), s"Type [${q.getClass.getName}] is not assignable to [${t.getName}]")) q } @@ -242,30 +240,24 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout } "get an unbounded deque message queue when it is only configured on the props" in { - checkMailboxQueue( - Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"), - "default-override-from-props", UnboundedDeqMailboxTypes) + checkMailboxQueue(Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"), + "default-override-from-props", + UnboundedDeqMailboxTypes) } "get an bounded message queue when it's only configured with RequiresMailbox" in { - checkMailboxQueue( - Props[BoundedQueueReportingActor], - "default-override-from-trait", BoundedMailboxTypes) + checkMailboxQueue(Props[BoundedQueueReportingActor], "default-override-from-trait", BoundedMailboxTypes) } "get an unbounded deque message queue when it's only mixed with Stash" in { - checkMailboxQueue( - Props[StashQueueReportingActor], - "default-override-from-stash", UnboundedDeqMailboxTypes) - checkMailboxQueue( - Props(new StashQueueReportingActor), - "default-override-from-stash2", UnboundedDeqMailboxTypes) - checkMailboxQueue( - Props(classOf[StashQueueReportingActorWithParams], 17, "hello"), - "default-override-from-stash3", UnboundedDeqMailboxTypes) - checkMailboxQueue( - Props(new StashQueueReportingActorWithParams(17, "hello")), - "default-override-from-stash4", UnboundedDeqMailboxTypes) + checkMailboxQueue(Props[StashQueueReportingActor], 
"default-override-from-stash", UnboundedDeqMailboxTypes) + checkMailboxQueue(Props(new StashQueueReportingActor), "default-override-from-stash2", UnboundedDeqMailboxTypes) + checkMailboxQueue(Props(classOf[StashQueueReportingActorWithParams], 17, "hello"), + "default-override-from-stash3", + UnboundedDeqMailboxTypes) + checkMailboxQueue(Props(new StashQueueReportingActorWithParams(17, "hello")), + "default-override-from-stash4", + UnboundedDeqMailboxTypes) } "get a bounded message queue when it's configured as mailbox" in { @@ -281,23 +273,26 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout } "get an unbounded control aware message queue when it's configured as mailbox" in { - checkMailboxQueue(Props[QueueReportingActor], "default-unbounded-control-aware", UnboundedControlAwareMailboxTypes) + checkMailboxQueue(Props[QueueReportingActor], + "default-unbounded-control-aware", + UnboundedControlAwareMailboxTypes) } "get an bounded control aware message queue when it's only configured with RequiresMailbox" in { - checkMailboxQueue( - Props[BoundedControlAwareQueueReportingActor], - "default-override-from-trait-bounded-control-aware", BoundedControlAwareMailboxTypes) + checkMailboxQueue(Props[BoundedControlAwareQueueReportingActor], + "default-override-from-trait-bounded-control-aware", + BoundedControlAwareMailboxTypes) } "get an unbounded control aware message queue when it's only configured with RequiresMailbox" in { - checkMailboxQueue( - Props[UnboundedControlAwareQueueReportingActor], - "default-override-from-trait-unbounded-control-aware", UnboundedControlAwareMailboxTypes) + checkMailboxQueue(Props[UnboundedControlAwareQueueReportingActor], + "default-override-from-trait-unbounded-control-aware", + UnboundedControlAwareMailboxTypes) } "fail to create actor when an unbounded dequeu message queue is configured as mailbox overriding RequestMailbox" in { - 
intercept[ConfigurationException](system.actorOf(Props[BoundedQueueReportingActor], "default-unbounded-deque-override-trait")) + intercept[ConfigurationException]( + system.actorOf(Props[BoundedQueueReportingActor], "default-unbounded-deque-override-trait")) } "get an unbounded message queue when defined in dispatcher" in { @@ -305,7 +300,8 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout } "fail to create actor when an unbounded message queue is defined in dispatcher overriding RequestMailbox" in { - intercept[ConfigurationException](system.actorOf(Props[BoundedQueueReportingActor], "unbounded-default-override-trait")) + intercept[ConfigurationException]( + system.actorOf(Props[BoundedQueueReportingActor], "unbounded-default-override-trait")) } "get a bounded message queue when it's configured as mailbox overriding unbounded in dispatcher" in { @@ -317,7 +313,9 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout } "get a bounded message queue with 0 push timeout when defined in dispatcher" in { - val q = checkMailboxQueue(Props[QueueReportingActor], "default-bounded-mailbox-with-zero-pushtimeout", BoundedMailboxTypes) + val q = checkMailboxQueue(Props[QueueReportingActor], + "default-bounded-mailbox-with-zero-pushtimeout", + BoundedMailboxTypes) q.asInstanceOf[BoundedMessageQueueSemantics].pushTimeOut should ===(Duration.Zero) } @@ -326,21 +324,25 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout } "get an unbounded message queue overriding configuration on the props" in { - checkMailboxQueue( - Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"), - "bounded-unbounded-override-props", UnboundedMailboxTypes) + checkMailboxQueue(Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"), + "bounded-unbounded-override-props", + UnboundedMailboxTypes) } "get a bounded deque-based message queue if configured and 
required" in { - checkMailboxQueue(Props[StashQueueReportingActor], "bounded-deque-requirements-configured", BoundedDeqMailboxTypes) + checkMailboxQueue(Props[StashQueueReportingActor], + "bounded-deque-requirements-configured", + BoundedDeqMailboxTypes) } "fail with a unbounded deque-based message queue if configured and required" in { - intercept[ConfigurationException](system.actorOf(Props[StashQueueReportingActor], "bounded-deque-require-unbounded-configured")) + intercept[ConfigurationException]( + system.actorOf(Props[StashQueueReportingActor], "bounded-deque-require-unbounded-configured")) } "fail with a bounded deque-based message queue if not configured" in { - intercept[ConfigurationException](system.actorOf(Props[StashQueueReportingActor], "bounded-deque-require-unbounded-unconfigured")) + intercept[ConfigurationException]( + system.actorOf(Props[StashQueueReportingActor], "bounded-deque-require-unbounded-unconfigured")) } "get a bounded deque-based message queue if configured and required with Props" in { @@ -353,79 +355,71 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout } "fail with a unbounded deque-based message queue if configured and required with Props" in { - intercept[ConfigurationException](system.actorOf( - Props[StashQueueReportingActor] - .withDispatcher("requiring-bounded-dispatcher") - .withMailbox("akka.actor.mailbox.unbounded-deque-based"), - "bounded-deque-require-unbounded-configured-props")) + intercept[ConfigurationException]( + system.actorOf( + Props[StashQueueReportingActor] + .withDispatcher("requiring-bounded-dispatcher") + .withMailbox("akka.actor.mailbox.unbounded-deque-based"), + "bounded-deque-require-unbounded-configured-props")) } "fail with a bounded deque-based message queue if not configured with Props" in { - intercept[ConfigurationException](system.actorOf( - Props[StashQueueReportingActor] - .withDispatcher("requiring-bounded-dispatcher"), - 
"bounded-deque-require-unbounded-unconfigured-props")) + intercept[ConfigurationException]( + system.actorOf(Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"), + "bounded-deque-require-unbounded-unconfigured-props")) } "get a bounded deque-based message queue if configured and required with Props (dispatcher)" in { - checkMailboxQueue( - Props[StashQueueReportingActor] - .withDispatcher("requiring-bounded-dispatcher"), - "bounded-deque-requirements-configured-props-disp", - BoundedDeqMailboxTypes) + checkMailboxQueue(Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"), + "bounded-deque-requirements-configured-props-disp", + BoundedDeqMailboxTypes) } "fail with a unbounded deque-based message queue if configured and required with Props (dispatcher)" in { - intercept[ConfigurationException](system.actorOf( - Props[StashQueueReportingActor] - .withDispatcher("requiring-bounded-dispatcher"), - "bounded-deque-require-unbounded-configured-props-disp")) + intercept[ConfigurationException]( + system.actorOf(Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"), + "bounded-deque-require-unbounded-configured-props-disp")) } "fail with a bounded deque-based message queue if not configured with Props (dispatcher)" in { - intercept[ConfigurationException](system.actorOf( - Props[StashQueueReportingActor] - .withDispatcher("requiring-bounded-dispatcher"), - "bounded-deque-require-unbounded-unconfigured-props-disp")) + intercept[ConfigurationException]( + system.actorOf(Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"), + "bounded-deque-require-unbounded-unconfigured-props-disp")) } "get a bounded deque-based message queue if configured and required with Props (mailbox)" in { - checkMailboxQueue( - Props[StashQueueReportingActor] - .withMailbox("akka.actor.mailbox.bounded-deque-based"), - "bounded-deque-requirements-configured-props-mail", - BoundedDeqMailboxTypes) + 
checkMailboxQueue(Props[StashQueueReportingActor].withMailbox("akka.actor.mailbox.bounded-deque-based"), + "bounded-deque-requirements-configured-props-mail", + BoundedDeqMailboxTypes) } "fail with a unbounded deque-based message queue if configured and required with Props (mailbox)" in { - intercept[ConfigurationException](system.actorOf( - Props[StashQueueReportingActor] - .withMailbox("akka.actor.mailbox.unbounded-deque-based"), - "bounded-deque-require-unbounded-configured-props-mail")) + intercept[ConfigurationException]( + system.actorOf(Props[StashQueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"), + "bounded-deque-require-unbounded-configured-props-mail")) } "fail with a bounded deque-based message queue if not configured with Props (mailbox)" in { - intercept[ConfigurationException](system.actorOf( - Props[StashQueueReportingActor], - "bounded-deque-require-unbounded-unconfigured-props-mail")) + intercept[ConfigurationException]( + system.actorOf(Props[StashQueueReportingActor], "bounded-deque-require-unbounded-unconfigured-props-mail")) } "get an unbounded message queue with a balancing dispatcher" in { - checkMailboxQueue( - Props[QueueReportingActor].withDispatcher("balancing-dispatcher"), - "unbounded-balancing", UnboundedMailboxTypes) + checkMailboxQueue(Props[QueueReportingActor].withDispatcher("balancing-dispatcher"), + "unbounded-balancing", + UnboundedMailboxTypes) } "get a bounded message queue with a balancing bounded dispatcher" in { - checkMailboxQueue( - Props[QueueReportingActor].withDispatcher("balancing-bounded-dispatcher"), - "bounded-balancing", BoundedMailboxTypes) + checkMailboxQueue(Props[QueueReportingActor].withDispatcher("balancing-bounded-dispatcher"), + "bounded-balancing", + BoundedMailboxTypes) } "get a bounded message queue with a requiring balancing bounded dispatcher" in { - checkMailboxQueue( - Props[QueueReportingActor].withDispatcher("requiring-balancing-bounded-dispatcher"), - 
"requiring-bounded-balancing", BoundedMailboxTypes) + checkMailboxQueue(Props[QueueReportingActor].withDispatcher("requiring-balancing-bounded-dispatcher"), + "requiring-bounded-balancing", + BoundedMailboxTypes) } } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorPathSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorPathSpec.scala index d317132533..31b391f863 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorPathSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorPathSpec.scala @@ -39,7 +39,8 @@ class ActorPathSpec extends WordSpec with Matchers { } "have correct path elements" in { - (RootActorPath(Address("akka.tcp", "mysys")) / "user" / "foo" / "bar").elements.toSeq should ===(Seq("user", "foo", "bar")) + (RootActorPath(Address("akka.tcp", "mysys")) / "user" / "foo" / "bar").elements.toSeq should ===( + Seq("user", "foo", "bar")) } "create correct toStringWithoutAddress" in { @@ -51,7 +52,8 @@ class ActorPathSpec extends WordSpec with Matchers { } "validate path elements" in { - intercept[InvalidActorNameException](ActorPath.validatePathElement("")).getMessage should include("must not be empty") + intercept[InvalidActorNameException](ActorPath.validatePathElement("")).getMessage should include( + "must not be empty") } "create correct toStringWithAddress" in { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala index 8eb2d7e9f1..2bc4dc1881 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala @@ -71,7 +71,7 @@ object ActorRefSpec { class OuterActor(val inner: ActorRef) extends Actor { def receive = { case "self" => sender() ! self - case x => inner forward x + case x => inner.forward(x) } } @@ -80,7 +80,7 @@ object ActorRefSpec { def receive = { case "self" => sender() ! 
self - case x => inner forward x + case x => inner.forward(x) } } @@ -112,15 +112,16 @@ object ActorRefSpec { class ActorRefSpec extends AkkaSpec with DefaultTimeout { import akka.actor.ActorRefSpec._ - def promiseIntercept(f: => Actor)(to: Promise[Actor]): Actor = try { - val r = f - to.success(r) - r - } catch { - case e: Throwable => - to.failure(e) - throw e - } + def promiseIntercept(f: => Actor)(to: Promise[Actor]): Actor = + try { + val r = f + to.success(r) + r + } catch { + case e: Throwable => + to.failure(e) + throw e + } def wrap[T](f: Promise[Actor] => T): T = { val result = Promise[Actor]() @@ -139,7 +140,7 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { def contextStackMustBeEmpty(): Unit = ActorCell.contextStack.get.headOption should ===(None) - EventFilter[ActorInitializationException](occurrences = 1) intercept { + EventFilter[ActorInitializationException](occurrences = 1).intercept { intercept[akka.actor.ActorInitializationException] { wrap(result => actorOf(Props(new Actor { @@ -151,25 +152,25 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { contextStackMustBeEmpty() } - EventFilter[ActorInitializationException](occurrences = 1) intercept { + EventFilter[ActorInitializationException](occurrences = 1).intercept { intercept[akka.actor.ActorInitializationException] { - wrap(result => - actorOf(Props(promiseIntercept(new FailingOuterActor(actorOf(Props(new InnerActor))))(result)))) + wrap( + result => actorOf(Props(promiseIntercept(new FailingOuterActor(actorOf(Props(new InnerActor))))(result)))) } contextStackMustBeEmpty() } - EventFilter[ActorInitializationException](occurrences = 1) intercept { + EventFilter[ActorInitializationException](occurrences = 1).intercept { intercept[akka.actor.ActorInitializationException] { - wrap(result => - actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept(new FailingInnerActor)(result))))))) + wrap( + result => actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept(new 
FailingInnerActor)(result))))))) } contextStackMustBeEmpty() } - EventFilter[ActorInitializationException](occurrences = 1) intercept { + EventFilter[ActorInitializationException](occurrences = 1).intercept { intercept[akka.actor.ActorInitializationException] { wrap(result => actorOf(Props(promiseIntercept(new FailingInheritingOuterActor(actorOf(Props(new InnerActor))))(result)))) @@ -178,34 +179,38 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { contextStackMustBeEmpty() } - EventFilter[ActorInitializationException](occurrences = 2) intercept { + EventFilter[ActorInitializationException](occurrences = 2).intercept { intercept[akka.actor.ActorInitializationException] { wrap(result => - actorOf(Props(new FailingOuterActor(actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) + actorOf( + Props(new FailingOuterActor(actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) } contextStackMustBeEmpty() } - EventFilter[ActorInitializationException](occurrences = 2) intercept { + EventFilter[ActorInitializationException](occurrences = 2).intercept { intercept[akka.actor.ActorInitializationException] { - wrap(result => - actorOf(Props(new FailingInheritingOuterActor(actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) + wrap( + result => + actorOf(Props(new FailingInheritingOuterActor( + actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) } contextStackMustBeEmpty() } - EventFilter[ActorInitializationException](occurrences = 2) intercept { + EventFilter[ActorInitializationException](occurrences = 2).intercept { intercept[akka.actor.ActorInitializationException] { wrap(result => - actorOf(Props(new FailingInheritingOuterActor(actorOf(Props(promiseIntercept(new FailingInnerActor)(result))))))) + actorOf( + Props(new FailingInheritingOuterActor(actorOf(Props(promiseIntercept(new FailingInnerActor)(result))))))) } contextStackMustBeEmpty() } - 
EventFilter[ActorInitializationException](occurrences = 1) intercept { + EventFilter[ActorInitializationException](occurrences = 1).intercept { intercept[akka.actor.ActorInitializationException] { wrap(result => actorOf(Props(new OuterActor(actorOf(Props(new InnerActor { @@ -216,16 +221,17 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { contextStackMustBeEmpty() } - EventFilter[ActorInitializationException](occurrences = 2) intercept { + EventFilter[ActorInitializationException](occurrences = 2).intercept { intercept[akka.actor.ActorInitializationException] { wrap(result => - actorOf(Props(new FailingOuterActor(actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) + actorOf( + Props(new FailingOuterActor(actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) } contextStackMustBeEmpty() } - EventFilter[ActorInitializationException](occurrences = 1) intercept { + EventFilter[ActorInitializationException](occurrences = 1).intercept { intercept[akka.actor.ActorInitializationException] { wrap(result => actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) @@ -234,19 +240,23 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { contextStackMustBeEmpty() } - EventFilter[ActorInitializationException](occurrences = 1) intercept { + EventFilter[ActorInitializationException](occurrences = 1).intercept { intercept[akka.actor.ActorInitializationException] { - wrap(result => - actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept({ new InnerActor; new InnerActor })(result))))))) + wrap( + result => + actorOf( + Props(new OuterActor(actorOf(Props(promiseIntercept({ new InnerActor; new InnerActor })(result))))))) } contextStackMustBeEmpty() } - EventFilter[ActorInitializationException](occurrences = 1) intercept { + EventFilter[ActorInitializationException](occurrences = 1).intercept { (intercept[java.lang.IllegalStateException] { wrap(result => - 
actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept({ throw new IllegalStateException("Ur state be b0rked"); new InnerActor })(result))))))) + actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept({ + throw new IllegalStateException("Ur state be b0rked"); new InnerActor + })(result))))))) }).getMessage should ===("Ur state be b0rked") contextStackMustBeEmpty() @@ -254,7 +264,7 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { } "insert its path in a ActorInitializationException" in { - EventFilter[ActorInitializationException](occurrences = 1, pattern = "/user/failingActor:") intercept { + EventFilter[ActorInitializationException](occurrences = 1, pattern = "/user/failingActor:").intercept { intercept[java.lang.IllegalStateException] { wrap(result => system.actorOf(Props(promiseIntercept({ @@ -312,7 +322,8 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { (intercept[java.lang.IllegalStateException] { in.readObject - }).getMessage should ===("Trying to deserialize a serialized ActorRef without an ActorSystem in scope." + + }).getMessage should ===( + "Trying to deserialize a serialized ActorRef without an ActorSystem in scope." + " Use 'akka.serialization.JavaSerializer.currentSystem.withValue(system) { ... 
}'") } @@ -394,7 +405,7 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { val a = system.actorOf(NonPublicClass.createProps()) a.tell("pigdog", testActor) expectMsg("pigdog") - system stop a + system.stop(a) } "stop when sent a poison pill" in { @@ -426,12 +437,11 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 2, withinTimeRange = 1 second)(List(classOf[Throwable])) - val ref = context.actorOf( - Props(new Actor { - def receive = { case _ => } - override def preRestart(reason: Throwable, msg: Option[Any]) = latch.countDown() - override def postRestart(reason: Throwable) = latch.countDown() - })) + val ref = context.actorOf(Props(new Actor { + def receive = { case _ => } + override def preRestart(reason: Throwable, msg: Option[Any]) = latch.countDown() + override def postRestart(reason: Throwable) = latch.countDown() + })) def receive = { case "sendKill" => ref ! Kill } })) @@ -444,10 +454,9 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { "be able to check for existence of children" in { val parent = system.actorOf(Props(new Actor { - val child = context.actorOf( - Props(new Actor { - def receive = { case _ => } - }), "child") + val child = context.actorOf(Props(new Actor { + def receive = { case _ => } + }), "child") def receive = { case name: String => sender() ! 
context.child(name).isDefined } }), "parent") diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala index 708401def1..1083b0d2a4 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSelectionSpec.scala @@ -66,9 +66,8 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { asked.correlationId should ===(selection) implicit val ec = system.dispatcher - val resolved = Await.result( - selection.resolveOne(timeout.duration).mapTo[ActorRef] recover { case _ => null }, - timeout.duration) + val resolved = + Await.result(selection.resolveOne(timeout.duration).mapTo[ActorRef].recover { case _ => null }, timeout.duration) Option(resolved) should ===(result) result @@ -93,11 +92,11 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { identify(system / "c1") should ===(Some(c1)) identify(system / "c2") should ===(Some(c2)) identify(system / "c2" / "c21") should ===(Some(c21)) - identify(system child "c2" child "c21") should ===(Some(c21)) // test Java API + identify(system.child("c2").child("c21")) should ===(Some(c21)) // test Java API identify(system / Seq("c2", "c21")) should ===(Some(c21)) import scala.collection.JavaConverters._ - identify(system descendant Seq("c2", "c21").asJava) // test Java API + identify(system.descendant(Seq("c2", "c21").asJava)) // test Java API } "select actors by their string path representation" in { @@ -218,8 +217,8 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { "select actors by their relative path" in { def check(looker: ActorRef, result: ActorRef, elems: String*): Unit = { - askNode(looker, SelectString(elems mkString "/")) should ===(Some(result)) - askNode(looker, SelectString(elems mkString ("", "/", "/"))) should ===(Some(result)) + askNode(looker, SelectString(elems.mkString("/"))) should ===(Some(result)) + 
askNode(looker, SelectString(elems.mkString("", "/", "/"))) should ===(Some(result)) } check(c1, user, "..") for { @@ -253,14 +252,12 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { } def check(looker: ActorRef): Unit = { val lookname = looker.path.elements.mkString("", "/", "/") - for ( - (l, r) <- Seq( - SelectString("a/b/c") -> None, - SelectString("akka://all-systems/Nobody") -> None, - SelectPath(system / "hallo") -> None, - SelectPath(looker.path child "hallo") -> None, // test Java API - SelectPath(looker.path descendant Seq("a", "b").asJava) -> None) // test Java API - ) checkOne(looker, l, r) + for ((l, r) <- Seq(SelectString("a/b/c") -> None, + SelectString("akka://all-systems/Nobody") -> None, + SelectPath(system / "hallo") -> None, + SelectPath(looker.path.child("hallo")) -> None, // test Java API + SelectPath(looker.path.descendant(Seq("a", "b").asJava)) -> None) // test Java API + ) checkOne(looker, l, r) } for (looker <- all) check(looker) } @@ -291,8 +288,8 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { implicit val sender = c1 ActorSelection(c21, "../../*") ! 
GetSender(testActor) val actors = Set() ++ receiveWhile(messages = 2) { - case `c1` => lastSender - } + case `c1` => lastSender + } actors should ===(Set(c1, c2)) expectNoMsg(1 second) } @@ -341,7 +338,8 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { "have a stringly serializable path" in { system.actorSelection(system / "c2").toSerializationFormat should ===("akka://ActorSelectionSpec/user/c2") - system.actorSelection(system / "c2" / "c21").toSerializationFormat should ===("akka://ActorSelectionSpec/user/c2/c21") + system.actorSelection(system / "c2" / "c21").toSerializationFormat should ===( + "akka://ActorSelectionSpec/user/c2/c21") ActorSelection(c2, "/").toSerializationFormat should ===("akka://ActorSelectionSpec/user/c2") ActorSelection(c2, "../*/hello").toSerializationFormat should ===("akka://ActorSelectionSpec/user/c2/../*/hello") ActorSelection(c2, "/../*/hello").toSerializationFormat should ===("akka://ActorSelectionSpec/user/c2/../*/hello") @@ -368,7 +366,8 @@ class ActorSelectionSpec extends AkkaSpec with DefaultTimeout { val probe = TestProbe() system.actorSelection("/user/a/*").tell(Identify(1), probe.ref) - probe.receiveN(2).map { case ActorIdentity(1, r) => r }.toSet should ===(Set[Option[ActorRef]](Some(b1), Some(b2))) + probe.receiveN(2).map { case ActorIdentity(1, r) => r }.toSet should ===( + Set[Option[ActorRef]](Some(b1), Some(b2))) probe.expectNoMsg(200.millis) system.actorSelection("/user/a/b1/*").tell(Identify(2), probe.ref) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala index 99e4022f45..dded2d38cd 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala @@ -30,15 +30,15 @@ object ActorSystemSpec { case n: Int => master = sender() terminaters = Set() ++ (for (i <- 1 to n) yield { - val man = 
context.watch(context.system.actorOf(Props[Terminater])) - man ! "run" - man - }) + val man = context.watch(context.system.actorOf(Props[Terminater])) + man ! "run" + man + }) case Terminated(child) if terminaters contains child => terminaters -= child if (terminaters.isEmpty) { master ! "done" - context stop self + context.stop(self) } } @@ -46,7 +46,7 @@ object ActorSystemSpec { if (master ne null) { master ! "failed with " + cause + " while processing " + msg } - context stop self + context.stop(self) } } @@ -73,16 +73,18 @@ object ActorSystemSpec { } } - class SlowDispatcher(_config: Config, _prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(_config, _prerequisites) { - private val instance = new Dispatcher( - this, - config.getString("id"), - config.getInt("throughput"), - config.getNanosDuration("throughput-deadline-time"), - configureExecutor(), - config.getMillisDuration("shutdown-timeout")) { + class SlowDispatcher(_config: Config, _prerequisites: DispatcherPrerequisites) + extends MessageDispatcherConfigurator(_config, _prerequisites) { + private val instance = new Dispatcher(this, + config.getString("id"), + config.getInt("throughput"), + config.getNanosDuration("throughput-deadline-time"), + configureExecutor(), + config.getMillisDuration("shutdown-timeout")) { val doneIt = new Switch - override protected[akka] def registerForExecution(mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean = { + override protected[akka] def registerForExecution(mbox: Mailbox, + hasMessageHint: Boolean, + hasSystemMessageHint: Boolean): Boolean = { val ret = super.registerForExecution(mbox, hasMessageHint, hasSystemMessageHint) doneIt.switchOn { TestKit.awaitCond(mbox.actor.actor != null, 1.second) @@ -127,21 +129,19 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend "An ActorSystem" must { "use scala.concurrent.Future's InternalCallbackEC" in { - 
system.asInstanceOf[ActorSystemImpl].internalCallingThreadExecutionContext.getClass.getName should ===("scala.concurrent.Future$InternalCallbackExecutor$") + system.asInstanceOf[ActorSystemImpl].internalCallingThreadExecutionContext.getClass.getName should ===( + "scala.concurrent.Future$InternalCallbackExecutor$") } "reject invalid names" in { - for ( - n <- Seq( - "-hallowelt", - "_hallowelt", - "hallo*welt", - "hallo@welt", - "hallo#welt", - "hallo$welt", - "hallo%welt", - "hallo/welt") - ) intercept[IllegalArgumentException] { + for (n <- Seq("-hallowelt", + "_hallowelt", + "hallo*welt", + "hallo@welt", + "hallo#welt", + "hallo$welt", + "hallo%welt", + "hallo/welt")) intercept[IllegalArgumentException] { ActorSystem(n) } } @@ -151,35 +151,46 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend } "log dead letters" in { - val sys = ActorSystem("LogDeadLetters", ConfigFactory.parseString("akka.loglevel=INFO").withFallback(AkkaSpec.testConf)) + val sys = + ActorSystem("LogDeadLetters", ConfigFactory.parseString("akka.loglevel=INFO").withFallback(AkkaSpec.testConf)) try { val probe = TestProbe()(sys) val a = sys.actorOf(Props[ActorSystemSpec.Terminater]) probe.watch(a) a.tell("run", probe.ref) probe.expectTerminated(a) - EventFilter.info(pattern = """from Actor\[akka://LogDeadLetters/system/testProbe.*not delivered""", occurrences = 1).intercept { - EventFilter.warning(pattern = """received dead letter from Actor\[akka://LogDeadLetters/system/testProbe""", occurrences = 1).intercept { - a.tell("boom", probe.ref) + EventFilter + .info(pattern = """from Actor\[akka://LogDeadLetters/system/testProbe.*not delivered""", occurrences = 1) + .intercept { + EventFilter + .warning(pattern = """received dead letter from Actor\[akka://LogDeadLetters/system/testProbe""", + occurrences = 1) + .intercept { + a.tell("boom", probe.ref) + }(sys) }(sys) - }(sys) } finally shutdown(sys) } "log dead letters sent without sender reference" in { - val sys = 
ActorSystem("LogDeadLetters", ConfigFactory.parseString("akka.loglevel=INFO").withFallback(AkkaSpec.testConf)) + val sys = + ActorSystem("LogDeadLetters", ConfigFactory.parseString("akka.loglevel=INFO").withFallback(AkkaSpec.testConf)) try { val probe = TestProbe()(sys) val a = sys.actorOf(Props[ActorSystemSpec.Terminater]) probe.watch(a) a.tell("run", probe.ref) probe.expectTerminated(a) - EventFilter.info(pattern = "without sender.*not delivered", occurrences = 1).intercept { - EventFilter.warning(pattern = "received dead letter without sender", occurrences = 1).intercept { - a.tell("boom", ActorRef.noSender) + EventFilter + .info(pattern = "without sender.*not delivered", occurrences = 1) + .intercept { + EventFilter + .warning(pattern = "received dead letter without sender", occurrences = 1) + .intercept { + a.tell("boom", ActorRef.noSender) + }(sys) }(sys) - }(sys) } finally shutdown(sys) } @@ -193,7 +204,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend for (i <- 1 to count) { system2.registerOnTermination { Thread.sleep((i % 3).millis.dilated.toMillis) - result add i + result.add(i) latch.countDown() } } @@ -232,7 +243,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend terminated.actor should ===(system.provider.rootGuardian) terminated.addressTerminated should ===(true) terminated.existenceConfirmed should ===(true) - terminated should be theSameInstanceAs Await.result(f, 10 seconds) + (terminated should be).theSameInstanceAs(Await.result(f, 10 seconds)) } "throw RejectedExecutionException when shutdown" in { @@ -295,22 +306,25 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend } } - created filter (ref => !ref.isTerminated && !ref.asInstanceOf[ActorRefWithCell].underlying.isInstanceOf[UnstartedCell]) should ===(Seq.empty[ActorRef]) + created.filter(ref => + !ref.isTerminated && !ref.asInstanceOf[ActorRefWithCell].underlying.isInstanceOf[UnstartedCell]) 
should ===( + Seq.empty[ActorRef]) } "shut down when /user fails" in { implicit val system = ActorSystem("Stop", AkkaSpec.testConf) - EventFilter[ActorKilledException]() intercept { + EventFilter[ActorKilledException]().intercept { system.actorSelection("/user") ! Kill Await.ready(system.whenTerminated, Duration.Inf) } } "allow configuration of guardian supervisor strategy" in { - implicit val system = ActorSystem( - "Stop", - ConfigFactory.parseString("akka.actor.guardian-supervisor-strategy=akka.actor.StoppingSupervisorStrategy") - .withFallback(AkkaSpec.testConf)) + implicit val system = + ActorSystem("Stop", + ConfigFactory + .parseString("akka.actor.guardian-supervisor-strategy=akka.actor.StoppingSupervisorStrategy") + .withFallback(AkkaSpec.testConf)) val a = system.actorOf(Props(new Actor { def receive = { case "die" => throw new Exception("hello") @@ -318,7 +332,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend })) val probe = TestProbe() probe.watch(a) - EventFilter[Exception]("hello", occurrences = 1) intercept { + EventFilter[Exception]("hello", occurrences = 1).intercept { a ! "die" } val t = probe.expectMsg(Terminated(a)(existenceConfirmed = true, addressTerminated = false)) @@ -328,16 +342,17 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend } "shut down when /user escalates" in { - implicit val system = ActorSystem( - "Stop", - ConfigFactory.parseString("akka.actor.guardian-supervisor-strategy=\"akka.actor.ActorSystemSpec$Strategy\"") - .withFallback(AkkaSpec.testConf)) + implicit val system = + ActorSystem("Stop", + ConfigFactory + .parseString("akka.actor.guardian-supervisor-strategy=\"akka.actor.ActorSystemSpec$Strategy\"") + .withFallback(AkkaSpec.testConf)) val a = system.actorOf(Props(new Actor { def receive = { case "die" => throw new Exception("hello") } })) - EventFilter[Exception]("hello") intercept { + EventFilter[Exception]("hello").intercept { a ! 
"die" Await.ready(system.whenTerminated, Duration.Inf) } @@ -388,8 +403,12 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend } "not allow top-level actor creation with custom guardian" in { - val sys = new ActorSystemImpl("custom", ConfigFactory.defaultReference(), - getClass.getClassLoader, None, Some(Props.empty), ActorSystemSetup.empty) + val sys = new ActorSystemImpl("custom", + ConfigFactory.defaultReference(), + getClass.getClassLoader, + None, + Some(Props.empty), + ActorSystemSetup.empty) sys.start() try { intercept[UnsupportedOperationException] { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala index 35cedcc140..7647000061 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala @@ -40,11 +40,13 @@ object ActorWithBoundedStashSpec { def receive = { case msg: String if msg.startsWith("hello") => numStashed += 1 - try { stash(); sender() ! "ok" } catch { + try { + stash(); sender() ! "ok" + } catch { case _: StashOverflowException => if (numStashed == 21) { sender() ! "STASHOVERFLOW" - context stop self + context.stop(self) } else { sender() ! 
"Unexpected StashOverflowException: " + numStashed } @@ -82,7 +84,11 @@ object ActorWithBoundedStashSpec { """) } -class ActorWithBoundedStashSpec extends AkkaSpec(ActorWithBoundedStashSpec.testConf) with BeforeAndAfterEach with DefaultTimeout with ImplicitSender { +class ActorWithBoundedStashSpec + extends AkkaSpec(ActorWithBoundedStashSpec.testConf) + with BeforeAndAfterEach + with DefaultTimeout + with ImplicitSender { import ActorWithBoundedStashSpec._ override def atStartup: Unit = { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala index b0c54c1917..133e1de67d 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala @@ -142,8 +142,9 @@ class ActorWithStashSpec extends AkkaSpec(ActorWithStashSpec.testConf) with Defa } "process stashed messages after restart" in { - val boss = system.actorOf(Props(new Supervisor( - OneForOneStrategy(maxNrOfRetries = 2, withinTimeRange = 1 second)(List(classOf[Throwable]))))) + val boss = system.actorOf( + Props( + new Supervisor(OneForOneStrategy(maxNrOfRetries = 2, withinTimeRange = 1 second)(List(classOf[Throwable]))))) val restartLatch = new TestLatch val hasMsgLatch = new TestLatch @@ -196,7 +197,7 @@ class ActorWithStashSpec extends AkkaSpec(ActorWithStashSpec.testConf) with Defa testActor ! "restarted" } }) - EventFilter[RuntimeException]("dying", occurrences = 1) intercept { + EventFilter[RuntimeException]("dying", occurrences = 1).intercept { a ! 
"die" } expectMsg("restarted") diff --git a/akka-actor-tests/src/test/scala/akka/actor/Bench.scala b/akka-actor-tests/src/test/scala/akka/actor/Bench.scala index 60498b52af..a71ee31396 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/Bench.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/Bench.scala @@ -2,7 +2,7 @@ http://shootout.alioth.debian.org/ contributed by Julien Gaugaz inspired by the version contributed by Yura Taras and modified by Isaac Gouy -*/ + */ package akka.actor @@ -47,24 +47,27 @@ object Chameneos { } def complement(otherColour: Colour): Colour = colour match { - case RED => otherColour match { - case RED => RED - case YELLOW => BLUE - case BLUE => YELLOW - case FADED => FADED - } - case YELLOW => otherColour match { - case RED => BLUE - case YELLOW => YELLOW - case BLUE => RED - case FADED => FADED - } - case BLUE => otherColour match { - case RED => YELLOW - case YELLOW => RED - case BLUE => BLUE - case FADED => FADED - } + case RED => + otherColour match { + case RED => RED + case YELLOW => BLUE + case BLUE => YELLOW + case FADED => FADED + } + case YELLOW => + otherColour match { + case RED => BLUE + case YELLOW => YELLOW + case BLUE => RED + case FADED => FADED + } + case BLUE => + otherColour match { + case RED => YELLOW + case YELLOW => RED + case BLUE => BLUE + case FADED => FADED + } case FADED => FADED } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala index 8d1ab0f05d..1e5baa485a 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala @@ -35,7 +35,6 @@ object ConsistencySpec { var lastStep = -1L def receive = { case step: Long => - if (lastStep != (step - 1)) sender() ! 
"Test failed: Last step %s, this step %s".format(lastStep, step) diff --git a/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala index 347927ea10..ddd4e2f72b 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/CoordinatedShutdownSpec.scala @@ -19,8 +19,8 @@ import scala.collection.JavaConverters._ import scala.concurrent.Promise import java.util.concurrent.TimeoutException -class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( - """ +class CoordinatedShutdownSpec + extends AkkaSpec(ConfigFactory.parseString(""" akka.loglevel=INFO akka.loggers = ["akka.testkit.TestEventListener"] """)) { @@ -38,7 +38,8 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( phases.get(phase) match { case Some(Phase(dependsOn, _, _, _)) => dependsOn.foreach { depPhase => - withClue(s"phase [$phase] depends on [$depPhase] but was ordered before it in topological sort result $result") { + withClue( + s"phase [$phase] depends on [$depPhase] but was ordered before it in topological sort result $result") { i should be > result.indexOf(depPhase) } } @@ -55,38 +56,29 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( "sort phases in topological order" in { checkTopologicalSort(Map.empty) should ===(Nil) - checkTopologicalSort(Map( - "a" -> emptyPhase)) should ===(List("a")) + checkTopologicalSort(Map("a" -> emptyPhase)) should ===(List("a")) - checkTopologicalSort(Map( - "b" -> phase("a"))) should ===(List("a", "b")) + checkTopologicalSort(Map("b" -> phase("a"))) should ===(List("a", "b")) - val result1 = checkTopologicalSort(Map( - "c" -> phase("a"), "b" -> phase("a"))) + val result1 = checkTopologicalSort(Map("c" -> phase("a"), "b" -> phase("a"))) result1.head should ===("a") // b, c can be in any order result1.toSet should ===(Set("a", "b", 
"c")) - checkTopologicalSort(Map( - "b" -> phase("a"), "c" -> phase("b"))) should ===(List("a", "b", "c")) + checkTopologicalSort(Map("b" -> phase("a"), "c" -> phase("b"))) should ===(List("a", "b", "c")) - checkTopologicalSort(Map( - "b" -> phase("a"), "c" -> phase("a", "b"))) should ===(List("a", "b", "c")) + checkTopologicalSort(Map("b" -> phase("a"), "c" -> phase("a", "b"))) should ===(List("a", "b", "c")) - val result2 = checkTopologicalSort(Map( - "c" -> phase("a", "b"))) + val result2 = checkTopologicalSort(Map("c" -> phase("a", "b"))) result2.last should ===("c") // a, b can be in any order result2.toSet should ===(Set("a", "b", "c")) - checkTopologicalSort(Map( - "b" -> phase("a"), "c" -> phase("b"), "d" -> phase("b", "c"), - "e" -> phase("d"))) should ===( + checkTopologicalSort(Map("b" -> phase("a"), "c" -> phase("b"), "d" -> phase("b", "c"), "e" -> phase("d"))) should ===( List("a", "b", "c", "d", "e")) - val result3 = checkTopologicalSort(Map( - "a2" -> phase("a1"), "a3" -> phase("a2"), - "b2" -> phase("b1"), "b3" -> phase("b2"))) + val result3 = + checkTopologicalSort(Map("a2" -> phase("a1"), "a3" -> phase("a2"), "b2" -> phase("b1"), "b3" -> phase("b2"))) val (a, b) = result3.partition(_.charAt(0) == 'a') a should ===(List("a1", "a2", "a3")) b should ===(List("b1", "b2", "b3")) @@ -94,50 +86,44 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( "detect cycles in phases (non-DAG)" in { intercept[IllegalArgumentException] { - CoordinatedShutdown.topologicalSort(Map( - "a" -> phase("a"))) + CoordinatedShutdown.topologicalSort(Map("a" -> phase("a"))) } intercept[IllegalArgumentException] { - CoordinatedShutdown.topologicalSort(Map( - "b" -> phase("a"), "a" -> phase("b"))) + CoordinatedShutdown.topologicalSort(Map("b" -> phase("a"), "a" -> phase("b"))) } intercept[IllegalArgumentException] { - CoordinatedShutdown.topologicalSort(Map( - "c" -> phase("a"), "c" -> phase("b"), "b" -> phase("c"))) + 
CoordinatedShutdown.topologicalSort(Map("c" -> phase("a"), "c" -> phase("b"), "b" -> phase("c"))) } intercept[IllegalArgumentException] { - CoordinatedShutdown.topologicalSort(Map( - "d" -> phase("a"), "d" -> phase("c"), "c" -> phase("b"), "b" -> phase("d"))) + CoordinatedShutdown.topologicalSort( + Map("d" -> phase("a"), "d" -> phase("c"), "c" -> phase("b"), "b" -> phase("d"))) } } "have pre-defined phases from config" in { import CoordinatedShutdown._ - CoordinatedShutdown(system).orderedPhases should ===(List( - PhaseBeforeServiceUnbind, - PhaseServiceUnbind, - PhaseServiceRequestsDone, - PhaseServiceStop, - PhaseBeforeClusterShutdown, - PhaseClusterShardingShutdownRegion, - PhaseClusterLeave, - PhaseClusterExiting, - PhaseClusterExitingDone, - PhaseClusterShutdown, - PhaseBeforeActorSystemTerminate, - PhaseActorSystemTerminate)) + CoordinatedShutdown(system).orderedPhases should ===( + List(PhaseBeforeServiceUnbind, + PhaseServiceUnbind, + PhaseServiceRequestsDone, + PhaseServiceStop, + PhaseBeforeClusterShutdown, + PhaseClusterShardingShutdownRegion, + PhaseClusterLeave, + PhaseClusterExiting, + PhaseClusterExitingDone, + PhaseClusterShutdown, + PhaseBeforeActorSystemTerminate, + PhaseActorSystemTerminate)) } "run ordered phases" in { import system.dispatcher - val phases = Map( - "a" -> emptyPhase, - "b" -> phase("a"), - "c" -> phase("b", "a")) + val phases = Map("a" -> emptyPhase, "b" -> phase("a"), "c" -> phase("b", "a")) val co = new CoordinatedShutdown(extSys, phases) co.addTask("a", "a1") { () => testActor ! "A" @@ -164,10 +150,7 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( } "run from a given phase" in { - val phases = Map( - "a" -> emptyPhase, - "b" -> phase("a"), - "c" -> phase("b", "a")) + val phases = Map("a" -> emptyPhase, "b" -> phase("a"), "c" -> phase("b", "a")) val co = new CoordinatedShutdown(extSys, phases) co.addTask("a", "a1") { () => testActor ! 
"A" @@ -205,10 +188,9 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( "continue after timeout or failure" in { import system.dispatcher - val phases = Map( - "a" -> emptyPhase, - "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = true, enabled = true), - "c" -> phase("b", "a")) + val phases = Map("a" -> emptyPhase, + "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = true, enabled = true), + "c" -> phase("b", "a")) val co = new CoordinatedShutdown(extSys, phases) co.addTask("a", "a1") { () => testActor ! "A" @@ -231,9 +213,11 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( Future.successful(Done) } EventFilter.warning(message = "Task [a1] failed in phase [a]: boom", occurrences = 1).intercept { - EventFilter.warning(message = "Coordinated shutdown phase [b] timed out after 100 milliseconds", occurrences = 1).intercept { - Await.result(co.run(UnknownReason), remainingOrDefault) - } + EventFilter + .warning(message = "Coordinated shutdown phase [b] timed out after 100 milliseconds", occurrences = 1) + .intercept { + Await.result(co.run(UnknownReason), remainingOrDefault) + } } expectMsg("A") expectMsg("A") @@ -242,10 +226,9 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( } "abort if recover=off" in { - val phases = Map( - "a" -> emptyPhase, - "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = false, enabled = true), - "c" -> phase("b", "a")) + val phases = Map("a" -> emptyPhase, + "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = false, enabled = true), + "c" -> phase("b", "a")) val co = new CoordinatedShutdown(extSys, phases) co.addTask("b", "b1") { () => testActor ! 
"B" @@ -264,10 +247,9 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( } "skip tasks in disabled phase" in { - val phases = Map( - "a" -> emptyPhase, - "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = false, enabled = false), - "c" -> phase("b", "a")) + val phases = Map("a" -> emptyPhase, + "b" -> Phase(dependsOn = Set("a"), timeout = 100.millis, recover = false, enabled = false), + "c" -> phase("b", "a")) val co = new CoordinatedShutdown(extSys, phases) co.addTask("b", "b1") { () => testActor ! "B" @@ -285,9 +267,7 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( } "be possible to add tasks in later phase from task in earlier phase" in { - val phases = Map( - "a" -> emptyPhase, - "b" -> phase("a")) + val phases = Map("a" -> emptyPhase, "b" -> phase("a")) val co = new CoordinatedShutdown(extSys, phases) co.addTask("a", "a1") { () => testActor ! "A" @@ -303,8 +283,7 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( } "parse phases from config" in { - CoordinatedShutdown.phasesFromConfig(ConfigFactory.parseString( - """ + CoordinatedShutdown.phasesFromConfig(ConfigFactory.parseString(""" default-phase-timeout = 10s phases { a = {} @@ -317,10 +296,10 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( recover = off } } - """)) should ===(Map( - "a" -> Phase(dependsOn = Set.empty, timeout = 10.seconds, recover = true, enabled = true), - "b" -> Phase(dependsOn = Set("a"), timeout = 15.seconds, recover = true, enabled = true), - "c" -> Phase(dependsOn = Set("a", "b"), timeout = 10.seconds, recover = false, enabled = true))) + """)) should ===( + Map("a" -> Phase(dependsOn = Set.empty, timeout = 10.seconds, recover = true, enabled = true), + "b" -> Phase(dependsOn = Set("a"), timeout = 15.seconds, recover = true, enabled = true), + "c" -> Phase(dependsOn = Set("a", "b"), timeout = 10.seconds, recover = false, enabled = true))) } "default exit code 
to 0" in { @@ -331,7 +310,8 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( "default exit code to -1 when the Reason is ClusterDowning" in { lazy val conf = ConfigFactory.load().getConfig("akka.coordinated-shutdown") - val confWithOverrides = CoordinatedShutdown.confWithOverrides(conf, Some(CoordinatedShutdown.ClusterDowningReason)) + val confWithOverrides = + CoordinatedShutdown.confWithOverrides(conf, Some(CoordinatedShutdown.ClusterDowningReason)) confWithOverrides.getInt("exit-code") should ===(-1) } @@ -344,16 +324,14 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( "add and remove user JVM hooks with run-by-jvm-shutdown-hook = off, terminate-actor-system = off" in new JvmHookTest { lazy val systemName = s"CoordinatedShutdownSpec-JvmHooks-1-${System.currentTimeMillis()}" - lazy val systemConfig = ConfigFactory.parseString( - """ + lazy val systemConfig = ConfigFactory.parseString(""" akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off akka.coordinated-shutdown.terminate-actor-system = off """) override def withSystemRunning(newSystem: ActorSystem): Unit = { - val cancellable = CoordinatedShutdown(newSystem).addCancellableJvmShutdownHook( - println(s"User JVM hook from ${newSystem.name}") - ) + val cancellable = + CoordinatedShutdown(newSystem).addCancellableJvmShutdownHook(println(s"User JVM hook from ${newSystem.name}")) myHooksCount should ===(1) // one user, none from system cancellable.cancel() } @@ -361,16 +339,14 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( "add and remove user JVM hooks with run-by-jvm-shutdown-hook = on, terminate-actor-system = off" in new JvmHookTest { lazy val systemName = s"CoordinatedShutdownSpec-JvmHooks-2-${System.currentTimeMillis()}" - lazy val systemConfig = ConfigFactory.parseString( - """ + lazy val systemConfig = ConfigFactory.parseString(""" akka.coordinated-shutdown.run-by-jvm-shutdown-hook = on 
akka.coordinated-shutdown.terminate-actor-system = off """) override def withSystemRunning(newSystem: ActorSystem): Unit = { - val cancellable = CoordinatedShutdown(newSystem).addCancellableJvmShutdownHook( - println(s"User JVM hook from ${newSystem.name}") - ) + val cancellable = + CoordinatedShutdown(newSystem).addCancellableJvmShutdownHook(println(s"User JVM hook from ${newSystem.name}")) myHooksCount should ===(2) // one user, one from system cancellable.cancel() @@ -379,16 +355,14 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( "add and remove user JVM hooks with run-by-jvm-shutdown-hook = on, terminate-actor-system = on" in new JvmHookTest { lazy val systemName = s"CoordinatedShutdownSpec-JvmHooks-3-${System.currentTimeMillis()}" - lazy val systemConfig = ConfigFactory.parseString( - """ + lazy val systemConfig = ConfigFactory.parseString(""" akka.coordinated-shutdown.run-by-jvm-shutdown-hook = on akka.coordinated-shutdown.terminate-actor-system = on """) def withSystemRunning(newSystem: ActorSystem): Unit = { - val cancellable = CoordinatedShutdown(newSystem).addCancellableJvmShutdownHook( - println(s"User JVM hook from ${newSystem.name}") - ) + val cancellable = + CoordinatedShutdown(newSystem).addCancellableJvmShutdownHook(println(s"User JVM hook from ${newSystem.name}")) myHooksCount should ===(2) // one user, one from actor system cancellable.cancel() } @@ -396,16 +370,14 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( "add and remove user JVM hooks with run-by-jvm-shutdown-hook = on, akka.jvm-shutdown-hooks = off" in new JvmHookTest { lazy val systemName = s"CoordinatedShutdownSpec-JvmHooks-4-${System.currentTimeMillis()}" - lazy val systemConfig = ConfigFactory.parseString( - """ + lazy val systemConfig = ConfigFactory.parseString(""" akka.jvm-shutdown-hooks = off akka.coordinated-shutdown.run-by-jvm-shutdown-hook = on """) def withSystemRunning(newSystem: ActorSystem): Unit = { - val 
cancellable = CoordinatedShutdown(newSystem).addCancellableJvmShutdownHook( - println(s"User JVM hook from ${newSystem.name}") - ) + val cancellable = + CoordinatedShutdown(newSystem).addCancellableJvmShutdownHook(println(s"User JVM hook from ${newSystem.name}")) myHooksCount should ===(1) // one user, none from actor system cancellable.cancel() } @@ -413,8 +385,7 @@ class CoordinatedShutdownSpec extends AkkaSpec(ConfigFactory.parseString( "access extension after system termination" in new JvmHookTest { lazy val systemName = s"CoordinatedShutdownSpec-terminated-${System.currentTimeMillis()}" - lazy val systemConfig = ConfigFactory.parseString( - """ + lazy val systemConfig = ConfigFactory.parseString(""" akka.coordinated-shutdown.run-by-jvm-shutdown-hook = on akka.coordinated-shutdown.terminate-actor-system = on """) diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeadLetterSupressionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeadLetterSupressionSpec.scala index 388aee1530..3cbf6133c8 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeadLetterSupressionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeadLetterSupressionSpec.scala @@ -78,4 +78,3 @@ class DeadLetterSupressionSpec extends AkkaSpec with ImplicitSender { allListener.expectNoMsg(Duration.Zero) } } - diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala index bbb688c844..135a4a78f1 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala @@ -20,8 +20,8 @@ object DeathWatchSpec { class Watcher(target: ActorRef, testActor: ActorRef) extends Actor { context.watch(target) def receive = { - case t: Terminated => testActor forward WrappedTerminated(t) - case x => testActor forward x + case t: Terminated => testActor.forward(WrappedTerminated(t)) + case x => testActor.forward(x) } } @@ -36,9 
+36,11 @@ object DeathWatchSpec { class NKOTBWatcher(testActor: ActorRef) extends Actor { def receive = { case "NKOTB" => - val currentKid = context.watch(context.actorOf(Props(new Actor { def receive = { case "NKOTB" => context stop self } }), "kid")) - currentKid forward "NKOTB" - context become { + val currentKid = context.watch(context.actorOf(Props(new Actor { + def receive = { case "NKOTB" => context.stop(self) } + }), "kid")) + currentKid.forward("NKOTB") + context.become { case Terminated(`currentKid`) => testActor ! "GREEN" context unbecome @@ -48,8 +50,8 @@ object DeathWatchSpec { class WUWatcher extends Actor { def receive = { - case W(ref) => context watch ref - case U(ref) => context unwatch ref + case W(ref) => context.watch(ref) + case U(ref) => context.unwatch(ref) case Latches(t1: TestLatch, t2: TestLatch) => t1.countDown() Await.ready(t2, 3.seconds) @@ -78,9 +80,10 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout => def startWatching(target: ActorRef) = Await.result((supervisor ? 
props(target, testActor)).mapTo[ActorRef], 3 seconds) "The Death Watch" must { - def expectTerminationOf(actorRef: ActorRef) = expectMsgPF(5 seconds, actorRef + ": Stopped or Already terminated when linking") { - case WrappedTerminated(Terminated(`actorRef`)) => true - } + def expectTerminationOf(actorRef: ActorRef) = + expectMsgPF(5 seconds, actorRef + ": Stopped or Already terminated when linking") { + case WrappedTerminated(Terminated(`actorRef`)) => true + } "notify with one Terminated message when an Actor is stopped" in { val terminal = system.actorOf(Props.empty) @@ -144,8 +147,8 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout => "notify with a Terminated message once when an Actor is stopped but not when restarted" in { filterException[ActorKilledException] { - val supervisor = system.actorOf(Props(new Supervisor( - OneForOneStrategy(maxNrOfRetries = 2)(List(classOf[Exception]))))) + val supervisor = + system.actorOf(Props(new Supervisor(OneForOneStrategy(maxNrOfRetries = 2)(List(classOf[Exception]))))) val terminalProps = TestActors.echoActorProps val terminal = Await.result((supervisor ? 
terminalProps).mapTo[ActorRef], timeout.duration) @@ -166,7 +169,11 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout => "fail a monitor which does not handle Terminated()" in { filterEvents(EventFilter[ActorKilledException](), EventFilter[DeathPactException]()) { val strategy = new OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider) { - override def handleFailure(context: ActorContext, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]) = { + override def handleFailure(context: ActorContext, + child: ActorRef, + cause: Throwable, + stats: ChildRestartStats, + children: Iterable[ChildRestartStats]) = { testActor.tell(FF(Failed(child, cause, 0)), child) super.handleFailure(context, child, cause, stats, children) } @@ -174,7 +181,8 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout => val supervisor = system.actorOf(Props(new Supervisor(strategy)).withDeploy(Deploy.local)) val failed = Await.result((supervisor ? Props.empty).mapTo[ActorRef], timeout.duration) - val brother = Await.result((supervisor ? Props(classOf[EmptyWatcher], failed)).mapTo[ActorRef], timeout.duration) + val brother = + Await.result((supervisor ? Props(classOf[EmptyWatcher], failed)).mapTo[ActorRef], timeout.duration) startWatching(brother) @@ -201,7 +209,8 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout => "only notify when watching" in { val subject = system.actorOf(Props[EmptyActor]()) - testActor.asInstanceOf[InternalActorRef] + testActor + .asInstanceOf[InternalActorRef] .sendSystemMessage(DeathWatchNotification(subject, existenceConfirmed = true, addressTerminated = false)) // the testActor is not watching subject and will not receive a Terminated msg @@ -216,7 +225,7 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout => w ! 
Latches(t1, t2) Await.ready(t1, 3.seconds) watch(p.ref) - system stop p.ref + system.stop(p.ref) expectTerminated(p.ref) w ! U(p.ref) t2.countDown() diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala index 4d853b89e2..c8c8233f11 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala @@ -67,7 +67,8 @@ object DeployerSpec { router = round-robin-pool } } - """, ConfigParseOptions.defaults) + """, + ConfigParseOptions.defaults) class RecipeActor extends Actor { def receive = { case _ => } @@ -82,14 +83,14 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { val service = "/service1" val deployment = system.asInstanceOf[ExtendedActorSystem].provider.deployer.lookup(service.split("/").drop(1)) - deployment should ===(Some( - Deploy( - service, - deployment.get.config, - NoRouter, - NoScopeGiven, - Deploy.NoDispatcherGiven, - Deploy.NoMailboxGiven))) + deployment should ===( + Some( + Deploy(service, + deployment.get.config, + NoRouter, + NoScopeGiven, + Deploy.NoDispatcherGiven, + Deploy.NoMailboxGiven))) } "use None deployment for undefined service" in { @@ -102,40 +103,43 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { val service = "/service3" val deployment = system.asInstanceOf[ExtendedActorSystem].provider.deployer.lookup(service.split("/").drop(1)) - deployment should ===(Some( - Deploy( - service, - deployment.get.config, - NoRouter, - NoScopeGiven, - dispatcher = "my-dispatcher", - Deploy.NoMailboxGiven))) + deployment should ===( + Some( + Deploy(service, + deployment.get.config, + NoRouter, + NoScopeGiven, + dispatcher = "my-dispatcher", + Deploy.NoMailboxGiven))) } "be able to parse 'akka.actor.deployment._' with mailbox config" in { val service = "/service4" val deployment = 
system.asInstanceOf[ExtendedActorSystem].provider.deployer.lookup(service.split("/").drop(1)) - deployment should ===(Some( - Deploy( - service, - deployment.get.config, - NoRouter, - NoScopeGiven, - Deploy.NoDispatcherGiven, - mailbox = "my-mailbox"))) + deployment should ===( + Some( + Deploy(service, + deployment.get.config, + NoRouter, + NoScopeGiven, + Deploy.NoDispatcherGiven, + mailbox = "my-mailbox"))) } "detect invalid number-of-instances" in { intercept[com.typesafe.config.ConfigException.WrongType] { - val invalidDeployerConf = ConfigFactory.parseString(""" + val invalidDeployerConf = ConfigFactory + .parseString(""" akka.actor.deployment { /service-invalid-number-of-instances { router = round-robin-pool nr-of-instances = boom } } - """, ConfigParseOptions.defaults).withFallback(AkkaSpec.testConf) + """, + ConfigParseOptions.defaults) + .withFallback(AkkaSpec.testConf) shutdown(ActorSystem("invalid-number-of-instances", invalidDeployerConf)) } @@ -143,14 +147,17 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { "detect invalid deployment path" in { val e = intercept[InvalidActorNameException] { - val invalidDeployerConf = ConfigFactory.parseString(""" + val invalidDeployerConf = ConfigFactory + .parseString(""" akka.actor.deployment { /gul/ubåt { router = round-robin-pool nr-of-instances = 2 } } - """, ConfigParseOptions.defaults).withFallback(AkkaSpec.testConf) + """, + ConfigParseOptions.defaults) + .withFallback(AkkaSpec.testConf) shutdown(ActorSystem("invalid-path", invalidDeployerConf)) } @@ -175,7 +182,9 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { } "be able to parse 'akka.actor.deployment._' with scatter-gather router" in { - assertRouting("/service-scatter-gather", ScatterGatherFirstCompletedPool(nrOfInstances = 1, within = 2 seconds), "/service-scatter-gather") + assertRouting("/service-scatter-gather", + ScatterGatherFirstCompletedPool(nrOfInstances = 1, within = 2 seconds), + "/service-scatter-gather") 
} "be able to parse 'akka.actor.deployment._' with consistent-hashing router" in { @@ -189,7 +198,9 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { "be able to use wildcards" in { assertRouting("/some/wildcardmatch", RandomPool(1), "/some/*") - assertRouting("/somewildcardmatch/some", ScatterGatherFirstCompletedPool(nrOfInstances = 1, within = 2 seconds), "/*/some") + assertRouting("/somewildcardmatch/some", + ScatterGatherFirstCompletedPool(nrOfInstances = 1, within = 2 seconds), + "/*/some") } "be able to use double wildcards" in { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala index 381ff0aa7d..8a1009fcf6 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ExtensionSpec.scala @@ -84,8 +84,8 @@ class ExtensionSpec extends WordSpec with Matchers { "fail the actor system if an extension listed in akka.extensions fails to start" in { intercept[RuntimeException] { - val system = ActorSystem("failing", ConfigFactory.parseString( - """ + val system = ActorSystem("failing", + ConfigFactory.parseString(""" akka.extensions = ["akka.actor.FailingTestExtension"] """)) @@ -94,8 +94,8 @@ class ExtensionSpec extends WordSpec with Matchers { } "log an error if an extension listed in akka.extensions cannot be loaded" in { - val system = ActorSystem("failing", ConfigFactory.parseString( - """ + val system = ActorSystem("failing", + ConfigFactory.parseString(""" akka.extensions = ["akka.actor.MissingExtension"] """)) EventFilter.error("While trying to load extension [akka.actor.MissingExtension], skipping.").intercept(())(system) @@ -114,8 +114,8 @@ class ExtensionSpec extends WordSpec with Matchers { "fail the actor system if a library-extension fails to start" in { intercept[FailingTestExtension.TestException] { - ActorSystem("failing", ConfigFactory.parseString( - """ + ActorSystem("failing", + 
ConfigFactory.parseString(""" akka.library-extensions += "akka.actor.FailingTestExtension" """).withFallback(ConfigFactory.load()).resolve()) } @@ -124,8 +124,8 @@ class ExtensionSpec extends WordSpec with Matchers { "fail the actor system if a library-extension cannot be loaded" in { intercept[RuntimeException] { - ActorSystem("failing", ConfigFactory.parseString( - """ + ActorSystem("failing", + ConfigFactory.parseString(""" akka.library-extensions += "akka.actor.MissingExtension" """).withFallback(ConfigFactory.load())) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala index 1a04d81a99..2c73ea6782 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala @@ -42,17 +42,17 @@ object FSMActorSpec { case Event(digit: Char, CodeState(soFar, code)) => { soFar + digit match { case incomplete if incomplete.length < code.length => - stay using CodeState(incomplete, code) + stay.using(CodeState(incomplete, code)) case codeTry if (codeTry == code) => { doUnlock() - goto(Open) using CodeState("", code) forMax timeout + goto(Open).using(CodeState("", code)).forMax(timeout) } case wrong => { - stay using CodeState("", code) + stay.using(CodeState("", code)) } } } - case Event("hello", _) => stay replying "world" + case Event("hello", _) => stay.replying("world") case Event("bye", _) => stop(FSM.Shutdown) } @@ -109,7 +109,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im "unlock the lock" in { - import FSM.{ Transition, CurrentState, SubscribeTransitionCallBack } + import FSM.{ CurrentState, SubscribeTransitionCallBack, Transition } val latches = new Latches import latches._ @@ -138,7 +138,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im Await.ready(transitionCallBackLatch, timeout.duration) Await.ready(lockedLatch, 
timeout.duration) - EventFilter.warning(start = "unhandled event", occurrences = 1) intercept { + EventFilter.warning(start = "unhandled event", occurrences = 1).intercept { lock ! "not_handled" Await.ready(unhandledLatch, timeout.duration) } @@ -166,7 +166,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im } }) val name = fsm.path.toString - EventFilter.error("Next state 2 does not exist", occurrences = 1) intercept { + EventFilter.error("Next state 2 does not exist", occurrences = 1).intercept { system.eventStream.subscribe(testActor, classOf[Logging.Error]) fsm ! "go" expectMsgPF(1 second, hint = "Next state 2 does not exist") { @@ -218,7 +218,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im lazy val fsmref = TestFSMRef(new Actor with FSM[String, Null] { startWith("not-started", null) when("not-started") { - case Event("start", _) => goto("started") replying "starting" + case Event("start", _) => goto("started").replying("starting") } when("started", stateTimeout = 10 seconds) { case Event("stop", _) => stop() @@ -252,12 +252,15 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im "log events and transitions if asked to do so" in { import scala.collection.JavaConverters._ - val config = ConfigFactory.parseMap(Map("akka.loglevel" -> "DEBUG", "akka.actor.serialize-messages" -> "off", - "akka.actor.debug.fsm" -> true).asJava).withFallback(system.settings.config) + val config = ConfigFactory + .parseMap(Map("akka.loglevel" -> "DEBUG", + "akka.actor.serialize-messages" -> "off", + "akka.actor.debug.fsm" -> true).asJava) + .withFallback(system.settings.config) val fsmEventSystem = ActorSystem("fsmEvent", config) try { new TestKit(fsmEventSystem) { - EventFilter.debug(occurrences = 5) intercept { + EventFilter.debug(occurrences = 5).intercept { val fsm = TestActorRef(new Actor with LoggingFSM[Int, Null] { startWith(1, null) when(1) { @@ -279,13 +282,17 @@ class 
FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im system.eventStream.subscribe(testActor, classOf[Logging.Debug]) fsm ! "go" expectMsgPF(1 second, hint = "processing Event(go,null)") { - case Logging.Debug(`name`, `fsmClass`, s: String) if s.startsWith("processing Event(go,null) from Actor[") => true + case Logging.Debug(`name`, `fsmClass`, s: String) + if s.startsWith("processing Event(go,null) from Actor[") => + true } expectMsg(1 second, Logging.Debug(name, fsmClass, "setting timer 't'/1500 milliseconds: Shutdown")) expectMsg(1 second, Logging.Debug(name, fsmClass, "transition 1 -> 2")) fsm ! "stop" expectMsgPF(1 second, hint = "processing Event(stop,null)") { - case Logging.Debug(`name`, `fsmClass`, s: String) if s.startsWith("processing Event(stop,null) from Actor[") => true + case Logging.Debug(`name`, `fsmClass`, s: String) + if s.startsWith("processing Event(stop,null) from Actor[") => + true } expectMsgAllOf(1 second, Logging.Debug(name, fsmClass, "canceling timer 't'"), FSM.Normal) expectNoMsg(1 second) @@ -302,8 +309,8 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im override def logDepth = 3 startWith(1, 0) when(1) { - case Event("count", c) => stay using (c + 1) - case Event("log", _) => stay replying getLog + case Event("count", c) => stay.using(c + 1) + case Event("log", _) => stay.replying(getLog) } }) fsmref ! "log" @@ -324,7 +331,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im startWith(0, 0) when(0)(transform { case Event("go", _) => stay - } using { + }.using { case x => goto(1) }) when(1) { @@ -354,7 +361,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im case Event(OverrideTimeoutToInf, _) => p.ref ! 
OverrideTimeoutToInf - stay() forMax Duration.Inf + stay().forMax(Duration.Inf) } initialize() diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala index 551eefb494..4acdd62676 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala @@ -109,7 +109,8 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender { expectMsg(500 millis, Tick) Thread.sleep(200) // this is ugly: need to wait for StateTimeout to be queued resume(fsm) - expectMsg(500 millis, Transition(fsm, TestCancelStateTimerInNamedTimerMessage, TestCancelStateTimerInNamedTimerMessage2)) + expectMsg(500 millis, + Transition(fsm, TestCancelStateTimerInNamedTimerMessage, TestCancelStateTimerInNamedTimerMessage2)) fsm ! Cancel within(500 millis) { expectMsg(Cancel) // if this is not received, that means StateTimeout was not properly discarded @@ -122,7 +123,7 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender { val seq = receiveWhile(2 seconds) { case Tick => Tick } - seq should have length 5 + (seq should have).length(5) within(500 millis) { expectMsg(Transition(fsm, TestRepeatedTimer, Initial)) } @@ -131,18 +132,20 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender { "notify unhandled messages" taggedAs TimingTest in { filterEvents( EventFilter.warning("unhandled event Tick in state TestUnhandled", source = fsm.path.toString, occurrences = 1), - EventFilter.warning("unhandled event Unhandled(test) in state TestUnhandled", source = fsm.path.toString, occurrences = 1)) { - fsm ! TestUnhandled - within(3 second) { - fsm ! Tick - fsm ! SetHandler - fsm ! Tick - expectMsg(Unhandled(Tick)) - fsm ! Unhandled("test") - fsm ! 
Cancel - expectMsg(Transition(fsm, TestUnhandled, Initial)) - } + EventFilter.warning("unhandled event Unhandled(test) in state TestUnhandled", + source = fsm.path.toString, + occurrences = 1)) { + fsm ! TestUnhandled + within(3 second) { + fsm ! Tick + fsm ! SetHandler + fsm ! Tick + expectMsg(Unhandled(Tick)) + fsm ! Unhandled("test") + fsm ! Cancel + expectMsg(Transition(fsm, TestUnhandled, Initial)) } + } } } @@ -194,14 +197,14 @@ object FSMTimingSpec { goto(TestSingleTimer) case Event(TestRepeatedTimer, _) => setTimer("tester", Tick, 100.millis.dilated, true) - goto(TestRepeatedTimer) using 4 + goto(TestRepeatedTimer).using(4) case Event(TestStateTimeoutOverride, _) => - goto(TestStateTimeout) forMax (Duration.Inf) + goto(TestStateTimeout).forMax(Duration.Inf) case Event(x: FSMTimingSpec.State, _) => goto(x) } when(TestStateTimeout, stateTimeout = 800.millis.dilated) { case Event(StateTimeout, _) => goto(Initial) - case Event(Cancel, _) => goto(Initial) replying (Cancel) + case Event(Cancel, _) => goto(Initial).replying(Cancel) } when(TestSingleTimer) { case Event(Tick, _) => @@ -242,7 +245,7 @@ object FSMTimingSpec { cancelTimer("tester") goto(Initial) } else { - stay using (remaining - 1) + stay.using(remaining - 1) } } when(TestCancelStateTimerInNamedTimerMessage) { @@ -251,7 +254,7 @@ object FSMTimingSpec { suspend(self) setTimer("named", Tock, 1.millis.dilated) TestKit.awaitCond(context.asInstanceOf[ActorCell].mailbox.hasMessages, 1.second.dilated) - stay forMax (1.millis.dilated) replying Tick + stay.forMax(1.millis.dilated).replying(Tick) case Event(Tock, _) => goto(TestCancelStateTimerInNamedTimerMessage2) } @@ -259,7 +262,7 @@ object FSMTimingSpec { case Event(StateTimeout, _) => goto(Initial) case Event(Cancel, _) => - goto(Initial) replying Cancel + goto(Initial).replying(Cancel) } when(TestUnhandled) { case Event(SetHandler, _) => @@ -286,4 +289,3 @@ object FSMTimingSpec { } } - diff --git 
a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala index fc951aa4f7..9b10f8f1e0 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala @@ -36,7 +36,7 @@ object FSMTransitionSpec { case Event("tick", _) => goto(0) } whenUnhandled { - case Event("reply", _) => stay replying "reply" + case Event("reply", _) => stay.replying("reply") } initialize() override def preRestart(reason: Throwable, msg: Option[Any]): Unit = { target ! "restarted" } @@ -45,7 +45,7 @@ object FSMTransitionSpec { class OtherFSM(target: ActorRef) extends Actor with FSM[Int, Int] { startWith(0, 0) when(0) { - case Event("tick", _) => goto(1) using 1 + case Event("tick", _) => goto(1).using(1) case Event("stay", _) => stay() } when(1) { @@ -150,7 +150,7 @@ class FSMTransitionSpec extends AkkaSpec with ImplicitSender { val fsmref = system.actorOf(Props(new Actor with FSM[Int, ActorRef] { startWith(0, null) when(0) { - case Event("switch", _) => goto(1) using sender() + case Event("switch", _) => goto(1).using(sender()) } onTransition { case x -> y => nextStateData ! (x -> y) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala index 71a64b4a1b..bbca84b49e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala @@ -18,9 +18,9 @@ object ForwardActorSpec { def receive = { case x => sender() ! 
x } })) - def mkforwarder(forwardTo: ActorRef) = system.actorOf(Props( - new Actor { - def receive = { case x => forwardTo forward x } + def mkforwarder(forwardTo: ActorRef) = + system.actorOf(Props(new Actor { + def receive = { case x => forwardTo.forward(x) } })) mkforwarder(mkforwarder(mkforwarder(replier))) @@ -33,7 +33,9 @@ class ForwardActorSpec extends AkkaSpec { "A Forward Actor" must { "forward actor reference when invoking forward on tell" in { - val replyTo = system.actorOf(Props(new Actor { def receive = { case ExpectedMessage => testActor ! ExpectedMessage } })) + val replyTo = system.actorOf(Props(new Actor { + def receive = { case ExpectedMessage => testActor ! ExpectedMessage } + })) val chain = createForwardingChain(system) @@ -43,7 +45,7 @@ class ForwardActorSpec extends AkkaSpec { "forward actor reference when invoking forward on ask" in { val chain = createForwardingChain(system) - chain.ask(ExpectedMessage)(5 seconds) pipeTo testActor + chain.ask(ExpectedMessage)(5 seconds).pipeTo(testActor) expectMsg(5 seconds, ExpectedMessage) } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/FunctionRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FunctionRefSpec.scala index f40c0f2f48..b7eca05d16 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FunctionRefSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FunctionRefSpec.scala @@ -94,7 +94,9 @@ class FunctionRefSpec extends AkkaSpec with ImplicitSender { "not be found" in { val provider = system.asInstanceOf[ExtendedActorSystem].provider val ref = new FunctionRef(testActor.path / "blabla", provider, system, (_, _) => ()) - EventFilter[SerializationCheckFailedException](start = "Failed to serialize and deserialize message of type akka.actor.FunctionRefSpec", occurrences = 1) intercept { + EventFilter[SerializationCheckFailedException]( + start = "Failed to serialize and deserialize message of type akka.actor.FunctionRefSpec", + occurrences = 1).intercept { // needs to be 
something that fails when the deserialized form is not a FunctionRef // this relies upon serialize-messages during tests testActor ! DropForwarder(ref) diff --git a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala index 9450985103..ce5d84f24c 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala @@ -7,9 +7,7 @@ package akka.actor import akka.testkit._ object HotSwapSpec { - abstract class Becomer extends Actor { - - } + abstract class Becomer extends Actor {} } class HotSwapSpec extends AkkaSpec with ImplicitSender { @@ -19,7 +17,7 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { "be able to become in its constructor" in { val a = system.actorOf(Props(new Becomer { context.become { case always => sender() ! always } - def receive = { case _ => sender() ! "FAILURE" } + def receive = { case _ => sender() ! "FAILURE" } })) a ! "pigdog" expectMsg("pigdog") @@ -37,7 +35,7 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { "be able to become with stacking in its constructor" in { val a = system.actorOf(Props(new Becomer { context.become({ case always => sender() ! "pigdog:" + always; context.unbecome() }, false) - def receive = { case always => sender() ! "badass:" + always } + def receive = { case always => sender() ! "badass:" + always } })) a ! "pigdog" expectMsg("pigdog:pigdog") @@ -79,10 +77,11 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { val a = system.actorOf(Props(new Actor { def receive = { case "init" => sender() ! "init" - case "swap" => context.become({ - case "swapped" => sender() ! "swapped" - case "revert" => context.unbecome() - }) + case "swap" => + context.become({ + case "swapped" => sender() ! "swapped" + case "revert" => context.unbecome() + }) } })) @@ -118,7 +117,7 @@ class HotSwapSpec extends AkkaSpec with ImplicitSender { expectMsg("swapped") a ! 
"state" expectMsg("1") - EventFilter[Exception](message = "Crash (expected)!", occurrences = 1) intercept { a ! "crash" } + EventFilter[Exception](message = "Crash (expected)!", occurrences = 1).intercept { a ! "crash" } a ! "state" expectMsg("0") } diff --git a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala index 82bf819cc7..7e545b01ea 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala @@ -108,14 +108,14 @@ class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.confi val child = expectMsgType[ActorRef] val childProps1 = child.asInstanceOf[LocalActorRef].underlying.props childProps1 should ===(Props.empty) - system stop a + system.stop(a) expectTerminated(a) // the fields are cleared after the Terminated message has been sent, // so we need to check for a reasonable time after we receive it awaitAssert({ val childProps2 = child.asInstanceOf[LocalActorRef].underlying.props childProps2 should not be theSameInstanceAs(childProps1) - childProps2 should be theSameInstanceAs ActorCell.terminatedProps + (childProps2 should be).theSameInstanceAs(ActorCell.terminatedProps) }, 1 second) } } @@ -131,12 +131,14 @@ class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.confi for (i <- 0 until 100) { val address = "new-actor" + i implicit val timeout = Timeout(5 seconds) - val actors = for (j <- 1 to 4) yield Future(system.actorOf(Props(new Actor { def receive = { case _ => } }), address)) - val set = Set() ++ actors.map(a => Await.ready(a, timeout.duration).value match { - case Some(Success(a: ActorRef)) => 1 - case Some(Failure(ex: InvalidActorNameException)) => 2 - case x => x - }) + val actors = for (j <- 1 to 4) + yield Future(system.actorOf(Props(new Actor { def receive = { case _ => } }), address)) + val set = 
Set() ++ actors.map(a => + Await.ready(a, timeout.duration).value match { + case Some(Success(a: ActorRef)) => 1 + case Some(Failure(ex: InvalidActorNameException)) => 2 + case x => x + }) set should ===(Set[Any](1, 2)) } } @@ -148,7 +150,7 @@ class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.confi val a, b = context.actorOf(Props.empty, "duplicate") } })) - EventFilter[InvalidActorNameException](occurrences = 1) intercept { + EventFilter[InvalidActorNameException](occurrences = 1).intercept { supervisor ! "" } } @@ -156,20 +158,33 @@ class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.confi "throw suitable exceptions for malformed actor names" in { intercept[InvalidActorNameException](system.actorOf(Props.empty, null)).getMessage should include("null") intercept[InvalidActorNameException](system.actorOf(Props.empty, "")).getMessage should include("empty") - intercept[InvalidActorNameException](system.actorOf(Props.empty, "$hallo")).getMessage should include("not start with `$`") - intercept[InvalidActorNameException](system.actorOf(Props.empty, "a%")).getMessage should include("Invalid actor path element") - intercept[InvalidActorNameException](system.actorOf(Props.empty, "%3")).getMessage should include("Invalid actor path element") - intercept[InvalidActorNameException](system.actorOf(Props.empty, "%xx")).getMessage should include("Invalid actor path element") - intercept[InvalidActorNameException](system.actorOf(Props.empty, "%0G")).getMessage should include("Invalid actor path element") - intercept[InvalidActorNameException](system.actorOf(Props.empty, "%gg")).getMessage should include("Invalid actor path element") - intercept[InvalidActorNameException](system.actorOf(Props.empty, "%")).getMessage should include("Invalid actor path element") - intercept[InvalidActorNameException](system.actorOf(Props.empty, "%1t")).getMessage should include("Invalid actor path element") - 
intercept[InvalidActorNameException](system.actorOf(Props.empty, "a?")).getMessage should include("Invalid actor path element") - intercept[InvalidActorNameException](system.actorOf(Props.empty, "üß")).getMessage should include("include only ASCII") + intercept[InvalidActorNameException](system.actorOf(Props.empty, "$hallo")).getMessage should include( + "not start with `$`") + intercept[InvalidActorNameException](system.actorOf(Props.empty, "a%")).getMessage should include( + "Invalid actor path element") + intercept[InvalidActorNameException](system.actorOf(Props.empty, "%3")).getMessage should include( + "Invalid actor path element") + intercept[InvalidActorNameException](system.actorOf(Props.empty, "%xx")).getMessage should include( + "Invalid actor path element") + intercept[InvalidActorNameException](system.actorOf(Props.empty, "%0G")).getMessage should include( + "Invalid actor path element") + intercept[InvalidActorNameException](system.actorOf(Props.empty, "%gg")).getMessage should include( + "Invalid actor path element") + intercept[InvalidActorNameException](system.actorOf(Props.empty, "%")).getMessage should include( + "Invalid actor path element") + intercept[InvalidActorNameException](system.actorOf(Props.empty, "%1t")).getMessage should include( + "Invalid actor path element") + intercept[InvalidActorNameException](system.actorOf(Props.empty, "a?")).getMessage should include( + "Invalid actor path element") + intercept[InvalidActorNameException](system.actorOf(Props.empty, "üß")).getMessage should include( + "include only ASCII") - intercept[InvalidActorNameException](system.actorOf(Props.empty, """he"llo""")).getMessage should include("""["] at position: 2""") - intercept[InvalidActorNameException](system.actorOf(Props.empty, """$hello""")).getMessage should include("""[$] at position: 0""") - intercept[InvalidActorNameException](system.actorOf(Props.empty, """hell>o""")).getMessage should include("""[>] at position: 4""") + 
intercept[InvalidActorNameException](system.actorOf(Props.empty, """he"llo""")).getMessage should include( + """["] at position: 2""") + intercept[InvalidActorNameException](system.actorOf(Props.empty, """$hello""")).getMessage should include( + """[$] at position: 0""") + intercept[InvalidActorNameException](system.actorOf(Props.empty, """hell>o""")).getMessage should include( + """[>] at position: 4""") } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala index dc3ee3c228..7c371f61dd 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala @@ -28,8 +28,9 @@ class RestartStrategySpec extends AkkaSpec("akka.actor.serialize-messages = off" "A RestartStrategy" must { "ensure that slave stays dead after max restarts within time range" in { - val boss = system.actorOf(Props(new Supervisor( - OneForOneStrategy(maxNrOfRetries = 2, withinTimeRange = 1 second)(List(classOf[Throwable]))))) + val boss = system.actorOf( + Props( + new Supervisor(OneForOneStrategy(maxNrOfRetries = 2, withinTimeRange = 1 second)(List(classOf[Throwable]))))) val restartLatch = new TestLatch val secondRestartLatch = new TestLatch @@ -91,14 +92,16 @@ class RestartStrategySpec extends AkkaSpec("akka.actor.serialize-messages = off" }) val slave = Await.result((boss ? slaveProps).mapTo[ActorRef], timeout.duration) - (1 to 100) foreach { _ => slave ! Crash } + (1 to 100).foreach { _ => + slave ! 
Crash + } Await.ready(countDownLatch, 2 minutes) assert(!slave.isTerminated) } "ensure that slave restarts after number of crashes not within time range" in { - val boss = system.actorOf(Props(new Supervisor( - OneForOneStrategy(maxNrOfRetries = 2, withinTimeRange = 500 millis)(List(classOf[Throwable]))))) + val boss = system.actorOf(Props( + new Supervisor(OneForOneStrategy(maxNrOfRetries = 2, withinTimeRange = 500 millis)(List(classOf[Throwable]))))) val restartLatch = new TestLatch val secondRestartLatch = new TestLatch @@ -261,4 +264,3 @@ class RestartStrategySpec extends AkkaSpec("akka.actor.serialize-messages = off" } } } - diff --git a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala index 16e96e8464..660e193648 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala @@ -7,8 +7,8 @@ package akka.actor import language.postfixOps import java.io.Closeable import java.util.concurrent._ -import atomic.{ AtomicReference, AtomicInteger } -import scala.concurrent.{ Future, Await, ExecutionContext } +import atomic.{ AtomicInteger, AtomicReference } +import scala.concurrent.{ Await, ExecutionContext, Future } import scala.concurrent.duration._ import java.util.concurrent.ThreadLocalRandom import scala.util.Try @@ -20,7 +20,8 @@ import akka.testkit._ import scala.util.control.NoStackTrace object SchedulerSpec { - val testConfRevolver = ConfigFactory.parseString(""" + val testConfRevolver = + ConfigFactory.parseString(""" akka.scheduler.implementation = akka.actor.LightArrayRevolverScheduler akka.scheduler.ticks-per-wheel = 32 akka.actor.serialize-messages = off @@ -74,7 +75,7 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit expectMsg("msg") // stop the actor and, hence, the continuous messaging from happening - system stop actor + system.stop(actor) expectNoMsg(500 
millis) } @@ -194,7 +195,7 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit collectCancellable(system.scheduler.schedule(500 milliseconds, 500 milliseconds, actor, Ping)) // appx 2 pings before crash - EventFilter[Exception]("CRASH", occurrences = 1) intercept { + EventFilter[Exception]("CRASH", occurrences = 1).intercept { collectCancellable(system.scheduler.scheduleOnce(1000 milliseconds, actor, Crash)) } @@ -292,11 +293,14 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit } } val latencies = within(10.seconds) { - for (i <- 1 to N) yield try expectMsgType[Long] catch { - case NonFatal(e) => throw new Exception(s"failed expecting the $i-th latency", e) - } + for (i <- 1 to N) + yield + try expectMsgType[Long] + catch { + case NonFatal(e) => throw new Exception(s"failed expecting the $i-th latency", e) + } } - val histogram = latencies groupBy (_ / 100000000L) + val histogram = latencies.groupBy(_ / 100000000L) for (k <- histogram.keys.toSeq.sorted) { system.log.info(f"${k * 100}%3d: ${histogram(k).size}") } @@ -362,11 +366,14 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev val cancelled = cancellations.sum println(cancelled) val latencies = within(10.seconds) { - for (i <- 1 to (N - cancelled)) yield try expectMsgType[Long] catch { - case NonFatal(e) => throw new Exception(s"failed expecting the $i-th latency", e) - } + for (i <- 1 to (N - cancelled)) + yield + try expectMsgType[Long] + catch { + case NonFatal(e) => throw new Exception(s"failed expecting the $i-th latency", e) + } } - val histogram = latencies groupBy (_ / 100000000L) + val histogram = latencies.groupBy(_ / 100000000L) for (k <- histogram.keys.toSeq.sorted) { system.log.info(f"${k * 100}%3d: ${histogram(k).size}") } @@ -389,7 +396,7 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev } def delay = if (ThreadLocalRandom.current.nextBoolean) step * 2 else step val 
N = 1000000 - (1 to N) foreach (_ => sched.scheduleOnce(delay)(counter.incrementAndGet())) + (1 to N).foreach(_ => sched.scheduleOnce(delay)(counter.incrementAndGet())) sched.close() Await.result(terminated, 3.seconds.dilated) should be > 10 awaitCond(counter.get == N) @@ -401,13 +408,13 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev implicit def ec = localEC import driver._ val start = step / 2 - (0 to 3) foreach (i => sched.scheduleOnce(start + step * i, testActor, "hello")) + (0 to 3).foreach(i => sched.scheduleOnce(start + step * i, testActor, "hello")) expectNoMsg(step) wakeUp(step) expectWait(step) wakeUp(step * 4 + step / 2) expectWait(step / 2) - (0 to 3) foreach (_ => expectMsg(Duration.Zero, "hello")) + (0 to 3).foreach(_ => expectMsg(Duration.Zero, "hello")) } } @@ -431,7 +438,7 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev implicit def ec = localEC import driver._ val start = step / 2 - (0 to 3) foreach (i => sched.scheduleOnce(start + step * i, probe.ref, "hello")) + (0 to 3).foreach(i => sched.scheduleOnce(start + step * i, probe.ref, "hello")) probe.expectNoMsg(step) wakeUp(step) expectWait(step) @@ -458,7 +465,7 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev implicit def ec = localEC import driver._ val start = step / 2 - (0 to 3) foreach (i => sched.scheduleOnce(start + step * i, testActor, "hello")) + (0 to 3).foreach(i => sched.scheduleOnce(start + step * i, testActor, "hello")) expectNoMsg(step) wakeUp(step) expectWait(step) @@ -494,12 +501,12 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev probe.expectMsgType[Long] val nums = 0 until numEvents - nums foreach (i => sched.scheduleOnce(start + step * i, testActor, "hello-" + i)) + nums.foreach(i => sched.scheduleOnce(start + step * i, testActor, "hello-" + i)) expectNoMsg(step) wakeUp(step) expectWait(step) - nums foreach { i => + nums.foreach { i => 
wakeUp(step) expectMsg("hello-" + i) expectWait(step) @@ -542,7 +549,8 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev def reportFailure(t: Throwable): Unit = { t.printStackTrace() } } - def withScheduler(start: Long = 0L, _startTick: Int = 0, config: Config = ConfigFactory.empty)(thunk: (Scheduler with Closeable, Driver) => Unit): Unit = { + def withScheduler(start: Long = 0L, _startTick: Int = 0, config: Config = ConfigFactory.empty)( + thunk: (Scheduler with Closeable, Driver) => Unit): Unit = { import akka.actor.{ LightArrayRevolverScheduler => LARS } val lbq = new AtomicReference[LinkedBlockingQueue[Long]](new LinkedBlockingQueue[Long]) val prb = TestProbe() @@ -560,9 +568,9 @@ class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRev // println(s"waiting $ns") prb.ref ! ns try time += (lbq.get match { - case q: LinkedBlockingQueue[Long] => q.take() - case _ => 0L - }) + case q: LinkedBlockingQueue[Long] => q.take() + case _ => 0L + }) catch { case _: InterruptedException => Thread.currentThread.interrupt() } diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala index e1234c8522..c8536bc1b5 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala @@ -5,18 +5,18 @@ package akka.actor import language.postfixOps -import java.util.concurrent.{ TimeUnit, CountDownLatch } +import java.util.concurrent.{ CountDownLatch, TimeUnit } import scala.concurrent.Await import scala.concurrent.duration._ import scala.util.Random import scala.util.control.NoStackTrace -import com.typesafe.config.{ ConfigFactory, Config } -import SupervisorStrategy.{ Resume, Restart, Stop, Directive } +import com.typesafe.config.{ Config, ConfigFactory } +import SupervisorStrategy.{ Directive, Restart, Resume, Stop } import 
akka.actor.SupervisorStrategy.seqThrowable2Decider -import akka.dispatch.{ MessageDispatcher, DispatcherPrerequisites, DispatcherConfigurator, Dispatcher } +import akka.dispatch.{ Dispatcher, DispatcherConfigurator, DispatcherPrerequisites, MessageDispatcher } import akka.pattern.ask -import akka.testkit.{ ImplicitSender, EventFilter, DefaultTimeout, AkkaSpec } -import akka.testkit.{ filterException, filterEvents, TestDuration, TestLatch } +import akka.testkit.{ AkkaSpec, DefaultTimeout, EventFilter, ImplicitSender } +import akka.testkit.{ filterEvents, filterException, TestDuration, TestLatch } import akka.testkit.TestEvent.Mute import java.util.concurrent.ConcurrentHashMap import java.lang.ref.WeakReference @@ -63,8 +63,15 @@ object SupervisorHierarchySpec { case object PongOfDeath final case class Event(msg: Any, identity: Long) { val time: Long = System.nanoTime } final case class ErrorLog(msg: String, log: Vector[Event]) - final case class Failure(directive: Directive, stop: Boolean, depth: Int, var failPre: Int, var failPost: Int, val failConstr: Int, stopKids: Int) - extends RuntimeException("Failure") with NoStackTrace { + final case class Failure(directive: Directive, + stop: Boolean, + depth: Int, + var failPre: Int, + var failPost: Int, + val failConstr: Int, + stopKids: Int) + extends RuntimeException("Failure") + with NoStackTrace { override def toString = productPrefix + productIterator.mkString("(", ",", ")") } final case class Dump(level: Int) @@ -79,16 +86,15 @@ object SupervisorHierarchySpec { """) class MyDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) - extends DispatcherConfigurator(config, prerequisites) { + extends DispatcherConfigurator(config, prerequisites) { private val instance: MessageDispatcher = - new Dispatcher( - this, - config.getString("id"), - config.getInt("throughput"), - config.getNanosDuration("throughput-deadline-time"), - configureExecutor(), - config.getMillisDuration("shutdown-timeout")) { + 
new Dispatcher(this, + config.getString("id"), + config.getInt("throughput"), + config.getNanosDuration("throughput-deadline-time"), + configureExecutor(), + config.getMillisDuration("shutdown-timeout")) { override def suspend(cell: ActorCell): Unit = { cell.actor match { @@ -139,7 +145,7 @@ object SupervisorHierarchySpec { listener ! ErrorLog(msg, log) log = Vector(Event("log sent", identityHashCode(this))) context.parent ! Abort - context stop self + context.stop(self) } def setFlags(directive: Directive): Unit = directive match { @@ -161,8 +167,11 @@ object SupervisorHierarchySpec { var rest = s % kids val propsTemplate = Props.empty.withDispatcher("hierarchy") (1 to kids).iterator.map { (id) => - val kidSize = if (rest > 0) { rest -= 1; sizes + 1 } else sizes - val props = Props(new Hierarchy(kidSize, breadth, listener, myLevel + 1, random)).withDeploy(propsTemplate.deploy) + val kidSize = if (rest > 0) { + rest -= 1; sizes + 1 + } else sizes + val props = + Props(new Hierarchy(kidSize, breadth, listener, myLevel + 1, random)).withDeploy(propsTemplate.deploy) (context.watch(context.actorOf(props, id.toString)).path, kidSize) }.toMap } else Map() @@ -178,7 +187,7 @@ object SupervisorHierarchySpec { preRestartCalled = true cause match { case f: Failure => - context.children.take(f.stopKids) foreach { child => + context.children.take(f.stopKids).foreach { child => log :+= Event("killing " + child, identityHashCode(this)) context.unwatch(child) context.stop(child) @@ -194,11 +203,11 @@ object SupervisorHierarchySpec { } val unwrap: PartialFunction[Throwable, (Throwable, Throwable)] = { - case x @ PostRestartException(_, f: Failure, _) => (f, x) + case x @ PostRestartException(_, f: Failure, _) => (f, x) case x @ ActorInitializationException(_, _, f: Failure) => (f, x) - case x => (x, x) + case x => (x, x) } - override val supervisorStrategy = OneForOneStrategy()(unwrap andThen { + override val supervisorStrategy = OneForOneStrategy()(unwrap.andThen { case (_: 
Failure, _) if pongsToGo > 0 => log :+= Event("pongOfDeath resuming " + sender(), identityHashCode(this)) Resume @@ -225,12 +234,13 @@ object SupervisorHierarchySpec { val state = stateCache.get(self.path) log = state.log log :+= Event("restarted " + suspendCount + " " + cause, identityHashCode(this)) - state.kids foreach { + state.kids.foreach { case (childPath, kidSize) => val name = childPath.name if (context.child(name).isEmpty) { listener ! Died(childPath) - val props = Props(new Hierarchy(kidSize, breadth, listener, myLevel + 1, random)).withDispatcher("hierarchy") + val props = + Props(new Hierarchy(kidSize, breadth, listener, myLevel + 1, random)).withDispatcher("hierarchy") context.watch(context.actorOf(props, name)) } } @@ -238,9 +248,9 @@ object SupervisorHierarchySpec { abort("invariant violated: " + state.kids.size + " != " + context.children.size) } cause match { - case f: Failure if f.failPost > 0 => { f.failPost -= 1; throw f } + case f: Failure if f.failPost > 0 => { f.failPost -= 1; throw f } case PostRestartException(`self`, f: Failure, _) if f.failPost > 0 => { f.failPost -= 1; throw f } - case _ => + case _ => } } @@ -265,7 +275,8 @@ object SupervisorHierarchySpec { abort("processing message while suspended") false } else if (!Thread.currentThread.getName.startsWith("SupervisorHierarchySpec-hierarchy")) { - abort("running on wrong thread " + Thread.currentThread + " dispatcher=" + context.props.dispatcher + "=>" + + abort( + "running on wrong thread " + Thread.currentThread + " dispatcher=" + context.props.dispatcher + "=>" + context.asInstanceOf[ActorCell].dispatcher.id) false } else true @@ -279,9 +290,9 @@ object SupervisorHierarchySpec { setFlags(f.directive) stateCache.put(self.path, stateCache.get(self.path).copy(failConstr = f.copy())) throw f - case "ping" => { Thread.sleep((random.nextFloat * 1.03).toLong); sender() ! "pong" } - case Dump(0) => abort("dump") - case Dump(level) => context.children foreach (_ ! 
Dump(level - 1)) + case "ping" => { Thread.sleep((random.nextFloat * 1.03).toLong); sender() ! "pong" } + case Dump(0) => abort("dump") + case Dump(level) => context.children.foreach(_ ! Dump(level - 1)) case Terminated(ref) => /* * It might be that we acted upon this death already in postRestart @@ -306,15 +317,15 @@ object SupervisorHierarchySpec { if (size > 1) { pongsToGo = context.children.size log :+= Event("sending " + pongsToGo + " pingOfDeath", identityHashCode(Hierarchy.this)) - context.children foreach (_ ! PingOfDeath) + context.children.foreach(_ ! PingOfDeath) } else { - context stop self + context.stop(self) context.parent ! PongOfDeath } case PongOfDeath => pongsToGo -= 1 if (pongsToGo == 0) { - context stop self + context.stop(self) context.parent ! PongOfDeath } } @@ -406,10 +417,10 @@ object SupervisorHierarchySpec { // don’t escalate from this one! override val supervisorStrategy = OneForOneStrategy() { - case f: Failure => f.directive - case OriginalRestartException(f: Failure) => f.directive + case f: Failure => f.directive + case OriginalRestartException(f: Failure) => f.directive case ActorInitializationException(_, _, f: Failure) => f.directive - case _ => Stop + case _ => Stop } var children = Vector.empty[ActorRef] @@ -450,7 +461,8 @@ object SupervisorHierarchySpec { when(Idle) { case Event(Init, _) => - hierarchy = context.watch(context.actorOf(Props(new Hierarchy(size, breadth, self, 0, random)).withDispatcher("hierarchy"), "head")) + hierarchy = context.watch( + context.actorOf(Props(new Hierarchy(size, breadth, self, 0, random)).withDispatcher("hierarchy"), "head")) setTimer("phase", StateTimeout, 5 seconds, false) goto(Init) } @@ -490,9 +502,9 @@ object SupervisorHierarchySpec { val deadGuy = path.elements val deadGuySize = deadGuy.size val isChild = (other: ActorRef) => other.path.elements.take(deadGuySize) == deadGuy - activeChildren = activeChildren filterNot isChild - idleChildren = idleChildren filterNot isChild - pingChildren = 
pingChildren filterNot isChild + activeChildren = activeChildren.filterNot(isChild) + idleChildren = idleChildren.filterNot(isChild) + pingChildren = pingChildren.filterNot(isChild) } var ignoreNotResumedLogs = true @@ -505,17 +517,22 @@ object SupervisorHierarchySpec { nextJob.next match { case Ping(ref) => ref ! "ping" case Fail(ref, dir) => - val f = Failure(dir, stop = random012 > 0, depth = random012, failPre = random012, failPost = random012, failConstr = random012, - stopKids = random012 match { - case 0 => 0 - case 1 => random.nextInt(breadth / 2) - case 2 => 1000 - }) + val f = Failure(dir, + stop = random012 > 0, + depth = random012, + failPre = random012, + failPost = random012, + failConstr = random012, + stopKids = random012 match { + case 0 => 0 + case 1 => random.nextInt(breadth / 2) + case 2 => 1000 + }) ref ! f } if (idleChildren.nonEmpty) self ! Work else context.system.scheduler.scheduleOnce(workSchedule, self, Work)(context.dispatcher) - stay using (x - 1) + stay.using(x - 1) case Event(Work, _) => if (pingChildren.isEmpty) goto(LastPing) else goto(Finishing) case Event(Died(path), _) => bury(path) @@ -526,10 +543,11 @@ object SupervisorHierarchySpec { stay case Event(StateTimeout, todo) => log.info("dumping state due to StateTimeout") - log.info("children: " + children.size + " pinged: " + pingChildren.size + " idle: " + idleChildren.size + " work: " + todo) - pingChildren foreach println + log.info( + "children: " + children.size + " pinged: " + pingChildren.size + " idle: " + idleChildren.size + " work: " + todo) + pingChildren.foreach(println) println(system.asInstanceOf[ActorSystemImpl].printTree) - pingChildren foreach getErrorsUp + pingChildren.foreach(getErrorsUp) ignoreNotResumedLogs = false hierarchy ! Dump(2) goto(Failed) @@ -551,7 +569,7 @@ object SupervisorHierarchySpec { onTransition { case _ -> LastPing => - idleChildren foreach (_ ! "ping") + idleChildren.foreach(_ ! 
"ping") pingChildren ++= idleChildren idleChildren = Vector.empty } @@ -575,7 +593,7 @@ object SupervisorHierarchySpec { when(Stopping, stateTimeout = 5.seconds.dilated) { case Event(PongOfDeath, _) => stay case Event(Terminated(r), _) if r == hierarchy => - val undead = children filterNot (_.isTerminated) + val undead = children.filterNot(_.isTerminated) if (undead.nonEmpty) { log.info("undead:\n" + undead.mkString("\n")) testActor ! "stressTestFailed (" + undead.size + " undead)" @@ -588,7 +606,7 @@ object SupervisorHierarchySpec { * failed. I’m leaving this code in so that manual inspection remains * an option (by setting the above condition to “true”). */ - val weak = children map (new WeakReference(_)) + val weak = children.map(new WeakReference(_)) children = Vector.empty pingChildren = Set.empty idleChildren = Vector.empty @@ -604,7 +622,7 @@ object SupervisorHierarchySpec { println(system.asInstanceOf[ActorSystemImpl].printTree) getErrors(hierarchy, 10) printErrors() - idleChildren foreach println + idleChildren.foreach(println) testActor ! 
"timeout in Stopping" stop case Event(e: ErrorLog, _) => @@ -614,7 +632,7 @@ object SupervisorHierarchySpec { when(GC, stateTimeout = 10 seconds) { case Event(GCcheck(weak), _) => - val next = weak filter (_.get ne null) + val next = weak.filter(_.get ne null) if (next.nonEmpty) { println(next.size + " left") context.system.scheduler.scheduleOnce(workSchedule, self, GCcheck(next))(context.dispatcher) @@ -658,7 +676,7 @@ object SupervisorHierarchySpec { case _ => errors :+= target -> ErrorLog("fetched", stateCache.get(target.path).log) } if (depth > 0) { - l.underlying.children foreach (getErrors(_, depth - 1)) + l.underlying.children.foreach(getErrors(_, depth - 1)) } } } @@ -675,17 +693,17 @@ object SupervisorHierarchySpec { } def printErrors(): Unit = { - errors collect { - case (origin, ErrorLog("dump", _)) => getErrors(origin, 1) - case (origin, ErrorLog(msg, _)) if msg startsWith "not resumed" => getErrorsUp(origin) + errors.collect { + case (origin, ErrorLog("dump", _)) => getErrors(origin, 1) + case (origin, ErrorLog(msg, _)) if msg.startsWith("not resumed") => getErrorsUp(origin) } - val merged = errors.sortBy(_._1.toString) flatMap { + val merged = errors.sortBy(_._1.toString).flatMap { case (ref, ErrorLog(msg, log)) => println("Error: " + ref + " " + msg) - log map (l => (l.time, ref, l.identity, l.msg.toString)) + log.map(l => (l.time, ref, l.identity, l.msg.toString)) } println("random seed: " + randomSeed) - merged.sorted.distinct foreach println + merged.sorted.distinct.foreach(println) } whenUnhandled { @@ -707,7 +725,7 @@ object SupervisorHierarchySpec { // make sure that we get the logs of the remaining pingChildren pingChildren.foreach(getErrorsUp) // this will ensure that the error logs get printed and we stop the test - context stop hierarchy + context.stop(hierarchy) goto(Failed) case Event(Abort, _) => log.info("received Abort") @@ -757,7 +775,8 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w override val 
supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 1, withinTimeRange = 5 seconds)(List(classOf[Throwable])) - val crasher = context.watch(context.actorOf(Props(new CountDownActor(countDownMessages, SupervisorStrategy.defaultStrategy)))) + val crasher = context.watch( + context.actorOf(Props(new CountDownActor(countDownMessages, SupervisorStrategy.defaultStrategy)))) def receive = { case "killCrasher" => crasher ! Kill @@ -782,7 +801,7 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w val worker = expectMsgType[ActorRef] worker ! "ping" expectMsg("pong") - EventFilter.warning("expected", occurrences = 1) intercept { + EventFilter.warning("expected", occurrences = 1).intercept { middle ! "fail" } middle ! "ping" @@ -794,7 +813,9 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w "suspend children while failing" taggedAs LongRunningTest in { val latch = TestLatch() val slowResumer = system.actorOf(Props(new Actor { - override def supervisorStrategy = OneForOneStrategy() { case _ => Await.ready(latch, 4.seconds.dilated); SupervisorStrategy.Resume } + override def supervisorStrategy = OneForOneStrategy() { + case _ => Await.ready(latch, 4.seconds.dilated); SupervisorStrategy.Resume + } def receive = { case "spawn" => sender() ! context.actorOf(Props[Resumer]) } @@ -807,7 +828,7 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w val worker = expectMsgType[ActorRef] worker ! "ping" expectMsg("pong") - EventFilter.warning("expected", occurrences = 1) intercept { + EventFilter.warning("expected", occurrences = 1).intercept { boss ! "fail" awaitCond(worker.asInstanceOf[LocalActorRef].underlying.mailbox.isSuspended) worker ! 
"ping" @@ -822,61 +843,63 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w val preStartCalled = new AtomicInteger(0) val postRestartCalled = new AtomicInteger(0) - filterEvents( - EventFilter[Failure](), - EventFilter[ActorInitializationException](), - EventFilter[IllegalArgumentException]("OH NO!"), - EventFilter.error(start = "changing Recreate into Create"), - EventFilter.error(start = "changing Resume into Create")) { - val failResumer = system.actorOf(Props(new Actor { - override def supervisorStrategy = OneForOneStrategy() { - case e: ActorInitializationException => - if (createAttempt.get % 2 == 0) SupervisorStrategy.Resume else SupervisorStrategy.Restart - } - - val child = context.actorOf(Props(new Actor { - val ca = createAttempt.incrementAndGet() - - if (ca <= 6 && ca % 3 == 0) - context.actorOf(Props(new Actor { override def receive = { case _ => } }), "workingChild") - - if (ca < 6) { - throw new IllegalArgumentException("OH NO!") - } - override def preStart() = { - preStartCalled.incrementAndGet() - } - override def postRestart(reason: Throwable) = { - postRestartCalled.incrementAndGet() + filterEvents(EventFilter[Failure](), + EventFilter[ActorInitializationException](), + EventFilter[IllegalArgumentException]("OH NO!"), + EventFilter.error(start = "changing Recreate into Create"), + EventFilter.error(start = "changing Resume into Create")) { + val failResumer = + system.actorOf( + Props(new Actor { + override def supervisorStrategy = OneForOneStrategy() { + case e: ActorInitializationException => + if (createAttempt.get % 2 == 0) SupervisorStrategy.Resume else SupervisorStrategy.Restart } + + val child = context.actorOf(Props(new Actor { + val ca = createAttempt.incrementAndGet() + + if (ca <= 6 && ca % 3 == 0) + context.actorOf(Props(new Actor { override def receive = { case _ => } }), "workingChild") + + if (ca < 6) { + throw new IllegalArgumentException("OH NO!") + } + override def preStart() = { + 
preStartCalled.incrementAndGet() + } + override def postRestart(reason: Throwable) = { + postRestartCalled.incrementAndGet() + } + override def receive = { + case m => sender() ! m + } + }), "failChild") + override def receive = { - case m => sender() ! m + case m => child.forward(m) } - }), "failChild") + }), + "failResumer") - override def receive = { - case m => child.forward(m) - } - }), "failResumer") - - failResumer ! "blahonga" - expectMsg("blahonga") - } + failResumer ! "blahonga" + expectMsg("blahonga") + } createAttempt.get should ===(6) preStartCalled.get should ===(1) postRestartCalled.get should ===(0) } "survive being stressed" taggedAs LongRunningTest in { - system.eventStream.publish(Mute( - EventFilter[Failure](), - EventFilter.warning("Failure"), - EventFilter[ActorInitializationException](), - EventFilter[NoSuchElementException]("head of empty list"), - EventFilter.error(start = "changing Resume into Restart"), - EventFilter.error(start = "changing Resume into Create"), - EventFilter.error(start = "changing Recreate into Create"), - EventFilter.warning(start = "received dead "))) + system.eventStream.publish( + Mute(EventFilter[Failure](), + EventFilter.warning("Failure"), + EventFilter[ActorInitializationException](), + EventFilter[NoSuchElementException]("head of empty list"), + EventFilter.error(start = "changing Resume into Restart"), + EventFilter.error(start = "changing Resume into Create"), + EventFilter.error(start = "changing Recreate into Create"), + EventFilter.warning(start = "received dead "))) val fsm = system.actorOf(Props(new StressTest(testActor, size = 500, breadth = 6)), "stressTest") diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala index 6a6adf27f0..1248d8134d 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala @@ -8,7 +8,7 
@@ import language.postfixOps import akka.testkit.{ filterEvents, EventFilter } import scala.concurrent.Await -import java.util.concurrent.{ TimeUnit, CountDownLatch } +import java.util.concurrent.{ CountDownLatch, TimeUnit } import akka.testkit.AkkaSpec import akka.testkit.DefaultTimeout import akka.pattern.ask @@ -35,8 +35,8 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul filterEvents(EventFilter[Exception]("Kill")) { val countDownLatch = new CountDownLatch(4) - val supervisor = system.actorOf(Props(new Supervisor( - OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 5 seconds)(List(classOf[Exception]))))) + val supervisor = system.actorOf(Props( + new Supervisor(OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 5 seconds)(List(classOf[Exception]))))) val workerProps = Props(new Actor { override def postRestart(cause: Throwable): Unit = { countDownLatch.countDown() } @@ -46,11 +46,14 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul } }) - val actor1, actor2 = Await.result((supervisor ? workerProps.withDispatcher("pinned-dispatcher")).mapTo[ActorRef], timeout.duration) + val actor1, actor2 = + Await.result((supervisor ? workerProps.withDispatcher("pinned-dispatcher")).mapTo[ActorRef], timeout.duration) - val actor3 = Await.result((supervisor ? workerProps.withDispatcher("test-dispatcher")).mapTo[ActorRef], timeout.duration) + val actor3 = + Await.result((supervisor ? workerProps.withDispatcher("test-dispatcher")).mapTo[ActorRef], timeout.duration) - val actor4 = Await.result((supervisor ? workerProps.withDispatcher("pinned-dispatcher")).mapTo[ActorRef], timeout.duration) + val actor4 = + Await.result((supervisor ? workerProps.withDispatcher("pinned-dispatcher")).mapTo[ActorRef], timeout.duration) actor1 ! Kill actor2 ! 
Kill @@ -59,11 +62,13 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul countDownLatch.await(10, TimeUnit.SECONDS) - Seq("actor1" -> actor1, "actor2" -> actor2, "actor3" -> actor3, "actor4" -> actor4) map { - case (id, ref) => (id, ref ? "status") - } foreach { - case (id, f) => (id, Await.result(f, timeout.duration)) should ===((id, "OK")) - } + Seq("actor1" -> actor1, "actor2" -> actor2, "actor3" -> actor3, "actor4" -> actor4) + .map { + case (id, ref) => (id, ref ? "status") + } + .foreach { + case (id, f) => (id, Await.result(f, timeout.duration)) should ===((id, "OK")) + } } } @@ -74,7 +79,7 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul override def preStart(): Unit = testActor ! "preStart" })) val m = "weird message" - EventFilter[Exception](m, occurrences = 1) intercept { + EventFilter[Exception](m, occurrences = 1).intercept { a ! new Exception(m) } expectMsg("preStart") @@ -127,7 +132,9 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul "be able to create a similar kid in the fault handling strategy" in { val parent = system.actorOf(Props(new Actor { override val supervisorStrategy = new OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider) { - override def handleChildTerminated(context: ActorContext, child: ActorRef, children: Iterable[ActorRef]): Unit = { + override def handleChildTerminated(context: ActorContext, + child: ActorRef, + children: Iterable[ActorRef]): Unit = { val newKid = context.actorOf(Props.empty, child.path.name) testActor ! { if ((newKid ne child) && newKid.path == child.path) "green" else "red" } } @@ -137,7 +144,7 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul })) parent ! 
"engage" expectMsg("green") - EventFilter[IllegalStateException]("handleChildTerminated failed", occurrences = 1) intercept { + EventFilter[IllegalStateException]("handleChildTerminated failed", occurrences = 1).intercept { system.stop(parent) } } @@ -151,7 +158,7 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul case "doit" => context.actorOf(Props.empty, "child") ! Kill } })) - EventFilter[ActorKilledException](occurrences = 1) intercept { + EventFilter[ActorKilledException](occurrences = 1).intercept { parent ! "doit" } val p = expectMsgType[ActorRef].path diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala index 0200ddf595..03c66eb4c6 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala @@ -64,7 +64,7 @@ object SupervisorSpec { override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 0)(List(classOf[Exception])) def receive = { - case Die => temp forward Die + case Die => temp.forward(Die) case Terminated(`temp`) => sendTo ! "terminated" case Status.Failure(_) => /*Ignore*/ } @@ -101,7 +101,11 @@ error-mailbox { """) } -class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfterEach with ImplicitSender with DefaultTimeout { +class SupervisorSpec + extends AkkaSpec(SupervisorSpec.config) + with BeforeAndAfterEach + with ImplicitSender + with DefaultTimeout { import SupervisorSpec._ @@ -111,54 +115,67 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter // Creating actors and supervisors // ===================================================== - private def child(supervisor: ActorRef, props: Props): ActorRef = Await.result((supervisor ? props).mapTo[ActorRef], timeout.duration) + private def child(supervisor: ActorRef, props: Props): ActorRef = + Await.result((supervisor ? 
props).mapTo[ActorRef], timeout.duration) def temporaryActorAllForOne = { - val supervisor = system.actorOf(Props(new Supervisor(AllForOneStrategy(maxNrOfRetries = 0)(List(classOf[Exception]))))) + val supervisor = + system.actorOf(Props(new Supervisor(AllForOneStrategy(maxNrOfRetries = 0)(List(classOf[Exception]))))) val temporaryActor = child(supervisor, Props(new PingPongActor(testActor))) (temporaryActor, supervisor) } def singleActorAllForOne = { - val supervisor = system.actorOf(Props(new Supervisor( - AllForOneStrategy(maxNrOfRetries = 3, withinTimeRange = DilatedTimeout)(List(classOf[Exception]))))) + val supervisor = system.actorOf( + Props( + new Supervisor( + AllForOneStrategy(maxNrOfRetries = 3, withinTimeRange = DilatedTimeout)(List(classOf[Exception]))))) val pingpong = child(supervisor, Props(new PingPongActor(testActor))) (pingpong, supervisor) } def singleActorOneForOne = { - val supervisor = system.actorOf(Props(new Supervisor( - OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = DilatedTimeout)(List(classOf[Exception]))))) + val supervisor = system.actorOf( + Props( + new Supervisor( + OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = DilatedTimeout)(List(classOf[Exception]))))) val pingpong = child(supervisor, Props(new PingPongActor(testActor))) (pingpong, supervisor) } def multipleActorsAllForOne = { - val supervisor = system.actorOf(Props(new Supervisor( - AllForOneStrategy(maxNrOfRetries = 3, withinTimeRange = DilatedTimeout)(List(classOf[Exception]))))) + val supervisor = system.actorOf( + Props( + new Supervisor( + AllForOneStrategy(maxNrOfRetries = 3, withinTimeRange = DilatedTimeout)(List(classOf[Exception]))))) val pingpong1, pingpong2, pingpong3 = child(supervisor, Props(new PingPongActor(testActor))) (pingpong1, pingpong2, pingpong3, supervisor) } def multipleActorsOneForOne = { - val supervisor = system.actorOf(Props(new Supervisor( - OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 
DilatedTimeout)(List(classOf[Exception]))))) + val supervisor = system.actorOf( + Props( + new Supervisor( + OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = DilatedTimeout)(List(classOf[Exception]))))) val pingpong1, pingpong2, pingpong3 = child(supervisor, Props(new PingPongActor(testActor))) (pingpong1, pingpong2, pingpong3, supervisor) } def nestedSupervisorsAllForOne = { - val topSupervisor = system.actorOf(Props(new Supervisor( - AllForOneStrategy(maxNrOfRetries = 3, withinTimeRange = DilatedTimeout)(List(classOf[Exception]))))) + val topSupervisor = system.actorOf( + Props( + new Supervisor( + AllForOneStrategy(maxNrOfRetries = 3, withinTimeRange = DilatedTimeout)(List(classOf[Exception]))))) val pingpong1 = child(topSupervisor, Props(new PingPongActor(testActor))) - val middleSupervisor = child(topSupervisor, Props(new Supervisor( - AllForOneStrategy(maxNrOfRetries = 3, withinTimeRange = DilatedTimeout)(Nil)))) + val middleSupervisor = child( + topSupervisor, + Props(new Supervisor(AllForOneStrategy(maxNrOfRetries = 3, withinTimeRange = DilatedTimeout)(Nil)))) val pingpong2, pingpong3 = child(middleSupervisor, Props(new PingPongActor(testActor))) (pingpong1, pingpong2, pingpong3, topSupervisor) @@ -168,9 +185,7 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter system.eventStream.publish(Mute(EventFilter[RuntimeException](ExceptionMessage))) } - override def beforeEach() = { - - } + override def beforeEach() = {} def ping(pingPongActor: ActorRef) = { Await.result(pingPongActor.?(Ping)(DilatedTimeout), DilatedTimeout) should ===(PongMessage) @@ -206,8 +221,12 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter var postRestarts = 0 var preStarts = 0 var postStops = 0 - override def preRestart(reason: Throwable, message: Option[Any]): Unit = { preRestarts += 1; testActor ! ("preRestart" + preRestarts) } - override def postRestart(reason: Throwable): Unit = { postRestarts += 1; testActor ! 
("postRestart" + postRestarts) } + override def preRestart(reason: Throwable, message: Option[Any]): Unit = { + preRestarts += 1; testActor ! ("preRestart" + preRestarts) + } + override def postRestart(reason: Throwable): Unit = { + postRestarts += 1; testActor ! ("postRestart" + postRestarts) + } override def preStart(): Unit = { preStarts += 1; testActor ! ("preStart" + preStarts) } override def postStop(): Unit = { postStops += 1; testActor ! ("postStop" + postStops) } def receive = { @@ -219,7 +238,7 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = restarts)(List(classOf[Exception])) val child = context.actorOf(Props(childInstance)) def receive = { - case msg => child forward msg + case msg => child.forward(msg) } })) @@ -229,16 +248,15 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter expectMsg("pong") filterEvents(EventFilter[RuntimeException]("Expected", occurrences = restarts + 1)) { - (1 to restarts) foreach { - i => - master ! "crash" - expectMsg("crashed") + (1 to restarts).foreach { i => + master ! "crash" + expectMsg("crashed") - expectMsg("preRestart" + i) - expectMsg("postRestart" + i) + expectMsg("preRestart" + i) + expectMsg("postRestart" + i) - master ! "ping" - expectMsg("pong") + master ! "ping" + expectMsg("pong") } master ! 
"crash" expectMsg("crashed") @@ -377,8 +395,8 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter "attempt restart when exception during restart" in { val inits = new AtomicInteger(0) - val supervisor = system.actorOf(Props(new Supervisor( - OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 10 seconds)(classOf[Exception] :: Nil)))) + val supervisor = system.actorOf(Props( + new Supervisor(OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 10 seconds)(classOf[Exception] :: Nil)))) val dyingProps = Props(new Actor { val init = inits.getAndIncrement() @@ -399,14 +417,13 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter supervisor ! dyingProps val dyingActor = expectMsgType[ActorRef] - filterEvents( - EventFilter[RuntimeException]("Expected", occurrences = 1), - EventFilter[PreRestartException]("Don't wanna!", occurrences = 1), - EventFilter[PostRestartException]("Don't wanna!", occurrences = 1)) { - intercept[RuntimeException] { - Await.result(dyingActor.?(DieReply)(DilatedTimeout), DilatedTimeout) - } + filterEvents(EventFilter[RuntimeException]("Expected", occurrences = 1), + EventFilter[PreRestartException]("Don't wanna!", occurrences = 1), + EventFilter[PostRestartException]("Don't wanna!", occurrences = 1)) { + intercept[RuntimeException] { + Await.result(dyingActor.?(DieReply)(DilatedTimeout), DilatedTimeout) } + } dyingActor ! Ping expectMsg(PongMessage) @@ -420,7 +437,7 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter val parent = system.actorOf(Props(new Actor { override val supervisorStrategy = OneForOneStrategy()({ case e: IllegalStateException if e.getMessage == "OHNOES" => throw e - case _ => SupervisorStrategy.Restart + case _ => SupervisorStrategy.Restart }) val child = context.watch(context.actorOf(Props(new Actor { override def postRestart(reason: Throwable): Unit = testActor ! 
"child restarted" @@ -434,7 +451,7 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter // Overriding to disable auto-unwatch override def preRestart(reason: Throwable, msg: Option[Any]): Unit = { - context.children foreach context.stop + context.children.foreach(context.stop) postStop() } @@ -442,8 +459,8 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter case Terminated(a) if a.path == child.path => testActor ! "child terminated" case l: TestLatch => child ! l case "test" => sender() ! "green" - case "testchild" => child forward "test" - case "testchildAndAck" => child forward "test"; sender() ! "ack" + case "testchild" => child.forward("test") + case "testchildAndAck" => child.forward("test"); sender() ! "ack" } })) @@ -451,11 +468,10 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter parent ! latch parent ! "testchildAndAck" expectMsg("ack") - filterEvents( - EventFilter[IllegalStateException]("OHNOES", occurrences = 1), - EventFilter.warning(pattern = "dead.*test", occurrences = 1)) { - latch.countDown() - } + filterEvents(EventFilter[IllegalStateException]("OHNOES", occurrences = 1), + EventFilter.warning(pattern = "dead.*test", occurrences = 1)) { + latch.countDown() + } expectMsg("parent restarted") expectMsg("child terminated") parent ! 
"test" @@ -504,8 +520,9 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter } "restarts a child infinitely if maxNrOfRetries = -1 and withinTimeRange = Duration.Inf" in { - val supervisor = system.actorOf(Props(new Supervisor( - OneForOneStrategy(maxNrOfRetries = -1, withinTimeRange = Duration.Inf)(classOf[Exception] :: Nil)))) + val supervisor = system.actorOf( + Props(new Supervisor( + OneForOneStrategy(maxNrOfRetries = -1, withinTimeRange = Duration.Inf)(classOf[Exception] :: Nil)))) val pingpong = child(supervisor, Props(new PingPongActor(testActor))) @@ -520,8 +537,8 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter } "treats maxNrOfRetries = -1 as maxNrOfRetries = 1 if withinTimeRange is non-infinite Duration" in { - val supervisor = system.actorOf(Props(new Supervisor( - OneForOneStrategy(maxNrOfRetries = -1, withinTimeRange = 10 seconds)(classOf[Exception] :: Nil)))) + val supervisor = system.actorOf(Props( + new Supervisor(OneForOneStrategy(maxNrOfRetries = -1, withinTimeRange = 10 seconds)(classOf[Exception] :: Nil)))) val pingpong = child(supervisor, Props(new PingPongActor(testActor))) @@ -532,8 +549,8 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter } "treats withinTimeRange = Duration.Inf as a single infinite restart window" in { - val supervisor = system.actorOf(Props(new Supervisor( - OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = Duration.Inf)(classOf[Exception] :: Nil)))) + val supervisor = system.actorOf(Props( + new Supervisor(OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = Duration.Inf)(classOf[Exception] :: Nil)))) val pingpong = child(supervisor, Props(new PingPongActor(testActor))) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala index b68bd76451..d9cb09ce2e 100644 --- 
a/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala @@ -8,18 +8,22 @@ import language.postfixOps import scala.concurrent.Await import scala.concurrent.duration._ -import akka.testkit.{ EventFilter, AkkaSpec, ImplicitSender, DefaultTimeout } +import akka.testkit.{ AkkaSpec, DefaultTimeout, EventFilter, ImplicitSender } import akka.pattern.ask -class SupervisorTreeSpec extends AkkaSpec("akka.actor.serialize-messages = off") with ImplicitSender with DefaultTimeout { +class SupervisorTreeSpec + extends AkkaSpec("akka.actor.serialize-messages = off") + with ImplicitSender + with DefaultTimeout { "In a 3 levels deep supervisor tree (linked in the constructor) we" must { "be able to kill the middle actor and see itself and its child restarted" in { - EventFilter[ActorKilledException](occurrences = 1) intercept { + EventFilter[ActorKilledException](occurrences = 1).intercept { within(5 seconds) { val p = Props(new Actor { - override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 1 second)(List(classOf[Exception])) + override val supervisorStrategy = + OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 1 second)(List(classOf[Exception])) def receive = { case p: Props => sender() ! 
context.actorOf(p) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala b/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala index 2ba153eda0..aa3d168225 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala @@ -26,8 +26,8 @@ class Ticket669Spec extends AkkaSpec with BeforeAndAfterAll with ImplicitSender "A supervised actor with lifecycle PERMANENT" should { "be able to reply on failure during preRestart" in { filterEvents(EventFilter[Exception]("test", occurrences = 1)) { - val supervisor = system.actorOf(Props(new Supervisor( - AllForOneStrategy(5, 10 seconds)(List(classOf[Exception]))))) + val supervisor = + system.actorOf(Props(new Supervisor(AllForOneStrategy(5, 10 seconds)(List(classOf[Exception]))))) val supervised = Await.result((supervisor ? Props[Supervised]).mapTo[ActorRef], timeout.duration) supervised.!("test")(testActor) @@ -38,8 +38,8 @@ class Ticket669Spec extends AkkaSpec with BeforeAndAfterAll with ImplicitSender "be able to reply on failure during postStop" in { filterEvents(EventFilter[Exception]("test", occurrences = 1)) { - val supervisor = system.actorOf(Props(new Supervisor( - AllForOneStrategy(maxNrOfRetries = 0)(List(classOf[Exception]))))) + val supervisor = + system.actorOf(Props(new Supervisor(AllForOneStrategy(maxNrOfRetries = 0)(List(classOf[Exception]))))) val supervised = Await.result((supervisor ? 
Props[Supervised]).mapTo[ActorRef], timeout.duration) supervised.!("test")(testActor) diff --git a/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala index 99665bd261..12a342cdc0 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/TimerSpec.scala @@ -15,13 +15,11 @@ object TimerSpec { sealed trait Command case class Tick(n: Int) extends Command case object Bump extends Command - case class SlowThenBump(latch: TestLatch) extends Command - with NoSerializationVerificationNeeded + case class SlowThenBump(latch: TestLatch) extends Command with NoSerializationVerificationNeeded case object End extends Command case class Throw(e: Throwable) extends Command case object Cancel extends Command - case class SlowThenThrow(latch: TestLatch, e: Throwable) extends Command - with NoSerializationVerificationNeeded + case class SlowThenThrow(latch: TestLatch, e: Throwable) extends Command with NoSerializationVerificationNeeded case object AutoReceive extends Command sealed trait Event @@ -34,7 +32,9 @@ object TimerSpec { def target(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () => Int): Props = Props(new Target(monitor, interval, repeat, initial)) - class Target(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () => Int) extends Actor with Timers { + class Target(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () => Int) + extends Actor + with Timers { private var bumpCount = initial() if (repeat) @@ -86,7 +86,8 @@ object TimerSpec { object TheState - class FsmTarget(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () => Int) extends FSM[TheState.type, Int] { + class FsmTarget(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () => Int) + extends FSM[TheState.type, Int] { private var restarting = false @@ -104,7 +105,7 @@ object 
TimerSpec { def bump(bumpCount: Int): State = { setTimer("T", Tick(bumpCount + 1), interval, repeat) - stay using (bumpCount + 1) + stay.using(bumpCount + 1) } def autoReceive(): State = { @@ -148,13 +149,19 @@ object TimerSpec { class TimerSpec extends AbstractTimerSpec { override def testName: String = "Timers" - override def target(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () => Int = () => 1): Props = + override def target(monitor: ActorRef, + interval: FiniteDuration, + repeat: Boolean, + initial: () => Int = () => 1): Props = TimerSpec.target(monitor, interval, repeat, initial) } class FsmTimerSpec extends AbstractTimerSpec { override def testName: String = "FSM Timers" - override def target(monitor: ActorRef, interval: FiniteDuration, repeat: Boolean, initial: () => Int = () => 1): Props = + override def target(monitor: ActorRef, + interval: FiniteDuration, + repeat: Boolean, + initial: () => Int = () => 1): Props = TimerSpec.fsmTarget(monitor, interval, repeat, initial) } @@ -232,8 +239,8 @@ abstract class AbstractTimerSpec extends AkkaSpec { "discard timers from old incarnation after restart, alt 1" taggedAs TimingTest in { val probe = TestProbe() val startCounter = new AtomicInteger(0) - val ref = system.actorOf(target(probe.ref, dilatedInterval, repeat = true, - initial = () => startCounter.incrementAndGet())) + val ref = system.actorOf( + target(probe.ref, dilatedInterval, repeat = true, initial = () => startCounter.incrementAndGet())) probe.expectMsg(Tock(1)) val latch = new TestLatch(1) diff --git a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala index 6810ecd290..e597909b8a 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala @@ -12,7 +12,7 @@ import akka.japi.{ Option => JOption } import akka.pattern.ask import akka.routing.RoundRobinGroup import 
akka.serialization.{ JavaSerializer, SerializerWithStringManifest } -import akka.testkit.{ AkkaSpec, DefaultTimeout, EventFilter, TimingTest, filterEvents } +import akka.testkit.{ filterEvents, AkkaSpec, DefaultTimeout, EventFilter, TimingTest } import akka.util.Timeout import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } @@ -59,7 +59,7 @@ object TypedActorSpec { findNext } - override def exists(f: T => Boolean): Boolean = items exists f + override def exists(f: T => Boolean): Boolean = items.exists(f) } trait Foo { @@ -108,7 +108,8 @@ object TypedActorSpec { @throws(classOf[TimeoutException]) def read(): Int - def testMethodCallSerialization(foo: Foo, s: String, i: Int, o: WithStringSerializedClass): Unit = throw new IllegalStateException("expected") + def testMethodCallSerialization(foo: Foo, s: String, i: Int, o: WithStringSerializedClass): Unit = + throw new IllegalStateException("expected") } class Bar extends Foo with Serializable { @@ -177,7 +178,13 @@ object TypedActorSpec { def crash(): Unit } - class LifeCyclesImpl(val latch: CountDownLatch) extends PreStart with PostStop with PreRestart with PostRestart with LifeCycles with Receiver { + class LifeCyclesImpl(val latch: CountDownLatch) + extends PreStart + with PostStop + with PreRestart + with PostRestart + with LifeCycles + with Receiver { private def ensureContextAvailable[T](f: => T): T = TypedActor.context match { case null => throw new IllegalStateException("TypedActor.context is null!") @@ -190,15 +197,15 @@ object TypedActorSpec { override def postStop(): Unit = ensureContextAvailable(for (i <- 1 to 3) latch.countDown()) - override def preRestart(reason: Throwable, message: Option[Any]): Unit = ensureContextAvailable(for (i <- 1 to 5) latch.countDown()) + override def preRestart(reason: Throwable, message: Option[Any]): Unit = + ensureContextAvailable(for (i <- 1 to 5) latch.countDown()) override def postRestart(reason: Throwable): Unit = ensureContextAvailable(for (i <- 1 to 7) 
latch.countDown()) override def onReceive(msg: Any, sender: ActorRef): Unit = { - ensureContextAvailable( - msg match { - case "pigdog" => sender ! "dogpig" - }) + ensureContextAvailable(msg match { + case "pigdog" => sender ! "dogpig" + }) } } @@ -225,7 +232,7 @@ object TypedActorSpec { override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match { case manifest if bytes.length == 1 && bytes(0) == 255.toByte => WithStringSerializedClass() - case _ => throw new IllegalArgumentException(s"Cannot deserialize object with manifest $manifest") + case _ => throw new IllegalArgumentException(s"Cannot deserialize object with manifest $manifest") } } @@ -233,8 +240,11 @@ object TypedActorSpec { } -class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) - with BeforeAndAfterEach with BeforeAndAfterAll with DefaultTimeout { +class TypedActorSpec + extends AkkaSpec(TypedActorSpec.config) + with BeforeAndAfterEach + with BeforeAndAfterAll + with DefaultTimeout { import akka.actor.TypedActorSpec._ @@ -244,7 +254,8 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) TypedActor(system).typedActorOf(TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(Timeout(d))) def newFooBar(dispatcher: String, d: FiniteDuration): Foo = - TypedActor(system).typedActorOf(TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(Timeout(d)).withDispatcher(dispatcher)) + TypedActor(system).typedActorOf( + TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(Timeout(d)).withDispatcher(dispatcher)) def newStacked(): Stacked = TypedActor(system).typedActorOf( @@ -292,7 +303,7 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) "be able to call equals" in { val t = newFooBar t should ===(t) - t should not equal (null) + (t should not).equal(null) mustStop(t) } @@ -382,13 +393,15 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) case p: TypedProps[_] => context.sender() ! 
TypedActor(context).typedActorOf(p) } })) - val t = Await.result((boss ? TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(2 seconds)).mapTo[Foo], timeout.duration) + val t = Await.result((boss ? TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(2 seconds)).mapTo[Foo], + timeout.duration) t.incr() t.failingPigdog() t.read() should ===(1) //Make sure state is not reset after failure - intercept[IllegalStateException] { Await.result(t.failingFuturePigdog, 2 seconds) }.getMessage should ===("expected") + intercept[IllegalStateException] { Await.result(t.failingFuturePigdog, 2 seconds) }.getMessage should ===( + "expected") t.read() should ===(1) //Make sure state is not reset after failure (intercept[IllegalStateException] { t.failingJOptionPigdog }).getMessage should ===("expected") @@ -474,7 +487,11 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) import java.io._ val someFoo: Foo = new Bar JavaSerializer.currentSystem.withValue(system.asInstanceOf[ExtendedActorSystem]) { - val m = TypedActor.MethodCall(classOf[Foo].getDeclaredMethod("testMethodCallSerialization", Array[Class[_]](classOf[Foo], classOf[String], classOf[Int], classOf[WithStringSerializedClass]): _*), Array[AnyRef](someFoo, null, 1.asInstanceOf[AnyRef], WithStringSerializedClass())) + val m = TypedActor.MethodCall( + classOf[Foo].getDeclaredMethod( + "testMethodCallSerialization", + Array[Class[_]](classOf[Foo], classOf[String], classOf[Int], classOf[WithStringSerializedClass]): _*), + Array[AnyRef](someFoo, null, 1.asInstanceOf[AnyRef], WithStringSerializedClass())) val baos = new ByteArrayOutputStream(8192 * 4) val out = new ObjectOutputStream(baos) @@ -526,12 +543,12 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) val latch = new CountDownLatch(16) val ta = TypedActor(system) val t: LifeCycles = ta.typedActorOf(TypedProps[LifeCyclesImpl](classOf[LifeCycles], new LifeCyclesImpl(latch))) - EventFilter[IllegalStateException]("Crash!", occurrences = 1) 
intercept { + EventFilter[IllegalStateException]("Crash!", occurrences = 1).intercept { t.crash() } //Sneak in a check for the Receiver override - val ref = ta getActorRefFor t + val ref = ta.getActorRefFor(t) ref.tell("pigdog", testActor) @@ -545,8 +562,11 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) } } -class TypedActorRouterSpec extends AkkaSpec(TypedActorSpec.config) - with BeforeAndAfterEach with BeforeAndAfterAll with DefaultTimeout { +class TypedActorRouterSpec + extends AkkaSpec(TypedActorSpec.config) + with BeforeAndAfterEach + with BeforeAndAfterAll + with DefaultTimeout { import akka.actor.TypedActorSpec._ @@ -564,7 +584,9 @@ class TypedActorRouterSpec extends AkkaSpec(TypedActorSpec.config) val t2 = newFooBar val t3 = newFooBar val t4 = newFooBar - val routees = List(t1, t2, t3, t4) map { t => TypedActor(system).getActorRefFor(t).path.toStringWithoutAddress } + val routees = List(t1, t2, t3, t4).map { t => + TypedActor(system).getActorRefFor(t).path.toStringWithoutAddress + } TypedActor(system).isTypedActor(t1) should ===(true) TypedActor(system).isTypedActor(t2) should ===(true) diff --git a/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala b/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala index cc1d12e4e6..66a255880c 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/UidClashTest.scala @@ -4,7 +4,7 @@ package akka.actor -import akka.testkit.{ TestProbe, AkkaSpec } +import akka.testkit.{ AkkaSpec, TestProbe } import akka.actor.SupervisorStrategy.{ Restart, Stop } import akka.dispatch.sysmsg.SystemMessage import akka.event.EventStream @@ -12,15 +12,16 @@ import scala.util.control.NoStackTrace object UidClashTest { - class TerminatedForNonWatchedActor extends Exception("Received Terminated for actor that was not actually watched") - with NoStackTrace + class TerminatedForNonWatchedActor + extends Exception("Received Terminated for actor 
that was not actually watched") + with NoStackTrace @volatile var oldActor: ActorRef = _ - private[akka] class EvilCollidingActorRef( - override val provider: ActorRefProvider, - override val path: ActorPath, - val eventStream: EventStream) extends MinimalActorRef { + private[akka] class EvilCollidingActorRef(override val provider: ActorRefProvider, + override val path: ActorPath, + val eventStream: EventStream) + extends MinimalActorRef { //Ignore everything override def isTerminated: Boolean = true @@ -52,7 +53,7 @@ object UidClashTest { } override def preRestart(reason: Throwable, message: Option[Any]): Unit = { - context.children foreach { child => + context.children.foreach { child => oldActor = child context.unwatch(child) context.stop(child) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index 2ff7fc120a..d1cad439f9 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -7,8 +7,8 @@ package akka.actor.dispatch import language.postfixOps import java.rmi.RemoteException -import java.util.concurrent.{ TimeUnit, CountDownLatch, ConcurrentHashMap } -import java.util.concurrent.atomic.{ AtomicLong, AtomicInteger } +import java.util.concurrent.{ ConcurrentHashMap, CountDownLatch, TimeUnit } +import java.util.concurrent.atomic.{ AtomicInteger, AtomicLong } import org.scalatest.Assertions._ @@ -80,18 +80,21 @@ object ActorModelSpec { } def receive = { - case AwaitLatch(latch) => { ack(); latch.await(); busy.switchOff(()) } - case Meet(sign, wait) => { ack(); sign.countDown(); wait.await(); busy.switchOff(()) } - case Wait(time) => { ack(); Thread.sleep(time); busy.switchOff(()) } - case WaitAck(time, l) => { ack(); Thread.sleep(time); l.countDown(); busy.switchOff(()) } - case Reply(msg) => { ack(); sender() ! 
msg; busy.switchOff(()) } - case TryReply(msg) => { ack(); sender().tell(msg, null); busy.switchOff(()) } - case Forward(to, msg) => { ack(); to.forward(msg); busy.switchOff(()) } - case CountDown(latch) => { ack(); latch.countDown(); busy.switchOff(()) } - case Increment(count) => { ack(); count.incrementAndGet(); busy.switchOff(()) } - case CountDownNStop(l) => { ack(); l.countDown(); context.stop(self); busy.switchOff(()) } - case Restart => { ack(); busy.switchOff(()); throw new Exception("Restart requested") } - case Interrupt => { ack(); sender() ! Status.Failure(new ActorInterruptedException(new InterruptedException("Ping!"))); busy.switchOff(()); throw new InterruptedException("Ping!") } + case AwaitLatch(latch) => { ack(); latch.await(); busy.switchOff(()) } + case Meet(sign, wait) => { ack(); sign.countDown(); wait.await(); busy.switchOff(()) } + case Wait(time) => { ack(); Thread.sleep(time); busy.switchOff(()) } + case WaitAck(time, l) => { ack(); Thread.sleep(time); l.countDown(); busy.switchOff(()) } + case Reply(msg) => { ack(); sender() ! msg; busy.switchOff(()) } + case TryReply(msg) => { ack(); sender().tell(msg, null); busy.switchOff(()) } + case Forward(to, msg) => { ack(); to.forward(msg); busy.switchOff(()) } + case CountDown(latch) => { ack(); latch.countDown(); busy.switchOff(()) } + case Increment(count) => { ack(); count.incrementAndGet(); busy.switchOff(()) } + case CountDownNStop(l) => { ack(); l.countDown(); context.stop(self); busy.switchOff(()) } + case Restart => { ack(); busy.switchOff(()); throw new Exception("Restart requested") } + case Interrupt => { + ack(); sender() ! Status.Failure(new ActorInterruptedException(new InterruptedException("Ping!"))); + busy.switchOff(()); throw new InterruptedException("Ping!") + } case InterruptNicely(msg) => { ack(); sender() ! 
msg; busy.switchOff(()); Thread.currentThread().interrupt() } case ThrowException(e: Throwable) => { ack(); busy.switchOff(()); throw e } case DoubleStop => { ack(); context.stop(self); context.stop(self); busy.switchOff } @@ -106,7 +109,8 @@ object ActorModelSpec { val msgsReceived = new AtomicLong(0) val msgsProcessed = new AtomicLong(0) val restarts = new AtomicLong(0) - override def toString = "InterceptorStats(susp=" + suspensions + + override def toString = + "InterceptorStats(susp=" + suspensions + ",res=" + resumes + ",reg=" + registers + ",unreg=" + unregisters + ",recv=" + msgsReceived + ",proc=" + msgsProcessed + ",restart=" + restarts } @@ -160,15 +164,19 @@ object ActorModelSpec { } } - def assertDispatcher(dispatcher: MessageDispatcherInterceptor)( - stops: Long = dispatcher.stops.get())(implicit system: ActorSystem): Unit = { + def assertDispatcher(dispatcher: MessageDispatcherInterceptor)(stops: Long = dispatcher.stops.get())( + implicit system: ActorSystem): Unit = { val deadline = System.currentTimeMillis + dispatcher.shutdownTimeout.toMillis * 5 try { await(deadline)(stops == dispatcher.stops.get) } catch { case e: Throwable => - system.eventStream.publish(Error(e, dispatcher.toString, dispatcher.getClass, "actual: stops=" + dispatcher.stops.get + - " required: stops=" + stops)) + system.eventStream.publish( + Error(e, + dispatcher.toString, + dispatcher.getClass, + "actual: stops=" + dispatcher.stops.get + + " required: stops=" + stops)) throw e } } @@ -187,32 +195,27 @@ object ActorModelSpec { dispatcher.asInstanceOf[MessageDispatcherInterceptor].getStats(actorRef) def assertRefDefaultZero(actorRef: ActorRef, dispatcher: MessageDispatcher = null)( - suspensions: Long = 0, - resumes: Long = 0, - registers: Long = 0, - unregisters: Long = 0, - msgsReceived: Long = 0, - msgsProcessed: Long = 0, - restarts: Long = 0)(implicit system: ActorSystem): Unit = { - assertRef(actorRef, dispatcher)( - suspensions, - resumes, - registers, - unregisters, - 
msgsReceived, - msgsProcessed, - restarts) + suspensions: Long = 0, + resumes: Long = 0, + registers: Long = 0, + unregisters: Long = 0, + msgsReceived: Long = 0, + msgsProcessed: Long = 0, + restarts: Long = 0)(implicit system: ActorSystem): Unit = { + assertRef(actorRef, dispatcher)(suspensions, resumes, registers, unregisters, msgsReceived, msgsProcessed, restarts) } def assertRef(actorRef: ActorRef, dispatcher: MessageDispatcher = null)( - suspensions: Long = statsFor(actorRef, dispatcher).suspensions.get(), - resumes: Long = statsFor(actorRef, dispatcher).resumes.get(), - registers: Long = statsFor(actorRef, dispatcher).registers.get(), - unregisters: Long = statsFor(actorRef, dispatcher).unregisters.get(), - msgsReceived: Long = statsFor(actorRef, dispatcher).msgsReceived.get(), - msgsProcessed: Long = statsFor(actorRef, dispatcher).msgsProcessed.get(), - restarts: Long = statsFor(actorRef, dispatcher).restarts.get())(implicit system: ActorSystem): Unit = { - val stats = statsFor(actorRef, Option(dispatcher).getOrElse(actorRef.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].dispatcher)) + suspensions: Long = statsFor(actorRef, dispatcher).suspensions.get(), + resumes: Long = statsFor(actorRef, dispatcher).resumes.get(), + registers: Long = statsFor(actorRef, dispatcher).registers.get(), + unregisters: Long = statsFor(actorRef, dispatcher).unregisters.get(), + msgsReceived: Long = statsFor(actorRef, dispatcher).msgsReceived.get(), + msgsProcessed: Long = statsFor(actorRef, dispatcher).msgsProcessed.get(), + restarts: Long = statsFor(actorRef, dispatcher).restarts.get())(implicit system: ActorSystem): Unit = { + val stats = statsFor(actorRef, + Option(dispatcher).getOrElse( + actorRef.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].dispatcher)) val deadline = System.currentTimeMillis + 1000 try { await(deadline)(stats.suspensions.get() == suspensions) @@ -224,13 +227,13 @@ object ActorModelSpec { 
await(deadline)(stats.restarts.get() == restarts) } catch { case e: Throwable => - system.eventStream.publish(Error( - e, - Option(dispatcher).toString, - (Option(dispatcher) getOrElse this).getClass, - "actual: " + stats + ", required: InterceptorStats(susp=" + suspensions + - ",res=" + resumes + ",reg=" + registers + ",unreg=" + unregisters + - ",recv=" + msgsReceived + ",proc=" + msgsProcessed + ",restart=" + restarts)) + system.eventStream.publish( + Error(e, + Option(dispatcher).toString, + Option(dispatcher).getOrElse(this).getClass, + "actual: " + stats + ", required: InterceptorStats(susp=" + suspensions + + ",res=" + resumes + ",reg=" + registers + ",unreg=" + unregisters + + ",recv=" + msgsReceived + ",proc=" + msgsProcessed + ",restart=" + restarts)) throw e } } @@ -273,14 +276,13 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa assertDispatcher(dispatcher)(stops = 0) system.stop(a) assertDispatcher(dispatcher)(stops = 1) - assertRef(a, dispatcher)( - suspensions = 0, - resumes = 0, - registers = 1, - unregisters = 1, - msgsReceived = 0, - msgsProcessed = 0, - restarts = 0) + assertRef(a, dispatcher)(suspensions = 0, + resumes = 0, + registers = 1, + unregisters = 1, + msgsReceived = 0, + msgsProcessed = 0, + restarts = 0) for (i <- 1 to 10) yield Future { i } assertDispatcher(dispatcher)(stops = 2) @@ -335,7 +337,8 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa def spawn(f: => Unit): Unit = { (new Thread { override def run(): Unit = - try f catch { + try f + catch { case e: Throwable => system.eventStream.publish(Error(e, "spawn", this.getClass, "error in spawned thread")) } }).start() @@ -353,12 +356,15 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa a.resume(causedByFailure = null) assertCountDown(done, 3.seconds.dilated.toMillis, "Should resume processing of messages when resumed") - assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, 
msgsProcessed = 1, - suspensions = 1, resumes = 1) + assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, msgsProcessed = 1, suspensions = 1, resumes = 1) system.stop(a) - assertRefDefaultZero(a)(registers = 1, unregisters = 1, msgsReceived = 1, msgsProcessed = 1, - suspensions = 1, resumes = 1) + assertRefDefaultZero(a)(registers = 1, + unregisters = 1, + msgsReceived = 1, + msgsProcessed = 1, + suspensions = 1, + resumes = 1) } "handle waves of actors" in { @@ -381,9 +387,10 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa // the boss doesn't create children fast enough to keep the dispatcher from becoming empty // and it needs to be on a separate thread to not deadlock the calling thread dispatcher new Thread(new Runnable { - def run() = Future { - keepAliveLatch.await(waitTime, TimeUnit.MILLISECONDS) - }(dispatcher) + def run() = + Future { + keepAliveLatch.await(waitTime, TimeUnit.MILLISECONDS) + }(dispatcher) }).start() boss ! "run" try { @@ -395,18 +402,18 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa val team = dispatcher.team val mq = dispatcher.messageQueue - System.err.println("Teammates left: " + team.size + " stopLatch: " + stopLatch.getCount + " inhab:" + dispatcher.inhabitants) + System.err.println( + "Teammates left: " + team.size + " stopLatch: " + stopLatch.getCount + " inhab:" + dispatcher.inhabitants) import scala.collection.JavaConverters._ - team.asScala.toList - .sortBy(_.self.path) - .foreach { cell: ActorCell => - System.err.println(" - " + cell.self.path + " " + cell.isTerminated + " " + cell.mailbox.currentStatus + " " - + cell.mailbox.numberOfMessages + " " + cell.mailbox.systemDrain(SystemMessageList.LNil).size) - } + team.asScala.toList.sortBy(_.self.path).foreach { cell: ActorCell => + System.err.println( + " - " + cell.self.path + " " + cell.isTerminated + " " + cell.mailbox.currentStatus + " " + + cell.mailbox.numberOfMessages + " " + 
cell.mailbox.systemDrain(SystemMessageList.LNil).size) + } System.err.println("Mailbox: " + mq.numberOfMessages + " " + mq.hasMessages) - Iterator.continually(mq.dequeue) takeWhile (_ ne null) foreach System.err.println + Iterator.continually(mq.dequeue).takeWhile(_ ne null).foreach(System.err.println) case _ => } @@ -425,38 +432,37 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa } "continue to process messages when a thread gets interrupted and throws an exception" in { - filterEvents( - EventFilter[InterruptedException](), - EventFilter[ActorInterruptedException](), - EventFilter[akka.event.Logging.LoggerException]()) { - implicit val dispatcher = interceptedDispatcher() - val a = newTestActor(dispatcher.id) - val f1 = a ? Reply("foo") - val f2 = a ? Reply("bar") - val f3 = a ? Interrupt - Thread.interrupted() // CallingThreadDispatcher may necessitate this - val f4 = a ? Reply("foo2") - val f5 = a ? Interrupt - Thread.interrupted() // CallingThreadDispatcher may necessitate this - val f6 = a ? Reply("bar2") + filterEvents(EventFilter[InterruptedException](), + EventFilter[ActorInterruptedException](), + EventFilter[akka.event.Logging.LoggerException]()) { + implicit val dispatcher = interceptedDispatcher() + val a = newTestActor(dispatcher.id) + val f1 = a ? Reply("foo") + val f2 = a ? Reply("bar") + val f3 = a ? Interrupt + Thread.interrupted() // CallingThreadDispatcher may necessitate this + val f4 = a ? Reply("foo2") + val f5 = a ? Interrupt + Thread.interrupted() // CallingThreadDispatcher may necessitate this + val f6 = a ? 
Reply("bar2") - val c = system.scheduler.scheduleOnce(2.seconds) { - import collection.JavaConverters._ - Thread.getAllStackTraces().asScala foreach { - case (thread, stack) => - println(s"$thread:") - stack foreach (s => println(s"\t$s")) - } + val c = system.scheduler.scheduleOnce(2.seconds) { + import collection.JavaConverters._ + Thread.getAllStackTraces().asScala.foreach { + case (thread, stack) => + println(s"$thread:") + stack.foreach(s => println(s"\t$s")) } - assert(Await.result(f1, timeout.duration) === "foo") - assert(Await.result(f2, timeout.duration) === "bar") - assert(Await.result(f4, timeout.duration) === "foo2") - assert(intercept[ActorInterruptedException](Await.result(f3, timeout.duration)).getCause.getMessage === "Ping!") - assert(Await.result(f6, timeout.duration) === "bar2") - assert(intercept[ActorInterruptedException](Await.result(f5, timeout.duration)).getCause.getMessage === "Ping!") - c.cancel() - Thread.sleep(300) // give the EventFilters a chance of catching all messages } + assert(Await.result(f1, timeout.duration) === "foo") + assert(Await.result(f2, timeout.duration) === "bar") + assert(Await.result(f4, timeout.duration) === "foo2") + assert(intercept[ActorInterruptedException](Await.result(f3, timeout.duration)).getCause.getMessage === "Ping!") + assert(Await.result(f6, timeout.duration) === "bar2") + assert(intercept[ActorInterruptedException](Await.result(f5, timeout.duration)).getCause.getMessage === "Ping!") + c.cancel() + Thread.sleep(300) // give the EventFilters a chance of catching all messages + } } "continue to process messages without failure when a thread gets interrupted and doesn't throw an exception" in { @@ -522,26 +528,25 @@ object DispatcherModelSpec { type = PinnedDispatcher } """ + - // use unique dispatcher id for each test, since MessageDispatcherInterceptor holds state - (for (n <- 1 to 30) yield """ + // use unique dispatcher id for each test, since MessageDispatcherInterceptor holds state + (for (n <- 1 to 
30) yield """ test-dispatcher-%s { type = "akka.actor.dispatch.DispatcherModelSpec$MessageDispatcherInterceptorConfigurator" }""".format(n)).mkString } class MessageDispatcherInterceptorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) - extends MessageDispatcherConfigurator(config, prerequisites) { + extends MessageDispatcherConfigurator(config, prerequisites) { import akka.util.Helpers.ConfigOps private val instance: MessageDispatcher = - new Dispatcher( - this, - config.getString("id"), - config.getInt("throughput"), - config.getNanosDuration("throughput-deadline-time"), - configureExecutor(), - config.getMillisDuration("shutdown-timeout")) with MessageDispatcherInterceptor + new Dispatcher(this, + config.getString("id"), + config.getInt("throughput"), + config.getNanosDuration("throughput-deadline-time"), + configureExecutor(), + config.getMillisDuration("shutdown-timeout")) with MessageDispatcherInterceptor override def dispatcher(): MessageDispatcher = instance } @@ -554,7 +559,9 @@ class DispatcherModelSpec extends ActorModelSpec(DispatcherModelSpec.config) { override def interceptedDispatcher(): MessageDispatcherInterceptor = { // use new id for each test, since the MessageDispatcherInterceptor holds state - system.dispatchers.lookup("test-dispatcher-" + dispatcherCount.incrementAndGet()).asInstanceOf[MessageDispatcherInterceptor] + system.dispatchers + .lookup("test-dispatcher-" + dispatcherCount.incrementAndGet()) + .asInstanceOf[MessageDispatcherInterceptor] } override def dispatcherType = "Dispatcher" @@ -595,8 +602,9 @@ object BalancingDispatcherModelSpec { type = PinnedDispatcher } """ + - // use unique dispatcher id for each test, since MessageDispatcherInterceptor holds state - (for (n <- 1 to 30) yield """ + // use unique dispatcher id for each test, since MessageDispatcherInterceptor holds state + (for (n <- 1 to 30) + yield """ test-balancing-dispatcher-%s { type = 
"akka.actor.dispatch.BalancingDispatcherModelSpec$BalancingMessageDispatcherInterceptorConfigurator" throughput=1 @@ -604,20 +612,19 @@ object BalancingDispatcherModelSpec { } class BalancingMessageDispatcherInterceptorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) - extends BalancingDispatcherConfigurator(config, prerequisites) { + extends BalancingDispatcherConfigurator(config, prerequisites) { import akka.util.Helpers.ConfigOps override protected def create(mailboxType: MailboxType): BalancingDispatcher = - new BalancingDispatcher( - this, - config.getString("id"), - config.getInt("throughput"), - config.getNanosDuration("throughput-deadline-time"), - mailboxType, - configureExecutor(), - config.getMillisDuration("shutdown-timeout"), - config.getBoolean("attempt-teamwork")) with MessageDispatcherInterceptor + new BalancingDispatcher(this, + config.getString("id"), + config.getInt("throughput"), + config.getNanosDuration("throughput-deadline-time"), + mailboxType, + configureExecutor(), + config.getMillisDuration("shutdown-timeout"), + config.getBoolean("attempt-teamwork")) with MessageDispatcherInterceptor } } @@ -628,7 +635,9 @@ class BalancingDispatcherModelSpec extends ActorModelSpec(BalancingDispatcherMod override def interceptedDispatcher(): MessageDispatcherInterceptor = { // use new id for each test, since the MessageDispatcherInterceptor holds state - system.dispatchers.lookup("test-balancing-dispatcher-" + dispatcherCount.incrementAndGet()).asInstanceOf[MessageDispatcherInterceptor] + system.dispatchers + .lookup("test-balancing-dispatcher-" + dispatcherCount.incrementAndGet()) + .asInstanceOf[MessageDispatcherInterceptor] } override def dispatcherType = "Balancing Dispatcher" diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/BalancingDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/BalancingDispatcherSpec.scala index 28149fbbdb..bb9a2c8bc4 100644 --- 
a/akka-actor-tests/src/test/scala/akka/actor/dispatch/BalancingDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/BalancingDispatcherSpec.scala @@ -4,9 +4,9 @@ package akka.actor.dispatch -import java.util.concurrent.{ TimeUnit, CountDownLatch } +import java.util.concurrent.{ CountDownLatch, TimeUnit } -import akka.actor.{ Props, ActorRefWithCell, ActorCell, Actor } +import akka.actor.{ Actor, ActorCell, ActorRefWithCell, Props } import akka.dispatch.Mailbox import akka.testkit.AkkaSpec @@ -48,15 +48,18 @@ class BalancingDispatcherSpec extends AkkaSpec(BalancingDispatcherSpec.config) { def receive = { case _ => {} } } - class ChildActor extends ParentActor { - } + class ChildActor extends ParentActor {} "A BalancingDispatcher" must { "have fast actor stealing work from slow actor" in { val finishedCounter = new CountDownLatch(110) - val slow = system.actorOf(Props(new DelayableActor(50, finishedCounter)).withDispatcher(delayableActorDispatcher)).asInstanceOf[ActorRefWithCell] - val fast = system.actorOf(Props(new DelayableActor(10, finishedCounter)).withDispatcher(delayableActorDispatcher)).asInstanceOf[ActorRefWithCell] + val slow = system + .actorOf(Props(new DelayableActor(50, finishedCounter)).withDispatcher(delayableActorDispatcher)) + .asInstanceOf[ActorRefWithCell] + val fast = system + .actorOf(Props(new DelayableActor(10, finishedCounter)).withDispatcher(delayableActorDispatcher)) + .asInstanceOf[ActorRefWithCell] var sentToFast = 0 @@ -84,7 +87,7 @@ class BalancingDispatcherSpec extends AkkaSpec(BalancingDispatcherSpec.config) { slow.underlying.asInstanceOf[ActorCell].mailbox.asInstanceOf[Mailbox].hasMessages should ===(false) fast.underlying.asInstanceOf[ActorCell].actor.asInstanceOf[DelayableActor].invocationCount should be > sentToFast fast.underlying.asInstanceOf[ActorCell].actor.asInstanceOf[DelayableActor].invocationCount should be > - 
(slow.underlying.asInstanceOf[ActorCell].actor.asInstanceOf[DelayableActor].invocationCount) + (slow.underlying.asInstanceOf[ActorCell].actor.asInstanceOf[DelayableActor].invocationCount) system.stop(slow) system.stop(fast) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala index affe649236..e4beac70be 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala @@ -9,7 +9,7 @@ import language.postfixOps import java.util.concurrent.{ CountDownLatch, TimeUnit } import java.util.concurrent.atomic.{ AtomicBoolean } import akka.testkit.{ AkkaSpec } -import akka.actor.{ Props, Actor } +import akka.actor.{ Actor, Props } import scala.concurrent.Await import scala.concurrent.duration._ import akka.testkit.DefaultTimeout @@ -80,19 +80,19 @@ class DispatcherActorSpec extends AkkaSpec(DispatcherActorSpec.config) with Defa val latch = new CountDownLatch(100) val start = new CountDownLatch(1) val fastOne = system.actorOf( - Props(new Actor { def receive = { case "sabotage" => works.set(false) } }) - .withDispatcher(throughputDispatcher)) + Props(new Actor { def receive = { case "sabotage" => works.set(false) } }).withDispatcher(throughputDispatcher)) - val slowOne = system.actorOf( - Props(new Actor { - def receive = { - case "hogexecutor" => { sender() ! "OK"; start.await } - case "ping" => if (works.get) latch.countDown() - } - }).withDispatcher(throughputDispatcher)) + val slowOne = system.actorOf(Props(new Actor { + def receive = { + case "hogexecutor" => { sender() ! "OK"; start.await } + case "ping" => if (works.get) latch.countDown() + } + }).withDispatcher(throughputDispatcher)) assert(Await.result(slowOne ? "hogexecutor", timeout.duration) === "OK") - (1 to 100) foreach { _ => slowOne ! "ping" } + (1 to 100).foreach { _ => + slowOne ! 
"ping" + } fastOne ! "sabotage" start.countDown() latch.await(10, TimeUnit.SECONDS) @@ -110,20 +110,18 @@ class DispatcherActorSpec extends AkkaSpec(DispatcherActorSpec.config) with Defa val start = new CountDownLatch(1) val ready = new CountDownLatch(1) - val fastOne = system.actorOf( - Props(new Actor { - def receive = { - case "ping" => if (works.get) latch.countDown(); context.stop(self) - } - }).withDispatcher(throughputDispatcher)) + val fastOne = system.actorOf(Props(new Actor { + def receive = { + case "ping" => if (works.get) latch.countDown(); context.stop(self) + } + }).withDispatcher(throughputDispatcher)) - val slowOne = system.actorOf( - Props(new Actor { - def receive = { - case "hogexecutor" => { ready.countDown(); start.await } - case "ping" => { works.set(false); context.stop(self) } - } - }).withDispatcher(throughputDispatcher)) + val slowOne = system.actorOf(Props(new Actor { + def receive = { + case "hogexecutor" => { ready.countDown(); start.await } + case "ping" => { works.set(false); context.stop(self) } + } + }).withDispatcher(throughputDispatcher)) slowOne ! "hogexecutor" slowOne ! 
"ping" diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala index abce01dffc..66f025641e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala @@ -70,7 +70,8 @@ object DispatchersSpec { } class OneShotMailboxType(settings: ActorSystem.Settings, config: Config) - extends MailboxType with ProducesMessageQueue[DoublingMailbox] { + extends MailboxType + with ProducesMessageQueue[DoublingMailbox] { val created = new AtomicBoolean(false) override def create(owner: Option[ActorRef], system: Option[ActorSystem]) = if (created.compareAndSet(false, true)) { @@ -82,8 +83,8 @@ object DispatchersSpec { class DoublingMailbox(owner: Option[ActorRef]) extends UnboundedQueueBasedMessageQueue { final val queue = new ConcurrentLinkedQueue[Envelope]() override def enqueue(receiver: ActorRef, handle: Envelope): Unit = { - queue add handle - queue add handle + queue.add(handle) + queue.add(handle) } } @@ -108,19 +109,20 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend val id = "id" def instance(dispatcher: MessageDispatcher): (MessageDispatcher) => Boolean = _ == dispatcher - def ofType[T <: MessageDispatcher: ClassTag]: (MessageDispatcher) => Boolean = _.getClass == implicitly[ClassTag[T]].runtimeClass + def ofType[T <: MessageDispatcher: ClassTag]: (MessageDispatcher) => Boolean = + _.getClass == implicitly[ClassTag[T]].runtimeClass - def typesAndValidators: Map[String, (MessageDispatcher) => Boolean] = Map( - "PinnedDispatcher" -> ofType[PinnedDispatcher], - "Dispatcher" -> ofType[Dispatcher]) + def typesAndValidators: Map[String, (MessageDispatcher) => Boolean] = + Map("PinnedDispatcher" -> ofType[PinnedDispatcher], "Dispatcher" -> ofType[Dispatcher]) def validTypes = typesAndValidators.keys.toList val defaultDispatcherConfig = 
settings.config.getConfig("akka.actor.default-dispatcher") lazy val allDispatchers: Map[String, MessageDispatcher] = { - validTypes.map(t => (t, from(ConfigFactory.parseMap(Map(tipe -> t, id -> t).asJava). - withFallback(defaultDispatcherConfig)))).toMap + validTypes + .map(t => (t, from(ConfigFactory.parseMap(Map(tipe -> t, id -> t).asJava).withFallback(defaultDispatcherConfig)))) + .toMap } def assertMyDispatcherIsUsed(actor: ActorRef): Unit = { @@ -157,8 +159,10 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend "throw ConfigurationException if type does not exist" in { intercept[ConfigurationException] { - from(ConfigFactory.parseMap(Map(tipe -> "typedoesntexist", id -> "invalid-dispatcher").asJava). - withFallback(defaultDispatcherConfig)) + from( + ConfigFactory + .parseMap(Map(tipe -> "typedoesntexist", id -> "invalid-dispatcher").asJava) + .withFallback(defaultDispatcherConfig)) } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/PinnedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/PinnedActorSpec.scala index b9bede63ca..72cf66d471 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/PinnedActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/PinnedActorSpec.scala @@ -7,7 +7,7 @@ package akka.actor.dispatch import java.util.concurrent.{ CountDownLatch, TimeUnit } import akka.testkit._ -import akka.actor.{ Props, Actor } +import akka.actor.{ Actor, Props } import akka.testkit.AkkaSpec import org.scalatest.BeforeAndAfterEach import scala.concurrent.Await @@ -38,7 +38,8 @@ class PinnedActorSpec extends AkkaSpec(PinnedActorSpec.config) with BeforeAndAft "support tell" in { var oneWay = new CountDownLatch(1) - val actor = system.actorOf(Props(new Actor { def receive = { case "OneWay" => oneWay.countDown() } }).withDispatcher("pinned-dispatcher")) + val actor = system.actorOf( + Props(new Actor { def receive = { case "OneWay" => oneWay.countDown() } 
}).withDispatcher("pinned-dispatcher")) val result = actor ! "OneWay" assert(oneWay.await(1, TimeUnit.SECONDS)) system.stop(actor) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dungeon/DispatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dungeon/DispatchSpec.scala index 22a6e42425..4c09027eda 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dungeon/DispatchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dungeon/DispatchSpec.scala @@ -23,10 +23,9 @@ class DispatchSpec extends AkkaSpec("akka.actor.serialize-messages = on") with D "The dispatcher" should { "log an appropriate message when akka.actor.serialize-messages triggers a serialization error" in { val actor = system.actorOf(Props[EmptyActor]) - EventFilter[Exception](pattern = ".*NoSerializationVerificationNeeded.*", occurrences = 1) intercept { + EventFilter[Exception](pattern = ".*NoSerializationVerificationNeeded.*", occurrences = 1).intercept { actor ! new UnserializableMessageClass } } } } - diff --git a/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala index 862c6de2eb..7ff3264df8 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala @@ -20,20 +20,21 @@ class ListenerSpec extends AkkaSpec { val barCount = new AtomicInteger(0) val broadcast = system.actorOf(Props(new Actor with Listeners { - def receive = listenerManagement orElse { + def receive = listenerManagement.orElse { case "foo" => gossip("bar") } })) - def newListener = system.actorOf(Props(new Actor { - def receive = { - case "bar" => - barCount.incrementAndGet - barLatch.countDown() - case "foo" => - fooLatch.countDown() - } - })) + def newListener = + system.actorOf(Props(new Actor { + def receive = { + case "bar" => + barCount.incrementAndGet + barLatch.countDown() + case "foo" => + fooLatch.countDown() + } + 
})) val a1 = newListener val a2 = newListener diff --git a/akka-actor-tests/src/test/scala/akka/actor/setup/ActorSystemSetupSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/setup/ActorSystemSetupSpec.scala index 728e838802..54e7cd2080 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/setup/ActorSystemSetupSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/setup/ActorSystemSetupSpec.scala @@ -18,8 +18,7 @@ class ActorSystemSetupSpec extends WordSpec with Matchers { "store and retrieve a setup" in { val setup = DummySetup("Al Dente") - val setups = ActorSystemSetup() - .withSetup(setup) + val setups = ActorSystemSetup().withSetup(setup) setups.get[DummySetup] should ===(Some(setup)) setups.get[DummySetup2] should ===(None) @@ -28,9 +27,7 @@ class ActorSystemSetupSpec extends WordSpec with Matchers { "replace setup if already defined" in { val setup1 = DummySetup("Al Dente") val setup2 = DummySetup("Earl E. Bird") - val setups = ActorSystemSetup() - .withSetup(setup1) - .withSetup(setup2) + val setups = ActorSystemSetup().withSetup(setup1).withSetup(setup2) setups.get[DummySetup] should ===(Some(setup2)) } @@ -61,10 +58,7 @@ class ActorSystemSetupSpec extends WordSpec with Matchers { val setup = DummySetup("Tad Moore") system = ActorSystem("name", ActorSystemSetup(setup)) - system - .settings - .setup - .get[DummySetup] should ===(Some(setup)) + system.settings.setup.get[DummySetup] should ===(Some(setup)) } finally { TestKit.shutdownActorSystem(system) diff --git a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala index 46dedb7e28..78823c88ff 100644 --- a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala +++ b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala @@ -19,13 +19,13 @@ class Future2ActorSpec extends AkkaSpec with DefaultTimeout { "The Future2Actor bridge" must { "support convenient sending to multiple destinations" in { - 
Future(42) pipeTo testActor pipeTo testActor + Future(42).pipeTo(testActor).pipeTo(testActor) expectMsgAllOf(1 second, 42, 42) } "support convenient sending to multiple destinations with implicit sender" in { implicit val someActor = system.actorOf(Props(new Actor { def receive = Actor.emptyBehavior })) - Future(42) pipeTo testActor pipeTo testActor + Future(42).pipeTo(testActor).pipeTo(testActor) expectMsgAllOf(1 second, 42, 42) lastSender should ===(someActor) } @@ -40,8 +40,8 @@ class Future2ActorSpec extends AkkaSpec with DefaultTimeout { "support reply via sender" in { val actor = system.actorOf(Props(new Actor { def receive = { - case "do" => Future(31) pipeTo context.sender() - case "ex" => Future(throw new AssertionError) pipeTo context.sender() + case "do" => Future(31).pipeTo(context.sender()) + case "ex" => Future(throw new AssertionError).pipeTo(context.sender()) } })) Await.result(actor ? "do", timeout.duration) should ===(31) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/ControlAwareDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ControlAwareDispatcherSpec.scala index 213c981263..14fd8a9f29 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/ControlAwareDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ControlAwareDispatcherSpec.scala @@ -4,7 +4,7 @@ package akka.dispatch -import akka.testkit.{ DefaultTimeout, AkkaSpec } +import akka.testkit.{ AkkaSpec, DefaultTimeout } import akka.actor.{ Actor, Props } object ControlAwareDispatcherSpec { diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/DispatcherShutdownSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/DispatcherShutdownSpec.scala index 47342338f2..ad7c54305b 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/DispatcherShutdownSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/DispatcherShutdownSpec.scala @@ -18,11 +18,13 @@ class DispatcherShutdownSpec extends WordSpec with Matchers { "eventually 
shutdown when used after system terminate" in { val threads = ManagementFactory.getThreadMXBean() - def threadCount = threads - .dumpAllThreads(false, false).toList - .map(_.getThreadName) - .filter(_.startsWith("DispatcherShutdownSpec-akka.actor.default")) - .size + def threadCount = + threads + .dumpAllThreads(false, false) + .toList + .map(_.getThreadName) + .filter(_.startsWith("DispatcherShutdownSpec-akka.actor.default")) + .size val system = ActorSystem("DispatcherShutdownSpec") threadCount should be > 0 diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala index 809e2d2ab3..bb624fac8f 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala @@ -4,12 +4,12 @@ package akka.dispatch -import java.util.concurrent.{ ExecutorService, Executor, Executors } +import java.util.concurrent.{ Executor, ExecutorService, Executors } import java.util.concurrent.atomic.AtomicInteger import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor, ExecutionContextExecutorService } -import scala.concurrent.{ Await, blocking, Promise, Future } +import scala.concurrent.{ blocking, Await, Future, Promise } import scala.concurrent.duration._ -import akka.testkit.{ TestLatch, AkkaSpec, DefaultTimeout } +import akka.testkit.{ AkkaSpec, DefaultTimeout, TestLatch } import akka.util.SerializedSuspendableExecutionContext import akka.testkit.TestActorRef import akka.actor.Props @@ -45,21 +45,23 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { import system.dispatcher - def batchable[T](f: => T)(implicit ec: ExecutionContext): Unit = ec.execute(new Batchable { - override def isBatchable = true - override def run: Unit = f - }) + def batchable[T](f: => T)(implicit ec: ExecutionContext): Unit = + ec.execute(new Batchable { + override def isBatchable = true 
+ override def run: Unit = f + }) val p = Promise[Unit]() batchable { val lock, callingThreadLock, count = new AtomicInteger(0) callingThreadLock.compareAndSet(0, 1) // Enable the lock - (1 to 100) foreach { i => + (1 to 100).foreach { i => batchable { if (callingThreadLock.get != 0) p.tryFailure(new IllegalStateException("Batch was executed inline!")) else if (count.incrementAndGet == 100) p.trySuccess(()) //Done else if (lock.compareAndSet(0, 1)) { - try Thread.sleep(10) finally lock.compareAndSet(1, 0) + try Thread.sleep(10) + finally lock.compareAndSet(1, 0) } else p.tryFailure(new IllegalStateException("Executed batch in parallel!")) } } @@ -72,14 +74,15 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { system.dispatcher.isInstanceOf[BatchingExecutor] should ===(true) import system.dispatcher - def batchable[T](f: => T)(implicit ec: ExecutionContext): Unit = ec.execute(new Batchable { - override def isBatchable = true - override def run: Unit = f - }) + def batchable[T](f: => T)(implicit ec: ExecutionContext): Unit = + ec.execute(new Batchable { + override def isBatchable = true + override def run: Unit = f + }) val latch = TestLatch(101) batchable { - (1 to 100) foreach { i => + (1 to 100).foreach { i => batchable { val deadlock = TestLatch(1) batchable { deadlock.open() } @@ -100,7 +103,9 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { // this needs to be within an OnCompleteRunnable so that things are added to the batch val p = Future.successful(42) // we need the callback list to be non-empty when the blocking{} call is executing - p.onComplete { _ => () } + p.onComplete { _ => + () + } val r = p.map { _ => // trigger the resubmitUnbatched() call blocking { () } @@ -109,7 +114,9 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { // now try again to blockOn() blocking { () } } - p.onComplete { _ => () } + p.onComplete { _ => + () + } r } Await.result(f, 3.seconds) should be(()) @@ -157,7 +164,7 @@ 
class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { })) val b = TestActorRef(Props(new Actor { def receive = { - case msg => a forward msg + case msg => a.forward(msg) } })) val p = TestProbe() @@ -193,7 +200,7 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { "be suspendable and resumable" in { val sec = SerializedSuspendableExecutionContext(1)(ExecutionContext.global) val counter = new AtomicInteger(0) - def perform(f: Int => Int) = sec execute new Runnable { def run = counter.set(f(counter.get)) } + def perform(f: Int => Int) = sec.execute(new Runnable { def run = counter.set(f(counter.get)) }) perform(_ + 1) perform(x => { sec.suspend(); x * 2 }) awaitCond(counter.get == 2) @@ -220,10 +227,12 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { val throughput = 25 val sec = SerializedSuspendableExecutionContext(throughput)(underlying) sec.suspend() - def perform(f: Int => Int) = sec execute new Runnable { def run = counter.set(f(counter.get)) } + def perform(f: Int => Int) = sec.execute(new Runnable { def run = counter.set(f(counter.get)) }) val total = 1000 - 1 to total foreach { _ => perform(_ + 1) } + (1 to total).foreach { _ => + perform(_ + 1) + } sec.size() should ===(total) sec.resume() awaitCond(counter.get == total) @@ -235,9 +244,11 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { val sec = SerializedSuspendableExecutionContext(1)(ExecutionContext.global) val total = 10000 val counter = new AtomicInteger(0) - def perform(f: Int => Int) = sec execute new Runnable { def run = counter.set(f(counter.get)) } + def perform(f: Int => Int) = sec.execute(new Runnable { def run = counter.set(f(counter.get)) }) - 1 to total foreach { i => perform(c => if (c == (i - 1)) c + 1 else c) } + (1 to total).foreach { i => + perform(c => if (c == (i - 1)) c + 1 else c) + } awaitCond(counter.get == total) sec.isEmpty should ===(true) } @@ -252,9 +263,11 @@ class ExecutionContextSpec extends AkkaSpec with 
DefaultTimeout { val throughput = 25 val sec = SerializedSuspendableExecutionContext(throughput)(underlying) sec.suspend() - def perform(f: Int => Int) = sec execute new Runnable { def run = counter.set(f(counter.get)) } + def perform(f: Int => Int) = sec.execute(new Runnable { def run = counter.set(f(counter.get)) }) perform(_ + 1) - 1 to 10 foreach { _ => perform(identity) } + (1 to 10).foreach { _ => + perform(identity) + } perform(x => { sec.suspend(); x * 2 }) perform(_ + 8) sec.size should ===(13) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/ForkJoinPoolStarvationSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ForkJoinPoolStarvationSpec.scala index 6f4199bf5b..5f7cb0b0da 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/ForkJoinPoolStarvationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ForkJoinPoolStarvationSpec.scala @@ -5,12 +5,11 @@ package akka.dispatch import akka.actor.{ Actor, Props } -import akka.testkit.{ ImplicitSender, AkkaSpec } +import akka.testkit.{ AkkaSpec, ImplicitSender } import com.typesafe.config.ConfigFactory object ForkJoinPoolStarvationSpec { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" |actorhang { | | task-dispatcher { diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala index e3d2f23a28..86aee39be0 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala @@ -6,12 +6,12 @@ package akka.dispatch import language.postfixOps -import java.util.concurrent.{ ConcurrentLinkedQueue, BlockingQueue } -import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll } +import java.util.concurrent.{ BlockingQueue, ConcurrentLinkedQueue } +import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import com.typesafe.config.{ Config, ConfigFactory } 
import akka.actor._ -import akka.testkit.{ EventFilter, AkkaSpec } -import scala.concurrent.{ Future, Await, ExecutionContext } +import akka.testkit.{ AkkaSpec, EventFilter } +import scala.concurrent.{ Await, ExecutionContext, Future } import scala.concurrent.duration._ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAndAfterEach { @@ -126,57 +126,57 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn q.hasMessages should ===(false) } - def testEnqueueDequeue( - config: MailboxType, - enqueueN: Int = 10000, - dequeueN: Int = 10000, - parallel: Boolean = true): Unit = within(10 seconds) { + def testEnqueueDequeue(config: MailboxType, + enqueueN: Int = 10000, + dequeueN: Int = 10000, + parallel: Boolean = true): Unit = within(10 seconds) { val q = factory(config) ensureInitialMailboxState(config, q) - EventFilter.warning( - pattern = "received dead letter without sender", - occurrences = (enqueueN - dequeueN)) intercept { + EventFilter + .warning(pattern = "received dead letter without sender", occurrences = (enqueueN - dequeueN)) + .intercept { - def createProducer(fromNum: Int, toNum: Int): Future[Vector[Envelope]] = spawn { - val messages = Vector() ++ (for (i <- fromNum to toNum) yield createMessageInvocation(i)) - for (i <- messages) q.enqueue(testActor, i) - messages + def createProducer(fromNum: Int, toNum: Int): Future[Vector[Envelope]] = spawn { + val messages = Vector() ++ (for (i <- fromNum to toNum) yield createMessageInvocation(i)) + for (i <- messages) q.enqueue(testActor, i) + messages + } + + val producers = { + val step = 500 + val ps = for (i <- (1 to enqueueN by step).toList) yield createProducer(i, Math.min(enqueueN, i + step - 1)) + + if (parallel == false) + ps.foreach { Await.ready(_, remainingOrDefault) } + + ps + } + + def createConsumer: Future[Vector[Envelope]] = spawn { + var r = Vector[Envelope]() + + while (producers.exists(_.isCompleted == false) || q.hasMessages) 
Option(q.dequeue).foreach { message => + r = r :+ message + } + + r + } + + val consumers = List.fill(maxConsumers)(createConsumer) + + val ps = producers.map(Await.result(_, remainingOrDefault)) + val cs = consumers.map(Await.result(_, remainingOrDefault)) + + ps.map(_.size).sum should ===(enqueueN) //Must have produced 1000 messages + cs.map(_.size).sum should ===(dequeueN) //Must have consumed all produced messages + //No message is allowed to be consumed by more than one consumer + cs.flatten.distinct.size should ===(dequeueN) + //All consumed messages should have been produced + cs.flatten.diff(ps.flatten).size should ===(0) + //The ones that were produced and not consumed + ps.flatten.diff(cs.flatten).size should ===(enqueueN - dequeueN) } - - val producers = { - val step = 500 - val ps = for (i <- (1 to enqueueN by step).toList) yield createProducer(i, Math.min(enqueueN, i + step - 1)) - - if (parallel == false) - ps foreach { Await.ready(_, remainingOrDefault) } - - ps - } - - def createConsumer: Future[Vector[Envelope]] = spawn { - var r = Vector[Envelope]() - - while (producers.exists(_.isCompleted == false) || q.hasMessages) - Option(q.dequeue) foreach { message => r = r :+ message } - - r - } - - val consumers = List.fill(maxConsumers)(createConsumer) - - val ps = producers.map(Await.result(_, remainingOrDefault)) - val cs = consumers.map(Await.result(_, remainingOrDefault)) - - ps.map(_.size).sum should ===(enqueueN) //Must have produced 1000 messages - cs.map(_.size).sum should ===(dequeueN) //Must have consumed all produced messages - //No message is allowed to be consumed by more than one consumer - cs.flatten.distinct.size should ===(dequeueN) - //All consumed messages should have been produced - (cs.flatten diff ps.flatten).size should ===(0) - //The ones that were produced and not consumed - (ps.flatten diff cs.flatten).size should ===(enqueueN - dequeueN) - } } } @@ -192,8 +192,9 @@ class PriorityMailboxSpec extends MailboxSpec { val comparator 
= PriorityGenerator(_.##) lazy val name = "The priority mailbox implementation" def factory = { - case UnboundedMailbox() => new UnboundedPriorityMailbox(comparator).create(None, None) - case BoundedMailbox(capacity, pushTimeOut) => new BoundedPriorityMailbox(comparator, capacity, pushTimeOut).create(None, None) + case UnboundedMailbox() => new UnboundedPriorityMailbox(comparator).create(None, None) + case BoundedMailbox(capacity, pushTimeOut) => + new BoundedPriorityMailbox(comparator, capacity, pushTimeOut).create(None, None) } } @@ -201,16 +202,18 @@ class StablePriorityMailboxSpec extends MailboxSpec { val comparator = PriorityGenerator(_.##) lazy val name = "The stable priority mailbox implementation" def factory = { - case UnboundedMailbox() => new UnboundedStablePriorityMailbox(comparator).create(None, None) - case BoundedMailbox(capacity, pushTimeOut) => new BoundedStablePriorityMailbox(comparator, capacity, pushTimeOut).create(None, None) + case UnboundedMailbox() => new UnboundedStablePriorityMailbox(comparator).create(None, None) + case BoundedMailbox(capacity, pushTimeOut) => + new BoundedStablePriorityMailbox(comparator, capacity, pushTimeOut).create(None, None) } } class ControlAwareMailboxSpec extends MailboxSpec { lazy val name = "The control aware mailbox implementation" def factory = { - case UnboundedMailbox() => new UnboundedControlAwareMailbox().create(None, None) - case BoundedMailbox(capacity, pushTimeOut) => new BoundedControlAwareMailbox(capacity, pushTimeOut).create(None, None) + case UnboundedMailbox() => new UnboundedControlAwareMailbox().create(None, None) + case BoundedMailbox(capacity, pushTimeOut) => + new BoundedControlAwareMailbox(capacity, pushTimeOut).create(None, None) } } @@ -271,26 +274,26 @@ object SingleConsumerOnlyMailboxVerificationSpec { }""") } -class SingleConsumerOnlyMailboxVerificationSpec extends AkkaSpec(SingleConsumerOnlyMailboxVerificationSpec.mailboxConf) { +class SingleConsumerOnlyMailboxVerificationSpec + 
extends AkkaSpec(SingleConsumerOnlyMailboxVerificationSpec.mailboxConf) { import SingleConsumerOnlyMailboxVerificationSpec.Ping def pathologicalPingPong(dispatcherId: String): Unit = { val total = 2000000 val runner = system.actorOf(Props(new Actor { - val a, b = context.watch( - context.actorOf(Props(new Actor { - var n = total / 2 - def receive = { - case Ping => - n -= 1 - sender() ! Ping - if (n == 0) - context stop self - } - }).withDispatcher(dispatcherId))) + val a, b = context.watch(context.actorOf(Props(new Actor { + var n = total / 2 + def receive = { + case Ping => + n -= 1 + sender() ! Ping + if (n == 0) + context.stop(self) + } + }).withDispatcher(dispatcherId))) def receive = { case Ping => a.tell(Ping, b) - case Terminated(`a` | `b`) => if (context.children.isEmpty) context stop self + case Terminated(`a` | `b`) => if (context.children.isEmpty) context.stop(self) } })) watch(runner) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala index 385fe99833..50f6c8151f 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala @@ -8,8 +8,8 @@ import language.postfixOps import com.typesafe.config.Config -import akka.actor.{ Props, ActorSystem, Actor } -import akka.testkit.{ DefaultTimeout, AkkaSpec } +import akka.actor.{ Actor, ActorSystem, Props } +import akka.testkit.{ AkkaSpec, DefaultTimeout } import scala.concurrent.duration._ object PriorityDispatcherSpec { @@ -22,15 +22,17 @@ object PriorityDispatcherSpec { } """ - class Unbounded(settings: ActorSystem.Settings, config: Config) extends UnboundedPriorityMailbox(PriorityGenerator({ - case i: Int => i //Reverse order - case 'Result => Int.MaxValue - }: Any => Int)) + class Unbounded(settings: ActorSystem.Settings, config: Config) + extends UnboundedPriorityMailbox(PriorityGenerator({ + 
case i: Int => i //Reverse order + case 'Result => Int.MaxValue + }: Any => Int)) - class Bounded(settings: ActorSystem.Settings, config: Config) extends BoundedPriorityMailbox(PriorityGenerator({ - case i: Int => i //Reverse order - case 'Result => Int.MaxValue - }: Any => Int), 1000, 10 seconds) + class Bounded(settings: ActorSystem.Settings, config: Config) + extends BoundedPriorityMailbox(PriorityGenerator({ + case i: Int => i //Reverse order + case 'Result => Int.MaxValue + }: Any => Int), 1000, 10 seconds) } @@ -60,7 +62,9 @@ class PriorityDispatcherSpec extends AkkaSpec(PriorityDispatcherSpec.config) wit val acc = scala.collection.mutable.ListBuffer[Int]() - scala.util.Random.shuffle(msgs) foreach { m => self ! m } + scala.util.Random.shuffle(msgs).foreach { m => + self ! m + } self.tell('Result, testActor) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala index bbf64251c7..7d482e4053 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/StablePriorityDispatcherSpec.scala @@ -8,8 +8,8 @@ import language.postfixOps import com.typesafe.config.Config -import akka.actor.{ Props, ActorSystem, Actor } -import akka.testkit.{ DefaultTimeout, AkkaSpec } +import akka.actor.{ Actor, ActorSystem, Props } +import akka.testkit.{ AkkaSpec, DefaultTimeout } import scala.concurrent.duration._ object StablePriorityDispatcherSpec { @@ -22,17 +22,19 @@ object StablePriorityDispatcherSpec { } """ - class Unbounded(settings: ActorSystem.Settings, config: Config) extends UnboundedStablePriorityMailbox(PriorityGenerator({ - case i: Int if i <= 100 => i // Small integers have high priority - case i: Int => 101 // Don't care for other integers - case 'Result => Int.MaxValue - }: Any => Int)) + class Unbounded(settings: ActorSystem.Settings, config: Config) + extends 
UnboundedStablePriorityMailbox(PriorityGenerator({ + case i: Int if i <= 100 => i // Small integers have high priority + case i: Int => 101 // Don't care for other integers + case 'Result => Int.MaxValue + }: Any => Int)) - class Bounded(settings: ActorSystem.Settings, config: Config) extends BoundedStablePriorityMailbox(PriorityGenerator({ - case i: Int if i <= 100 => i // Small integers have high priority - case i: Int => 101 // Don't care for other integers - case 'Result => Int.MaxValue - }: Any => Int), 1000, 10 seconds) + class Bounded(settings: ActorSystem.Settings, config: Config) + extends BoundedStablePriorityMailbox(PriorityGenerator({ + case i: Int if i <= 100 => i // Small integers have high priority + case i: Int => 101 // Don't care for other integers + case 'Result => Int.MaxValue + }: Any => Int), 1000, 10 seconds) } @@ -40,16 +42,16 @@ class StablePriorityDispatcherSpec extends AkkaSpec(StablePriorityDispatcherSpec "A StablePriorityDispatcher" must { "Order its messages according to the specified comparator while preserving FIFO for equal priority messages, " + - "using an unbounded mailbox" in { - val dispatcherKey = "unbounded-stable-prio-dispatcher" - testOrdering(dispatcherKey) - } + "using an unbounded mailbox" in { + val dispatcherKey = "unbounded-stable-prio-dispatcher" + testOrdering(dispatcherKey) + } "Order its messages according to the specified comparator while preserving FIFO for equal priority messages, " + - "using a bounded mailbox" in { - val dispatcherKey = "bounded-stable-prio-dispatcher" - testOrdering(dispatcherKey) - } + "using a bounded mailbox" in { + val dispatcherKey = "bounded-stable-prio-dispatcher" + testOrdering(dispatcherKey) + } def testOrdering(dispatcherKey: String): Unit = { val msgs = (1 to 200) toList @@ -64,7 +66,9 @@ class StablePriorityDispatcherSpec extends AkkaSpec(StablePriorityDispatcherSpec val acc = scala.collection.mutable.ListBuffer[Int]() - shuffled foreach { m => self ! 
m } + shuffled.foreach { m => + self ! m + } self.tell('Result, testActor) @@ -81,7 +85,7 @@ class StablePriorityDispatcherSpec extends AkkaSpec(StablePriorityDispatcherSpec // Low messages should come out first, and in priority order. High messages follow - they are equal priority and // should come out in the same order in which they were sent. val lo = (1 to 100) toList - val hi = shuffled filter { _ > 100 } + val hi = shuffled.filter { _ > 100 } expectMsgType[List[Int]] should ===(lo ++ hi) } } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/sysmsg/SystemMessageListSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/sysmsg/SystemMessageListSpec.scala index b0339c7cd6..0000022214 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/sysmsg/SystemMessageListSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/sysmsg/SystemMessageListSpec.scala @@ -101,7 +101,7 @@ class SystemMessageListSpec extends AkkaSpec { val fwdList = create3 :: create4 :: create5 :: ENil val revList = create2 :: create1 :: create0 :: LNil - val list = fwdList.reversePrepend(revList) + val list = fwdList.reversePrepend(revList) (list.head eq create0) should ===(true) (list.tail.head eq create1) should ===(true) diff --git a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala index cf1a78a29d..3d5afdbd2f 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala @@ -10,19 +10,21 @@ import org.scalatest.BeforeAndAfterEach import akka.testkit._ import scala.concurrent.duration._ -import akka.actor.{ Props, Actor, ActorRef, ActorSystem, PoisonPill } +import akka.actor.{ Actor, ActorRef, ActorSystem, PoisonPill, Props } import akka.japi.{ Procedure } import com.typesafe.config.{ Config, ConfigFactory } object EventBusSpec { class TestActorWrapperActor(testActor: ActorRef) extends Actor { def receive = { - 
case x => testActor forward x + case x => testActor.forward(x) } } } -abstract class EventBusSpec(busName: String, conf: Config = ConfigFactory.empty()) extends AkkaSpec(conf) with BeforeAndAfterEach { +abstract class EventBusSpec(busName: String, conf: Config = ConfigFactory.empty()) + extends AkkaSpec(conf) + with BeforeAndAfterEach { type BusType <: EventBus def createNewEventBus(): BusType @@ -40,7 +42,8 @@ abstract class EventBusSpec(busName: String, conf: Config = ConfigFactory.empty( busName must { def createNewSubscriber() = createSubscriber(testActor).asInstanceOf[bus.Subscriber] def getClassifierFor(event: BusType#Event) = classifierFor(event).asInstanceOf[bus.Classifier] - def createNewEvents(numberOfEvents: Int): Iterable[bus.Event] = createEvents(numberOfEvents).asInstanceOf[Iterable[bus.Event]] + def createNewEvents(numberOfEvents: Int): Iterable[bus.Event] = + createEvents(numberOfEvents).asInstanceOf[Iterable[bus.Event]] val events = createNewEvents(100) val event = events.head @@ -74,13 +77,15 @@ abstract class EventBusSpec(busName: String, conf: Config = ConfigFactory.empty( } "allow to add multiple subscribers" in { - val subscribers = (1 to 10) map { _ => createNewSubscriber() } + val subscribers = (1 to 10).map { _ => + createNewSubscriber() + } val events = createEvents(10) - val classifiers = events map getClassifierFor - subscribers.zip(classifiers) forall { case (s, c) => bus.subscribe(s, c) } should ===(true) - subscribers.zip(classifiers) forall { case (s, c) => bus.unsubscribe(s, c) } should ===(true) + val classifiers = events.map(getClassifierFor) + subscribers.zip(classifiers).forall { case (s, c) => bus.subscribe(s, c) } should ===(true) + subscribers.zip(classifiers).forall { case (s, c) => bus.unsubscribe(s, c) } should ===(true) - subscribers foreach (disposeSubscriber(system, _)) + subscribers.foreach(disposeSubscriber(system, _)) } "publishing events without any subscribers shouldn't be a problem" in { @@ -109,11 +114,17 @@ 
abstract class EventBusSpec(busName: String, conf: Config = ConfigFactory.empty( "publish the given event to all intended subscribers" in { val range = 0 until 10 - val subscribers = range map (_ => createNewSubscriber()) - subscribers foreach { s => bus.subscribe(s, classifier) should ===(true) } + val subscribers = range.map(_ => createNewSubscriber()) + subscribers.foreach { s => + bus.subscribe(s, classifier) should ===(true) + } bus.publish(event) - range foreach { _ => expectMsg(event) } - subscribers foreach { s => bus.unsubscribe(s, classifier) should ===(true); disposeSubscriber(system, s) } + range.foreach { _ => + expectMsg(event) + } + subscribers.foreach { s => + bus.unsubscribe(s, classifier) should ===(true); disposeSubscriber(system, s) + } } "not publish the given event to any other subscribers than the intended ones" in { @@ -142,8 +153,10 @@ abstract class EventBusSpec(busName: String, conf: Config = ConfigFactory.empty( } object ActorEventBusSpec { - class MyActorEventBus(protected val system: ActorSystem) extends ActorEventBus - with ManagedActorClassification with ActorClassifier { + class MyActorEventBus(protected val system: ActorSystem) + extends ActorEventBus + with ManagedActorClassification + with ActorClassifier { type Event = Notification @@ -261,16 +274,16 @@ class ActorEventBusSpec(conf: Config) extends EventBusSpec("ActorEventBus", conf private def expectUnsubscribedByUnsubscriber(p: TestProbe, a: ActorRef): Unit = { val expectedMsg = s"actor $a has terminated, unsubscribing it from $bus" p.fishForMessage(1 second, hint = expectedMsg) { - case Logging.Debug(_, _, msg) if msg equals expectedMsg => true - case other => false + case Logging.Debug(_, _, msg) if msg.equals(expectedMsg) => true + case other => false } } private def expectUnregisterFromUnsubscriber(p: TestProbe, a: ActorRef): Unit = { val expectedMsg = s"unregistered watch of $a in $bus" p.fishForMessage(1 second, hint = expectedMsg) { - case Logging.Debug(_, _, msg) if 
msg equals expectedMsg => true - case other => false + case Logging.Debug(_, _, msg) if msg.equals(expectedMsg) => true + case other => false } } } @@ -282,7 +295,7 @@ object ScanningEventBusSpec { type Subscriber = Procedure[Int] type Classifier = String - protected def compareClassifiers(a: Classifier, b: Classifier): Int = a compareTo b + protected def compareClassifiers(a: Classifier, b: Classifier): Int = a.compareTo(b) protected def compareSubscribers(a: Subscriber, b: Subscriber): Int = akka.util.Helpers.compareIdentityHash(a, b) protected def matches(classifier: Classifier, event: Event): Boolean = event.toString == classifier @@ -337,4 +350,3 @@ class LookupEventBusSpec extends EventBusSpec("LookupEventBus") { def disposeSubscriber(system: ActorSystem, subscriber: BusType#Subscriber): Unit = () } - diff --git a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala index 0f48ab9edd..aaeeef172a 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala @@ -9,7 +9,7 @@ import language.postfixOps import scala.concurrent.duration._ import akka.actor._ import com.typesafe.config.ConfigFactory -import akka.testkit.{ TestProbe, AkkaSpec } +import akka.testkit.{ AkkaSpec, TestProbe } object EventStreamSpec { @@ -32,8 +32,7 @@ object EventStreamSpec { """) val configUnhandledWithDebug = - ConfigFactory.parseString("akka.actor.debug.event-stream = on") - .withFallback(configUnhandled) + ConfigFactory.parseString("akka.actor.debug.event-stream = on").withFallback(configUnhandled) final case class M(i: Int) @@ -93,12 +92,14 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { "not allow null as subscriber" in { val bus = new EventStream(system, true) - intercept[IllegalArgumentException] { bus.subscribe(null, classOf[M]) }.getMessage should ===("subscriber is null") + 
intercept[IllegalArgumentException] { bus.subscribe(null, classOf[M]) }.getMessage should ===( + "subscriber is null") } "not allow null as unsubscriber" in { val bus = new EventStream(system, true) - intercept[IllegalArgumentException] { bus.unsubscribe(null, classOf[M]) }.getMessage should ===("subscriber is null") + intercept[IllegalArgumentException] { bus.unsubscribe(null, classOf[M]) }.getMessage should ===( + "subscriber is null") intercept[IllegalArgumentException] { bus.unsubscribe(null) }.getMessage should ===("subscriber is null") } @@ -108,7 +109,10 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { sys.eventStream.subscribe(testActor, classOf[AnyRef]) val m = UnhandledMessage(42, sys.deadLetters, sys.deadLetters) sys.eventStream.publish(m) - expectMsgAllOf(m, Logging.Debug(sys.deadLetters.path.toString, sys.deadLetters.getClass, "unhandled message from " + sys.deadLetters + ": 42")) + expectMsgAllOf(m, + Logging.Debug(sys.deadLetters.path.toString, + sys.deadLetters.getClass, + "unhandled message from " + sys.deadLetters + ": 42")) sys.eventStream.unsubscribe(testActor) } finally { shutdown(sys) @@ -289,7 +293,7 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { val tm = new A val target = sys.actorOf(Props(new Actor { - def receive = { case in => a1.ref forward in } + def receive = { case in => a1.ref.forward(in) } }), "to-be-killed") es.subscribe(a2.ref, classOf[Any]) @@ -317,7 +321,7 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { val a1, a2 = TestProbe() val target = system.actorOf(Props(new Actor { - def receive = { case in => a1.ref forward in } + def receive = { case in => a1.ref.forward(in) } }), "to-be-killed") watch(target) @@ -408,16 +412,17 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) { private def verifyLevel(bus: LoggingBus, level: Logging.LogLevel): Unit = { import Logging._ - val allmsg = Seq(Debug("", null, "debug"), Info("", null, "info"), Warning("", null, 
"warning"), Error("", null, "error")) - val msg = allmsg filter (_.level <= level) - allmsg foreach bus.publish - msg foreach (expectMsg(_)) + val allmsg = + Seq(Debug("", null, "debug"), Info("", null, "info"), Warning("", null, "warning"), Error("", null, "error")) + val msg = allmsg.filter(_.level <= level) + allmsg.foreach(bus.publish) + msg.foreach(expectMsg(_)) } private def fishForDebugMessage(a: TestProbe, messagePrefix: String, max: Duration = 3 seconds): Unit = { a.fishForMessage(max, hint = "expected debug message prefix: " + messagePrefix) { - case Logging.Debug(_, _, msg: String) if msg startsWith messagePrefix => true - case other => false + case Logging.Debug(_, _, msg: String) if msg.startsWith(messagePrefix) => true + case other => false } } diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala index 7f446250f5..3a8ab14711 100644 --- a/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/LoggerSpec.scala @@ -48,7 +48,8 @@ object LoggerSpec { } """).withFallback(AkkaSpec.testConf) - val multipleConfig = ConfigFactory.parseString(""" + val multipleConfig = + ConfigFactory.parseString(""" akka { stdout-loglevel = "OFF" loglevel = "WARNING" @@ -97,10 +98,10 @@ object LoggerSpec { ref ! ("OK") case event: LogEvent if !event.mdc.isEmpty => print(event) - target foreach { _ ! event } + target.foreach { _ ! event } case event: LogEvent => print(event) - target foreach { _ ! event.message } + target.foreach { _ ! event.message } } } @@ -248,10 +249,11 @@ class LoggerSpec extends WordSpec with Matchers { ref ! 
"Current Message in MDC" probe.expectMsgPF(max = 3.seconds) { - case w @ Warning(_, _, "Current Message in MDC") if w.mdc.size == 3 && - w.mdc("requestId") == 3 && - w.mdc("currentMsg") == "Current Message in MDC" && - w.mdc("currentMsgLength") == 22 => + case w @ Warning(_, _, "Current Message in MDC") + if w.mdc.size == 3 && + w.mdc("requestId") == 3 && + w.mdc("currentMsg") == "Current Message in MDC" && + w.mdc("currentMsgLength") == 22 => } ref ! "Current Message removed from MDC" diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala index 50fe07bd94..de8fba3ad5 100644 --- a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala @@ -30,9 +30,14 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { akka.loglevel=DEBUG # test verifies debug akka.actor.serialize-messages = off # debug noise from serialization """).withFallback(AkkaSpec.testConf) - val appLogging = ActorSystem("logging", ConfigFactory.parseMap(Map("akka.actor.debug.receive" -> true).asJava).withFallback(config)) - val appAuto = ActorSystem("autoreceive", ConfigFactory.parseMap(Map("akka.actor.debug.autoreceive" -> true).asJava).withFallback(config)) - val appLifecycle = ActorSystem("lifecycle", ConfigFactory.parseMap(Map("akka.actor.debug.lifecycle" -> true).asJava).withFallback(config)) + val appLogging = + ActorSystem("logging", ConfigFactory.parseMap(Map("akka.actor.debug.receive" -> true).asJava).withFallback(config)) + val appAuto = ActorSystem( + "autoreceive", + ConfigFactory.parseMap(Map("akka.actor.debug.autoreceive" -> true).asJava).withFallback(config)) + val appLifecycle = ActorSystem( + "lifecycle", + ConfigFactory.parseMap(Map("akka.actor.debug.lifecycle" -> true).asJava).withFallback(config)) val filter = TestEvent.Mute(EventFilter.custom { case _: Logging.Debug => true @@ -62,13 +67,16 
@@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { system.eventStream.subscribe(testActor, classOf[Logging.Debug]) system.eventStream.subscribe(testActor, classOf[UnhandledMessage]) val a = system.actorOf(Props(new Actor { - def receive = new LoggingReceive(Some("funky"), { - case null => - }) + def receive = + new LoggingReceive(Some("funky"), { + case null => + }) })) a ! "hallo" - expectMsg(1 second, Logging.Debug("funky", classOf[DummyClassForStringSources], - "received unhandled message hallo from " + system.deadLetters)) + expectMsg(1 second, + Logging.Debug("funky", + classOf[DummyClassForStringSources], + "received unhandled message hallo from " + system.deadLetters)) expectMsgType[UnhandledMessage](1 second) } } @@ -85,15 +93,17 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { val actor = TestActorRef(new Actor { def switch: Actor.Receive = { case "becomenull" => context.become(r, false) } - def receive = switch orElse LoggingReceive { - case x => sender() ! "x" - } + def receive = + switch.orElse(LoggingReceive { + case x => sender() ! "x" + }) }) val name = actor.path.toString actor ! "buh" - expectMsg(Logging.Debug(actor.path.toString, actor.underlyingActor.getClass, - "received handled message buh from " + self)) + expectMsg( + Logging + .Debug(actor.path.toString, actor.underlyingActor.getClass, "received handled message buh from " + self)) expectMsg("x") actor ! "becomenull" @@ -109,13 +119,15 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { new TestKit(appLogging) with ImplicitSender { system.eventStream.subscribe(testActor, classOf[Logging.Debug]) val actor = TestActorRef(new Actor { - def receive = LoggingReceive(LoggingReceive { - case _ => sender() ! "x" - }) + def receive = + LoggingReceive(LoggingReceive { + case _ => sender() ! "x" + }) }) actor ! 
"buh" - expectMsg(Logging.Debug(actor.path.toString, actor.underlyingActor.getClass, - "received handled message buh from " + self)) + expectMsg( + Logging + .Debug(actor.path.toString, actor.underlyingActor.getClass, "received handled message buh from " + self)) expectMsg("x") } } @@ -146,8 +158,9 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { } }) actor ! "buh" - expectMsg(Logging.Info(actor.path.toString, actor.underlyingActor.getClass, - "received handled message buh from " + self)) + expectMsg( + Logging + .Info(actor.path.toString, actor.underlyingActor.getClass, "received handled message buh from " + self)) expectMsg("x") } } @@ -167,7 +180,9 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { val name = actor.path.toString actor ! PoisonPill fishForMessage(hint = "received AutoReceiveMessage Envelope(PoisonPill") { - case Logging.Debug(`name`, _, msg: String) if msg startsWith "received AutoReceiveMessage Envelope(PoisonPill" => true + case Logging.Debug(`name`, _, msg: String) + if msg.startsWith("received AutoReceiveMessage Envelope(PoisonPill") => + true case _ => false } awaitCond(actor.isTerminated) @@ -184,15 +199,15 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { val sname = supervisor.path.toString fishForMessage(hint = "now supervising") { - case Logging.Debug(`lname`, _, msg: String) if msg startsWith "now supervising" => true - case _ => false + case Logging.Debug(`lname`, _, msg: String) if msg.startsWith("now supervising") => true + case _ => false } TestActorRef[TestLogActor](Props[TestLogActor], supervisor, "none") fishForMessage(hint = "now supervising") { - case Logging.Debug(`sname`, _, msg: String) if msg startsWith "now supervising" => true - case _ => false + case Logging.Debug(`sname`, _, msg: String) if msg.startsWith("now supervising") => true + case _ => false } } } @@ -207,16 +222,16 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { val actor = 
TestActorRef[TestLogActor](Props[TestLogActor], supervisor, "none") val aname = actor.path.toString - supervisor watch actor + supervisor.watch(actor) fishForMessage(hint = "now watched by") { case Logging.Debug(`aname`, `sclass`, msg: String) if msg.startsWith("now watched by") => true - case m => false + case m => false } - supervisor unwatch actor + supervisor.unwatch(actor) fishForMessage(hint = "no longer watched by") { case Logging.Debug(`aname`, `sclass`, msg: String) if msg.startsWith("no longer watched by") => true - case _ => false + case _ => false } } } @@ -232,8 +247,8 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { val sclass = classOf[TestLogActor] expectMsgAllPF(messages = 2) { - case Logging.Debug(`sname`, `sclass`, msg: String) if msg startsWith "started" => 0 - case Logging.Debug(_, _, msg: String) if msg startsWith "now supervising" => 1 + case Logging.Debug(`sname`, `sclass`, msg: String) if msg.startsWith("started") => 0 + case Logging.Debug(_, _, msg: String) if msg.startsWith("now supervising") => 1 } val actor = TestActorRef[TestLogActor](Props[TestLogActor], supervisor, "none") @@ -241,11 +256,13 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { val aclass = classOf[TestLogActor] expectMsgAllPF(messages = 2) { - case Logging.Debug(`aname`, `aclass`, msg: String) if msg.startsWith("started (" + classOf[TestLogActor].getName) => 0 - case Logging.Debug(`sname`, `sclass`, msg: String) if msg == s"now supervising TestActor[$aname]" => 1 + case Logging.Debug(`aname`, `aclass`, msg: String) + if msg.startsWith("started (" + classOf[TestLogActor].getName) => + 0 + case Logging.Debug(`sname`, `sclass`, msg: String) if msg == s"now supervising TestActor[$aname]" => 1 } - EventFilter[ActorKilledException](occurrences = 1) intercept { + EventFilter[ActorKilledException](occurrences = 1).intercept { actor ! 
Kill expectMsgAllPF(messages = 3) { case Logging.Error(_: ActorKilledException, `aname`, _, "Kill") => 0 @@ -255,10 +272,9 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { } system.stop(supervisor) - expectMsgAllOf( - Logging.Debug(aname, aclass, "stopped"), - Logging.Debug(sname, sclass, "stopping"), - Logging.Debug(sname, sclass, "stopped")) + expectMsgAllOf(Logging.Debug(aname, aclass, "stopped"), + Logging.Debug(sname, sclass, "stopping"), + Logging.Debug(sname, sclass, "stopped")) } def expectMsgAllPF(messages: Int)(matchers: PartialFunction[AnyRef, Int]): Set[Int] = { @@ -269,8 +285,9 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll { else if (gotMatching.size == messages) gotMatching else { val msg = receiveOne(remainingOrDefault) - assert(msg ne null, s"timeout ($max) during expectMsgAllPF, got matching " + - s"[${gotMatching.mkString(", ")}], got unknown: [${unknown.mkString(", ")}]") + assert(msg ne null, + s"timeout ($max) during expectMsgAllPF, got matching " + + s"[${gotMatching.mkString(", ")}], got unknown: [${unknown.mkString(", ")}]") if (matchers.isDefinedAt(msg)) receiveNMatching(gotMatching + matchers(msg), Vector.empty) else receiveNMatching(gotMatching, unknown :+ msg) // unknown message, just ignore } diff --git a/akka-actor-tests/src/test/scala/akka/event/MarkerLoggingSpec.scala b/akka-actor-tests/src/test/scala/akka/event/MarkerLoggingSpec.scala index 125f25595d..5e06b9e45b 100644 --- a/akka-actor-tests/src/test/scala/akka/event/MarkerLoggingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/MarkerLoggingSpec.scala @@ -9,7 +9,10 @@ import akka.testkit._ class MarkerLoggingSpec extends AkkaSpec with ImplicitSender { "A MarkerLoggerAdapter" should { - val markerLogging = new MarkerLoggingAdapter(system.eventStream, getClass.getName, this.getClass, new DefaultLoggingFilter(() => Logging.InfoLevel)) + val markerLogging = new MarkerLoggingAdapter(system.eventStream, + getClass.getName, + 
this.getClass, + new DefaultLoggingFilter(() => Logging.InfoLevel)) "add markers to logging" in { system.eventStream.subscribe(self, classOf[Info]) diff --git a/akka-actor-tests/src/test/scala/akka/io/CapacityLimitSpec.scala b/akka-actor-tests/src/test/scala/akka/io/CapacityLimitSpec.scala index 6e9e67d40b..69d6e76d7c 100644 --- a/akka-actor-tests/src/test/scala/akka/io/CapacityLimitSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/CapacityLimitSpec.scala @@ -4,7 +4,7 @@ package akka.io -import akka.testkit.{ TestProbe, AkkaSpec } +import akka.testkit.{ AkkaSpec, TestProbe } import akka.testkit.SocketUtil.temporaryServerAddresses import Tcp._ @@ -12,12 +12,12 @@ class CapacityLimitSpec extends AkkaSpec(""" akka.loglevel = ERROR akka.io.tcp.max-channels = 4 akka.actor.serialize-creators = on - """) - with TcpIntegrationSpecSupport { + """) with TcpIntegrationSpecSupport { "The TCP transport implementation" should { - "reply with CommandFailed to a Bind or Connect command if max-channels capacity has been reached" in new TestSetup(runClientInExtraSystem = false) { + "reply with CommandFailed to a Bind or Connect command if max-channels capacity has been reached" in new TestSetup( + runClientInExtraSystem = false) { establishNewClientConnection() // we now have three channels registered: a listener, a server connection and a client connection @@ -31,11 +31,11 @@ class CapacityLimitSpec extends AkkaSpec(""" val bindToFail = Bind(bindHandler.ref, addresses(1)) commander.send(IO(Tcp), bindToFail) - commander.expectMsgType[CommandFailed].cmd should be theSameInstanceAs (bindToFail) + (commander.expectMsgType[CommandFailed].cmd should be).theSameInstanceAs(bindToFail) val connectToFail = Connect(endpoint) commander.send(IO(Tcp), connectToFail) - commander.expectMsgType[CommandFailed].cmd should be theSameInstanceAs (connectToFail) + (commander.expectMsgType[CommandFailed].cmd should be).theSameInstanceAs(connectToFail) } } diff --git 
a/akka-actor-tests/src/test/scala/akka/io/InetAddressDnsResolverSpec.scala b/akka-actor-tests/src/test/scala/akka/io/InetAddressDnsResolverSpec.scala index fc0049c889..9fe9d64715 100644 --- a/akka-actor-tests/src/test/scala/akka/io/InetAddressDnsResolverSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/InetAddressDnsResolverSpec.scala @@ -88,11 +88,10 @@ class InetAddressDnsResolverSpec extends AkkaSpec(""" private def secondsToMillis(seconds: Int) = TimeUnit.SECONDS.toMillis(seconds) private def dnsResolver = { - val actorRef = TestActorRef[InetAddressDnsResolver](Props( - classOf[InetAddressDnsResolver], - new SimpleDnsCache(), - system.settings.config.getConfig("akka.io.dns.inet-address") - )) + val actorRef = TestActorRef[InetAddressDnsResolver]( + Props(classOf[InetAddressDnsResolver], + new SimpleDnsCache(), + system.settings.config.getConfig("akka.io.dns.inet-address"))) actorRef.underlyingActor } @@ -117,8 +116,7 @@ class InetAddressDnsResolverSpec extends AkkaSpec(""" } } -class InetAddressDnsResolverConfigSpec extends AkkaSpec( - """ +class InetAddressDnsResolverConfigSpec extends AkkaSpec(""" akka.io.dns.inet-address.positive-ttl = forever akka.io.dns.inet-address.negative-ttl = never akka.actor.serialize-creators = on @@ -137,11 +135,10 @@ class InetAddressDnsResolverConfigSpec extends AkkaSpec( } private def dnsResolver = { - val actorRef = TestActorRef[InetAddressDnsResolver](Props( - classOf[InetAddressDnsResolver], - new SimpleDnsCache(), - system.settings.config.getConfig("akka.io.dns.inet-address") - )) + val actorRef = TestActorRef[InetAddressDnsResolver]( + Props(classOf[InetAddressDnsResolver], + new SimpleDnsCache(), + system.settings.config.getConfig("akka.io.dns.inet-address"))) actorRef.underlyingActor } } diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala index 4a75039e57..4e929c1a89 100644 --- 
a/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/TcpConnectionSpec.scala @@ -104,7 +104,7 @@ class TcpConnectionSpec extends AkkaSpec(""" val connectionActor = createConnectionActor(options = Vector(SO.KeepAlive(false))) val clientChannel = connectionActor.underlyingActor.channel clientChannel.socket.getKeepAlive should ===(true) // only set after connection is established - EventFilter.warning(pattern = "registration timeout", occurrences = 1) intercept { + EventFilter.warning(pattern = "registration timeout", occurrences = 1).intercept { selector.send(connectionActor, ChannelConnectable) clientChannel.socket.getKeepAlive should ===(false) } @@ -126,26 +126,26 @@ class TcpConnectionSpec extends AkkaSpec(""" } "forward incoming data as Received messages instantly as long as more data is available" in - new EstablishedConnectionTest() { // to make sure enough data gets through - override lazy val connectionActor = createConnectionActor(options = List(Inet.SO.ReceiveBufferSize(1000000))) - run { - val bufferSize = Tcp(system).Settings.DirectBufferSize - val DataSize = bufferSize + 1500 - val bigData = new Array[Byte](DataSize) - val buffer = ByteBuffer.wrap(bigData) + new EstablishedConnectionTest() { // to make sure enough data gets through + override lazy val connectionActor = createConnectionActor(options = List(Inet.SO.ReceiveBufferSize(1000000))) + run { + val bufferSize = Tcp(system).Settings.DirectBufferSize + val DataSize = bufferSize + 1500 + val bigData = new Array[Byte](DataSize) + val buffer = ByteBuffer.wrap(bigData) - serverSideChannel.socket.setSendBufferSize(150000) - val wrote = serverSideChannel.write(buffer) - wrote should ===(DataSize) + serverSideChannel.socket.setSendBufferSize(150000) + val wrote = serverSideChannel.write(buffer) + wrote should ===(DataSize) - expectNoMessage(1000.millis) // data should have been transferred fully by now + expectNoMessage(1000.millis) // data should 
have been transferred fully by now - selector.send(connectionActor, ChannelReadable) + selector.send(connectionActor, ChannelReadable) - connectionHandler.expectMsgType[Received].data.length should ===(bufferSize) - connectionHandler.expectMsgType[Received].data.length should ===(1500) - } + connectionHandler.expectMsgType[Received].data.length should ===(bufferSize) + connectionHandler.expectMsgType[Received].data.length should ===(1500) } + } "receive data directly when the connection is established" in new UnacceptedConnectionTest() { run { @@ -156,7 +156,8 @@ class TcpConnectionSpec extends AkkaSpec(""" interestCallReceiver.expectMsg(OP_CONNECT) selector.send(connectionActor, ChannelConnectable) - userHandler.expectMsg(Connected(serverAddress, clientSideChannel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress])) + userHandler.expectMsg( + Connected(serverAddress, clientSideChannel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress])) userHandler.send(connectionActor, Register(userHandler.ref)) interestCallReceiver.expectMsg(OP_READ) @@ -270,9 +271,9 @@ class TcpConnectionSpec extends AkkaSpec(""" val writer = TestProbe() val compoundWrite = Write(ByteString("test1"), Ack(1)) +: - Write(ByteString("test2")) +: - Write(ByteString.empty, Ack(3)) +: - Write(ByteString("test4"), Ack(4)) + Write(ByteString("test2")) +: + Write(ByteString.empty, Ack(3)) +: + Write(ByteString("test4"), Ack(4)) // reply to write commander with Ack val buffer = ByteBuffer.allocate(100) @@ -301,53 +302,54 @@ class TcpConnectionSpec extends AkkaSpec(""" * SO_SNDBUF to 0." 
*/ "stop writing in cases of backpressure and resume afterwards" in - new EstablishedConnectionTest() { - override lazy val connectionActor = createConnectionActor(options = List(Inet.SO.ReceiveBufferSize(1000000))) - run { - info("Currently ignored as SO_SNDBUF is usually a lower bound on the send buffer so the test fails as no real " + - "backpressure present.") - pending - ignoreIfWindows() - object Ack1 extends Event - object Ack2 extends Event + new EstablishedConnectionTest() { + override lazy val connectionActor = createConnectionActor(options = List(Inet.SO.ReceiveBufferSize(1000000))) + run { + info( + "Currently ignored as SO_SNDBUF is usually a lower bound on the send buffer so the test fails as no real " + + "backpressure present.") + pending + ignoreIfWindows() + object Ack1 extends Event + object Ack2 extends Event - clientSideChannel.socket.setSendBufferSize(1024) + clientSideChannel.socket.setSendBufferSize(1024) - awaitCond(clientSideChannel.socket.getSendBufferSize == 1024) + awaitCond(clientSideChannel.socket.getSendBufferSize == 1024) - val writer = TestProbe() + val writer = TestProbe() - // producing backpressure by sending much more than currently fits into - // our send buffer - val firstWrite = writeCmd(Ack1) + // producing backpressure by sending much more than currently fits into + // our send buffer + val firstWrite = writeCmd(Ack1) - // try to write the buffer but since the SO_SNDBUF is too small - // it will have to keep the rest of the piece and send it - // when possible - writer.send(connectionActor, firstWrite) - interestCallReceiver.expectMsg(OP_WRITE) + // try to write the buffer but since the SO_SNDBUF is too small + // it will have to keep the rest of the piece and send it + // when possible + writer.send(connectionActor, firstWrite) + interestCallReceiver.expectMsg(OP_WRITE) - // send another write which should fail immediately - // because we don't store more than one piece in flight - val secondWrite = writeCmd(Ack2) - 
writer.send(connectionActor, secondWrite) - writer.expectMsg(CommandFailed(secondWrite)) + // send another write which should fail immediately + // because we don't store more than one piece in flight + val secondWrite = writeCmd(Ack2) + writer.send(connectionActor, secondWrite) + writer.expectMsg(CommandFailed(secondWrite)) - // reject even empty writes - writer.send(connectionActor, Write.empty) - writer.expectMsg(CommandFailed(Write.empty)) + // reject even empty writes + writer.send(connectionActor, Write.empty) + writer.expectMsg(CommandFailed(Write.empty)) - // there will be immediately more space in the send buffer because - // some data will have been sent by now, so we assume we can write - // again, but still it can't write everything - selector.send(connectionActor, ChannelWritable) + // there will be immediately more space in the send buffer because + // some data will have been sent by now, so we assume we can write + // again, but still it can't write everything + selector.send(connectionActor, ChannelWritable) - // both buffers should now be filled so no more writing - // is possible - pullFromServerSide(TestSize) - writer.expectMsg(Ack1) - } + // both buffers should now be filled so no more writing + // is possible + pullFromServerSide(TestSize) + writer.expectMsg(Ack1) } + } "respect StopReading and ResumeReading" in new EstablishedConnectionTest() { run { @@ -376,8 +378,7 @@ class TcpConnectionSpec extends AkkaSpec(""" "respect pull mode" in new EstablishedConnectionTest(pullMode = true) { // override config to decrease default buffer size def config = - ConfigFactory.parseString("akka.io.tcp.direct-buffer-size = 1k") - .withFallback(AkkaSpec.testConf) + ConfigFactory.parseString("akka.io.tcp.direct-buffer-size = 1k").withFallback(AkkaSpec.testConf) override implicit lazy val system: ActorSystem = ActorSystem("respectPullModeTest", config) try run { @@ -415,62 +416,61 @@ class TcpConnectionSpec extends AkkaSpec(""" selector.send(connectionActor, 
ChannelReadable) connectionHandler.expectMsgType[Received].data.decodeString("ASCII") should ===(vs) - } - finally shutdown(system) + } finally shutdown(system) } "close the connection and reply with `Closed` upon reception of a `Close` command" in - new EstablishedConnectionTest() with SmallRcvBuffer { - run { - // we should test here that a pending write command is properly finished first + new EstablishedConnectionTest() with SmallRcvBuffer { + run { + // we should test here that a pending write command is properly finished first - // set an artificially small send buffer size so that the write is queued - // inside the connection actor - clientSideChannel.socket.setSendBufferSize(1024) + // set an artificially small send buffer size so that the write is queued + // inside the connection actor + clientSideChannel.socket.setSendBufferSize(1024) - // we send a write and a close command directly afterwards - connectionHandler.send(connectionActor, writeCmd(Ack)) - val closeCommander = TestProbe() - closeCommander.send(connectionActor, Close) + // we send a write and a close command directly afterwards + connectionHandler.send(connectionActor, writeCmd(Ack)) + val closeCommander = TestProbe() + closeCommander.send(connectionActor, Close) - pullFromServerSide(TestSize) - connectionHandler.expectMsg(Ack) - connectionHandler.expectMsg(Closed) - closeCommander.expectMsg(Closed) - assertThisConnectionActorTerminated() + pullFromServerSide(TestSize) + connectionHandler.expectMsg(Ack) + connectionHandler.expectMsg(Closed) + closeCommander.expectMsg(Closed) + assertThisConnectionActorTerminated() - serverSelectionKey should be(selectedAs(OP_READ, 2.seconds)) + serverSelectionKey should be(selectedAs(OP_READ, 2.seconds)) - val buffer = ByteBuffer.allocate(1) - serverSideChannel.read(buffer) should ===(-1) - } + val buffer = ByteBuffer.allocate(1) + serverSideChannel.read(buffer) should ===(-1) } + } "send only one `Closed` event to the handler, if the handler commanded the 
Close" in - new EstablishedConnectionTest() { - run { - connectionHandler.send(connectionActor, Close) - connectionHandler.expectMsg(Closed) - connectionHandler.expectNoMessage(500.millis) - } + new EstablishedConnectionTest() { + run { + connectionHandler.send(connectionActor, Close) + connectionHandler.expectMsg(Closed) + connectionHandler.expectNoMessage(500.millis) } + } "abort the connection and reply with `Aborted` upon reception of an `Abort` command" in - new EstablishedConnectionTest() { - run { - connectionHandler.send(connectionActor, Abort) - connectionHandler.expectMsg(Aborted) + new EstablishedConnectionTest() { + run { + connectionHandler.send(connectionActor, Abort) + connectionHandler.expectMsg(Aborted) - assertThisConnectionActorTerminated() + assertThisConnectionActorTerminated() - val buffer = ByteBuffer.allocate(1) - val thrown = the[IOException] thrownBy { + val buffer = ByteBuffer.allocate(1) + val thrown = the[IOException] thrownBy { windowsWorkaroundToDetectAbort() serverSideChannel.read(buffer) } - thrown.getMessage should ===(ConnectionResetByPeerMessage) - } + thrown.getMessage should ===(ConnectionResetByPeerMessage) } + } /* * Partly disabled on Windows: http://support.microsoft.com/kb/214397 @@ -485,70 +485,70 @@ class TcpConnectionSpec extends AkkaSpec(""" * SO_SNDBUF to 0." 
*/ "close the connection and reply with `ConfirmedClosed` upon reception of an `ConfirmedClose` command (simplified)" in - new EstablishedConnectionTest() with SmallRcvBuffer { - run { - // we should test here that a pending write command is properly finished first + new EstablishedConnectionTest() with SmallRcvBuffer { + run { + // we should test here that a pending write command is properly finished first - // set an artificially small send buffer size so that the write is queued - // inside the connection actor - clientSideChannel.socket.setSendBufferSize(1024) + // set an artificially small send buffer size so that the write is queued + // inside the connection actor + clientSideChannel.socket.setSendBufferSize(1024) - // we send a write and a close command directly afterwards - connectionHandler.send(connectionActor, writeCmd(Ack)) - connectionHandler.send(connectionActor, ConfirmedClose) + // we send a write and a close command directly afterwards + connectionHandler.send(connectionActor, writeCmd(Ack)) + connectionHandler.send(connectionActor, ConfirmedClose) - pullFromServerSide(TestSize) - connectionHandler.expectMsg(Ack) + pullFromServerSide(TestSize) + connectionHandler.expectMsg(Ack) - selector.send(connectionActor, ChannelReadable) + selector.send(connectionActor, ChannelReadable) - val buffer = ByteBuffer.allocate(1) - serverSelectionKey should be(selectedAs(OP_READ, 2.seconds)) - serverSideChannel.read(buffer) should ===(-1) + val buffer = ByteBuffer.allocate(1) + serverSelectionKey should be(selectedAs(OP_READ, 2.seconds)) + serverSideChannel.read(buffer) should ===(-1) - closeServerSideAndWaitForClientReadable() + closeServerSideAndWaitForClientReadable() - selector.send(connectionActor, ChannelReadable) - connectionHandler.expectMsg(ConfirmedClosed) + selector.send(connectionActor, ChannelReadable) + connectionHandler.expectMsg(ConfirmedClosed) - assertThisConnectionActorTerminated() - } + assertThisConnectionActorTerminated() } + } "close the 
connection and reply with `ConfirmedClosed` upon reception of an `ConfirmedClose` command" in - new EstablishedConnectionTest() with SmallRcvBuffer { - run { - ignoreIfWindows() + new EstablishedConnectionTest() with SmallRcvBuffer { + run { + ignoreIfWindows() - // we should test here that a pending write command is properly finished first + // we should test here that a pending write command is properly finished first - // set an artificially small send buffer size so that the write is queued - // inside the connection actor - clientSideChannel.socket.setSendBufferSize(1024) + // set an artificially small send buffer size so that the write is queued + // inside the connection actor + clientSideChannel.socket.setSendBufferSize(1024) - // we send a write and a close command directly afterwards - connectionHandler.send(connectionActor, writeCmd(Ack)) - connectionHandler.send(connectionActor, ConfirmedClose) + // we send a write and a close command directly afterwards + connectionHandler.send(connectionActor, writeCmd(Ack)) + connectionHandler.send(connectionActor, ConfirmedClose) - connectionHandler.expectNoMessage(100.millis) - pullFromServerSide(TestSize) - connectionHandler.expectMsg(Ack) + connectionHandler.expectNoMessage(100.millis) + pullFromServerSide(TestSize) + connectionHandler.expectMsg(Ack) - selector.send(connectionActor, ChannelReadable) - connectionHandler.expectNoMessage(100.millis) // not yet + selector.send(connectionActor, ChannelReadable) + connectionHandler.expectNoMessage(100.millis) // not yet - val buffer = ByteBuffer.allocate(1) - serverSelectionKey should be(selectedAs(SelectionKey.OP_READ, 2.seconds)) - serverSideChannel.read(buffer) should ===(-1) + val buffer = ByteBuffer.allocate(1) + serverSelectionKey should be(selectedAs(SelectionKey.OP_READ, 2.seconds)) + serverSideChannel.read(buffer) should ===(-1) - closeServerSideAndWaitForClientReadable() + closeServerSideAndWaitForClientReadable() - selector.send(connectionActor, 
ChannelReadable) - connectionHandler.expectMsg(ConfirmedClosed) + selector.send(connectionActor, ChannelReadable) + connectionHandler.expectMsg(ConfirmedClosed) - assertThisConnectionActorTerminated() - } + assertThisConnectionActorTerminated() } + } "report when peer closed the connection" in new EstablishedConnectionTest() { run { @@ -562,40 +562,40 @@ class TcpConnectionSpec extends AkkaSpec(""" } "report when peer closed the connection but allow further writes and acknowledge normal close" in - new EstablishedConnectionTest(keepOpenOnPeerClosed = true) { - run { - closeServerSideAndWaitForClientReadable(fullClose = false) // send EOF (fin) from the server side + new EstablishedConnectionTest(keepOpenOnPeerClosed = true) { + run { + closeServerSideAndWaitForClientReadable(fullClose = false) // send EOF (fin) from the server side - selector.send(connectionActor, ChannelReadable) - connectionHandler.expectMsg(PeerClosed) + selector.send(connectionActor, ChannelReadable) + connectionHandler.expectMsg(PeerClosed) - connectionHandler.send(connectionActor, writeCmd(Ack)) - pullFromServerSide(TestSize) - connectionHandler.expectMsg(Ack) - connectionHandler.send(connectionActor, Close) - connectionHandler.expectMsg(Closed) + connectionHandler.send(connectionActor, writeCmd(Ack)) + pullFromServerSide(TestSize) + connectionHandler.expectMsg(Ack) + connectionHandler.send(connectionActor, Close) + connectionHandler.expectMsg(Closed) - assertThisConnectionActorTerminated() - } + assertThisConnectionActorTerminated() } + } "report when peer closed the connection but allow further writes and acknowledge confirmed close" in - new EstablishedConnectionTest(keepOpenOnPeerClosed = true) { - run { - closeServerSideAndWaitForClientReadable(fullClose = false) // send EOF (fin) from the server side + new EstablishedConnectionTest(keepOpenOnPeerClosed = true) { + run { + closeServerSideAndWaitForClientReadable(fullClose = false) // send EOF (fin) from the server side - 
selector.send(connectionActor, ChannelReadable) - connectionHandler.expectMsg(PeerClosed) + selector.send(connectionActor, ChannelReadable) + connectionHandler.expectMsg(PeerClosed) - connectionHandler.send(connectionActor, writeCmd(Ack)) - pullFromServerSide(TestSize) - connectionHandler.expectMsg(Ack) - connectionHandler.send(connectionActor, ConfirmedClose) - connectionHandler.expectMsg(ConfirmedClosed) + connectionHandler.send(connectionActor, writeCmd(Ack)) + pullFromServerSide(TestSize) + connectionHandler.expectMsg(Ack) + connectionHandler.send(connectionActor, ConfirmedClose) + connectionHandler.expectMsg(ConfirmedClosed) - assertThisConnectionActorTerminated() - } + assertThisConnectionActorTerminated() } + } "report when peer aborted the connection" in new EstablishedConnectionTest() { run { @@ -628,54 +628,56 @@ class TcpConnectionSpec extends AkkaSpec(""" val UnboundAddress = temporaryServerAddress() "report failed connection attempt when target is unreachable" in - new UnacceptedConnectionTest() { - override lazy val connectionActor = createConnectionActor(serverAddress = UnboundAddress) - run { - val sel = SelectorProvider.provider().openSelector() - try { - val key = clientSideChannel.register(sel, SelectionKey.OP_CONNECT | SelectionKey.OP_READ) - // This timeout should be large enough to work on Windows - sel.select(3000) + new UnacceptedConnectionTest() { + override lazy val connectionActor = createConnectionActor(serverAddress = UnboundAddress) + run { + val sel = SelectorProvider.provider().openSelector() + try { + val key = clientSideChannel.register(sel, SelectionKey.OP_CONNECT | SelectionKey.OP_READ) + // This timeout should be large enough to work on Windows + sel.select(3000) - key.isConnectable should ===(true) - val forceThisLazyVal = connectionActor.toString - Thread.sleep(300) - selector.send(connectionActor, ChannelConnectable) - userHandler.expectMsg(CommandFailed(Connect(UnboundAddress))) + key.isConnectable should ===(true) + val 
forceThisLazyVal = connectionActor.toString + Thread.sleep(300) + selector.send(connectionActor, ChannelConnectable) + userHandler.expectMsg(CommandFailed(Connect(UnboundAddress))) - watch(connectionActor) - expectTerminated(connectionActor) - } finally sel.close() - } - } - - "report failed connection attempt when target cannot be resolved" in - new UnacceptedConnectionTest() { - val address = new InetSocketAddress("notthere.local", 666) - override lazy val connectionActor = createConnectionActorWithoutRegistration(serverAddress = address) - run { - connectionActor ! newChannelRegistration - userHandler.expectMsg(30.seconds, CommandFailed(Connect(address))) - } - } - - "report failed connection attempt when timing out" in - new UnacceptedConnectionTest() { - override lazy val connectionActor = createConnectionActor(serverAddress = UnboundAddress, timeout = Option(100.millis)) - run { - connectionActor.toString should not be ("") - userHandler.expectMsg(CommandFailed(Connect(UnboundAddress, timeout = Option(100.millis)))) watch(connectionActor) expectTerminated(connectionActor) - } + } finally sel.close() } + } + + "report failed connection attempt when target cannot be resolved" in + new UnacceptedConnectionTest() { + val address = new InetSocketAddress("notthere.local", 666) + override lazy val connectionActor = createConnectionActorWithoutRegistration(serverAddress = address) + run { + connectionActor ! 
newChannelRegistration + userHandler.expectMsg(30.seconds, CommandFailed(Connect(address))) + } + } + + "report failed connection attempt when timing out" in + new UnacceptedConnectionTest() { + override lazy val connectionActor = + createConnectionActor(serverAddress = UnboundAddress, timeout = Option(100.millis)) + run { + connectionActor.toString should not be ("") + userHandler.expectMsg(CommandFailed(Connect(UnboundAddress, timeout = Option(100.millis)))) + watch(connectionActor) + expectTerminated(connectionActor) + } + } "time out when Connected isn't answered with Register" in new UnacceptedConnectionTest { run { localServerChannel.accept() selector.send(connectionActor, ChannelConnectable) - userHandler.expectMsg(Connected(serverAddress, clientSideChannel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress])) + userHandler.expectMsg( + Connected(serverAddress, clientSideChannel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress])) watch(connectionActor) expectTerminated(connectionActor) @@ -684,7 +686,7 @@ class TcpConnectionSpec extends AkkaSpec(""" "close the connection when user handler dies while connecting" in new UnacceptedConnectionTest { run { - EventFilter[DeathPactException](occurrences = 1) intercept { + EventFilter[DeathPactException](occurrences = 1).intercept { userHandler.ref ! 
PoisonPill watch(connectionActor) @@ -697,7 +699,7 @@ class TcpConnectionSpec extends AkkaSpec(""" run { watch(connectionHandler.ref) watch(connectionActor) - EventFilter[DeathPactException](occurrences = 1) intercept { + EventFilter[DeathPactException](occurrences = 1).intercept { system.stop(connectionHandler.ref) val deaths = Set(expectMsgType[Terminated].actor, expectMsgType[Terminated].actor) deaths should ===(Set(connectionHandler.ref, connectionActor)) @@ -879,6 +881,7 @@ class TcpConnectionSpec extends AkkaSpec(""" } abstract class LocalServerTest extends ChannelRegistry { + /** Allows overriding the system used */ implicit def system: ActorSystem = thisSpecs.system @@ -909,11 +912,10 @@ class TcpConnectionSpec extends AkkaSpec(""" def setServerSocketOptions() = () - def createConnectionActor( - serverAddress: InetSocketAddress = serverAddress, - options: immutable.Seq[SocketOption] = Nil, - timeout: Option[FiniteDuration] = None, - pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] = { + def createConnectionActor(serverAddress: InetSocketAddress = serverAddress, + options: immutable.Seq[SocketOption] = Nil, + timeout: Option[FiniteDuration] = None, + pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] = { val ref = createConnectionActorWithoutRegistration(serverAddress, options, timeout, pullMode) ref ! 
newChannelRegistration ref @@ -928,14 +930,15 @@ class TcpConnectionSpec extends AkkaSpec(""" protected def onCancelAndClose(andThen: () => Unit): Unit = andThen() - def createConnectionActorWithoutRegistration( - serverAddress: InetSocketAddress = serverAddress, - options: immutable.Seq[SocketOption] = Nil, - timeout: Option[FiniteDuration] = None, - pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] = + def createConnectionActorWithoutRegistration(serverAddress: InetSocketAddress = serverAddress, + options: immutable.Seq[SocketOption] = Nil, + timeout: Option[FiniteDuration] = None, + pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] = TestActorRef( - new TcpOutgoingConnection(Tcp(system), this, userHandler.ref, - Connect(serverAddress, options = options, timeout = timeout, pullMode = pullMode)) { + new TcpOutgoingConnection(Tcp(system), + this, + userHandler.ref, + Connect(serverAddress, options = options, timeout = timeout, pullMode = pullMode)) { override def postRestart(reason: Throwable): Unit = context.stop(self) // ensure we never restart }) } @@ -957,11 +960,10 @@ class TcpConnectionSpec extends AkkaSpec(""" } } - abstract class EstablishedConnectionTest( - keepOpenOnPeerClosed: Boolean = false, - useResumeWriting: Boolean = true, - pullMode: Boolean = false) - extends UnacceptedConnectionTest(pullMode) { + abstract class EstablishedConnectionTest(keepOpenOnPeerClosed: Boolean = false, + useResumeWriting: Boolean = true, + pullMode: Boolean = false) + extends UnacceptedConnectionTest(pullMode) { // lazy init since potential exceptions should not be triggered in the constructor but during execution of `run` lazy val serverSideChannel = acceptServerSideConnection(localServerChannel) @@ -991,7 +993,8 @@ class TcpConnectionSpec extends AkkaSpec(""" interestCallReceiver.expectMsg(OP_CONNECT) selector.send(connectionActor, ChannelConnectable) - userHandler.expectMsg(Connected(serverAddress, 
clientSideChannel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress])) + userHandler.expectMsg( + Connected(serverAddress, clientSideChannel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress])) userHandler.send(connectionActor, Register(connectionHandler.ref, keepOpenOnPeerClosed, useResumeWriting)) ignoreWindowsWorkaroundForTicket15766() @@ -1030,7 +1033,7 @@ class TcpConnectionSpec extends AkkaSpec(""" key.interestOps(0) ret > 0 && nioSelector.selectedKeys().contains(key) && key.isValid && - (key.readyOps() & interest) != 0 + (key.readyOps() & interest) != 0 } else false def openSelectorFor(channel: SocketChannel, interests: Int): (Selector, SelectionKey) = { @@ -1051,7 +1054,8 @@ class TcpConnectionSpec extends AkkaSpec(""" /** * Tries to simultaneously act on client and server side to read from the server all pending data from the client. */ - @tailrec final def pullFromServerSide(remaining: Int, remainingTries: Int = 1000, + @tailrec final def pullFromServerSide(remaining: Int, + remainingTries: Int = 1000, into: ByteBuffer = defaultbuffer): Unit = if (remainingTries <= 0) throw new AssertionError("Pulling took too many loops, remaining data: " + remaining) @@ -1072,7 +1076,8 @@ class TcpConnectionSpec extends AkkaSpec(""" if (nioSelector.selectedKeys().contains(serverSelectionKey)) { if (into eq defaultbuffer) into.clear() serverSideChannel.read(into) match { - case -1 => throw new IllegalStateException("Connection was closed unexpectedly with remaining bytes " + remaining) + case -1 => + throw new IllegalStateException("Connection was closed unexpectedly with remaining bytes " + remaining) case 0 => throw new IllegalStateException("Made no progress") case other => other } @@ -1104,10 +1109,11 @@ class TcpConnectionSpec extends AkkaSpec(""" def selectedAs(interest: Int, duration: Duration): BeMatcher[SelectionKey] = new BeMatcher[SelectionKey] { def apply(key: SelectionKey) = - MatchResult( - checkFor(key, interest, 
duration.toMillis.toInt), - "%s key was not selected for %s after %s" format (key.attachment(), interestsDesc(interest), duration), - "%s key was selected for %s after %s" format (key.attachment(), interestsDesc(interest), duration)) + MatchResult(checkFor(key, interest, duration.toMillis.toInt), + "%s key was not selected for %s after %s".format(key.attachment(), + interestsDesc(interest), + duration), + "%s key was selected for %s after %s".format(key.attachment(), interestsDesc(interest), duration)) } val interestsNames = diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala index 50eca143b4..1b9a67a29d 100644 --- a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpec.scala @@ -195,15 +195,14 @@ class TcpIntegrationSpec extends AkkaSpec(""" } } - def chitchat( - clientHandler: TestProbe, - clientConnection: ActorRef, - serverHandler: TestProbe, - serverConnection: ActorRef, - rounds: Int = 100) = { + def chitchat(clientHandler: TestProbe, + clientConnection: ActorRef, + serverHandler: TestProbe, + serverConnection: ActorRef, + rounds: Int = 100) = { val testData = ByteString(0) - (1 to rounds) foreach { _ => + (1 to rounds).foreach { _ => clientHandler.send(clientConnection, Write(testData)) serverHandler.expectMsg(Received(testData)) serverHandler.send(serverConnection, Write(testData)) diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala index ef5942be1d..3005886a75 100644 --- a/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala +++ b/akka-actor-tests/src/test/scala/akka/io/TcpIntegrationSpecSupport.scala @@ -21,7 +21,9 @@ trait TcpIntegrationSpecSupport { _: AkkaSpec => if (runClientInExtraSystem) { val res = ActorSystem("TcpIntegrationSpec-client", 
system.settings.config) // terminate clientSystem after server system - system.whenTerminated.onComplete { _ => res.terminate() }(ExecutionContexts.sameThreadExecutionContext) + system.whenTerminated.onComplete { _ => + res.terminate() + }(ExecutionContexts.sameThreadExecutionContext) res } else system val bindHandler = TestProbe() diff --git a/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala b/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala index 3cef395921..477d89e266 100644 --- a/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/TcpListenerSpec.scala @@ -123,7 +123,7 @@ class TcpListenerSpec extends AkkaSpec(""" listener ! ChannelAcceptable val channel = expectWorkerForCommand - EventFilter.warning(pattern = "selector capacity limit", occurrences = 1) intercept { + EventFilter.warning(pattern = "selector capacity limit", occurrences = 1).intercept { listener ! FailedRegisterIncoming(channel) awaitCond(!channel.isOpen) } @@ -175,12 +175,16 @@ class TcpListenerSpec extends AkkaSpec(""" private class ListenerParent(pullMode: Boolean) extends Actor with ChannelRegistry { val listener = context.actorOf( - props = Props(classOf[TcpListener], selectorRouter.ref, Tcp(system), this, bindCommander.ref, - Bind(handler.ref, endpoint, 100, Nil, pullMode)).withDeploy(Deploy.local), + props = Props(classOf[TcpListener], + selectorRouter.ref, + Tcp(system), + this, + bindCommander.ref, + Bind(handler.ref, endpoint, 100, Nil, pullMode)).withDeploy(Deploy.local), name = "test-listener-" + counter.next()) parent.watch(listener) def receive: Receive = { - case msg => parent.ref forward msg + case msg => parent.ref.forward(msg) } override def supervisorStrategy = SupervisorStrategy.stoppingStrategy @@ -191,5 +195,6 @@ class TcpListenerSpec extends AkkaSpec(""" } object TcpListenerSpec { - final case class RegisterChannel(channel: SelectableChannel, initialOps: Int) extends 
NoSerializationVerificationNeeded + final case class RegisterChannel(channel: SelectableChannel, initialOps: Int) + extends NoSerializationVerificationNeeded } diff --git a/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala index aa7d7584a5..b2a4bf93fe 100644 --- a/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/UdpConnectedIntegrationSpec.scala @@ -5,7 +5,7 @@ package akka.io import java.net.InetSocketAddress -import akka.testkit.{ TestProbe, ImplicitSender, AkkaSpec } +import akka.testkit.{ AkkaSpec, ImplicitSender, TestProbe } import akka.util.ByteString import akka.actor.ActorRef import akka.testkit.SocketUtil.temporaryServerAddresses @@ -24,7 +24,9 @@ class UdpConnectedIntegrationSpec extends AkkaSpec(""" commander.sender() } - def connectUdp(localAddress: Option[InetSocketAddress], remoteAddress: InetSocketAddress, handler: ActorRef): ActorRef = { + def connectUdp(localAddress: Option[InetSocketAddress], + remoteAddress: InetSocketAddress, + handler: ActorRef): ActorRef = { val commander = TestProbe() commander.send(IO(UdpConnected), UdpConnected.Connect(handler, remoteAddress, localAddress, Nil)) commander.expectMsg(UdpConnected.Connected) diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala index 8731a14925..22ccdc954c 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/AsyncDnsResolverIntegrationSpec.scala @@ -23,9 +23,8 @@ These tests rely on a DNS server with 2 zones configured, foo.test and bar.examp The configuration to start a bind DNS server in Docker with this configuration is included, and the test will automatically start this container when the test starts and 
tear it down when it finishes. -*/ -class AsyncDnsResolverIntegrationSpec extends AkkaSpec( - s""" + */ +class AsyncDnsResolverIntegrationSpec extends AkkaSpec(s""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] akka.io.dns.resolver = async-dns @@ -62,17 +61,16 @@ class AsyncDnsResolverIntegrationSpec extends AkkaSpec( val name = "a-double.foo.test" val answer = resolve(name) answer.name shouldEqual name - answer.records.map(_.asInstanceOf[ARecord].ip).toSet shouldEqual Set( - InetAddress.getByName("192.168.1.21"), - InetAddress.getByName("192.168.1.22") - ) + answer.records.map(_.asInstanceOf[ARecord].ip).toSet shouldEqual Set(InetAddress.getByName("192.168.1.21"), + InetAddress.getByName("192.168.1.22")) } "resolve single AAAA record" in { val name = "aaaa-single.foo.test" val answer = resolve(name) answer.name shouldEqual name - answer.records.map(_.asInstanceOf[AAAARecord].ip) shouldEqual Seq(InetAddress.getByName("fd4d:36b2:3eca:a2d8:0:0:0:1")) + answer.records.map(_.asInstanceOf[AAAARecord].ip) shouldEqual Seq( + InetAddress.getByName("fd4d:36b2:3eca:a2d8:0:0:0:1")) } "resolve double AAAA records" in { @@ -81,8 +79,7 @@ class AsyncDnsResolverIntegrationSpec extends AkkaSpec( answer.name shouldEqual name answer.records.map(_.asInstanceOf[AAAARecord].ip).toSet shouldEqual Set( InetAddress.getByName("fd4d:36b2:3eca:a2d8:0:0:0:2"), - InetAddress.getByName("fd4d:36b2:3eca:a2d8:0:0:0:3") - ) + InetAddress.getByName("fd4d:36b2:3eca:a2d8:0:0:0:3")) } "resolve mixed A/AAAA records" in { @@ -90,40 +87,29 @@ class AsyncDnsResolverIntegrationSpec extends AkkaSpec( val answer = resolve(name) answer.name shouldEqual name - answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set( - InetAddress.getByName("192.168.1.23"), - InetAddress.getByName("192.168.1.24") - ) + answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set(InetAddress.getByName("192.168.1.23"), + InetAddress.getByName("192.168.1.24")) 
answer.records.collect { case r: AAAARecord => r.ip }.toSet shouldEqual Set( InetAddress.getByName("fd4d:36b2:3eca:a2d8:0:0:0:4"), - InetAddress.getByName("fd4d:36b2:3eca:a2d8:0:0:0:5") - ) + InetAddress.getByName("fd4d:36b2:3eca:a2d8:0:0:0:5")) } "resolve external CNAME record" in { val name = "cname-ext.foo.test" val answer = (IO(Dns) ? DnsProtocol.Resolve(name)).mapTo[DnsProtocol.Resolved].futureValue answer.name shouldEqual name - answer.records.collect { case r: CNameRecord => r.canonicalName }.toSet shouldEqual Set( - "a-single.bar.example" - ) - answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set( - InetAddress.getByName("192.168.2.20") - ) + answer.records.collect { case r: CNameRecord => r.canonicalName }.toSet shouldEqual Set("a-single.bar.example") + answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set(InetAddress.getByName("192.168.2.20")) } "resolve internal CNAME record" in { val name = "cname-in.foo.test" val answer = resolve(name) answer.name shouldEqual name - answer.records.collect { case r: CNameRecord => r.canonicalName }.toSet shouldEqual Set( - "a-double.foo.test" - ) - answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set( - InetAddress.getByName("192.168.1.21"), - InetAddress.getByName("192.168.1.22") - ) + answer.records.collect { case r: CNameRecord => r.canonicalName }.toSet shouldEqual Set("a-double.foo.test") + answer.records.collect { case r: ARecord => r.ip }.toSet shouldEqual Set(InetAddress.getByName("192.168.1.21"), + InetAddress.getByName("192.168.1.22")) } "resolve SRV record" in { @@ -133,13 +119,14 @@ class AsyncDnsResolverIntegrationSpec extends AkkaSpec( answer.name shouldEqual name answer.records.collect { case r: SRVRecord => r }.toSet shouldEqual Set( SRVRecord("_service._tcp.foo.test", Ttl.fromPositive(86400.seconds), 10, 65534, 5060, "a-single.foo.test"), - SRVRecord("_service._tcp.foo.test", Ttl.fromPositive(86400.seconds), 65533, 40, 65535, 
"a-double.foo.test") - ) + SRVRecord("_service._tcp.foo.test", Ttl.fromPositive(86400.seconds), 65533, 40, 65535, "a-double.foo.test")) } "resolve same address twice" in { - resolve("a-single.foo.test").records.map(_.asInstanceOf[ARecord].ip) shouldEqual Seq(InetAddress.getByName("192.168.1.20")) - resolve("a-single.foo.test").records.map(_.asInstanceOf[ARecord].ip) shouldEqual Seq(InetAddress.getByName("192.168.1.20")) + resolve("a-single.foo.test").records.map(_.asInstanceOf[ARecord].ip) shouldEqual Seq( + InetAddress.getByName("192.168.1.20")) + resolve("a-single.foo.test").records.map(_.asInstanceOf[ARecord].ip) shouldEqual Seq( + InetAddress.getByName("192.168.1.20")) } "handle nonexistent domains" in { diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala index 518b1d7195..531b576433 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/DnsSettingsSpec.scala @@ -17,8 +17,8 @@ class DnsSettingsSpec extends AkkaSpec { "DNS settings" must { "use host servers if set to default" in { - val dnsSettings = new DnsSettings(eas, ConfigFactory.parseString( - """ + val dnsSettings = new DnsSettings(eas, + ConfigFactory.parseString(""" nameservers = "default" resolve-timeout = 1s search-domains = [] @@ -30,8 +30,8 @@ class DnsSettingsSpec extends AkkaSpec { } "parse a single name server" in { - val dnsSettings = new DnsSettings(eas, ConfigFactory.parseString( - """ + val dnsSettings = new DnsSettings(eas, + ConfigFactory.parseString(""" nameservers = "127.0.0.1" resolve-timeout = 1s search-domains = [] @@ -42,22 +42,21 @@ class DnsSettingsSpec extends AkkaSpec { } "parse a list of name servers" in { - val dnsSettings = new DnsSettings(eas, ConfigFactory.parseString( - """ + val dnsSettings = new DnsSettings(eas, + ConfigFactory.parseString(""" nameservers = ["127.0.0.1", "127.0.0.2"] resolve-timeout = 1s 
search-domains = [] ndots = 1 """)) - dnsSettings.NameServers.map(_.getAddress) shouldEqual List( - InetAddress.getByName("127.0.0.1"), InetAddress.getByName("127.0.0.2") - ) + dnsSettings.NameServers.map(_.getAddress) shouldEqual List(InetAddress.getByName("127.0.0.1"), + InetAddress.getByName("127.0.0.2")) } "use host search domains if set to default" in { - val dnsSettings = new DnsSettings(eas, ConfigFactory.parseString( - """ + val dnsSettings = new DnsSettings(eas, + ConfigFactory.parseString(""" nameservers = "127.0.0.1" resolve-timeout = 1s search-domains = "default" @@ -69,8 +68,8 @@ class DnsSettingsSpec extends AkkaSpec { } "parse a single search domain" in { - val dnsSettings = new DnsSettings(eas, ConfigFactory.parseString( - """ + val dnsSettings = new DnsSettings(eas, + ConfigFactory.parseString(""" nameservers = "127.0.0.1" resolve-timeout = 1s search-domains = "example.com" @@ -81,8 +80,8 @@ class DnsSettingsSpec extends AkkaSpec { } "parse a single list of search domains" in { - val dnsSettings = new DnsSettings(eas, ConfigFactory.parseString( - """ + val dnsSettings = new DnsSettings(eas, + ConfigFactory.parseString(""" nameservers = "127.0.0.1" resolve-timeout = 1s search-domains = [ "example.com", "example.net" ] @@ -93,8 +92,8 @@ class DnsSettingsSpec extends AkkaSpec { } "use host ndots if set to default" in { - val dnsSettings = new DnsSettings(eas, ConfigFactory.parseString( - """ + val dnsSettings = new DnsSettings(eas, + ConfigFactory.parseString(""" nameservers = "127.0.0.1" resolve-timeout = 1s search-domains = "example.com" @@ -106,8 +105,8 @@ class DnsSettingsSpec extends AkkaSpec { } "parse ndots" in { - val dnsSettings = new DnsSettings(eas, ConfigFactory.parseString( - """ + val dnsSettings = new DnsSettings(eas, + ConfigFactory.parseString(""" nameservers = "127.0.0.1" resolve-timeout = 1s search-domains = "example.com" diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/DockerBindDnsService.scala 
b/akka-actor-tests/src/test/scala/akka/io/dns/DockerBindDnsService.scala index 321c102a48..3339f7555c 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/DockerBindDnsService.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/DockerBindDnsService.scala @@ -38,25 +38,30 @@ trait DockerBindDnsService extends Eventually { self: AkkaSpec => return } - val containerConfig = ContainerConfig.builder() + val containerConfig = ContainerConfig + .builder() .image(image) .env("NO_CHOWN=true") .cmd("-4") // only listen on ipv4 .hostConfig( - HostConfig.builder() - .portBindings(Map( - "53/tcp" -> List(PortBinding.of("", hostPort)).asJava, - "53/udp" -> List(PortBinding.of("", hostPort)).asJava - ).asJava) - .binds(HostConfig.Bind.from(new java.io.File("akka-actor-tests/src/test/bind/").getAbsolutePath).to("/data/bind").build()) - .build() - ) + HostConfig + .builder() + .portBindings(Map("53/tcp" -> List(PortBinding.of("", hostPort)).asJava, + "53/udp" -> List(PortBinding.of("", hostPort)).asJava).asJava) + .binds(HostConfig.Bind + .from(new java.io.File("akka-actor-tests/src/test/bind/").getAbsolutePath) + .to("/data/bind") + .build()) + .build()) .build() val containerName = "akka-test-dns-" + getClass.getCanonicalName - client.listContainers(ListContainersParam.allContainers()).asScala - .find(_.names().asScala.exists(_.contains(containerName))).foreach(c => { + client + .listContainers(ListContainersParam.allContainers()) + .asScala + .find(_.names().asScala.exists(_.contains(containerName))) + .foreach(c => { if ("running" == c.state()) { client.killContainer(c.id) } diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/NameserverAddressParserSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/NameserverAddressParserSpec.scala index 9ee84665e3..b1d0d9cba1 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/NameserverAddressParserSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/NameserverAddressParserSpec.scala @@ -14,13 +14,17 @@ class 
NameserverAddressParserSpec extends WordSpec with Matchers { DnsSettings.parseNameserverAddress("8.8.8.8:153") shouldEqual new InetSocketAddress("8.8.8.8", 153) } "handle explicit port in IPv6 address" in { - DnsSettings.parseNameserverAddress("[2001:4860:4860::8888]:153") shouldEqual new InetSocketAddress("2001:4860:4860::8888", 153) + DnsSettings.parseNameserverAddress("[2001:4860:4860::8888]:153") shouldEqual new InetSocketAddress( + "2001:4860:4860::8888", + 153) } "handle default port in IPv4 address" in { DnsSettings.parseNameserverAddress("8.8.8.8") shouldEqual new InetSocketAddress("8.8.8.8", 53) } "handle default port in IPv6 address" in { - DnsSettings.parseNameserverAddress("[2001:4860:4860::8888]") shouldEqual new InetSocketAddress("2001:4860:4860::8888", 53) + DnsSettings.parseNameserverAddress("[2001:4860:4860::8888]") shouldEqual new InetSocketAddress( + "2001:4860:4860::8888", + 53) } } } diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsManagerSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsManagerSpec.scala index 08b451bf53..ce58f21df5 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsManagerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsManagerSpec.scala @@ -15,8 +15,7 @@ import akka.testkit.{ AkkaSpec, ImplicitSender } import scala.collection.immutable.Seq -class AsyncDnsManagerSpec extends AkkaSpec( - """ +class AsyncDnsManagerSpec extends AkkaSpec(""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] akka.io.dns.resolver = async-dns @@ -46,7 +45,7 @@ class AsyncDnsManagerSpec extends AkkaSpec( "provide access to cache" in { dns ! 
AsyncDnsManager.GetCache - expectMsgType[AsyncDnsCache] should be theSameInstanceAs Dns(system).cache + (expectMsgType[AsyncDnsCache] should be).theSameInstanceAs(Dns(system).cache) } } diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala index 97d49f9b15..9d3b9fdd6e 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/internal/AsyncDnsResolverSpec.scala @@ -20,8 +20,7 @@ import akka.testkit.WithLogCapturing import scala.collection.{ immutable => im } import scala.concurrent.duration._ -class AsyncDnsResolverSpec extends AkkaSpec( - """ +class AsyncDnsResolverSpec extends AkkaSpec(""" akka.loglevel = DEBUG akka.loggers = ["akka.testkit.SilenceAllTestEventListener"] """) with WithLogCapturing { @@ -106,8 +105,7 @@ class AsyncDnsResolverSpec extends AkkaSpec( dnsClient1.expectNoMessage(50.millis) val answer = senderProbe.expectMsgType[Resolved] answer.records.collect { case r: ARecord => r }.toSet shouldEqual Set( - ARecord("127.0.0.1", Ttl.effectivelyForever, InetAddress.getByName("127.0.0.1")) - ) + ARecord("127.0.0.1", Ttl.effectivelyForever, InetAddress.getByName("127.0.0.1"))) } "response immediately for IPv6 address" in new Setup { @@ -115,7 +113,9 @@ class AsyncDnsResolverSpec extends AkkaSpec( r ! 
Resolve(name) dnsClient1.expectNoMessage(50.millis) val answer = senderProbe.expectMsgType[Resolved] - val Seq(AAAARecord("1:2:3:0:0:0:0:0", Ttl.effectivelyForever, _)) = answer.records.collect { case r: AAAARecord => r } + val Seq(AAAARecord("1:2:3:0:0:0:0:0", Ttl.effectivelyForever, _)) = answer.records.collect { + case r: AAAARecord => r + } } "return additional records for SRV requests" in new Setup { @@ -134,8 +134,8 @@ class AsyncDnsResolverSpec extends AkkaSpec( } def resolver(clients: List[ActorRef]): ActorRef = { - val settings = new DnsSettings(system.asInstanceOf[ExtendedActorSystem], ConfigFactory.parseString( - """ + val settings = new DnsSettings(system.asInstanceOf[ExtendedActorSystem], + ConfigFactory.parseString(""" nameservers = ["one","two"] resolve-timeout = 300ms search-domains = [] diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/internal/MessageSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/internal/MessageSpec.scala index 19eadaa7ac..be4ec93486 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/internal/MessageSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/internal/MessageSpec.scala @@ -11,7 +11,8 @@ import org.scalatest.{ Matchers, WordSpec } class MessageSpec extends WordSpec with Matchers { "The Message" should { "parse a response that is truncated mid-message" in { - val bytes = ByteString(0, 4, -125, -128, 0, 1, 0, 48, 0, 0, 0, 0, 4, 109, 97, 110, 121, 4, 98, 122, 122, 116, 3, 110, 101, 116, 0, 0, 28, 0, 1) + val bytes = ByteString(0, 4, -125, -128, 0, 1, 0, 48, 0, 0, 0, 0, 4, 109, 97, 110, 121, 4, 98, 122, 122, 116, 3, + 110, 101, 116, 0, 0, 28, 0, 1) val msg = Message.parse(bytes) msg.id should be(4) msg.flags.isTruncated should be(true) diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/internal/ResolvConfParserSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/internal/ResolvConfParserSpec.scala index bda6e14874..f538fd3cf5 100644 --- 
a/akka-actor-tests/src/test/scala/akka/io/dns/internal/ResolvConfParserSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/internal/ResolvConfParserSpec.scala @@ -15,8 +15,7 @@ class ResolvConfParserSpec extends WordSpec with Matchers { "The ResolvConfParser" should { "parse an actual Kubernetes resolv.conf file" in { - val resolvConf = parse( - """nameserver 172.30.0.2 + val resolvConf = parse("""nameserver 172.30.0.2 |search myproject.svc.cluster.local svc.cluster.local cluster.local |options ndots:5""".stripMargin) resolvConf.search should be(List("myproject.svc.cluster.local", "svc.cluster.local", "cluster.local")) @@ -24,20 +23,17 @@ class ResolvConfParserSpec extends WordSpec with Matchers { } "ignore # comments" in { - parse( - """search example.com + parse("""search example.com |#search foobar.com""".stripMargin).search should be(List("example.com")) } "ignore ; comments" in { - parse( - """search example.com + parse("""search example.com |;search foobar.com""".stripMargin).search should be(List("example.com")) } "use the last search element found" in { - parse( - """search example.com + parse("""search example.com |search foobar.com""".stripMargin).search should be(List("foobar.com")) } @@ -46,8 +42,7 @@ class ResolvConfParserSpec extends WordSpec with Matchers { } "use the last domain element found" in { - parse( - """domain example.com + parse("""domain example.com |domain foobar.com """.stripMargin).search should be(List("foobar.com")) } diff --git a/akka-actor-tests/src/test/scala/akka/io/dns/internal/TcpDnsClientSpec.scala b/akka-actor-tests/src/test/scala/akka/io/dns/internal/TcpDnsClientSpec.scala index de9c8b3078..33801a1634 100644 --- a/akka-actor-tests/src/test/scala/akka/io/dns/internal/TcpDnsClientSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/io/dns/internal/TcpDnsClientSpec.scala @@ -90,7 +90,7 @@ class TcpDnsClientSpec extends AkkaSpec with ImplicitSender { expectMsgType[Tcp.Write] val fullResponse = 
encodeLength(exampleResponseMessage.write().length) ++ exampleResponseMessage.write() ++ - encodeLength(exampleResponseMessage.write().length) ++ exampleResponseMessage.copy(id = 43).write() + encodeLength(exampleResponseMessage.write().length) ++ exampleResponseMessage.copy(id = 43).write() registered ! Tcp.Received(fullResponse.take(8)) registered ! Tcp.Received(fullResponse.drop(8)) diff --git a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala index 8d4731e707..d96a22f799 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala @@ -5,7 +5,7 @@ package akka.pattern import akka.actor._ -import akka.testkit.{ TestProbe, AkkaSpec } +import akka.testkit.{ AkkaSpec, TestProbe } import akka.util.Timeout import scala.concurrent.Await @@ -53,14 +53,16 @@ class AskSpec extends AkkaSpec { intercept[IllegalArgumentException] { Await.result(f, timeout.duration) - }.getMessage should ===("Unsupported recipient type, question not sent to [null]. Message of type [java.lang.Double].") + }.getMessage should ===( + "Unsupported recipient type, question not sent to [null]. Message of type [java.lang.Double].") } "return broken promises on 0 timeout" in { implicit val timeout = Timeout(0 seconds) val echo = system.actorOf(Props(new Actor { def receive = { case x => sender() ! x } })) val f = echo ? "foo" - val expectedMsg = s"Timeout length must be positive, question not sent to [$echo]. Message of type [java.lang.String]." + val expectedMsg = + s"Timeout length must be positive, question not sent to [$echo]. Message of type [java.lang.String]." intercept[IllegalArgumentException] { Await.result(f, timeout.duration) }.getMessage should ===(expectedMsg) @@ -70,7 +72,8 @@ class AskSpec extends AkkaSpec { implicit val timeout = Timeout(-1000 seconds) val echo = system.actorOf(Props(new Actor { def receive = { case x => sender() ! 
x } })) val f = echo ? "foo" - val expectedMsg = s"Timeout length must be positive, question not sent to [$echo]. Message of type [java.lang.String]." + val expectedMsg = + s"Timeout length must be positive, question not sent to [$echo]. Message of type [java.lang.String]." intercept[IllegalArgumentException] { Await.result(f, timeout.duration) }.getMessage should ===(expectedMsg) @@ -114,8 +117,8 @@ class AskSpec extends AkkaSpec { implicit val timeout = Timeout(5 seconds) import system.dispatcher val echo = system.actorOf(Props(new Actor { def receive = { case x => sender() ! x } }), "select-echo") - val identityFuture = (system.actorSelection("/user/select-echo") ? Identify(None)) - .mapTo[ActorIdentity].map(_.ref.get) + val identityFuture = + (system.actorSelection("/user/select-echo") ? Identify(None)).mapTo[ActorIdentity].map(_.ref.get) Await.result(identityFuture, 5 seconds) should ===(echo) } @@ -125,7 +128,9 @@ class AskSpec extends AkkaSpec { val deadListener = TestProbe() system.eventStream.subscribe(deadListener.ref, classOf[DeadLetter]) - val echo = system.actorOf(Props(new Actor { def receive = { case x => context.actorSelection(sender().path) ! x } }), "select-echo2") + val echo = system.actorOf(Props(new Actor { + def receive = { case x => context.actorSelection(sender().path) ! x } + }), "select-echo2") val f = echo ? "hi" Await.result(f, 1 seconds) should ===("hi") @@ -138,7 +143,8 @@ class AskSpec extends AkkaSpec { val deadListener = TestProbe() system.eventStream.subscribe(deadListener.ref, classOf[DeadLetter]) - val echo = system.actorOf(Props(new Actor { def receive = { case x => context.actorSelection("/temp/*") ! x } }), "select-echo3") + val echo = system.actorOf(Props(new Actor { def receive = { case x => context.actorSelection("/temp/*") ! x } }), + "select-echo3") val f = echo ? 
"hi" intercept[AskTimeoutException] { Await.result(f, 1 seconds) diff --git a/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala index 0ca342998d..3b4bf069b7 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/BackoffOnRestartSupervisorSpec.scala @@ -4,10 +4,10 @@ package akka.pattern -import java.util.concurrent.{ TimeUnit, CountDownLatch } +import java.util.concurrent.{ CountDownLatch, TimeUnit } import akka.pattern.TestActor.NormalException -import akka.testkit.{ ImplicitSender, AkkaSpec, TestProbe, filterException } +import akka.testkit.{ filterException, AkkaSpec, ImplicitSender, TestProbe } import scala.concurrent.duration._ import akka.actor._ import scala.language.postfixOps @@ -52,7 +52,8 @@ class TestParentActor(probe: ActorRef, supervisorProps: Props) extends Actor { class BackoffOnRestartSupervisorSpec extends AkkaSpec with ImplicitSender { def supervisorProps(probeRef: ActorRef) = { - val options = Backoff.onFailure(TestActor.props(probeRef), "someChildName", 200 millis, 10 seconds, 0.0, maxNrOfRetries = -1) + val options = Backoff + .onFailure(TestActor.props(probeRef), "someChildName", 200 millis, 10 seconds, 0.0, maxNrOfRetries = -1) .withSupervisorStrategy(OneForOneStrategy(maxNrOfRetries = 4, withinTimeRange = 30 seconds) { case _: TestActor.StoppingException => SupervisorStrategy.Stop }) @@ -139,7 +140,13 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec with ImplicitSender { "accept commands while child is terminating" in { val postStopLatch = new CountDownLatch(1) - val options = Backoff.onFailure(Props(new SlowlyFailingActor(postStopLatch)), "someChildName", 1 nanos, 1 nanos, 0.0, maxNrOfRetries = -1) + val options = Backoff + .onFailure(Props(new SlowlyFailingActor(postStopLatch)), + "someChildName", + 1 nanos, + 1 nanos, + 0.0, + 
maxNrOfRetries = -1) .withSupervisorStrategy(OneForOneStrategy(loggingEnabled = false) { case _: TestActor.StoppingException => SupervisorStrategy.Stop }) @@ -197,7 +204,8 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec with ImplicitSender { // withinTimeRange indicates the time range in which maxNrOfRetries will cause the child to // stop. IE: If we restart more than maxNrOfRetries in a time range longer than withinTimeRange // that is acceptable. - val options = Backoff.onFailure(TestActor.props(probe.ref), "someChildName", 300 millis, 10 seconds, 0.0, maxNrOfRetries = -1) + val options = Backoff + .onFailure(TestActor.props(probe.ref), "someChildName", 300 millis, 10 seconds, 0.0, maxNrOfRetries = -1) .withSupervisorStrategy(OneForOneStrategy(withinTimeRange = 1 seconds, maxNrOfRetries = 3) { case _: TestActor.StoppingException => SupervisorStrategy.Stop }) diff --git a/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala index 6947cec26c..dcb7847395 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/BackoffSupervisorSpec.scala @@ -46,8 +46,10 @@ object BackoffSupervisorSpec { class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually { import BackoffSupervisorSpec._ - def onStopOptions(props: Props = Child.props(testActor), maxNrOfRetries: Int = -1) = Backoff.onStop(props, "c1", 100.millis, 3.seconds, 0.2, maxNrOfRetries) - def onFailureOptions(props: Props = Child.props(testActor), maxNrOfRetries: Int = -1) = Backoff.onFailure(props, "c1", 100.millis, 3.seconds, 0.2, maxNrOfRetries) + def onStopOptions(props: Props = Child.props(testActor), maxNrOfRetries: Int = -1) = + Backoff.onStop(props, "c1", 100.millis, 3.seconds, 0.2, maxNrOfRetries) + def onFailureOptions(props: Props = Child.props(testActor), maxNrOfRetries: Int = -1) = + 
Backoff.onFailure(props, "c1", 100.millis, 3.seconds, 0.2, maxNrOfRetries) def create(options: BackoffOptions) = system.actorOf(BackoffSupervisor.props(options)) "BackoffSupervisor" must { @@ -95,13 +97,9 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually case _: TestException => SupervisorStrategy.Restart } - assertCustomStrategy( - create(onStopOptions() - .withSupervisorStrategy(stoppingStrategy))) + assertCustomStrategy(create(onStopOptions().withSupervisorStrategy(stoppingStrategy))) - assertCustomStrategy( - create(onFailureOptions() - .withSupervisorStrategy(restartingStrategy))) + assertCustomStrategy(create(onFailureOptions().withSupervisorStrategy(restartingStrategy))) } } @@ -166,20 +164,20 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually } assertManualReset( - create(onStopOptions(ManualChild.props(testActor)) - .withManualReset - .withSupervisorStrategy(stoppingStrategy))) + create(onStopOptions(ManualChild.props(testActor)).withManualReset.withSupervisorStrategy(stoppingStrategy))) assertManualReset( - create(onFailureOptions(ManualChild.props(testActor)) - .withManualReset - .withSupervisorStrategy(restartingStrategy))) + create( + onFailureOptions(ManualChild.props(testActor)).withManualReset.withSupervisorStrategy(restartingStrategy))) } } "reply to sender if replyWhileStopped is specified" in { filterException[TestException] { - val supervisor = create(Backoff.onFailure(Child.props(testActor), "c1", 100.seconds, 300.seconds, 0.2, maxNrOfRetries = -1).withReplyWhileStopped("child was stopped")) + val supervisor = create( + Backoff + .onFailure(Child.props(testActor), "c1", 100.seconds, 300.seconds, 0.2, maxNrOfRetries = -1) + .withReplyWhileStopped("child was stopped")) supervisor ! 
BackoffSupervisor.GetCurrentChild val c1 = expectMsgType[BackoffSupervisor.CurrentChild].ref.get watch(c1) @@ -201,7 +199,8 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually "not reply to sender if replyWhileStopped is NOT specified" in { filterException[TestException] { - val supervisor = create(Backoff.onFailure(Child.props(testActor), "c1", 100.seconds, 300.seconds, 0.2, maxNrOfRetries = -1)) + val supervisor = + create(Backoff.onFailure(Child.props(testActor), "c1", 100.seconds, 300.seconds, 0.2, maxNrOfRetries = -1)) supervisor ! BackoffSupervisor.GetCurrentChild val c1 = expectMsgType[BackoffSupervisor.CurrentChild].ref.get watch(c1) @@ -223,24 +222,22 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually "correctly calculate the delay" in { val delayTable = - Table( - ("restartCount", "minBackoff", "maxBackoff", "randomFactor", "expectedResult"), - (0, 0.minutes, 0.minutes, 0d, 0.minutes), - (0, 5.minutes, 7.minutes, 0d, 5.minutes), - (2, 5.seconds, 7.seconds, 0d, 7.seconds), - (2, 5.seconds, 7.days, 0d, 20.seconds), - (29, 5.minutes, 10.minutes, 0d, 10.minutes), - (29, 10000.days, 10000.days, 0d, 10000.days), - (Int.MaxValue, 10000.days, 10000.days, 0d, 10000.days)) - forAll(delayTable) { ( - restartCount: Int, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - expectedResult: FiniteDuration) => - - val calculatedValue = BackoffSupervisor.calculateDelay(restartCount, minBackoff, maxBackoff, randomFactor) - assert(calculatedValue === expectedResult) + Table(("restartCount", "minBackoff", "maxBackoff", "randomFactor", "expectedResult"), + (0, 0.minutes, 0.minutes, 0d, 0.minutes), + (0, 5.minutes, 7.minutes, 0d, 5.minutes), + (2, 5.seconds, 7.seconds, 0d, 7.seconds), + (2, 5.seconds, 7.days, 0d, 20.seconds), + (29, 5.minutes, 10.minutes, 0d, 10.minutes), + (29, 10000.days, 10000.days, 0d, 10000.days), + (Int.MaxValue, 10000.days, 10000.days, 0d, 10000.days)) + 
forAll(delayTable) { + (restartCount: Int, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + expectedResult: FiniteDuration) => + val calculatedValue = BackoffSupervisor.calculateDelay(restartCount, minBackoff, maxBackoff, randomFactor) + assert(calculatedValue === expectedResult) } } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala index 92bebbaf9e..a9976fca08 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala @@ -7,7 +7,7 @@ package akka.pattern import akka.testkit._ import scala.collection.immutable import scala.concurrent.duration._ -import scala.concurrent.{ Future, Await } +import scala.concurrent.{ Await, Future } class CircuitBreakerMTSpec extends AkkaSpec { implicit val ec = system.dispatcher @@ -21,28 +21,34 @@ class CircuitBreakerMTSpec extends AkkaSpec { def openBreaker(): Unit = { // returns true if the breaker is open def failingCall(): Boolean = - Await.result(breaker.withCircuitBreaker(Future(throw new RuntimeException("FAIL"))) recover { + Await.result(breaker.withCircuitBreaker(Future(throw new RuntimeException("FAIL"))).recover { case _: CircuitBreakerOpenException => true case _ => false }, remainingOrDefault) // fire some failing calls - 1 to (maxFailures + 1) foreach { _ => failingCall() } + (1 to (maxFailures + 1)).foreach { _ => + failingCall() + } // and then continue with failing calls until the breaker is open awaitCond(failingCall()) } def testCallsWithBreaker(): immutable.IndexedSeq[Future[String]] = { val aFewActive = new TestLatch(5) - for (_ <- 1 to numberOfTestCalls) yield breaker.withCircuitBreaker(Future { - aFewActive.countDown() - Await.ready(aFewActive, 5.seconds.dilated) - "succeed" - }) recoverWith { - case _: CircuitBreakerOpenException => - aFewActive.countDown() - 
Future.successful("CBO") - } + for (_ <- 1 to numberOfTestCalls) + yield + breaker + .withCircuitBreaker(Future { + aFewActive.countDown() + Await.ready(aFewActive, 5.seconds.dilated) + "succeed" + }) + .recoverWith { + case _: CircuitBreakerOpenException => + aFewActive.countDown() + Future.successful("CBO") + } } "allow many calls while in closed state with no errors" in { diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala index f425914e7b..7b6aa86cb0 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala @@ -8,7 +8,7 @@ import akka.actor.ActorSystem import language.postfixOps import scala.concurrent.duration._ import scala.concurrent.{ Await, ExecutionContext, Future, TimeoutException } -import scala.util.{ Try, Success, Failure } +import scala.util.{ Failure, Success, Try } import akka.testkit._ import org.mockito.ArgumentCaptor import org.scalatest.BeforeAndAfter @@ -641,7 +641,8 @@ class CircuitBreakerSpec extends AkkaSpec with BeforeAndAfter with MockitoSugar "even number is considered failure" in { val breaker = CircuitBreakerSpec.longCallTimeoutCb() breaker().currentFailureCount should ===(0) - val result = Await.result(breaker().withCircuitBreaker(Future(2), CircuitBreakerSpec.evenNumberIsFailure), awaitTimeout) + val result = + Await.result(breaker().withCircuitBreaker(Future(2), CircuitBreakerSpec.evenNumberIsFailure), awaitTimeout) checkLatch(breaker.openLatch) breaker().currentFailureCount should ===(1) result should ===(2) @@ -669,7 +670,9 @@ class CircuitBreakerSpec extends AkkaSpec with BeforeAndAfter with MockitoSugar val breaker: CircuitBreakerSpec.Breaker = CircuitBreakerSpec.multiFailureCb() for (_ <- 1 to 4) breaker().withCircuitBreaker(Future(throwException)) - awaitCond(breaker().currentFailureCount == 4, awaitTimeout, message = 
s"Current failure count: ${breaker().currentFailureCount}") + awaitCond(breaker().currentFailureCount == 4, + awaitTimeout, + message = s"Current failure count: ${breaker().currentFailureCount}") val harmlessException = new TestException val harmlessExceptionAsSuccess: Try[String] => Boolean = { diff --git a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala index 595f2da8fb..ff84f014b1 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala @@ -6,9 +6,9 @@ package akka.pattern import language.postfixOps -import akka.testkit.{ TestLatch, AkkaSpec } -import akka.actor.{ Props, Actor } -import scala.concurrent.{ Future, Promise, Await } +import akka.testkit.{ AkkaSpec, TestLatch } +import akka.actor.{ Actor, Props } +import scala.concurrent.{ Await, Future, Promise } import scala.concurrent.duration._ object PatternSpec { diff --git a/akka-actor-tests/src/test/scala/akka/pattern/PipeToSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/PipeToSpec.scala index adccf58f34..85ac6be645 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/PipeToSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/PipeToSpec.scala @@ -17,20 +17,20 @@ class PipeToSpec extends AkkaSpec { "work" in { val p = TestProbe() - Future(42) pipeTo p.ref + Future(42).pipeTo(p.ref) p.expectMsg(42) } "signal failure" in { val p = TestProbe() - Future.failed(new Exception("failed")) pipeTo p.ref + Future.failed(new Exception("failed")).pipeTo(p.ref) p.expectMsgType[Status.Failure].cause.getMessage should ===("failed") } "pick up an implicit sender()" in { val p = TestProbe() implicit val s = testActor - Future(42) pipeTo p.ref + Future(42).pipeTo(p.ref) p.expectMsg(42) p.lastSender should ===(s) } @@ -55,14 +55,14 @@ class PipeToSpec extends AkkaSpec { "work" in { val p = TestProbe() val sel = 
system.actorSelection(p.ref.path) - Future(42) pipeToSelection sel + Future(42).pipeToSelection(sel) p.expectMsg(42) } "signal failure" in { val p = TestProbe() val sel = system.actorSelection(p.ref.path) - Future.failed(new Exception("failed")) pipeToSelection sel + Future.failed(new Exception("failed")).pipeToSelection(sel) p.expectMsgType[Status.Failure].cause.getMessage should ===("failed") } @@ -70,7 +70,7 @@ class PipeToSpec extends AkkaSpec { val p = TestProbe() val sel = system.actorSelection(p.ref.path) implicit val s = testActor - Future(42) pipeToSelection sel + Future(42).pipeToSelection(sel) p.expectMsg(42) p.lastSender should ===(s) } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/PromiseRefSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/PromiseRefSpec.scala index 2f49fc80dc..3052c1a726 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/PromiseRefSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/PromiseRefSpec.scala @@ -5,7 +5,7 @@ package akka.pattern import akka.actor._ -import akka.testkit.{ AkkaSpec, TestProbe, ImplicitSender } +import akka.testkit.{ AkkaSpec, ImplicitSender, TestProbe } import scala.concurrent.Promise import scala.concurrent.duration._ diff --git a/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala index d726a90f9c..3c5f0d2b16 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/RetrySpec.scala @@ -18,11 +18,7 @@ class RetrySpec extends AkkaSpec with RetrySupport { "pattern.retry" must { "run a successful Future immediately" in { - val retried = retry( - () => Future.successful(5), - 5, - 1 second - ) + val retried = retry(() => Future.successful(5), 5, 1 second) within(3 seconds) { Await.result(retried, remaining) should ===(5) @@ -31,14 +27,13 @@ class RetrySpec extends AkkaSpec with RetrySupport { "run a successful Future only once" in { 
@volatile var counter = 0 - val retried = retry( - () => Future.successful({ - counter += 1 - counter - }), - 5, - 1 second - ) + val retried = retry(() => + Future.successful({ + counter += 1 + counter + }), + 5, + 1 second) within(3 seconds) { Await.result(retried, remaining) should ===(1) @@ -46,11 +41,7 @@ class RetrySpec extends AkkaSpec with RetrySupport { } "eventually return a failure for a Future that will never succeed" in { - val retried = retry( - () => Future.failed(new IllegalStateException("Mexico")), - 5, - 100 milliseconds - ) + val retried = retry(() => Future.failed(new IllegalStateException("Mexico")), 5, 100 milliseconds) within(3 second) { intercept[IllegalStateException] { Await.result(retried, remaining) }.getMessage should ===("Mexico") @@ -67,11 +58,7 @@ class RetrySpec extends AkkaSpec with RetrySupport { } else Future.successful(5) } - val retried = retry( - () => attempt, - 10, - 100 milliseconds - ) + val retried = retry(() => attempt, 10, 100 milliseconds) within(3 seconds) { Await.result(retried, remaining) should ===(5) @@ -88,11 +75,7 @@ class RetrySpec extends AkkaSpec with RetrySupport { } else Future.successful(5) } - val retried = retry( - () => attempt, - 5, - 100 milliseconds - ) + val retried = retry(() => attempt, 5, 100 milliseconds) within(3 seconds) { intercept[IllegalStateException] { Await.result(retried, remaining) }.getMessage should ===("6") diff --git a/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala index 83b11d8def..72bb37d7e8 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/BalancingSpec.scala @@ -7,8 +7,8 @@ package akka.routing import java.util.concurrent.atomic.AtomicInteger import scala.concurrent.Await import scala.concurrent.duration._ -import akka.actor.{ Props, Actor } -import akka.testkit.{ TestLatch, ImplicitSender, AkkaSpec } +import 
akka.actor.{ Actor, Props } +import akka.testkit.{ AkkaSpec, ImplicitSender, TestLatch } import akka.actor.ActorRef import org.scalatest.BeforeAndAfterEach import java.net.URLEncoder @@ -32,8 +32,8 @@ object BalancingSpec { } class Parent extends Actor { - val pool = context.actorOf(BalancingPool(2).props(routeeProps = - Props(classOf[Worker], TestLatch(0)(context.system)))) + val pool = + context.actorOf(BalancingPool(2).props(routeeProps = Props(classOf[Worker], TestLatch(0)(context.system)))) def receive = { case msg => pool.forward(msg) @@ -41,8 +41,7 @@ object BalancingSpec { } } -class BalancingSpec extends AkkaSpec( - """ +class BalancingSpec extends AkkaSpec(""" akka.actor.deployment { /balancingPool-2 { router = balancing-pool @@ -97,22 +96,22 @@ class BalancingSpec extends AkkaSpec( "deliver messages in a balancing fashion when defined programatically" in { val latch = TestLatch(poolSize) - val pool = system.actorOf(BalancingPool(poolSize).props(routeeProps = - Props(classOf[Worker], latch)), name = "balancingPool-1") + val pool = system.actorOf(BalancingPool(poolSize).props(routeeProps = Props(classOf[Worker], latch)), + name = "balancingPool-1") test(pool, latch) } "deliver messages in a balancing fashion when defined in config" in { val latch = TestLatch(poolSize) - val pool = system.actorOf(FromConfig().props(routeeProps = - Props(classOf[Worker], latch)), name = "balancingPool-2") + val pool = + system.actorOf(FromConfig().props(routeeProps = Props(classOf[Worker], latch)), name = "balancingPool-2") test(pool, latch) } "deliver messages in a balancing fashion when overridden in config" in { val latch = TestLatch(poolSize) - val pool = system.actorOf(BalancingPool(1).props(routeeProps = - Props(classOf[Worker], latch)), name = "balancingPool-3") + val pool = + system.actorOf(BalancingPool(1).props(routeeProps = Props(classOf[Worker], latch)), name = "balancingPool-3") test(pool, latch) } diff --git 
a/akka-actor-tests/src/test/scala/akka/routing/BroadcastSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/BroadcastSpec.scala index 3873c99d83..764736bae9 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/BroadcastSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/BroadcastSpec.scala @@ -6,8 +6,8 @@ package akka.routing import java.util.concurrent.atomic.AtomicInteger import scala.concurrent.Await -import akka.actor.{ Props, Actor } -import akka.testkit.{ TestLatch, ImplicitSender, DefaultTimeout, AkkaSpec } +import akka.actor.{ Actor, Props } +import akka.testkit.{ AkkaSpec, DefaultTimeout, ImplicitSender, TestLatch } import akka.pattern.ask object BroadcastSpec { diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala index 616df89c66..2aba6a703e 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala @@ -9,9 +9,9 @@ import scala.concurrent.Await import scala.concurrent.duration._ import scala.collection.immutable import akka.ConfigurationException -import akka.actor.{ Props, Deploy, Actor, ActorRef } +import akka.actor.{ Actor, ActorRef, Deploy, Props } import akka.actor.UnstartedCell -import akka.testkit.{ ImplicitSender, DefaultTimeout, AkkaSpec } +import akka.testkit.{ AkkaSpec, DefaultTimeout, ImplicitSender } import akka.pattern.gracefulStop import com.typesafe.config.Config import akka.actor.ActorSystem @@ -100,7 +100,10 @@ object ConfiguredLocalRoutingSpec { } -class ConfiguredLocalRoutingSpec extends AkkaSpec(ConfiguredLocalRoutingSpec.config) with DefaultTimeout with ImplicitSender { +class ConfiguredLocalRoutingSpec + extends AkkaSpec(ConfiguredLocalRoutingSpec.config) + with DefaultTimeout + with ImplicitSender { import ConfiguredLocalRoutingSpec._ def routerConfig(ref: ActorRef): 
akka.routing.RouterConfig = ref match { @@ -141,15 +144,17 @@ class ConfiguredLocalRoutingSpec extends AkkaSpec(ConfiguredLocalRoutingSpec.con } "be overridable in explicit deployment" in { - val actor = system.actorOf(FromConfig.props(routeeProps = Props[EchoProps]). - withDeploy(Deploy(routerConfig = RoundRobinPool(12))), "someOther") + val actor = system.actorOf( + FromConfig.props(routeeProps = Props[EchoProps]).withDeploy(Deploy(routerConfig = RoundRobinPool(12))), + "someOther") routerConfig(actor) should ===(RoundRobinPool(12)) Await.result(gracefulStop(actor, 3 seconds), 3 seconds) } "be overridable in config even with explicit deployment" in { - val actor = system.actorOf(FromConfig.props(routeeProps = Props[EchoProps]). - withDeploy(Deploy(routerConfig = RoundRobinPool(12))), "config") + val actor = system.actorOf( + FromConfig.props(routeeProps = Props[EchoProps]).withDeploy(Deploy(routerConfig = RoundRobinPool(12))), + "config") routerConfig(actor) should ===(RandomPool(nrOfInstances = 4, usePoolDispatcher = true)) Await.result(gracefulStop(actor, 3 seconds), 3 seconds) } @@ -163,7 +168,7 @@ class ConfiguredLocalRoutingSpec extends AkkaSpec(ConfiguredLocalRoutingSpec.con "not get confused when trying to wildcard-configure children" in { system.actorOf(FromConfig.props(routeeProps = Props(classOf[SendRefAtStartup], testActor)), "weird") val recv = Set() ++ (for (_ <- 1 to 3) yield expectMsgType[ActorRef]) - val expc = Set('a', 'b', 'c') map (i => system.actorFor("/user/weird/$" + i)) + val expc = Set('a', 'b', 'c').map(i => system.actorFor("/user/weird/$" + i)) recv should ===(expc) expectNoMessage(1 second) } diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala index f6ea1d585d..6164b0d439 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala +++ 
b/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala @@ -47,7 +47,10 @@ object ConsistentHashingRouterSpec { final case class Msg2(key: Any, data: String) } -class ConsistentHashingRouterSpec extends AkkaSpec(ConsistentHashingRouterSpec.config) with DefaultTimeout with ImplicitSender { +class ConsistentHashingRouterSpec + extends AkkaSpec(ConsistentHashingRouterSpec.config) + with DefaultTimeout + with ImplicitSender { import ConsistentHashingRouterSpec._ implicit val ec = system.dispatcher @@ -80,8 +83,9 @@ class ConsistentHashingRouterSpec extends AkkaSpec(ConsistentHashingRouterSpec.c def hashMapping: ConsistentHashMapping = { case Msg2(key, _) => key } - val router2 = system.actorOf(ConsistentHashingPool(nrOfInstances = 1, hashMapping = hashMapping). - props(Props[Echo]), "router2") + val router2 = + system.actorOf(ConsistentHashingPool(nrOfInstances = 1, hashMapping = hashMapping).props(Props[Echo]), + "router2") router2 ! Msg2("a", "A") val destinationA = expectMsgType[ActorRef] diff --git a/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala index e26d848f45..e798a9670b 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/MetricsBasedResizerSpec.scala @@ -16,7 +16,7 @@ import akka.util.Timeout import scala.concurrent.Await import scala.concurrent.duration._ -import scala.util.{ Try, Random } +import scala.util.{ Random, Try } import akka.pattern.ask object MetricsBasedResizerSpec { @@ -45,10 +45,9 @@ object MetricsBasedResizerSpec { var msgs: Set[TestLatch] = Set() - def mockSend( - await: Boolean, - l: TestLatch = TestLatch(), - routeeIdx: Int = Random.nextInt(routees.length)): Latches = { + def mockSend(await: Boolean, + l: TestLatch = TestLatch(), + routeeIdx: Int = Random.nextInt(routees.length)): Latches = { val target = routees(routeeIdx) val 
first = TestLatch() val latches = Latches(first, l) @@ -135,7 +134,8 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT "stop an underutilizationStreak when fully utilized" in { val resizer = DefaultOptimalSizeExploringResizer() resizer.record = ResizeRecord( - underutilizationStreak = Some(UnderUtilizationStreak(start = LocalDateTime.now.minusHours(1), highestUtilization = 1))) + underutilizationStreak = + Some(UnderUtilizationStreak(start = LocalDateTime.now.minusHours(1), highestUtilization = 1))) val router = TestRouter(routees(2)) router.sendToAll(await = true) @@ -149,8 +149,8 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT "leave the underutilizationStreak start date unchanged when not fully utilized" in { val start: LocalDateTime = LocalDateTime.now.minusHours(1) val resizer = DefaultOptimalSizeExploringResizer() - resizer.record = ResizeRecord( - underutilizationStreak = Some(UnderUtilizationStreak(start = start, highestUtilization = 1))) + resizer.record = + ResizeRecord(underutilizationStreak = Some(UnderUtilizationStreak(start = start, highestUtilization = 1))) resizer.reportMessageCount(routees(2), 0) resizer.record.underutilizationStreak.get.start shouldBe start @@ -200,10 +200,7 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT "not record the performance log when no message is processed" in { val resizer = DefaultOptimalSizeExploringResizer() - resizer.record = ResizeRecord( - totalQueueLength = 2, - messageCount = 2, - checkTime = System.nanoTime()) + resizer.record = ResizeRecord(totalQueueLength = 2, messageCount = 2, checkTime = System.nanoTime()) val router = TestRouter(routees(2)) @@ -259,8 +256,7 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT "update the old performance log entry with updated speed " in { val oldSpeed = 50 - val resizer = DefaultOptimalSizeExploringResizer( - weightOfLatestMetric = 0.5) + 
val resizer = DefaultOptimalSizeExploringResizer(weightOfLatestMetric = 0.5) resizer.performanceLog = Map(2 -> oldSpeed.milliseconds) @@ -295,12 +291,11 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT "MetricsBasedResizer resize" must { "downsize to close to the highest retention when a streak of underutilization started downsizeAfterUnderutilizedFor" in { - val resizer = DefaultOptimalSizeExploringResizer( - downsizeAfterUnderutilizedFor = 72.hours, - downsizeRatio = 0.5) + val resizer = DefaultOptimalSizeExploringResizer(downsizeAfterUnderutilizedFor = 72.hours, downsizeRatio = 0.5) - resizer.record = ResizeRecord(underutilizationStreak = Some( - UnderUtilizationStreak(start = LocalDateTime.now.minusHours(73), highestUtilization = 8))) + resizer.record = ResizeRecord( + underutilizationStreak = + Some(UnderUtilizationStreak(start = LocalDateTime.now.minusHours(73), highestUtilization = 8))) resizer.resize(routees(20)) should be(4 - 20) } @@ -320,9 +315,7 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT } "explore when there is performance log but not go beyond exploreStepSize" in { - val resizer = DefaultOptimalSizeExploringResizer( - exploreStepSize = 0.3, - explorationProbability = 1) + val resizer = DefaultOptimalSizeExploringResizer(exploreStepSize = 0.3, explorationProbability = 1) resizer.performanceLog = Map(11 -> 1.milli, 13 -> 1.millis, 12 -> 3.millis) val exploreSamples = (1 to 100).map(_ => resizer.resize(routees(10))) @@ -341,14 +334,10 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT } "ignore further away sample data when optmizing" in { - val resizer = DefaultOptimalSizeExploringResizer(explorationProbability = 0, numOfAdjacentSizesToConsiderDuringOptimization = 4) - resizer.performanceLog = Map( - 7 -> 5.millis, - 8 -> 2.millis, - 10 -> 3.millis, - 11 -> 4.millis, - 12 -> 3.millis, - 13 -> 1.millis) + val resizer = 
DefaultOptimalSizeExploringResizer(explorationProbability = 0, + numOfAdjacentSizesToConsiderDuringOptimization = 4) + resizer.performanceLog = + Map(7 -> 5.millis, 8 -> 2.millis, 10 -> 3.millis, 11 -> 4.millis, 12 -> 3.millis, 13 -> 1.millis) resizer.resize(routees(10)) should be(-1) } @@ -362,7 +351,8 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT "start with lowerbound pool size" in { val resizer = DefaultOptimalSizeExploringResizer(lowerBound = 2) - val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(Props(new TestLatchingActor))) + val router = + system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(Props(new TestLatchingActor))) val latches = Latches(TestLatch(), TestLatch(0)) router ! latches Await.ready(latches.first, timeout.duration) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala index 2dab6eaa16..e5b25fd228 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RandomSpec.scala @@ -8,8 +8,8 @@ import language.postfixOps import java.util.concurrent.atomic.AtomicInteger import scala.concurrent.Await import scala.concurrent.duration._ -import akka.actor.{ Props, Actor } -import akka.testkit.{ TestLatch, ImplicitSender, DefaultTimeout, AkkaSpec } +import akka.actor.{ Actor, Props } +import akka.testkit.{ AkkaSpec, DefaultTimeout, ImplicitSender, TestLatch } import akka.pattern.ask class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { @@ -54,14 +54,13 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { replies = replies + (i -> 0) } - val actor = system.actorOf(RandomPool(connectionCount).props(routeeProps = - Props(new Actor { - lazy val id = counter.getAndIncrement() - def receive = { - case "hit" => sender() ! 
id - case "end" => doneLatch.countDown() - } - })), name = "random") + val actor = system.actorOf(RandomPool(connectionCount).props(routeeProps = Props(new Actor { + lazy val id = counter.getAndIncrement() + def receive = { + case "hit" => sender() ! id + case "end" => doneLatch.countDown() + } + })), name = "random") for (_ <- 0 until iterationCount) { for (_ <- 0 until connectionCount) { @@ -75,7 +74,7 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { actor ! akka.routing.Broadcast("end") Await.ready(doneLatch, 5 seconds) - replies.values foreach { _ should be > (0) } + replies.values.foreach { _ should be > (0) } replies.values.sum should ===(iterationCount * connectionCount) } diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index 846099359b..6d4c99481b 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -7,7 +7,7 @@ package akka.routing import com.typesafe.config.{ Config, ConfigFactory } import language.postfixOps -import akka.actor.{ ActorSystem, Actor, Props, ActorRef } +import akka.actor.{ Actor, ActorRef, ActorSystem, Props } import akka.testkit._ import akka.testkit.TestEvent._ import scala.concurrent.Await @@ -95,25 +95,19 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with "DefaultResizer" must { "use settings to evaluate capacity" in { - val resizer = DefaultResizer( - lowerBound = 2, - upperBound = 3) + val resizer = DefaultResizer(lowerBound = 2, upperBound = 3) val c1 = resizer.capacity(Vector.empty[Routee]) c1 should ===(2) - val current = Vector( - ActorRefRoutee(system.actorOf(Props[TestActor])), - ActorRefRoutee(system.actorOf(Props[TestActor]))) + val current = + Vector(ActorRefRoutee(system.actorOf(Props[TestActor])), ActorRefRoutee(system.actorOf(Props[TestActor]))) val c2 = 
resizer.capacity(current) c2 should ===(0) } "use settings to evaluate rampUp" in { - val resizer = DefaultResizer( - lowerBound = 2, - upperBound = 10, - rampupRate = 0.2) + val resizer = DefaultResizer(lowerBound = 2, upperBound = 10, rampupRate = 0.2) resizer.rampup(pressure = 9, capacity = 10) should ===(0) resizer.rampup(pressure = 5, capacity = 5) should ===(1) @@ -121,11 +115,7 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with } "use settings to evaluate backoff" in { - val resizer = DefaultResizer( - lowerBound = 2, - upperBound = 10, - backoffThreshold = 0.3, - backoffRate = 0.1) + val resizer = DefaultResizer(lowerBound = 2, upperBound = 10, backoffThreshold = 0.3, backoffRate = 0.1) resizer.backoff(pressure = 10, capacity = 10) should ===(0) resizer.backoff(pressure = 4, capacity = 10) should ===(0) @@ -139,11 +129,8 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with "be possible to define programmatically" in { val latch = new TestLatch(3) - val resizer = DefaultResizer( - lowerBound = 2, - upperBound = 3) - val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)). - props(Props[TestActor])) + val resizer = DefaultResizer(lowerBound = 2, upperBound = 3) + val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(Props[TestActor])) router ! latch router ! 
latch @@ -173,23 +160,21 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with // make sure the pool starts at the expected lower limit and grows to the upper as needed // as influenced by the backlog of blocking pooled actors - val resizer = DefaultResizer( - lowerBound = 3, - upperBound = 5, - rampupRate = 0.1, - backoffRate = 0.0, - pressureThreshold = 1, - messagesPerResize = 1, - backoffThreshold = 0.0) + val resizer = DefaultResizer(lowerBound = 3, + upperBound = 5, + rampupRate = 0.1, + backoffRate = 0.0, + pressureThreshold = 1, + messagesPerResize = 1, + backoffThreshold = 0.0) - val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props( - Props(new Actor { - def receive = { - case d: FiniteDuration => - Thread.sleep(d.dilated.toMillis); sender() ! "done" - case "echo" => sender() ! "reply" - } - }))) + val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(Props(new Actor { + def receive = { + case d: FiniteDuration => + Thread.sleep(d.dilated.toMillis); sender() ! "done" + case "echo" => sender() ! "reply" + } + }))) // first message should create the minimum number of routees router ! 
"echo" @@ -218,22 +203,20 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with } "backoff" in within(10 seconds) { - val resizer = DefaultResizer( - lowerBound = 2, - upperBound = 5, - rampupRate = 1.0, - backoffRate = 1.0, - backoffThreshold = 0.40, - pressureThreshold = 1, - messagesPerResize = 2) + val resizer = DefaultResizer(lowerBound = 2, + upperBound = 5, + rampupRate = 1.0, + backoffRate = 1.0, + backoffThreshold = 0.40, + pressureThreshold = 1, + messagesPerResize = 2) - val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props( - Props(new Actor { - def receive = { - case n: Int if n <= 0 => // done - case n: Int => Thread.sleep((n millis).dilated.toMillis) - } - }))) + val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(Props(new Actor { + def receive = { + case n: Int if n <= 0 => // done + case n: Int => Thread.sleep((n millis).dilated.toMillis) + } + }))) // put some pressure on the router for (_ <- 0 until 15) { diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala index e1188a7e0b..28bd657026 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoundRobinSpec.scala @@ -73,7 +73,7 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { actor ! akka.routing.Broadcast("end") Await.ready(doneLatch, 5 seconds) - replies.values foreach { _ should ===(iterationCount) } + replies.values.foreach { _ should ===(iterationCount) } } "deliver a broadcast message using the !" 
in { @@ -125,7 +125,7 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { var replies: Map[String, Int] = Map.empty.withDefaultValue(0) - val paths = (1 to connectionCount) map { n => + val paths = (1 to connectionCount).map { n => val ref = system.actorOf(Props(new Actor { def receive = { case "hit" => sender() ! self.path.name @@ -145,7 +145,7 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { actor ! akka.routing.Broadcast("end") Await.ready(doneLatch, 5 seconds) - replies.values foreach { _ should ===(iterationCount) } + replies.values.foreach { _ should ===(iterationCount) } } } @@ -181,7 +181,9 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { } }) - (1 to connectionCount) foreach { _ => actor ! childProps } + (1 to connectionCount).foreach { _ => + actor ! childProps + } for (_ <- 1 to iterationCount; _ <- 1 to connectionCount) { val id = Await.result((actor ? "hit").mapTo[String], timeout.duration) @@ -192,7 +194,7 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { actor ! akka.routing.Broadcast("end") expectTerminated(actor) - replies.values foreach { _ should ===(iterationCount) } + replies.values.foreach { _ should ===(iterationCount) } } } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RouteeCreationSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RouteeCreationSpec.scala index 72e7e51911..8422b767c0 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RouteeCreationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RouteeCreationSpec.scala @@ -34,7 +34,7 @@ class RouteeCreationSpec extends AkkaSpec { system.actorOf(RoundRobinPool(N).props(Props(new Actor { context.parent ! 
"one" def receive = { - case "one" => testActor forward "two" + case "one" => testActor.forward("two") } }))) val gotit = receiveWhile(messages = N) { @@ -42,7 +42,7 @@ class RouteeCreationSpec extends AkkaSpec { } expectNoMsg(100.millis) if (gotit.size != N) { - fail(s"got only ${gotit.size} from [${gotit mkString ", "}]") + fail(s"got only ${gotit.size} from [${gotit.mkString(", ")}]") } } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 3b7e0b8101..e093866518 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -86,14 +86,14 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with 2 } } - val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props( - routeeProps = Props[TestActor])) + val router = + system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(routeeProps = Props[TestActor])) watch(router) Await.ready(latch, remainingOrDefault) router ! GetRoutees val routees = expectMsgType[Routees].routees routees.size should ===(2) - routees foreach { _.send(PoisonPill, testActor) } + routees.foreach { _.send(PoisonPill, testActor) } // expect no Terminated expectNoMsg(2.seconds) } @@ -123,8 +123,9 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with 3 } } - val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props( - routeeProps = Props[TestActor]), "router3") + val router = + system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(routeeProps = Props[TestActor]), + "router3") Await.ready(latch, remainingOrDefault) router ! GetRoutees expectMsgType[Routees].routees.size should ===(3) @@ -138,19 +139,19 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with case e => testActor ! 
e; SupervisorStrategy.Escalate //#custom-strategy } - val router = system.actorOf(RoundRobinPool(1, supervisorStrategy = escalator).props( - routeeProps = Props[TestActor])) + val router = + system.actorOf(RoundRobinPool(1, supervisorStrategy = escalator).props(routeeProps = Props[TestActor])) //#supervision router ! GetRoutees - EventFilter[ActorKilledException](occurrences = 1) intercept { + EventFilter[ActorKilledException](occurrences = 1).intercept { expectMsgType[Routees].routees.head.send(Kill, testActor) } expectMsgType[ActorKilledException] - val router2 = system.actorOf(RoundRobinPool(1).withSupervisorStrategy(escalator).props( - routeeProps = Props[TestActor])) + val router2 = + system.actorOf(RoundRobinPool(1).withSupervisorStrategy(escalator).props(routeeProps = Props[TestActor])) router2 ! GetRoutees - EventFilter[ActorKilledException](occurrences = 1) intercept { + EventFilter[ActorKilledException](occurrences = 1).intercept { expectMsgType[Routees].routees.head.send(Kill, testActor) } expectMsgType[ActorKilledException] @@ -160,10 +161,10 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with val escalator = OneForOneStrategy() { case e => testActor ! e; SupervisorStrategy.Escalate } - val router = system.actorOf(FromConfig.withSupervisorStrategy(escalator).props( - routeeProps = Props[TestActor]), "router1") + val router = + system.actorOf(FromConfig.withSupervisorStrategy(escalator).props(routeeProps = Props[TestActor]), "router1") router ! GetRoutees - EventFilter[ActorKilledException](occurrences = 1) intercept { + EventFilter[ActorKilledException](occurrences = 1).intercept { expectMsgType[Routees].routees.head.send(Kill, testActor) } expectMsgType[ActorKilledException] @@ -181,7 +182,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with override def postRestart(reason: Throwable): Unit = testActor ! 
"restarted" })) val router = expectMsgType[ActorRef] - EventFilter[Exception]("die", occurrences = 1) intercept { + EventFilter[Exception]("die", occurrences = 1).intercept { router ! "die" } expectMsgType[Exception].getMessage should ===("die") @@ -194,9 +195,9 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with system.actorOf(Props(new Actor { def receive = { case "start" => - context.actorOf(RoundRobinPool(2).props(routeeProps = Props(new Actor { + (context.actorOf(RoundRobinPool(2).props(routeeProps = Props(new Actor { def receive = { case x => sender() ! x } - }))) ? "hello" pipeTo sender() + }))) ? "hello").pipeTo(sender()) } })) ! "start" expectMsg("hello") @@ -209,7 +210,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with "send message to connection" in { class Actor1 extends Actor { def receive = { - case msg => testActor forward msg + case msg => testActor.forward(msg) } } @@ -231,9 +232,10 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with } "allow external configuration" in { - val sys = ActorSystem("FromConfig", ConfigFactory - .parseString("akka.actor.deployment./routed.router=round-robin-pool") - .withFallback(system.settings.config)) + val sys = ActorSystem("FromConfig", + ConfigFactory + .parseString("akka.actor.deployment./routed.router=round-robin-pool") + .withFallback(system.settings.config)) try { sys.actorOf(FromConfig.props(routeeProps = Props[TestActor]), "routed") } finally { diff --git a/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala index 488be74d94..4d6b31c468 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ScatterGatherFirstCompletedSpec.scala @@ -7,9 +7,9 @@ package akka.routing import 
java.util.concurrent.atomic.AtomicInteger import scala.concurrent.Await import scala.concurrent.duration._ -import akka.actor.{ Props, Actor } +import akka.actor.{ Actor, Props } import akka.pattern.ask -import akka.testkit.{ TestLatch, ImplicitSender, DefaultTimeout, AkkaSpec } +import akka.testkit.{ AkkaSpec, DefaultTimeout, ImplicitSender, TestLatch } import akka.actor.ActorSystem import akka.actor.Status import java.util.concurrent.TimeoutException @@ -23,21 +23,23 @@ object ScatterGatherFirstCompletedSpec { final case class Stop(id: Option[Int] = None) def newActor(id: Int, shudownLatch: Option[TestLatch] = None)(implicit system: ActorSystem) = - system.actorOf(Props(new Actor { - def receive = { - case Stop(None) => context.stop(self) - case Stop(Some(_id)) if (_id == id) => context.stop(self) - case _id: Int if (_id == id) => - case _ => { - Thread sleep 100 * id - sender() ! id + system.actorOf( + Props(new Actor { + def receive = { + case Stop(None) => context.stop(self) + case Stop(Some(_id)) if (_id == id) => context.stop(self) + case _id: Int if (_id == id) => + case _ => { + Thread.sleep(100 * id) + sender() ! 
id + } } - } - override def postStop = { - shudownLatch foreach (_.countDown()) - } - }), "Actor:" + id) + override def postStop = { + shudownLatch.foreach(_.countDown()) + } + }), + "Actor:" + id) } class ScatterGatherFirstCompletedSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { @@ -93,7 +95,8 @@ class ScatterGatherFirstCompletedSpec extends AkkaSpec with DefaultTimeout with "without routees should reply immediately" in { val probe = TestProbe() - val router = system.actorOf(ScatterGatherFirstCompletedPool(nrOfInstances = 0, within = 5.seconds).props(Props.empty)) + val router = + system.actorOf(ScatterGatherFirstCompletedPool(nrOfInstances = 0, within = 5.seconds).props(Props.empty)) router.tell("hello", probe.ref) probe.expectMsgType[Status.Failure](2.seconds).cause.getClass should be(classOf[TimeoutException]) } diff --git a/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala index f072f02ef9..e07c72c232 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/SmallestMailboxSpec.scala @@ -6,11 +6,13 @@ package akka.routing import java.util.concurrent.ConcurrentHashMap import scala.concurrent.Await -import akka.actor.{ Props, Actor } -import akka.testkit.{ TestLatch, ImplicitSender, DefaultTimeout, AkkaSpec } +import akka.actor.{ Actor, Props } +import akka.testkit.{ AkkaSpec, DefaultTimeout, ImplicitSender, TestLatch } -class SmallestMailboxSpec extends AkkaSpec("akka.actor.serialize-messages = off") - with DefaultTimeout with ImplicitSender { +class SmallestMailboxSpec + extends AkkaSpec("akka.actor.serialize-messages = off") + with DefaultTimeout + with ImplicitSender { "smallest mailbox pool" must { diff --git a/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala index 
578afe2808..26d5c4039b 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/TailChoppingSpec.scala @@ -8,8 +8,8 @@ import java.util.concurrent.atomic.AtomicInteger import akka.actor.Status.Failure import scala.concurrent.Await import scala.concurrent.duration._ -import akka.actor.{ ActorRef, Props, Actor, ActorSystem } -import akka.pattern.{ AskTimeoutException, ask } +import akka.actor.{ Actor, ActorRef, ActorSystem, Props } +import akka.pattern.{ ask, AskTimeoutException } import akka.testkit._ object TailChoppingSpec { @@ -22,7 +22,7 @@ object TailChoppingSpec { case "times" => sender() ! times case _ => times += 1 - Thread sleep sleepTime.toMillis + Thread.sleep(sleepTime.toMillis) sender ! "ack" } }), "Actor:" + id) @@ -94,8 +94,8 @@ class TailChoppingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender val actor2 = newActor(4, 500.millis) val probe = TestProbe() val paths = List(actor1, actor2).map(_.path.toString) - val routedActor = system.actorOf(TailChoppingGroup(paths, within = 300.milliseconds, - interval = 50.milliseconds).props()) + val routedActor = + system.actorOf(TailChoppingGroup(paths, within = 300.milliseconds, interval = 50.milliseconds).props()) probe.send(routedActor, "") probe.expectMsgPF() { @@ -112,7 +112,8 @@ class TailChoppingSpec extends AkkaSpec with DefaultTimeout with ImplicitSender val actor2 = newActor(6, 4.seconds) val probe = TestProbe() val paths = List(actor1, actor2).map(_.path.toString) - val routedActor = system.actorOf(TailChoppingGroup(paths, within = 5.seconds, interval = 100.milliseconds).props()) + val routedActor = + system.actorOf(TailChoppingGroup(paths, within = 5.seconds, interval = 100.milliseconds).props()) probe.send(routedActor, "") probe.expectMsg(max = 2.seconds, "ack") diff --git a/akka-actor-tests/src/test/scala/akka/serialization/AsyncSerializeSpec.scala 
b/akka-actor-tests/src/test/scala/akka/serialization/AsyncSerializeSpec.scala index 7f482ad689..4041e6e979 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/AsyncSerializeSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/AsyncSerializeSpec.scala @@ -19,8 +19,7 @@ object AsyncSerializeSpec { case class Message3(str: String) case class Message4(str: String) - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka { actor { serializers { @@ -105,7 +104,7 @@ class AsyncSerializeSpec extends AkkaSpec(AsyncSerializeSpec.config) { } "logs warning if sync methods called" in { - EventFilter.warning(start = "Async serializer called synchronously", occurrences = 1) intercept { + EventFilter.warning(start = "Async serializer called synchronously", occurrences = 1).intercept { ser.serialize(Message1("to async")) } } @@ -115,7 +114,7 @@ class AsyncSerializeSpec extends AkkaSpec(AsyncSerializeSpec.config) { val serializer = ser.findSerializerFor(msg3).asInstanceOf[TestAsyncSerializerCS] - EventFilter.warning(start = "Async serializer called synchronously", occurrences = 2) intercept { + EventFilter.warning(start = "Async serializer called synchronously", occurrences = 2).intercept { val binary = ser.serialize(msg3).get val back = ser.deserialize(binary, serializer.identifier, serializer.manifest(msg3)).get back shouldEqual msg3 diff --git a/akka-actor-tests/src/test/scala/akka/serialization/DisabledJavaSerializerWarningSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/DisabledJavaSerializerWarningSpec.scala index e7c73121a8..9e62794e54 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/DisabledJavaSerializerWarningSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/DisabledJavaSerializerWarningSpec.scala @@ -15,8 +15,7 @@ object DisabledJavaSerializerWarningSpec { final case class Msg(s: String) } -class DisabledJavaSerializerWarningSpec extends AkkaSpec( - """ 
+class DisabledJavaSerializerWarningSpec extends AkkaSpec(""" akka.actor { allow-java-serialization = off serialize-messages = on diff --git a/akka-actor-tests/src/test/scala/akka/serialization/NoVerification.scala b/akka-actor-tests/src/test/scala/akka/serialization/NoVerification.scala index 7d63df4f98..2dadfea482 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/NoVerification.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/NoVerification.scala @@ -10,5 +10,4 @@ import akka.actor.NoSerializationVerificationNeeded * This is currently used in NoSerializationVerificationNeeded test cases in SerializeSpec, * as they needed a serializable class whose top package is not akka. */ -class NoVerification extends NoSerializationVerificationNeeded with java.io.Serializable { -} +class NoVerification extends NoSerializationVerificationNeeded with java.io.Serializable {} diff --git a/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala index 1de7f02180..d12d0f83fa 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/SerializationSetupSpec.scala @@ -50,10 +50,12 @@ object SerializationSetupSpec { val testSerializer = new NoopSerializer val serializationSettings = SerializationSetup { _ => - List( - SerializerDetails("test", programmaticDummySerializer, List(classOf[ProgrammaticDummy]))) + List(SerializerDetails("test", programmaticDummySerializer, List(classOf[ProgrammaticDummy]))) } - val bootstrapSettings = BootstrapSetup(None, Some(ConfigFactory.parseString(""" + val bootstrapSettings = BootstrapSetup(None, + Some( + ConfigFactory.parseString( + """ akka { actor { serialize-messages = off @@ -66,11 +68,13 @@ object SerializationSetupSpec { } } } - """)), None) + """)), + None) val actorSystemSettings = ActorSystemSetup(bootstrapSettings, 
serializationSettings) - val noJavaSerializationSystem = ActorSystem("SerializationSettingsSpec" + "NoJavaSerialization", ConfigFactory.parseString( - """ + val noJavaSerializationSystem = ActorSystem("SerializationSettingsSpec" + "NoJavaSerialization", + ConfigFactory.parseString( + """ akka { actor { allow-java-serialization = off @@ -83,8 +87,8 @@ object SerializationSetupSpec { } -class SerializationSetupSpec extends AkkaSpec( - ActorSystem("SerializationSettingsSpec", SerializationSetupSpec.actorSystemSettings)) { +class SerializationSetupSpec + extends AkkaSpec(ActorSystem("SerializationSettingsSpec", SerializationSetupSpec.actorSystemSettings)) { import SerializationSetupSpec._ @@ -102,8 +106,7 @@ class SerializationSetupSpec extends AkkaSpec( "fail during ActorSystem creation when misconfigured" in { val config = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.loglevel = OFF akka.stdout-loglevel = OFF akka.actor.serializers.doe = "john.is.not.here" @@ -122,11 +125,15 @@ class SerializationSetupSpec extends AkkaSpec( // allow-java-serialization=on to create the SerializationSetup and use that SerializationSetup // in another system with allow-java-serialization=off val addedJavaSerializationSettings = SerializationSetup { _ => - List( - SerializerDetails("test", programmaticDummySerializer, List(classOf[ProgrammaticDummy])), - SerializerDetails("java-manual", new JavaSerializer(system.asInstanceOf[ExtendedActorSystem]), List(classOf[ProgrammaticJavaDummy]))) + List(SerializerDetails("test", programmaticDummySerializer, List(classOf[ProgrammaticDummy])), + SerializerDetails("java-manual", + new JavaSerializer(system.asInstanceOf[ExtendedActorSystem]), + List(classOf[ProgrammaticJavaDummy]))) } - val addedJavaSerializationProgramaticallyButDisabledSettings = BootstrapSetup(None, Some(ConfigFactory.parseString(""" + val addedJavaSerializationProgramaticallyButDisabledSettings = BootstrapSetup(None, + Some( + ConfigFactory.parseString( + 
""" akka { loglevel = debug actor { @@ -135,14 +142,13 @@ class SerializationSetupSpec extends AkkaSpec( warn-about-java-serializer-usage = on } } - """)), None) + """)), + None) val addedJavaSerializationViaSettingsSystem = ActorSystem( "addedJavaSerializationSystem", - ActorSystemSetup( - addedJavaSerializationProgramaticallyButDisabledSettings, - addedJavaSerializationSettings)) + ActorSystemSetup(addedJavaSerializationProgramaticallyButDisabledSettings, addedJavaSerializationSettings)) "Disabling java serialization" should { @@ -152,7 +158,9 @@ class SerializationSetupSpec extends AkkaSpec( }.getMessage should include("akka.actor.allow-java-serialization = off") intercept[DisabledJavaSerializer.JavaSerializationException] { - SerializationExtension(addedJavaSerializationViaSettingsSystem).findSerializerFor(new ProgrammaticJavaDummy).toBinary(new ProgrammaticJavaDummy) + SerializationExtension(addedJavaSerializationViaSettingsSystem) + .findSerializerFor(new ProgrammaticJavaDummy) + .toBinary(new ProgrammaticJavaDummy) } } diff --git a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala index 2a0e1cffd7..fa34eb56e3 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala @@ -132,17 +132,16 @@ object SerializationTests { } """ - val systemMessageClasses = List[Class[_]]( - classOf[Create], - classOf[Recreate], - classOf[Suspend], - classOf[Resume], - classOf[Terminate], - classOf[Supervise], - classOf[Watch], - classOf[Unwatch], - classOf[Failed], - NoMessage.getClass) + val systemMessageClasses = List[Class[_]](classOf[Create], + classOf[Recreate], + classOf[Suspend], + classOf[Resume], + classOf[Terminate], + classOf[Supervise], + classOf[Watch], + classOf[Unwatch], + classOf[Failed], + NoMessage.getClass) } class SerializeSpec extends 
AkkaSpec(SerializationTests.serializeConf) { @@ -157,8 +156,10 @@ class SerializeSpec extends AkkaSpec(SerializationTests.serializeConf) { "Serialization" must { "have correct bindings" in { - ser.bindings.collectFirst { case (c, s) if c == address.getClass => s.getClass } should ===(Some(classOf[JavaSerializer])) - ser.bindings.collectFirst { case (c, s) if c == classOf[PlainMessage] => s.getClass } should ===(Some(classOf[NoopSerializer])) + ser.bindings.collectFirst { case (c, s) if c == address.getClass => s.getClass } should ===( + Some(classOf[JavaSerializer])) + ser.bindings.collectFirst { case (c, s) if c == classOf[PlainMessage] => s.getClass } should ===( + Some(classOf[NoopSerializer])) } "serialize Address" in { @@ -178,7 +179,8 @@ class SerializeSpec extends AkkaSpec(SerializationTests.serializeConf) { val a = system.actorOf(Props(new Actor { def receive = { case o: ObjectOutputStream => - try o.writeObject(this) catch { case _: NotSerializableException => testActor ! "pass" } + try o.writeObject(this) + catch { case _: NotSerializableException => testActor ! "pass" } } })) a ! 
new ObjectOutputStream(new ByteArrayOutputStream()) @@ -234,9 +236,9 @@ class SerializeSpec extends AkkaSpec(SerializationTests.serializeConf) { } "give warning for message with several bindings" in { - EventFilter.warning(start = "Multiple serializers found", occurrences = 1) intercept { - ser.serializerFor(classOf[BothTestSerializableAndTestSerializable2]).getClass should ( - be(classOf[NoopSerializer]) or be(classOf[NoopSerializer2])) + EventFilter.warning(start = "Multiple serializers found", occurrences = 1).intercept { + ser.serializerFor(classOf[BothTestSerializableAndTestSerializable2]).getClass should (be( + classOf[NoopSerializer]).or(be(classOf[NoopSerializer2]))) } } @@ -261,14 +263,15 @@ class SerializeSpec extends AkkaSpec(SerializationTests.serializeConf) { "use ByteArraySerializer for byte arrays" in { val byteSerializer = ser.serializerFor(classOf[Array[Byte]]) - byteSerializer.getClass should be theSameInstanceAs classOf[ByteArraySerializer] + (byteSerializer.getClass should be).theSameInstanceAs(classOf[ByteArraySerializer]) for (a <- Seq("foo".getBytes("UTF-8"), null: Array[Byte], Array[Byte]())) - byteSerializer.fromBinary(byteSerializer.toBinary(a)) should be theSameInstanceAs a + (byteSerializer.fromBinary(byteSerializer.toBinary(a)) should be).theSameInstanceAs(a) intercept[IllegalArgumentException] { byteSerializer.toBinary("pigdog") - }.getMessage should ===(s"${classOf[ByteArraySerializer].getName} only serializes byte arrays, not [java.lang.String]") + }.getMessage should ===( + s"${classOf[ByteArraySerializer].getName} only serializes byte arrays, not [java.lang.String]") } "support ByteBuffer serialization for byte arrays" in { @@ -286,11 +289,12 @@ class SerializeSpec extends AkkaSpec(SerializationTests.serializeConf) { intercept[IllegalArgumentException] { byteSerializer.toBinary("pigdog", byteBuffer) - }.getMessage should ===(s"${classOf[ByteArraySerializer].getName} only serializes byte arrays, not [java.lang.String]") + 
}.getMessage should ===( + s"${classOf[ByteArraySerializer].getName} only serializes byte arrays, not [java.lang.String]") } "log warning if non-Akka serializer is configured for Akka message" in { - EventFilter.warning(pattern = ".*not implemented by Akka.*", occurrences = 1) intercept { + EventFilter.warning(pattern = ".*not implemented by Akka.*", occurrences = 1).intercept { ser.serialize(new Other).get } } @@ -308,10 +312,10 @@ class VerifySerializabilitySpec extends AkkaSpec(SerializationTests.verifySerial "verify creators" in { val a = system.actorOf(Props[FooActor]) - system stop a + system.stop(a) val b = system.actorOf(Props(new FooAbstractActor)) - system stop b + system.stop(b) intercept[IllegalArgumentException] { val d = system.actorOf(Props(new NonSerializableActor(system))) @@ -323,10 +327,12 @@ class VerifySerializabilitySpec extends AkkaSpec(SerializationTests.verifySerial val a = system.actorOf(Props[FooActor]) Await.result(a ? "pigdog", timeout.duration) should ===("pigdog") - EventFilter[SerializationCheckFailedException](start = "Failed to serialize and deserialize message of type java.lang.Object", occurrences = 1) intercept { + EventFilter[SerializationCheckFailedException]( + start = "Failed to serialize and deserialize message of type java.lang.Object", + occurrences = 1).intercept { a ! 
(new AnyRef) } - system stop a + system.stop(a) } } @@ -381,74 +387,64 @@ class SerializationCompatibilitySpec extends AkkaSpec(SerializationTests.mostlyR "be preserved for the Create SystemMessage" in { // Using null as the cause to avoid a large serialized message and JDK differences - verify( - Create(Some(null)), - "aced00057372001b616b6b612e64697370617463682e7379736d73672e4372656174650000000000" + - "0000010200014c00076661696c75726574000e4c7363616c612f4f7074696f6e3b78707372000a73" + - "63616c612e536f6d651122f2695ea18b740200014c0001787400124c6a6176612f6c616e672f4f62" + - "6a6563743b7872000c7363616c612e4f7074696f6efe6937fddb0e6674020000787070") + verify(Create(Some(null)), + "aced00057372001b616b6b612e64697370617463682e7379736d73672e4372656174650000000000" + + "0000010200014c00076661696c75726574000e4c7363616c612f4f7074696f6e3b78707372000a73" + + "63616c612e536f6d651122f2695ea18b740200014c0001787400124c6a6176612f6c616e672f4f62" + + "6a6563743b7872000c7363616c612e4f7074696f6efe6937fddb0e6674020000787070") } "be preserved for the Recreate SystemMessage" in { - verify( - Recreate(null), - "aced00057372001d616b6b612e64697370617463682e7379736d73672e5265637265617465000000" + - "00000000010200014c000563617573657400154c6a6176612f6c616e672f5468726f7761626c653b" + - "787070") + verify(Recreate(null), + "aced00057372001d616b6b612e64697370617463682e7379736d73672e5265637265617465000000" + + "00000000010200014c000563617573657400154c6a6176612f6c616e672f5468726f7761626c653b" + + "787070") } "be preserved for the Suspend SystemMessage" in { - verify( - Suspend(), - "aced00057372001c616b6b612e64697370617463682e7379736d73672e53757370656e6400000000" + - "000000010200007870") + verify(Suspend(), + "aced00057372001c616b6b612e64697370617463682e7379736d73672e53757370656e6400000000" + + "000000010200007870") } "be preserved for the Resume SystemMessage" in { - verify( - Resume(null), - "aced00057372001b616b6b612e64697370617463682e7379736d73672e526573756d650000000000" + - 
"0000010200014c000f63617573656442794661696c7572657400154c6a6176612f6c616e672f5468" + - "726f7761626c653b787070") + verify(Resume(null), + "aced00057372001b616b6b612e64697370617463682e7379736d73672e526573756d650000000000" + + "0000010200014c000f63617573656442794661696c7572657400154c6a6176612f6c616e672f5468" + + "726f7761626c653b787070") } "be preserved for the Terminate SystemMessage" in { - verify( - Terminate(), - "aced00057372001e616b6b612e64697370617463682e7379736d73672e5465726d696e6174650000" + - "0000000000010200007870") + verify(Terminate(), + "aced00057372001e616b6b612e64697370617463682e7379736d73672e5465726d696e6174650000" + + "0000000000010200007870") } "be preserved for the Supervise SystemMessage" in { - verify( - Supervise(null, true), - "aced00057372001e616b6b612e64697370617463682e7379736d73672e5375706572766973650000" + - "0000000000010200025a00056173796e634c00056368696c647400154c616b6b612f6163746f722f" + - "4163746f725265663b78700170") + verify(Supervise(null, true), + "aced00057372001e616b6b612e64697370617463682e7379736d73672e5375706572766973650000" + + "0000000000010200025a00056173796e634c00056368696c647400154c616b6b612f6163746f722f" + + "4163746f725265663b78700170") } "be preserved for the Watch SystemMessage" in { - verify( - Watch(null, null), - "aced00057372001a616b6b612e64697370617463682e7379736d73672e5761746368000000000000" + - "00010200024c00077761746368656574001d4c616b6b612f6163746f722f496e7465726e616c4163" + - "746f725265663b4c00077761746368657271007e000178707070") + verify(Watch(null, null), + "aced00057372001a616b6b612e64697370617463682e7379736d73672e5761746368000000000000" + + "00010200024c00077761746368656574001d4c616b6b612f6163746f722f496e7465726e616c4163" + + "746f725265663b4c00077761746368657271007e000178707070") } "be preserved for the Unwatch SystemMessage" in { - verify( - Unwatch(null, null), - "aced00057372001c616b6b612e64697370617463682e7379736d73672e556e776174636800000000" + - 
"000000010200024c0007776174636865657400154c616b6b612f6163746f722f4163746f72526566" + - "3b4c00077761746368657271007e000178707070") + verify(Unwatch(null, null), + "aced00057372001c616b6b612e64697370617463682e7379736d73672e556e776174636800000000" + + "000000010200024c0007776174636865657400154c616b6b612f6163746f722f4163746f72526566" + + "3b4c00077761746368657271007e000178707070") } "be preserved for the NoMessage SystemMessage" in { - verify( - NoMessage, - "aced00057372001f616b6b612e64697370617463682e7379736d73672e4e6f4d6573736167652400" + - "000000000000010200007870") + verify(NoMessage, + "aced00057372001f616b6b612e64697370617463682e7379736d73672e4e6f4d6573736167652400" + + "000000000000010200007870") } "be preserved for the Failed SystemMessage" in { // Using null as the cause to avoid a large serialized message and JDK differences - verify( - Failed(null, cause = null, uid = 0), - "aced00057372001b616b6b612e64697370617463682e7379736d73672e4661696c65640000000000" + - "0000010200034900037569644c000563617573657400154c6a6176612f6c616e672f5468726f7761" + - "626c653b4c00056368696c647400154c616b6b612f6163746f722f4163746f725265663b78700000" + - "00007070") + verify(Failed(null, cause = null, uid = 0), + "aced00057372001b616b6b612e64697370617463682e7379736d73672e4661696c65640000000000" + + "0000010200034900037569644c000563617573657400154c6a6176612f6c616e672f5468726f7761" + + "626c653b4c00056368696c647400154c616b6b612f6163746f722f4163746f725265663b78700000" + + "00007070") } } @@ -462,7 +458,7 @@ class OverriddenSystemMessageSerializationSpec extends AkkaSpec(SerializationTes "Overridden SystemMessage serialization" must { "resolve to a single serializer" in { - EventFilter.warning(start = "Multiple serializers found", occurrences = 0) intercept { + EventFilter.warning(start = "Multiple serializers found", occurrences = 0).intercept { for (smc <- systemMessageClasses) { ser.serializerFor(smc).getClass should ===(classOf[NoopSerializer]) } @@ -472,8 +468,8 @@ class 
OverriddenSystemMessageSerializationSpec extends AkkaSpec(SerializationTes } } -class DefaultSerializationWarningSpec extends AkkaSpec( - ConfigFactory.parseString("akka.actor.warn-about-java-serializer-usage = on")) { +class DefaultSerializationWarningSpec + extends AkkaSpec(ConfigFactory.parseString("akka.actor.warn-about-java-serializer-usage = on")) { val ser = SerializationExtension(system) val messagePrefix = "Using the default Java serializer for class" @@ -481,13 +477,13 @@ class DefaultSerializationWarningSpec extends AkkaSpec( "Using the default Java serializer" must { "log a warning when serializing classes outside of java.lang package" in { - EventFilter.warning(start = messagePrefix, occurrences = 1) intercept { + EventFilter.warning(start = messagePrefix, occurrences = 1).intercept { ser.serializerFor(classOf[java.math.BigDecimal]) } } "not log warning when serializing classes from java.lang package" in { - EventFilter.warning(start = messagePrefix, occurrences = 0) intercept { + EventFilter.warning(start = messagePrefix, occurrences = 0).intercept { ser.serializerFor(classOf[java.lang.String]) } } @@ -496,10 +492,11 @@ class DefaultSerializationWarningSpec extends AkkaSpec( } -class NoVerificationWarningSpec extends AkkaSpec( - ConfigFactory.parseString( - "akka.actor.warn-about-java-serializer-usage = on\n" + - "akka.actor.warn-on-no-serialization-verification = on")) { +class NoVerificationWarningSpec + extends AkkaSpec( + ConfigFactory.parseString( + "akka.actor.warn-about-java-serializer-usage = on\n" + + "akka.actor.warn-on-no-serialization-verification = on")) { val ser = SerializationExtension(system) val messagePrefix = "Using the default Java serializer for class" @@ -507,23 +504,24 @@ class NoVerificationWarningSpec extends AkkaSpec( "When warn-on-no-serialization-verification = on, using the default Java serializer" must { "log a warning on classes without extending NoSerializationVerificationNeeded" in { - EventFilter.warning(start = 
messagePrefix, occurrences = 1) intercept { + EventFilter.warning(start = messagePrefix, occurrences = 1).intercept { ser.serializerFor(classOf[java.math.BigDecimal]) } } "still log warning on classes extending NoSerializationVerificationNeeded" in { - EventFilter.warning(start = messagePrefix, occurrences = 1) intercept { + EventFilter.warning(start = messagePrefix, occurrences = 1).intercept { ser.serializerFor(classOf[NoVerification]) } } } } -class NoVerificationWarningOffSpec extends AkkaSpec( - ConfigFactory.parseString( - "akka.actor.warn-about-java-serializer-usage = on\n" + - "akka.actor.warn-on-no-serialization-verification = off")) { +class NoVerificationWarningOffSpec + extends AkkaSpec( + ConfigFactory.parseString( + "akka.actor.warn-about-java-serializer-usage = on\n" + + "akka.actor.warn-on-no-serialization-verification = off")) { val ser = SerializationExtension(system) val messagePrefix = "Using the default Java serializer for class" @@ -531,13 +529,13 @@ class NoVerificationWarningOffSpec extends AkkaSpec( "When warn-on-no-serialization-verification = off, using the default Java serializer" must { "log a warning on classes without extending NoSerializationVerificationNeeded" in { - EventFilter.warning(start = messagePrefix, occurrences = 1) intercept { + EventFilter.warning(start = messagePrefix, occurrences = 1).intercept { ser.serializerFor(classOf[java.math.BigDecimal]) } } "not log warning on classes extending NoSerializationVerificationNeeded" in { - EventFilter.warning(start = messagePrefix, occurrences = 0) intercept { + EventFilter.warning(start = messagePrefix, occurrences = 0).intercept { ser.serializerFor(classOf[NoVerification]) } } diff --git a/akka-actor-tests/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala b/akka-actor-tests/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala index 64fbd934d7..c36920e1e2 100644 --- a/akka-actor-tests/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala 
+++ b/akka-actor-tests/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala @@ -21,15 +21,16 @@ object CallingThreadDispatcherModelSpec { type = PinnedDispatcher } """ + - // use unique dispatcher id for each test, since MessageDispatcherInterceptor holds state - (for (n <- 1 to 30) yield """ + // use unique dispatcher id for each test, since MessageDispatcherInterceptor holds state + (for (n <- 1 to 30) + yield """ test-calling-thread-%s { type = "akka.testkit.CallingThreadDispatcherModelSpec$CallingThreadDispatcherInterceptorConfigurator" }""".format(n)).mkString } class CallingThreadDispatcherInterceptorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) - extends MessageDispatcherConfigurator(config, prerequisites) { + extends MessageDispatcherConfigurator(config, prerequisites) { private val instance: MessageDispatcher = new CallingThreadDispatcher(this) with MessageDispatcherInterceptor { @@ -49,7 +50,9 @@ class CallingThreadDispatcherModelSpec extends ActorModelSpec(CallingThreadDispa override def interceptedDispatcher(): MessageDispatcherInterceptor = { // use new id for each test, since the MessageDispatcherInterceptor holds state - system.dispatchers.lookup("test-calling-thread-" + dispatcherCount.incrementAndGet()).asInstanceOf[MessageDispatcherInterceptor] + system.dispatchers + .lookup("test-calling-thread-" + dispatcherCount.incrementAndGet()) + .asInstanceOf[MessageDispatcherInterceptor] } override def dispatcherType = "Calling Thread Dispatcher" diff --git a/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala b/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala index aaa685277a..304b51d99e 100644 --- a/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/BoundedBlockingQueueSpec.scala @@ -23,11 +23,11 @@ import scala.concurrent.{ Await, ExecutionContext, ExecutionContextExecutor, Fut import 
scala.util.control.Exception class BoundedBlockingQueueSpec - extends WordSpec - with Matchers - with QueueSetupHelper - with CustomContainsMatcher - with BlockingHelpers { + extends WordSpec + with Matchers + with QueueSetupHelper + with CustomContainsMatcher + with BlockingHelpers { import QueueTestEvents._ @@ -86,7 +86,7 @@ class BoundedBlockingQueueSpec val TestContext(queue, events, _, _, _, _) = newBoundedBlockingQueue(2) queue.put("Hello") queue.put("World") - events should contain inOrder (offer("Hello"), offer("World")) + (events should contain).inOrder(offer("Hello"), offer("World")) } "signal notEmpty when an element is inserted" in { @@ -102,7 +102,7 @@ class BoundedBlockingQueueSpec mustBlockFor(100 milliseconds) { queue.put("2") } - events should contain inOrder (offer("1"), awaitNotFull) + (events should contain).inOrder(offer("1"), awaitNotFull) events should not contain offer("2") } @@ -118,7 +118,7 @@ class BoundedBlockingQueueSpec } Await.result(f, 3 seconds) - events should contain inOrder (offer("a"), poll, offer("b")) + (events should contain).inOrder(offer("a"), poll, offer("b")) } "check the backing queue size before offering" in { @@ -147,7 +147,7 @@ class BoundedBlockingQueueSpec queue.put("Hello") queue.take() - events should contain inOrder (offer("Hello"), poll) + (events should contain).inOrder(offer("Hello"), poll) } "signal notFull when taking an element" in { @@ -177,7 +177,7 @@ class BoundedBlockingQueueSpec } Await.ready(f, 3 seconds) - events should contain inOrder (awaitNotEmpty, offer("a"), poll) + (events should contain).inOrder(awaitNotEmpty, offer("a"), poll) } "check the backing queue size before polling" in { @@ -220,7 +220,7 @@ class BoundedBlockingQueueSpec val TestContext(queue, events, _, _, _, _) = newBoundedBlockingQueue(2) queue.offer("Hello") should equal(true) queue.offer("World") should equal(true) - events should contain inOrder (offer("Hello"), offer("World")) + (events should contain).inOrder(offer("Hello"), 
offer("World")) } "signal notEmpty when the call succeeds" in { @@ -278,7 +278,7 @@ class BoundedBlockingQueueSpec queue.take() } Await.result(f, 3 seconds) should equal(true) - events should contain inOrder (awaitNotFull, signalNotFull, offer("World")) + (events should contain).inOrder(awaitNotFull, signalNotFull, offer("World")) } "check the backing queue size before offering" in { @@ -385,7 +385,7 @@ class BoundedBlockingQueueSpec queue.put("Hello") } Await.result(f, 3 seconds) should equal("Hello") - events should contain inOrder (awaitNotEmpty, signalNotEmpty, poll) + (events should contain).inOrder(awaitNotEmpty, signalNotEmpty, poll) } } @@ -564,7 +564,7 @@ class BoundedBlockingQueueSpec queue.put("World") queue.retainAll(elems.asJava) should equal(false) - queue.toArray() should contain allOf ("Hello", "World") + (queue.toArray() should contain).allOf("Hello", "World") } } @@ -624,11 +624,9 @@ trait CustomContainsMatcher { } def matchResult(success: Boolean): MatchResult = - MatchResult( - success, - s"""$left did not contain all of $right in sequence""", - s"""$left contains all of $right in sequence""" - ) + MatchResult(success, + s"""$left did not contain all of $right in sequence""", + s"""$left contains all of $right in sequence""") attemptMatch(left.toList, right) } @@ -706,13 +704,17 @@ trait QueueSetupHelper { import akka.util.QueueTestEvents._ - case class TestContext(queue: BoundedBlockingQueue[String], events: mutable.Buffer[QueueEvent], notEmpty: TestCondition, notFull: TestCondition, lock: ReentrantLock, backingQueue: util.Queue[String]) + case class TestContext(queue: BoundedBlockingQueue[String], + events: mutable.Buffer[QueueEvent], + notEmpty: TestCondition, + notFull: TestCondition, + lock: ReentrantLock, + backingQueue: util.Queue[String]) /** * Backing queue that records all poll and offer calls in `events` */ - class TestBackingQueue(events: mutable.Buffer[QueueEvent]) - extends util.LinkedList[String] { + class TestBackingQueue(events: 
mutable.Buffer[QueueEvent]) extends util.LinkedList[String] { override def poll(): String = { events += Poll() @@ -733,8 +735,11 @@ trait QueueSetupHelper { /** * Reentrant lock condition that records when the condition is signaled or `await`ed. */ - class TestCondition(events: mutable.Buffer[QueueEvent], condition: Condition, signalEvent: QueueEvent, awaitEvent: QueueEvent) - extends Condition { + class TestCondition(events: mutable.Buffer[QueueEvent], + condition: Condition, + signalEvent: QueueEvent, + awaitEvent: QueueEvent) + extends Condition { case class Manual(waitTime: Long = 0, waitingThread: Option[Thread] = None) @@ -744,13 +749,12 @@ trait QueueSetupHelper { waiting match { case Some(manual) => val newWaitTime = manual.waitTime - timespan.toNanos - waiting = - if (newWaitTime <= 0 && manual.waitingThread.isDefined) { - manual.waitingThread.get.interrupt() - Some(Manual(newWaitTime, None)) - } else { - Some(manual.copy(waitTime = newWaitTime)) - } + waiting = if (newWaitTime <= 0 && manual.waitingThread.isDefined) { + manual.waitingThread.get.interrupt() + Some(Manual(newWaitTime, None)) + } else { + Some(manual.copy(waitTime = newWaitTime)) + } case None => sys.error("Called advance time but hasn't enabled manualTimeControl") @@ -809,17 +813,14 @@ trait QueueSetupHelper { /** * Class under test with the necessary backing queue, lock and conditions injected. 
*/ - class TestBoundedBlockingQueue() - extends BoundedBlockingQueue[String](maxCapacity, backingQueue) { + class TestBoundedBlockingQueue() extends BoundedBlockingQueue[String](maxCapacity, backingQueue) { override def createLock(): ReentrantLock = realLock override def createNotEmptyCondition(): Condition = wrappedNotEmpty override def createNotFullCondition(): Condition = wrappedNotFull } - TestContext(new TestBoundedBlockingQueue(), events, - wrappedNotEmpty, wrappedNotFull, realLock, - backingQueue) + TestContext(new TestBoundedBlockingQueue(), events, wrappedNotEmpty, wrappedNotFull, realLock, backingQueue) } } @@ -829,8 +830,9 @@ trait QueueSetupHelper { */ object DefaultExecutionContext { implicit val ec: ExecutionContextExecutor = ExecutionContext.fromExecutor(new Executor { - override def execute(command: Runnable): Unit = new Thread() { - override def run(): Unit = command.run() - }.start() + override def execute(command: Runnable): Unit = + new Thread() { + override def run(): Unit = command.run() + }.start() }) } diff --git a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala index 9fbb586040..20f7d1f5f2 100644 --- a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala @@ -23,12 +23,13 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { implicit val betterGeneratorDrivenConfig = PropertyCheckConfig().copy(minSuccessful = 1000) - def genSimpleByteString(min: Int, max: Int) = for { - n <- Gen.choose(min, max) - b <- Gen.containerOfN[Array, Byte](n, arbitrary[Byte]) - from <- Gen.choose(0, b.length) - until <- Gen.choose(from, from max b.length) - } yield ByteString(b).slice(from, until) + def genSimpleByteString(min: Int, max: Int) = + for { + n <- Gen.choose(min, max) + b <- Gen.containerOfN[Array, Byte](n, arbitrary[Byte]) + from <- Gen.choose(0, b.length) + until <- 
Gen.choose(from, from max b.length) + } yield ByteString(b).slice(from, until) implicit val arbitraryByteString: Arbitrary[ByteString] = Arbitrary { Gen.sized { s => @@ -92,20 +93,44 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { val os = new ByteArrayOutputStream val bos = new ObjectOutputStream(os) bos.writeObject(obj) - String valueOf encodeHex(os.toByteArray) + String.valueOf(encodeHex(os.toByteArray)) } - val arbitraryByteArray: Arbitrary[Array[Byte]] = Arbitrary { Gen.sized { n => Gen.containerOfN[Array, Byte](n, arbitrary[Byte]) } } + val arbitraryByteArray: Arbitrary[Array[Byte]] = Arbitrary { + Gen.sized { n => + Gen.containerOfN[Array, Byte](n, arbitrary[Byte]) + } + } implicit val arbitraryByteArraySlice: Arbitrary[ArraySlice[Byte]] = arbSlice(arbitraryByteArray) - val arbitraryShortArray: Arbitrary[Array[Short]] = Arbitrary { Gen.sized { n => Gen.containerOfN[Array, Short](n, arbitrary[Short]) } } + val arbitraryShortArray: Arbitrary[Array[Short]] = Arbitrary { + Gen.sized { n => + Gen.containerOfN[Array, Short](n, arbitrary[Short]) + } + } implicit val arbitraryShortArraySlice: Arbitrary[ArraySlice[Short]] = arbSlice(arbitraryShortArray) - val arbitraryIntArray: Arbitrary[Array[Int]] = Arbitrary { Gen.sized { n => Gen.containerOfN[Array, Int](n, arbitrary[Int]) } } + val arbitraryIntArray: Arbitrary[Array[Int]] = Arbitrary { + Gen.sized { n => + Gen.containerOfN[Array, Int](n, arbitrary[Int]) + } + } implicit val arbitraryIntArraySlice: Arbitrary[ArraySlice[Int]] = arbSlice(arbitraryIntArray) - val arbitraryLongArray: Arbitrary[Array[Long]] = Arbitrary { Gen.sized { n => Gen.containerOfN[Array, Long](n, arbitrary[Long]) } } + val arbitraryLongArray: Arbitrary[Array[Long]] = Arbitrary { + Gen.sized { n => + Gen.containerOfN[Array, Long](n, arbitrary[Long]) + } + } implicit val arbitraryLongArraySlice: Arbitrary[ArraySlice[Long]] = arbSlice(arbitraryLongArray) - val arbitraryFloatArray: Arbitrary[Array[Float]] = Arbitrary { 
Gen.sized { n => Gen.containerOfN[Array, Float](n, arbitrary[Float]) } } + val arbitraryFloatArray: Arbitrary[Array[Float]] = Arbitrary { + Gen.sized { n => + Gen.containerOfN[Array, Float](n, arbitrary[Float]) + } + } implicit val arbitraryFloatArraySlice: Arbitrary[ArraySlice[Float]] = arbSlice(arbitraryFloatArray) - val arbitraryDoubleArray: Arbitrary[Array[Double]] = Arbitrary { Gen.sized { n => Gen.containerOfN[Array, Double](n, arbitrary[Double]) } } + val arbitraryDoubleArray: Arbitrary[Array[Double]] = Arbitrary { + Gen.sized { n => + Gen.containerOfN[Array, Double](n, arbitrary[Double]) + } + } implicit val arbitraryDoubleArraySlice: Arbitrary[ArraySlice[Double]] = arbSlice(arbitraryDoubleArray) type ArrayNumBytes[A] = (Array[A], Int) @@ -136,14 +161,15 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { val bsIterator = bs.iterator val vecIterator = Vector(bs: _*).iterator.buffered (body(bsIterator) == body(vecIterator)) && - (!strict || (bsIterator.toSeq == vecIterator.toSeq)) + (!strict || (bsIterator.toSeq == vecIterator.toSeq)) } - def likeVecIts(a: ByteString, b: ByteString)(body: (BufferedIterator[Byte], BufferedIterator[Byte]) => Any, strict: Boolean = true): Boolean = { + def likeVecIts(a: ByteString, b: ByteString)(body: (BufferedIterator[Byte], BufferedIterator[Byte]) => Any, + strict: Boolean = true): Boolean = { val (bsAIt, bsBIt) = (a.iterator, b.iterator) val (vecAIt, vecBIt) = (Vector(a: _*).iterator.buffered, Vector(b: _*).iterator.buffered) (body(bsAIt, bsBIt) == body(vecAIt, vecBIt)) && - (!strict || (bsAIt.toSeq -> bsBIt.toSeq) == (vecAIt.toSeq -> vecBIt.toSeq)) + (!strict || (bsAIt.toSeq -> bsBIt.toSeq) == (vecAIt.toSeq -> vecBIt.toSeq)) } def likeVecBld(body: Builder[Byte, _] => Unit): Boolean = { @@ -209,8 +235,8 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { for (i <- 0 until a) decoded(i) = input.getFloat(byteOrder) input.getFloats(decoded, a, b - a)(byteOrder) for (i <- b until n) 
decoded(i) = input.getFloat(byteOrder) - ((decoded.toSeq map floatToRawIntBits) == (reference.toSeq map floatToRawIntBits)) && - (input.toSeq == bytes.drop(n * elemSize)) + ((decoded.toSeq.map(floatToRawIntBits)) == (reference.toSeq.map(floatToRawIntBits))) && + (input.toSeq == bytes.drop(n * elemSize)) } def testDoubleDecoding(slice: ByteStringSlice, byteOrder: ByteOrder): Boolean = { @@ -224,8 +250,8 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { for (i <- 0 until a) decoded(i) = input.getDouble(byteOrder) input.getDoubles(decoded, a, b - a)(byteOrder) for (i <- b until n) decoded(i) = input.getDouble(byteOrder) - ((decoded.toSeq map doubleToRawLongBits) == (reference.toSeq map doubleToRawLongBits)) && - (input.toSeq == bytes.drop(n * elemSize)) + ((decoded.toSeq.map(doubleToRawLongBits)) == (reference.toSeq.map(doubleToRawLongBits))) && + (input.toSeq == bytes.drop(n * elemSize)) } def testShortEncoding(slice: ArraySlice[Short], byteOrder: ByteOrder): Boolean = { @@ -273,10 +299,12 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { val builder = ByteString.newBuilder for (i <- 0 until data.length) builder.putLongPart(data(i), nBytes)(byteOrder) - reference.zipWithIndex.collect({ // Since there is no partial put on LongBuffer, we need to collect only the interesting bytes - case (r, i) if byteOrder == ByteOrder.LITTLE_ENDIAN && i % elemSize < nBytes => r - case (r, i) if byteOrder == ByteOrder.BIG_ENDIAN && i % elemSize >= (elemSize - nBytes) => r - }).toSeq == builder.result + reference.zipWithIndex + .collect({ // Since there is no partial put on LongBuffer, we need to collect only the interesting bytes + case (r, i) if byteOrder == ByteOrder.LITTLE_ENDIAN && i % elemSize < nBytes => r + case (r, i) if byteOrder == ByteOrder.BIG_ENDIAN && i % elemSize >= (elemSize - nBytes) => r + }) + .toSeq == builder.result } def testFloatEncoding(slice: ArraySlice[Float], byteOrder: ByteOrder): Boolean = { @@ -403,24 +431,24 @@ 
class ByteStringSpec extends WordSpec with Matchers with Checkers { ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).drop(1) should ===(ByteString("")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).drop(Int.MaxValue) should ===(ByteString("")) - ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).drop(Int.MinValue) should ===(ByteString("a")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).drop(Int.MinValue) should ===( + ByteString("a")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).drop(-1) should ===(ByteString("a")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).drop(0) should ===(ByteString("a")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).drop(1) should ===(ByteString("")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).drop(2) should ===(ByteString("")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).drop(Int.MaxValue) should ===(ByteString("")) - ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).drop(Int.MinValue) should ===(ByteString("a")) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).drop(Int.MinValue) should ===( + ByteString("a")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).drop(-1) should ===(ByteString("a")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).drop(0) should ===(ByteString("a")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).drop(1) should ===(ByteString("")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).drop(2) should ===(ByteString("")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).drop(Int.MaxValue) should ===(ByteString("")) - val bss = ByteStrings(Vector( - ByteString1.fromString("a"), - ByteString1.fromString("bc"), - ByteString1.fromString("def"))) + val bss = + 
ByteStrings(Vector(ByteString1.fromString("a"), ByteString1.fromString("bc"), ByteString1.fromString("def"))) bss.drop(Int.MinValue) should ===(ByteString("abcdef")) bss.drop(-1) should ===(ByteString("abcdef")) @@ -439,30 +467,34 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { (ByteString1C.fromString("a") ++ ByteString1.fromString("bc")).drop(2) should ===(ByteString("c")) } "dropRight" in { - ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).dropRight(Int.MinValue) should ===(ByteString("")) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).dropRight(Int.MinValue) should ===( + ByteString("")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).dropRight(-1) should ===(ByteString("")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).dropRight(0) should ===(ByteString("")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).dropRight(1) should ===(ByteString("")) - ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).dropRight(Int.MaxValue) should ===(ByteString("")) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("")).dropRight(Int.MaxValue) should ===( + ByteString("")) - ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(Int.MinValue) should ===(ByteString("a")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(Int.MinValue) should ===( + ByteString("a")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(-1) should ===(ByteString("a")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(0) should ===(ByteString("a")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(1) should ===(ByteString("")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(2) should ===(ByteString("")) - ByteStrings(ByteString1.fromString("a"), 
ByteString1.fromString("")).dropRight(Int.MaxValue) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).dropRight(Int.MaxValue) should ===( + ByteString("")) - ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).dropRight(Int.MinValue) should ===(ByteString("a")) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).dropRight(Int.MinValue) should ===( + ByteString("a")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).dropRight(-1) should ===(ByteString("a")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).dropRight(0) should ===(ByteString("a")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).dropRight(1) should ===(ByteString("")) ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).dropRight(2) should ===(ByteString("")) - ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).dropRight(Int.MaxValue) should ===(ByteString("")) + ByteStrings(ByteString1.fromString(""), ByteString1.fromString("a")).dropRight(Int.MaxValue) should ===( + ByteString("")) - val bss = ByteStrings(Vector( - ByteString1.fromString("a"), - ByteString1.fromString("bc"), - ByteString1.fromString("def"))) + val bss = + ByteStrings(Vector(ByteString1.fromString("a"), ByteString1.fromString("bc"), ByteString1.fromString("def"))) bss.dropRight(Int.MinValue) should ===(ByteString("abcdef")) bss.dropRight(-1) should ===(ByteString("abcdef")) @@ -500,14 +532,19 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(2, 4) should ===(ByteString("cd")) ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(3, 4) should ===(ByteString("d")) // Can obtain expected results from 6 basic patterns - ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-10, 10) should ===(ByteString("abcd")) + 
ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-10, 10) should ===( + ByteString("abcd")) ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-10, 0) should ===(ByteString("")) - ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-10, 4) should ===(ByteString("abcd")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-10, 4) should ===( + ByteString("abcd")) ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(0, 4) should ===(ByteString("abcd")) ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(1, -2) should ===(ByteString("")) - ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(0, 10) should ===(ByteString("abcd")) - ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-10, -100) should ===(ByteString("")) - ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-100, -10) should ===(ByteString("")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(0, 10) should ===( + ByteString("abcd")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-10, -100) should ===( + ByteString("")) + ByteStrings(ByteString1.fromString("ab"), ByteString1.fromString("cd")).slice(-100, -10) should ===( + ByteString("")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(1, -2) should ===(ByteString("")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("")).slice(-10, -100) should ===(ByteString("")) @@ -531,10 +568,12 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).drop(1).take(0) should ===(ByteString("")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).drop(1).take(-1) should ===(ByteString("")) 
ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).drop(1).take(-2) should ===(ByteString("")) - (ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")) ++ ByteString1.fromString("defg")).drop(2) should ===(ByteString("cdefg")) + (ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")) ++ ByteString1.fromString("defg")) + .drop(2) should ===(ByteString("cdefg")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).drop(2).take(1) should ===(ByteString("c")) ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).take(100) should ===(ByteString("abc")) - ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).drop(1).take(100) should ===(ByteString("bc")) + ByteStrings(ByteString1.fromString("a"), ByteString1.fromString("bc")).drop(1).take(100) should ===( + ByteString("bc")) } "indexOf" in { ByteString.empty.indexOf(5) should ===(-1) @@ -620,7 +659,9 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { "dropping" in { check((a: ByteString, b: ByteString) => (a ++ b).drop(b.size).size == a.size) } "taking" in { check((a: ByteString, b: ByteString) => (a ++ b).take(a.size) == a) } "takingRight" in { check((a: ByteString, b: ByteString) => (a ++ b).takeRight(b.size) == b) } - "dropping then taking" in { check((a: ByteString, b: ByteString) => (b ++ a ++ b).drop(b.size).take(a.size) == a) } + "dropping then taking" in { + check((a: ByteString, b: ByteString) => (b ++ a ++ b).drop(b.size).take(a.size) == a) + } "droppingRight" in { check((a: ByteString, b: ByteString) => (b ++ a ++ b).drop(b.size).dropRight(b.size) == a) } } @@ -630,7 +671,11 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { } "be equal to the original" when { - "compacting" in { check { xs: ByteString => val ys = xs.compact; (xs == ys) && ys.isCompact } } + "compacting" in { + check { xs: ByteString => + val ys = xs.compact; (xs == ys) && ys.isCompact + } + } 
"recombining" in { check { (xs: ByteString, from: Int, until: Int) => val (tmp, c) = xs.splitAt(until) @@ -644,35 +689,52 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { (a ++ b ++ c) should ===(xs) } "recombining - edge cases" in { - excerciseRecombining(ByteStrings(Vector(ByteString1(Array[Byte](1)), ByteString1(Array[Byte](2)))), -2147483648, 112121212) + excerciseRecombining(ByteStrings(Vector(ByteString1(Array[Byte](1)), ByteString1(Array[Byte](2)))), + -2147483648, + 112121212) excerciseRecombining(ByteStrings(Vector(ByteString1(Array[Byte](100)))), 0, 2) excerciseRecombining(ByteStrings(Vector(ByteString1(Array[Byte](100)))), -2147483648, 2) excerciseRecombining(ByteStrings(Vector(ByteString1.fromString("ab"), ByteString1.fromString("cd"))), 0, 1) excerciseRecombining(ByteString1.fromString("abc").drop(1).take(1), -324234, 234232) excerciseRecombining(ByteString("a"), 0, 2147483647) - excerciseRecombining(ByteStrings(Vector(ByteString1.fromString("ab"), ByteString1.fromString("cd"))).drop(2), 2147483647, 1) + excerciseRecombining(ByteStrings(Vector(ByteString1.fromString("ab"), ByteString1.fromString("cd"))).drop(2), + 2147483647, + 1) excerciseRecombining(ByteString1.fromString("ab").drop1(1), Int.MaxValue, Int.MaxValue) } } "behave as expected" when { - "created from and decoding to String" in { check { s: String => ByteString(s, "UTF-8").decodeString("UTF-8") == s } } + "created from and decoding to String" in { + check { s: String => + ByteString(s, "UTF-8").decodeString("UTF-8") == s + } + } "compacting" in { check { a: ByteString => val wasCompact = a.isCompact val b = a.compact ((!wasCompact) || (b eq a)) && - (b == a) && - b.isCompact && - (b.compact eq b) + (b == a) && + b.isCompact && + (b.compact eq b) } } "asByteBuffers" in { - check { (a: ByteString) => if (a.isCompact) a.asByteBuffers.size == 1 && a.asByteBuffers.head == a.asByteBuffer else a.asByteBuffers.size > 0 } - check { (a: ByteString) => 
a.asByteBuffers.foldLeft(ByteString.empty) { (bs, bb) => bs ++ ByteString(bb) } == a } - check { (a: ByteString) => a.asByteBuffers.forall(_.isReadOnly) } + check { (a: ByteString) => + if (a.isCompact) a.asByteBuffers.size == 1 && a.asByteBuffers.head == a.asByteBuffer + else a.asByteBuffers.size > 0 + } + check { (a: ByteString) => + a.asByteBuffers.foldLeft(ByteString.empty) { (bs, bb) => + bs ++ ByteString(bb) + } == a + } + check { (a: ByteString) => + a.asByteBuffers.forall(_.isReadOnly) + } check { (a: ByteString) => import scala.collection.JavaConverters.iterableAsScalaIterableConverter a.asByteBuffers.zip(a.getByteBuffers().asScala).forall(x => x._1 == x._2) @@ -686,45 +748,108 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { } } "behave like a Vector" when { - "concatenating" in { check { (a: ByteString, b: ByteString) => likeVectors(a, b) { _ ++ _ } } } + "concatenating" in { + check { (a: ByteString, b: ByteString) => + likeVectors(a, b) { _ ++ _ } + } + } "calling apply" in { check { slice: ByteStringSlice => slice match { - case (xs, i1, i2) => likeVector(xs) { seq => - (if ((i1 >= 0) && (i1 < seq.length)) seq(i1) else 0, - if ((i2 >= 0) && (i2 < seq.length)) seq(i2) else 0) - } + case (xs, i1, i2) => + likeVector(xs) { seq => + (if ((i1 >= 0) && (i1 < seq.length)) seq(i1) else 0, if ((i2 >= 0) && (i2 < seq.length)) seq(i2) else 0) + } } } } - "calling head" in { check { a: ByteString => a.isEmpty || likeVector(a) { _.head } } } - "calling tail" in { check { a: ByteString => a.isEmpty || likeVector(a) { _.tail } } } - "calling last" in { check { a: ByteString => a.isEmpty || likeVector(a) { _.last } } } - "calling init" in { check { a: ByteString => a.isEmpty || likeVector(a) { _.init } } } - "calling length" in { check { a: ByteString => likeVector(a) { _.length } } } + "calling head" in { + check { a: ByteString => + a.isEmpty || likeVector(a) { _.head } + } + } + "calling tail" in { + check { a: ByteString => + a.isEmpty || 
likeVector(a) { _.tail } + } + } + "calling last" in { + check { a: ByteString => + a.isEmpty || likeVector(a) { _.last } + } + } + "calling init" in { + check { a: ByteString => + a.isEmpty || likeVector(a) { _.init } + } + } + "calling length" in { + check { a: ByteString => + likeVector(a) { _.length } + } + } - "calling span" in { check { (a: ByteString, b: Byte) => likeVector(a)({ _.span(_ != b) match { case (a, b) => (a, b) } }) } } + "calling span" in { + check { (a: ByteString, b: Byte) => + likeVector(a)({ _.span(_ != b) match { case (a, b) => (a, b) } }) + } + } - "calling takeWhile" in { check { (a: ByteString, b: Byte) => likeVector(a)({ _.takeWhile(_ != b) }) } } - "calling dropWhile" in { check { (a: ByteString, b: Byte) => likeVector(a) { _.dropWhile(_ != b) } } } - "calling indexWhere" in { check { (a: ByteString, b: Byte) => likeVector(a) { _.indexWhere(_ == b) } } } - "calling indexOf" in { check { (a: ByteString, b: Byte) => likeVector(a) { _.indexOf(b) } } } + "calling takeWhile" in { + check { (a: ByteString, b: Byte) => + likeVector(a)({ _.takeWhile(_ != b) }) + } + } + "calling dropWhile" in { + check { (a: ByteString, b: Byte) => + likeVector(a) { _.dropWhile(_ != b) } + } + } + "calling indexWhere" in { + check { (a: ByteString, b: Byte) => + likeVector(a) { _.indexWhere(_ == b) } + } + } + "calling indexOf" in { + check { (a: ByteString, b: Byte) => + likeVector(a) { _.indexOf(b) } + } + } // this actually behave weird for Vector and negative indexes - SI9936, fixed in Scala 2.12 // so let's just skip negative indexes (doesn't make much sense anyway) - "calling indexOf(elem, idx)" in { check { (a: ByteString, b: Byte, idx: Int) => likeVector(a) { _.indexOf(b, math.max(0, idx)) } } } + "calling indexOf(elem, idx)" in { + check { (a: ByteString, b: Byte, idx: Int) => + likeVector(a) { _.indexOf(b, math.max(0, idx)) } + } + } - "calling foreach" in { check { a: ByteString => likeVector(a) { it => var acc = 0; it foreach { acc += _ }; acc } } 
} - "calling foldLeft" in { check { a: ByteString => likeVector(a) { _.foldLeft(0) { _ + _ } } } } - "calling toArray" in { check { a: ByteString => likeVector(a) { _.toArray.toSeq } } } + "calling foreach" in { + check { a: ByteString => + likeVector(a) { it => + var acc = 0; it.foreach { acc += _ }; acc + } + } + } + "calling foldLeft" in { + check { a: ByteString => + likeVector(a) { _.foldLeft(0) { _ + _ } } + } + } + "calling toArray" in { + check { a: ByteString => + likeVector(a) { _.toArray.toSeq } + } + } "calling slice" in { check { slice: ByteStringSlice => slice match { - case (xs, from, until) => likeVector(xs)({ - _.slice(from, until) - }) + case (xs, from, until) => + likeVector(xs)({ + _.slice(from, until) + }) } } } @@ -732,9 +857,10 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { "calling take and drop" in { check { slice: ByteStringSlice => slice match { - case (xs, from, until) => likeVector(xs)({ - _.drop(from).take(until - from) - }) + case (xs, from, until) => + likeVector(xs)({ + _.drop(from).take(until - from) + }) } } } @@ -750,11 +876,12 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { "calling copyToArray" in { check { slice: ByteStringSlice => slice match { - case (xs, from, until) => likeVector(xs)({ it => - val array = new Array[Byte](xs.length) - it.slice(from, until).copyToArray(array, from, until) - array.toSeq - }) + case (xs, from, until) => + likeVector(xs)({ it => + val array = new Array[Byte](xs.length) + it.slice(from, until).copyToArray(array, from, until) + array.toSeq + }) } } } @@ -763,7 +890,8 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { "serialize correctly" when { "parsing regular ByteString1C as compat" in { val oldSerd = - if (util.Properties.versionNumberString.startsWith("2.11") || util.Properties.versionNumberString.startsWith("2.12")) + if (util.Properties.versionNumberString.startsWith("2.11") || 
util.Properties.versionNumberString.startsWith( + "2.12")) "aced000573720021616b6b612e7574696c2e42797465537472696e672442797465537472696e67314336e9eed0afcfe4a40200015b000562797465737400025b427872001b616b6b612e7574696c2e436f6d7061637442797465537472696e67fa2925150f93468f0200007870757200025b42acf317f8060854e002000078700000000a74657374737472696e67" else // The data is the same, but the class hierarchy changed in 2.13: @@ -791,34 +919,99 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { "A ByteStringIterator" must { "behave like a buffered Vector Iterator" when { - "concatenating" in { check { (a: ByteString, b: ByteString) => likeVecIts(a, b) { (a, b) => (a ++ b).toSeq } } } + "concatenating" in { + check { (a: ByteString, b: ByteString) => + likeVecIts(a, b) { (a, b) => + (a ++ b).toSeq + } + } + } - "calling head" in { check { a: ByteString => a.isEmpty || likeVecIt(a) { _.head } } } - "calling next" in { check { a: ByteString => a.isEmpty || likeVecIt(a) { _.next() } } } - "calling hasNext" in { check { a: ByteString => likeVecIt(a) { _.hasNext } } } - "calling length" in { check { a: ByteString => likeVecIt(a) { _.length } } } - "calling duplicate" in { check { a: ByteString => likeVecIt(a)({ _.duplicate match { case (a, b) => (a.toSeq, b.toSeq) } }, strict = false) } } + "calling head" in { + check { a: ByteString => + a.isEmpty || likeVecIt(a) { _.head } + } + } + "calling next" in { + check { a: ByteString => + a.isEmpty || likeVecIt(a) { _.next() } + } + } + "calling hasNext" in { + check { a: ByteString => + likeVecIt(a) { _.hasNext } + } + } + "calling length" in { + check { a: ByteString => + likeVecIt(a) { _.length } + } + } + "calling duplicate" in { + check { a: ByteString => + likeVecIt(a)({ _.duplicate match { case (a, b) => (a.toSeq, b.toSeq) } }, strict = false) + } + } // Have to used toList instead of toSeq here, iterator.span (new in // Scala-2.9) seems to be broken in combination with toSeq for the // scala.collection default 
Iterator (see Scala issue SI-5838). - "calling span" in { check { (a: ByteString, b: Byte) => likeVecIt(a)({ _.span(_ != b) match { case (a, b) => (a.toList, b.toList) } }, strict = false) } } + "calling span" in { + check { (a: ByteString, b: Byte) => + likeVecIt(a)({ _.span(_ != b) match { case (a, b) => (a.toList, b.toList) } }, strict = false) + } + } - "calling takeWhile" in { check { (a: ByteString, b: Byte) => likeVecIt(a)({ _.takeWhile(_ != b).toSeq }, strict = false) } } - "calling dropWhile" in { check { (a: ByteString, b: Byte) => likeVecIt(a) { _.dropWhile(_ != b).toSeq } } } - "calling indexWhere" in { check { (a: ByteString, b: Byte) => likeVecIt(a) { _.indexWhere(_ == b) } } } - "calling indexOf" in { check { (a: ByteString, b: Byte) => likeVecIt(a) { _.indexOf(b) } } } - "calling toSeq" in { check { a: ByteString => likeVecIt(a) { _.toSeq } } } - "calling foreach" in { check { a: ByteString => likeVecIt(a) { it => var acc = 0; it foreach { acc += _ }; acc } } } - "calling foldLeft" in { check { a: ByteString => likeVecIt(a) { _.foldLeft(0) { _ + _ } } } } - "calling toArray" in { check { a: ByteString => likeVecIt(a) { _.toArray.toSeq } } } + "calling takeWhile" in { + check { (a: ByteString, b: Byte) => + likeVecIt(a)({ _.takeWhile(_ != b).toSeq }, strict = false) + } + } + "calling dropWhile" in { + check { (a: ByteString, b: Byte) => + likeVecIt(a) { _.dropWhile(_ != b).toSeq } + } + } + "calling indexWhere" in { + check { (a: ByteString, b: Byte) => + likeVecIt(a) { _.indexWhere(_ == b) } + } + } + "calling indexOf" in { + check { (a: ByteString, b: Byte) => + likeVecIt(a) { _.indexOf(b) } + } + } + "calling toSeq" in { + check { a: ByteString => + likeVecIt(a) { _.toSeq } + } + } + "calling foreach" in { + check { a: ByteString => + likeVecIt(a) { it => + var acc = 0; it.foreach { acc += _ }; acc + } + } + } + "calling foldLeft" in { + check { a: ByteString => + likeVecIt(a) { _.foldLeft(0) { _ + _ } } + } + } + "calling toArray" in { + check { 
a: ByteString => + likeVecIt(a) { _.toArray.toSeq } + } + } "calling slice" in { check { slice: ByteStringSlice => slice match { - case (xs, from, until) => likeVecIt(xs)({ - _.slice(from, until).toSeq - }, strict = false) + case (xs, from, until) => + likeVecIt(xs)({ + _.slice(from, until).toSeq + }, strict = false) } } } @@ -826,9 +1019,10 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { "calling take and drop" in { check { slice: ByteStringSlice => slice match { - case (xs, from, until) => likeVecIt(xs)({ - _.drop(from).take(until - from).toSeq - }, strict = false) + case (xs, from, until) => + likeVecIt(xs)({ + _.drop(from).take(until - from).toSeq + }, strict = false) } } } @@ -836,11 +1030,12 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { "calling copyToArray" in { check { slice: ByteStringSlice => slice match { - case (xs, from, until) => likeVecIt(xs)({ it => - val array = new Array[Byte](xs.length) - it.slice(from, until).copyToArray(array, from, until) - array.toSeq - }, strict = false) + case (xs, from, until) => + likeVecIt(xs)({ it => + val array = new Array[Byte](xs.length) + it.slice(from, until).copyToArray(array, from, until) + array.toSeq + }, strict = false) } } } @@ -861,20 +1056,18 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { } "getting Bytes with a given length" in { - check { - slice: ByteStringSlice => - val (bytes, _, _) = slice - val input = bytes.iterator - (input.getBytes(bytes.length).toSeq == bytes) && input.isEmpty + check { slice: ByteStringSlice => + val (bytes, _, _) = slice + val input = bytes.iterator + (input.getBytes(bytes.length).toSeq == bytes) && input.isEmpty } } "getting ByteString with a given length" in { - check { - slice: ByteStringSlice => - val (bytes, _, _) = slice - val input = bytes.iterator - (input.getByteString(bytes.length) == bytes) && input.isEmpty + check { slice: ByteStringSlice => + val (bytes, _, _) = slice + val input = 
bytes.iterator + (input.getByteString(bytes.length) == bytes) && input.isEmpty } } @@ -901,8 +1094,8 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { for (i <- b until bytes.length) output(i) = input.asInputStream.read().toByte (output.toSeq.drop(a) == bytes.drop(a)) && - (input.asInputStream.read() == -1) && - ((output.length < 1) || (input.asInputStream.read(output, 0, 1) == -1)) + (input.asInputStream.read() == -1) && + ((output.length < 1) || (input.asInputStream.read(output, 0, 1) == -1)) } } @@ -929,16 +1122,56 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { } "decode data correctly" when { - "decoding Short in big-endian" in { check { slice: ByteStringSlice => testShortDecoding(slice, BIG_ENDIAN) } } - "decoding Short in little-endian" in { check { slice: ByteStringSlice => testShortDecoding(slice, LITTLE_ENDIAN) } } - "decoding Int in big-endian" in { check { slice: ByteStringSlice => testIntDecoding(slice, BIG_ENDIAN) } } - "decoding Int in little-endian" in { check { slice: ByteStringSlice => testIntDecoding(slice, LITTLE_ENDIAN) } } - "decoding Long in big-endian" in { check { slice: ByteStringSlice => testLongDecoding(slice, BIG_ENDIAN) } } - "decoding Long in little-endian" in { check { slice: ByteStringSlice => testLongDecoding(slice, LITTLE_ENDIAN) } } - "decoding Float in big-endian" in { check { slice: ByteStringSlice => testFloatDecoding(slice, BIG_ENDIAN) } } - "decoding Float in little-endian" in { check { slice: ByteStringSlice => testFloatDecoding(slice, LITTLE_ENDIAN) } } - "decoding Double in big-endian" in { check { slice: ByteStringSlice => testDoubleDecoding(slice, BIG_ENDIAN) } } - "decoding Double in little-endian" in { check { slice: ByteStringSlice => testDoubleDecoding(slice, LITTLE_ENDIAN) } } + "decoding Short in big-endian" in { + check { slice: ByteStringSlice => + testShortDecoding(slice, BIG_ENDIAN) + } + } + "decoding Short in little-endian" in { + check { slice: ByteStringSlice => 
+ testShortDecoding(slice, LITTLE_ENDIAN) + } + } + "decoding Int in big-endian" in { + check { slice: ByteStringSlice => + testIntDecoding(slice, BIG_ENDIAN) + } + } + "decoding Int in little-endian" in { + check { slice: ByteStringSlice => + testIntDecoding(slice, LITTLE_ENDIAN) + } + } + "decoding Long in big-endian" in { + check { slice: ByteStringSlice => + testLongDecoding(slice, BIG_ENDIAN) + } + } + "decoding Long in little-endian" in { + check { slice: ByteStringSlice => + testLongDecoding(slice, LITTLE_ENDIAN) + } + } + "decoding Float in big-endian" in { + check { slice: ByteStringSlice => + testFloatDecoding(slice, BIG_ENDIAN) + } + } + "decoding Float in little-endian" in { + check { slice: ByteStringSlice => + testFloatDecoding(slice, LITTLE_ENDIAN) + } + } + "decoding Double in big-endian" in { + check { slice: ByteStringSlice => + testDoubleDecoding(slice, BIG_ENDIAN) + } + } + "decoding Double in little-endian" in { + check { slice: ByteStringSlice => + testDoubleDecoding(slice, LITTLE_ENDIAN) + } + } } } @@ -948,9 +1181,13 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { check { (array1: Array[Byte], array2: Array[Byte], bs1: ByteString, bs2: ByteString, bs3: ByteString) => likeVecBld { builder => builder ++= array1 - bs1 foreach { b => builder += b } + bs1.foreach { b => + builder += b + } builder ++= bs2 - bs3 foreach { b => builder += b } + bs3.foreach { b => + builder += b + } builder ++= Vector(array2: _*) } } @@ -983,23 +1220,73 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers { } "encode data correctly" when { - "encoding Short in big-endian" in { check { slice: ArraySlice[Short] => testShortEncoding(slice, BIG_ENDIAN) } } - "encoding Short in little-endian" in { check { slice: ArraySlice[Short] => testShortEncoding(slice, LITTLE_ENDIAN) } } - "encoding Int in big-endian" in { check { slice: ArraySlice[Int] => testIntEncoding(slice, BIG_ENDIAN) } } - "encoding Int in little-endian" in { check { 
slice: ArraySlice[Int] => testIntEncoding(slice, LITTLE_ENDIAN) } } - "encoding Long in big-endian" in { check { slice: ArraySlice[Long] => testLongEncoding(slice, BIG_ENDIAN) } } - "encoding Long in little-endian" in { check { slice: ArraySlice[Long] => testLongEncoding(slice, LITTLE_ENDIAN) } } - "encoding LongPart in big-endian" in { check { slice: ArrayNumBytes[Long] => testLongPartEncoding(slice, BIG_ENDIAN) } } - "encoding LongPart in little-endian" in { check { slice: ArrayNumBytes[Long] => testLongPartEncoding(slice, LITTLE_ENDIAN) } } - "encoding Float in big-endian" in { check { slice: ArraySlice[Float] => testFloatEncoding(slice, BIG_ENDIAN) } } - "encoding Float in little-endian" in { check { slice: ArraySlice[Float] => testFloatEncoding(slice, LITTLE_ENDIAN) } } - "encoding Double in big-endian" in { check { slice: ArraySlice[Double] => testDoubleEncoding(slice, BIG_ENDIAN) } } - "encoding Double in little-endian" in { check { slice: ArraySlice[Double] => testDoubleEncoding(slice, LITTLE_ENDIAN) } } + "encoding Short in big-endian" in { + check { slice: ArraySlice[Short] => + testShortEncoding(slice, BIG_ENDIAN) + } + } + "encoding Short in little-endian" in { + check { slice: ArraySlice[Short] => + testShortEncoding(slice, LITTLE_ENDIAN) + } + } + "encoding Int in big-endian" in { + check { slice: ArraySlice[Int] => + testIntEncoding(slice, BIG_ENDIAN) + } + } + "encoding Int in little-endian" in { + check { slice: ArraySlice[Int] => + testIntEncoding(slice, LITTLE_ENDIAN) + } + } + "encoding Long in big-endian" in { + check { slice: ArraySlice[Long] => + testLongEncoding(slice, BIG_ENDIAN) + } + } + "encoding Long in little-endian" in { + check { slice: ArraySlice[Long] => + testLongEncoding(slice, LITTLE_ENDIAN) + } + } + "encoding LongPart in big-endian" in { + check { slice: ArrayNumBytes[Long] => + testLongPartEncoding(slice, BIG_ENDIAN) + } + } + "encoding LongPart in little-endian" in { + check { slice: ArrayNumBytes[Long] => + 
testLongPartEncoding(slice, LITTLE_ENDIAN) + } + } + "encoding Float in big-endian" in { + check { slice: ArraySlice[Float] => + testFloatEncoding(slice, BIG_ENDIAN) + } + } + "encoding Float in little-endian" in { + check { slice: ArraySlice[Float] => + testFloatEncoding(slice, LITTLE_ENDIAN) + } + } + "encoding Double in big-endian" in { + check { slice: ArraySlice[Double] => + testDoubleEncoding(slice, BIG_ENDIAN) + } + } + "encoding Double in little-endian" in { + check { slice: ArraySlice[Double] => + testDoubleEncoding(slice, LITTLE_ENDIAN) + } + } } "have correct empty info" when { "is empty" in { - check { a: ByteStringBuilder => a.isEmpty } + check { a: ByteStringBuilder => + a.isEmpty + } } "is nonEmpty" in { check { a: ByteStringBuilder => diff --git a/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala b/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala index 35a1ab73b5..15c486c032 100644 --- a/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala @@ -23,7 +23,7 @@ class DurationSpec extends AkkaSpec { (2 * one) should ===(two) (three - two) should ===(one) (three / 3) should ===(one) - (two / one) should ===(2D) + (two / one) should ===(2d) (one + zero) should ===(one) (one / 1000000) should ===(1.micro) } @@ -85,7 +85,7 @@ class DurationSpec extends AkkaSpec { "support fromNow" in { val dead = 2.seconds.fromNow - val dead2 = 2 seconds fromNow + val dead2 = 2.seconds(fromNow) // view bounds vs. very local type inference vs. 
operator precedence: sigh dead.timeLeft should be > (1 second: Duration) dead2.timeLeft should be > (1 second: Duration) diff --git a/akka-actor-tests/src/test/scala/akka/util/IgnoreForScala212.scala b/akka-actor-tests/src/test/scala/akka/util/IgnoreForScala212.scala index fdcd96305b..50f4f2dc84 100644 --- a/akka-actor-tests/src/test/scala/akka/util/IgnoreForScala212.scala +++ b/akka-actor-tests/src/test/scala/akka/util/IgnoreForScala212.scala @@ -7,4 +7,5 @@ package akka.util import org.scalatest.{ Ignore, Tag } import scala.util.Properties -object IgnoreForScala212 extends Tag(if (Properties.versionNumberString.startsWith("2.12")) classOf[Ignore].getName else "") +object IgnoreForScala212 + extends Tag(if (Properties.versionNumberString.startsWith("2.12")) classOf[Ignore].getName else "") diff --git a/akka-actor-tests/src/test/scala/akka/util/ImmutableIntMapSpec.scala b/akka-actor-tests/src/test/scala/akka/util/ImmutableIntMapSpec.scala index ac33fb7bb1..8f8500a59c 100644 --- a/akka-actor-tests/src/test/scala/akka/util/ImmutableIntMapSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/ImmutableIntMapSpec.scala @@ -44,8 +44,7 @@ class ImmutableIntMapSpec extends WordSpec with Matchers { val m1 = ImmutableIntMap.empty.updated(10, 10).updated(10, 11) m1.keysIterator.map(m1.get).toList should be(List(11)) - val m2 = m1.updated(20, 20).updated(30, 30) - .updated(20, 21).updated(30, 31) + val m2 = m1.updated(20, 20).updated(30, 30).updated(20, 21).updated(30, 31) m2.keysIterator.map(m2.get).toList should be(List(11, 21, 31)) } @@ -58,14 +57,12 @@ class ImmutableIntMapSpec extends WordSpec with Matchers { "have toString" in { ImmutableIntMap.empty.toString should be("ImmutableIntMap()") ImmutableIntMap.empty.updated(10, 10).toString should be("ImmutableIntMap(10 -> 10)") - ImmutableIntMap.empty.updated(10, 10).updated(20, 20).toString should be( - "ImmutableIntMap(10 -> 10, 20 -> 20)") + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).toString should 
be("ImmutableIntMap(10 -> 10, 20 -> 20)") } "have equals and hashCode" in { ImmutableIntMap.empty.updated(10, 10) should be(ImmutableIntMap.empty.updated(10, 10)) - ImmutableIntMap.empty.updated(10, 10).hashCode should be( - ImmutableIntMap.empty.updated(10, 10).hashCode) + ImmutableIntMap.empty.updated(10, 10).hashCode should be(ImmutableIntMap.empty.updated(10, 10).hashCode) ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30) should be( ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30)) @@ -75,10 +72,10 @@ class ImmutableIntMapSpec extends WordSpec with Matchers { ImmutableIntMap.empty.updated(10, 10).updated(20, 20) should not be ImmutableIntMap.empty.updated(10, 10) ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30) should not be - ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 31) + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 31) ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(30, 30) should not be - ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(31, 30) + ImmutableIntMap.empty.updated(10, 10).updated(20, 20).updated(31, 30) ImmutableIntMap.empty should be(ImmutableIntMap.empty) ImmutableIntMap.empty.hashCode should be(ImmutableIntMap.empty.hashCode) diff --git a/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala b/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala index 147c5fa79d..136a64c3bc 100644 --- a/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala @@ -14,9 +14,10 @@ import akka.testkit.DefaultTimeout class IndexSpec extends AkkaSpec with Matchers with DefaultTimeout { implicit val ec = system.dispatcher - private def emptyIndex = new Index[String, Int](100, new Comparator[Int] { - override def compare(a: Int, b: Int): Int = Integer.compare(a, b) - }) + private def emptyIndex = + new Index[String, Int](100, new Comparator[Int] { + override def 
compare(a: Int, b: Int): Int = Integer.compare(a, b) + }) private def indexWithValues = { val index = emptyIndex diff --git a/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala b/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala index 32095aae56..57319a9308 100644 --- a/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/PrettyDurationSpec.scala @@ -14,20 +14,20 @@ class PrettyDurationSpec extends WordSpec with Matchers { val cases: Seq[(Duration, String)] = 9.nanos -> "9.000 ns" :: - 95.nanos -> "95.00 ns" :: - 999.nanos -> "999.0 ns" :: - 1000.nanos -> "1.000 μs" :: - 9500.nanos -> "9.500 μs" :: - 9500.micros -> "9.500 ms" :: - 9500.millis -> "9.500 s" :: - 95.seconds -> "1.583 min" :: - 95.minutes -> "1.583 h" :: - 95.hours -> "3.958 d" :: - Nil + 95.nanos -> "95.00 ns" :: + 999.nanos -> "999.0 ns" :: + 1000.nanos -> "1.000 μs" :: + 9500.nanos -> "9.500 μs" :: + 9500.micros -> "9.500 ms" :: + 9500.millis -> "9.500 s" :: + 95.seconds -> "1.583 min" :: + 95.minutes -> "1.583 h" :: + 95.hours -> "3.958 d" :: + Nil "PrettyDuration" should { - cases foreach { + cases.foreach { case (d, expectedValue) => s"print $d nanos as $expectedValue" in { d.pretty should ===(expectedValue) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorContextSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorContextSpec.scala index dfef579cde..efc0a600c5 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorContextSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ActorContextSpec.scala @@ -63,8 +63,7 @@ object ActorSpecMessages { } -abstract class ActorContextSpec extends ScalaTestWithActorTestKit( - """ +abstract class ActorContextSpec extends ScalaTestWithActorTestKit(""" akka.loggers = [akka.testkit.TestEventListener] """) with WordSpecLike { @@ -92,21 +91,23 @@ abstract class ActorContextSpec extends 
ScalaTestWithActorTestKit( "canonicalize behaviors" in { val probe = TestProbe[Event]() - lazy val behavior: Behavior[Command] = Behaviors.receive[Command] { (_, message) => - message match { - case Ping => - probe.ref ! Pong - Behaviors.same - case Miss => - probe.ref ! Missed - Behaviors.unhandled - case Renew(ref) => - ref ! Renewed - behavior - case other => - throw new RuntimeException(s"Unexpected message: $other") + lazy val behavior: Behavior[Command] = Behaviors + .receive[Command] { (_, message) => + message match { + case Ping => + probe.ref ! Pong + Behaviors.same + case Miss => + probe.ref ! Missed + Behaviors.unhandled + case Renew(ref) => + ref ! Renewed + behavior + case other => + throw new RuntimeException(s"Unexpected message: $other") + } } - }.decorate + .decorate val actor = spawn(behavior) actor ! Ping @@ -125,14 +126,17 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "correctly wire the lifecycle hook" in { val probe = TestProbe[Event]() - val internal = (Behaviors.receivePartial[Command] { - case (_, Fail) => - throw new TestException("Boom") - } receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same - }).decorate + val internal = Behaviors + .receivePartial[Command] { + case (_, Fail) => + throw new TestException("Boom") + } + .receiveSignal { + case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.same + } + .decorate val behavior = Behaviors.supervise(internal).onFailure(SupervisorStrategy.restart) val actor = spawn(behavior) @@ -145,14 +149,17 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "signal post stop after voluntary termination" in { val probe = TestProbe[Event]() - val behavior: Behavior[Command] = ( - Behaviors.receivePartial[Command] { - case (_, Stop) => Behaviors.stopped - } receiveSignal { - case (_, signal) => - probe.ref ! 
ReceivedSignal(signal) - Behaviors.same - }).decorate + val behavior: Behavior[Command] = + Behaviors + .receivePartial[Command] { + case (_, Stop) => Behaviors.stopped + } + .receiveSignal { + case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.same + } + .decorate val actor = spawn(behavior) actor ! Stop @@ -162,33 +169,37 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "restart and stop a child actor" in { val probe = TestProbe[Event]() - val child: Behavior[Command] = (Behaviors.receivePartial[Command] { - case (_, Fail) => throw new TestException("Boom") - case (_, Ping) => - probe.ref ! Pong - Behaviors.same - } receiveSignal { - case (_, signal) => - probe.ref ! GotChildSignal(signal) - Behavior.stopped - }).decorate + val child: Behavior[Command] = Behaviors + .receivePartial[Command] { + case (_, Fail) => throw new TestException("Boom") + case (_, Ping) => + probe.ref ! Pong + Behaviors.same + } + .receiveSignal { + case (_, signal) => + probe.ref ! GotChildSignal(signal) + Behavior.stopped + } + .decorate val parent: Behavior[Command] = Behaviors.setup[Command](context => { - val childRef = context.spawnAnonymous( - Behaviors.supervise(child).onFailure(SupervisorStrategy.restart) - ) + val childRef = context.spawnAnonymous(Behaviors.supervise(child).onFailure(SupervisorStrategy.restart)) context.watch(childRef) probe.ref ! ChildMade(childRef) - (Behaviors.receivePartial[Command] { - case (context, StopRef(ref)) => - context.stop(ref) - Behavior.same - } receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behavior.stopped - }).decorate + Behaviors + .receivePartial[Command] { + case (context, StopRef(ref)) => + context.stop(ref) + Behavior.same + } + .receiveSignal { + case (_, signal) => + probe.ref ! 
ReceivedSignal(signal) + Behavior.stopped + } + .decorate }) val parentRef = spawn(parent) @@ -207,20 +218,24 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( val probe = TestProbe[Event]() val child: Behavior[Command] = Behaviors.empty[Command].decorate - val parent: Behavior[Command] = Behaviors.setup[Command](context => { - val childRef = context.spawnAnonymous(child) - context.watch(childRef) - probe.ref ! ChildMade(childRef) - Behaviors.receivePartial[Command] { - case (context, StopRef(ref)) => - context.stop(ref) - Behaviors.same - } receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behavior.stopped - } - }).decorate + val parent: Behavior[Command] = Behaviors + .setup[Command](context => { + val childRef = context.spawnAnonymous(child) + context.watch(childRef) + probe.ref ! ChildMade(childRef) + Behaviors + .receivePartial[Command] { + case (context, StopRef(ref)) => + context.stop(ref) + Behaviors.same + } + .receiveSignal { + case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behavior.stopped + } + }) + .decorate val parentRef = spawn(parent) val childRef = probe.expectMessageType[ChildMade].ref parentRef ! StopRef(childRef) @@ -229,17 +244,19 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "reset behavior upon restart" in { val probe = TestProbe[Int]() - val internal = Behaviors.setup[Command](_ => { - var counter = 0 - Behaviors.receivePartial[Command] { - case (_, Ping) => - counter += 1 - probe.ref ! counter - Behavior.same - case (_, Fail) => - throw new TestException("Boom") - } - }).decorate + val internal = Behaviors + .setup[Command](_ => { + var counter = 0 + Behaviors.receivePartial[Command] { + case (_, Ping) => + counter += 1 + probe.ref ! counter + Behavior.same + case (_, Fail) => + throw new TestException("Boom") + } + }) + .decorate val behavior = Behaviors.supervise(internal).onFailure(SupervisorStrategy.restart) val actor = spawn(behavior) actor ! 
Ping @@ -253,17 +270,19 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "not reset behavior upon resume" in { val probe = TestProbe[Int]() - val internal = Behaviors.setup[Command](_ => { - var counter = 0 - Behaviors.receivePartial[Command] { - case (_, Ping) => - counter += 1 - probe.ref ! counter - Behavior.same - case (_, Fail) => - throw new TestException("Boom") - } - }).decorate + val internal = Behaviors + .setup[Command](_ => { + var counter = 0 + Behaviors.receivePartial[Command] { + case (_, Ping) => + counter += 1 + probe.ref ! counter + Behavior.same + case (_, Fail) => + throw new TestException("Boom") + } + }) + .decorate val behavior = Behaviors.supervise(internal).onFailure(SupervisorStrategy.resume) val actor = spawn(behavior) actor ! Ping @@ -277,30 +296,35 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "stop upon stop" in { val probe = TestProbe[Event]() - val behavior = (Behaviors.receivePartial[Command] { - case (_, Ping) => - probe.ref ! Pong - Behaviors.same - case (_, Fail) => - throw new TestException("boom") - } receiveSignal { - case (_, PostStop) => - probe.ref ! ReceivedSignal(PostStop) - Behavior.same - }).decorate - val actorToWatch = spawn(behavior) - val watcher: ActorRef[Command] = spawn(( - Behaviors.receivePartial[Any] { - case (context, Ping) => - context.watch(actorToWatch) + val behavior = Behaviors + .receivePartial[Command] { + case (_, Ping) => probe.ref ! Pong - Behavior.same - } receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) + Behaviors.same + case (_, Fail) => + throw new TestException("boom") + } + .receiveSignal { + case (_, PostStop) => + probe.ref ! ReceivedSignal(PostStop) Behavior.same } - ).decorate) + .decorate + val actorToWatch = spawn(behavior) + val watcher: ActorRef[Command] = spawn( + Behaviors + .receivePartial[Any] { + case (context, Ping) => + context.watch(actorToWatch) + probe.ref ! 
Pong + Behavior.same + } + .receiveSignal { + case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behavior.same + } + .decorate) actorToWatch ! Ping probe.expectMessage(Pong) watcher ! Ping @@ -315,18 +339,21 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "not stop non-child actor" in { val probe = TestProbe[Event]() val victim = spawn(Behaviors.empty[Command]) - val actor = spawn(Behaviors.receivePartial[Command] { - case (_, Ping) => - probe.ref ! Pong - Behaviors.same - case (context, StopRef(ref)) => - assertThrows[IllegalArgumentException] { - context.stop(ref) - probe.ref ! Pong + val actor = spawn( + Behaviors + .receivePartial[Command] { + case (_, Ping) => + probe.ref ! Pong + Behaviors.same + case (context, StopRef(ref)) => + assertThrows[IllegalArgumentException] { + context.stop(ref) + probe.ref ! Pong + } + probe.ref ! Missed + Behaviors.same } - probe.ref ! Missed - Behaviors.same - }.decorate) + .decorate) actor ! Ping probe.expectMessage(Pong) actor ! StopRef(victim) @@ -337,26 +364,31 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "watch a child actor before its termination" in { val probe = TestProbe[Event]() - val child = Behaviors.receivePartial[Command] { - case (_, Stop) => - Behaviors.stopped - }.decorate + val child = Behaviors + .receivePartial[Command] { + case (_, Stop) => + Behaviors.stopped + } + .decorate spawn( - Behaviors.setup[Command](context => { - val childRef = context.spawn(child, "A") - context.watch(childRef) - probe.ref ! ChildMade(childRef) - Behaviors.receivePartial[Command] { - case (_, Ping) => - probe.ref ! Pong - Behaviors.same - } receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same - } - }).decorate - ) + Behaviors + .setup[Command](context => { + val childRef = context.spawn(child, "A") + context.watch(childRef) + probe.ref ! ChildMade(childRef) + Behaviors + .receivePartial[Command] { + case (_, Ping) => + probe.ref ! 
Pong + Behaviors.same + } + .receiveSignal { + case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.same + } + }) + .decorate) val childRef = probe.expectMessageType[ChildMade].ref childRef ! Stop probe.expectTerminated(childRef, timeout.duration) @@ -364,26 +396,31 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "watch a child actor after its termination" in { val probe = TestProbe[Event]() - val child = Behaviors.receivePartial[Command] { - case (_, Stop) => - Behaviors.stopped - }.decorate + val child = Behaviors + .receivePartial[Command] { + case (_, Stop) => + Behaviors.stopped + } + .decorate val actor = spawn( - Behaviors.setup[Command](context => { - val childRef = context.spawn(child, "A") - probe.ref ! ChildMade(childRef) - Behaviors.receivePartial[Command] { - case (context, Watch(ref)) => - context.watch(ref) - probe.ref ! Pong - Behaviors.same - } receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same - } - }).decorate - ) + Behaviors + .setup[Command](context => { + val childRef = context.spawn(child, "A") + probe.ref ! ChildMade(childRef) + Behaviors + .receivePartial[Command] { + case (context, Watch(ref)) => + context.watch(ref) + probe.ref ! Pong + Behaviors.same + } + .receiveSignal { + case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.same + } + }) + .decorate) val childRef = probe.expectMessageType[ChildMade].ref actor ! 
Watch(childRef) probe.expectMessage(Pong) @@ -395,30 +432,35 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "unwatch a child actor before its termination" in { val probe = TestProbe[Event]() - val child = Behaviors.receivePartial[Command] { - case (_, Stop) => - Behaviors.stopped - }.decorate + val child = Behaviors + .receivePartial[Command] { + case (_, Stop) => + Behaviors.stopped + } + .decorate val actor = spawn( - Behaviors.setup[Command](context => { - val childRef = context.spawn(child, "A") - probe.ref ! ChildMade(childRef) - Behaviors.receivePartial[Command] { - case (context, Watch(ref)) => - context.watch(ref) - probe.ref ! Pong - Behaviors.same - case (context, UnWatch(ref)) => - context.unwatch(ref) - probe.ref ! Pong - Behaviors.same - } receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same - } - }).decorate - ) + Behaviors + .setup[Command](context => { + val childRef = context.spawn(child, "A") + probe.ref ! ChildMade(childRef) + Behaviors + .receivePartial[Command] { + case (context, Watch(ref)) => + context.watch(ref) + probe.ref ! Pong + Behaviors.same + case (context, UnWatch(ref)) => + context.unwatch(ref) + probe.ref ! Pong + Behaviors.same + } + .receiveSignal { + case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.same + } + }) + .decorate) val childRef = probe.expectMessageType[ChildMade].ref actor ! Watch(childRef) probe.expectMessage(Pong) @@ -430,37 +472,45 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "terminate upon not handling Terminated" in { val probe = TestProbe[Event]() - val child = (Behaviors.receivePartial[Command] { - case (_, Stop) => - Behaviors.stopped - } receiveSignal { - case (_, signal) => - probe.ref ! GotChildSignal(signal) - Behavior.same - }).decorate + val child = Behaviors + .receivePartial[Command] { + case (_, Stop) => + Behaviors.stopped + } + .receiveSignal { + case (_, signal) => + probe.ref ! 
GotChildSignal(signal) + Behavior.same + } + .decorate val actor = spawn( - Behaviors.setup[Command](context => { - val childRef = context.spawn(child, "A") - context.watch(childRef) - probe.ref ! ChildMade(childRef) - Behaviors.receivePartial[Command] { - case (_, Inert) => - probe.ref ! InertEvent - Behaviors.receive[Command] { - case (_, _) => Behaviors.unhandled - } receiveSignal { - case (_, Terminated(_)) => Behaviors.unhandled + Behaviors + .setup[Command](context => { + val childRef = context.spawn(child, "A") + context.watch(childRef) + probe.ref ! ChildMade(childRef) + Behaviors + .receivePartial[Command] { + case (_, Inert) => + probe.ref ! InertEvent + Behaviors + .receive[Command] { + case (_, _) => Behaviors.unhandled + } + .receiveSignal { + case (_, Terminated(_)) => Behaviors.unhandled + case (_, signal) => + probe.ref ! ReceivedSignal(signal) + Behaviors.same + } + } + .receiveSignal { case (_, signal) => probe.ref ! ReceivedSignal(signal) Behaviors.same } - } receiveSignal { - case (_, signal) => - probe.ref ! ReceivedSignal(signal) - Behaviors.same - } - }).decorate - ) + }) + .decorate) val childRef = probe.expectMessageType[ChildMade].ref actor ! Inert probe.expectMessage(InertEvent) @@ -475,11 +525,14 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "return the right context info" in { type Info = (ActorSystem[Nothing], ActorRef[String]) val probe = TestProbe[Info] - val actor = spawn(Behaviors.receivePartial[String] { - case (context, "info") => - probe.ref ! (context.system -> context.self) - Behaviors.same - }.decorate) + val actor = spawn( + Behaviors + .receivePartial[String] { + case (context, "info") => + probe.ref ! (context.system -> context.self) + Behaviors.same + } + .decorate) actor ! 
"info" probe.expectMessage((system, actor)) } @@ -487,18 +540,21 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "return right info about children" in { type Children = Seq[ActorRef[Nothing]] val probe = TestProbe[Children]() - val actor = spawn(Behaviors.receivePartial[String] { - case (context, "create") => - context.spawn(Behaviors.empty, "B") - probe.ref ! context.child("B").toSeq - Behaviors.same - case (context, "all") => - probe.ref ! context.children.toSeq - Behaviors.same - case (context, get) => - probe.ref ! context.child(get).toSeq - Behaviors.same - }.decorate) + val actor = spawn( + Behaviors + .receivePartial[String] { + case (context, "create") => + context.spawn(Behaviors.empty, "B") + probe.ref ! context.child("B").toSeq + Behaviors.same + case (context, "all") => + probe.ref ! context.children.toSeq + Behaviors.same + case (context, get) => + probe.ref ! context.child(get).toSeq + Behaviors.same + } + .decorate) actor ! "create" val children = probe.receiveMessage() actor ! "A" @@ -511,15 +567,18 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "set small receive timeout" in { val probe = TestProbe[Event]() - val actor = spawn(Behaviors.receivePartial[Command] { - case (_, ReceiveTimeout) => - probe.ref ! GotReceiveTimeout - Behaviors.same - case (context, SetTimeout(duration)) => - context.setReceiveTimeout(duration, ReceiveTimeout) - probe.ref ! TimeoutSet - Behaviors.same - }.decorate) + val actor = spawn( + Behaviors + .receivePartial[Command] { + case (_, ReceiveTimeout) => + probe.ref ! GotReceiveTimeout + Behaviors.same + case (context, SetTimeout(duration)) => + context.setReceiveTimeout(duration, ReceiveTimeout) + probe.ref ! TimeoutSet + Behaviors.same + } + .decorate) actor ! 
SetTimeout(1.nano) probe.expectMessage(TimeoutSet) probe.expectMessage(GotReceiveTimeout) @@ -527,21 +586,24 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "set large receive timeout" in { val probe = TestProbe[String]() - val actor = spawn(Behaviors.receivePartial[String] { - case (context, "schedule") => - context.scheduleOnce(1.second, probe.ref, "scheduled") - Behaviors.same - case (_, "ping") => - probe.ref ! "pong" - Behaviors.same - case (_, "receive timeout") => - probe.ref ! "received timeout" - Behaviors.same - case (context, duration) => - context.setReceiveTimeout(Duration(duration).asInstanceOf[FiniteDuration], "receive timeout") - probe.ref ! "timeout set" - Behaviors.same - }.decorate) + val actor = spawn( + Behaviors + .receivePartial[String] { + case (context, "schedule") => + context.scheduleOnce(1.second, probe.ref, "scheduled") + Behaviors.same + case (_, "ping") => + probe.ref ! "pong" + Behaviors.same + case (_, "receive timeout") => + probe.ref ! "received timeout" + Behaviors.same + case (context, duration) => + context.setReceiveTimeout(Duration(duration).asInstanceOf[FiniteDuration], "receive timeout") + probe.ref ! "timeout set" + Behaviors.same + } + .decorate) actor ! "1 minute" probe.expectMessage("timeout set") actor ! "schedule" @@ -552,11 +614,14 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "schedule a message" in { val probe = TestProbe[Event]() - val actor = spawn(Behaviors.receivePartial[Command] { - case (context, Ping) => - context.scheduleOnce(1.nano, probe.ref, Pong) - Behaviors.same - }.decorate) + val actor = spawn( + Behaviors + .receivePartial[Command] { + case (context, Ping) => + context.scheduleOnce(1.nano, probe.ref, Pong) + Behaviors.same + } + .decorate) actor ! 
Ping probe.expectMessage(Pong) } @@ -565,14 +630,17 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( type Envelope = (ActorRef[String], String) val messages = TestProbe[Envelope]() val probe = TestProbe[ActorRef[String]]() - val actor = spawn(Behaviors.receivePartial[String] { - case (context, "message") => - messages.ref.tell((context.self, "received message")) - Behaviors.same - case (context, name) => - probe.ref ! context.spawnMessageAdapter(identity, name) - Behaviors.same - }.decorate) + val actor = spawn( + Behaviors + .receivePartial[String] { + case (context, "message") => + messages.ref.tell((context.self, "received message")) + Behaviors.same + case (context, name) => + probe.ref ! context.spawnMessageAdapter(identity, name) + Behaviors.same + } + .decorate) val adapterName = "hello" actor ! adapterName val adapter = probe.receiveMessage() @@ -590,18 +658,16 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit( "not have problems stopping already stopped child" in { val probe = TestProbe[Event]() - val actor = spawn( - Behaviors.setup[Command](context => { - val child = context.spawnAnonymous(Behaviors.empty[Command]) - probe.ref ! ChildMade(child) - Behaviors.receivePartial[Command] { - case (context, StopRef(ref)) => - context.stop(ref) - probe.ref ! Pong - Behaviors.same - } - }) - ) + val actor = spawn(Behaviors.setup[Command](context => { + val child = context.spawnAnonymous(Behaviors.empty[Command]) + probe.ref ! ChildMade(child) + Behaviors.receivePartial[Command] { + case (context, StopRef(ref)) => + context.stop(ref) + probe.ref ! Pong + Behaviors.same + } + })) val child = probe.expectMessageType[ChildMade].ref actor ! 
StopRef(child) probe.expectMessage(Pong) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/AskSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/AskSpec.scala index 98a62d7078..a92f30bd8c 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/AskSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/AskSpec.scala @@ -172,9 +172,10 @@ class AskSpec extends ScalaTestWithActorTestKit(""" ref ! "start-ask" val Question(replyRef2) = probe.expectMessageType[Question] - EventFilter[RuntimeException](message = "Exception thrown out of adapter. Stopping myself.", occurrences = 1).intercept { - replyRef2 ! 42L - }(system.toUntyped) + EventFilter[RuntimeException](message = "Exception thrown out of adapter. Stopping myself.", occurrences = 1) + .intercept { + replyRef2 ! 42L + }(system.toUntyped) probe.expectTerminated(ref, probe.remainingOrDefault) } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala index ab2f83b961..43c0a986e4 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/BehaviorSpec.scala @@ -143,36 +143,38 @@ object BehaviorSpec { } def mkFull(monitor: ActorRef[Event], state: State = StateA): Behavior[Command] = { - SBehaviors.receive[Command] { - case (context, GetSelf) => - monitor ! Self(context.self) - SBehaviors.same - case (_, Miss) => - monitor ! Missed - SBehaviors.unhandled - case (_, Ignore) => - monitor ! Ignored - SBehaviors.same - case (_, Ping) => - monitor ! Pong - mkFull(monitor, state) - case (_, Swap) => - monitor ! Swapped - mkFull(monitor, state.next) - case (_, GetState()) => - monitor ! state - SBehaviors.same - case (_, Stop) => SBehaviors.stopped - case (_, _) => SBehaviors.unhandled - } receiveSignal { - case (_, signal) => - monitor ! 
ReceivedSignal(signal) - SBehaviors.same - } + SBehaviors + .receive[Command] { + case (context, GetSelf) => + monitor ! Self(context.self) + SBehaviors.same + case (_, Miss) => + monitor ! Missed + SBehaviors.unhandled + case (_, Ignore) => + monitor ! Ignored + SBehaviors.same + case (_, Ping) => + monitor ! Pong + mkFull(monitor, state) + case (_, Swap) => + monitor ! Swapped + mkFull(monitor, state.next) + case (_, GetState()) => + monitor ! state + SBehaviors.same + case (_, Stop) => SBehaviors.stopped + case (_, _) => SBehaviors.unhandled + } + .receiveSignal { + case (_, signal) => + monitor ! ReceivedSignal(signal) + SBehaviors.same + } } /* - * function converters for Java, to ease the pain on Scala 2.11 - */ + * function converters for Java, to ease the pain on Scala 2.11 + */ def fs(f: (JActorContext[Command], Signal) => Behavior[Command]) = new F2[JActorContext[Command], Signal, Behavior[Command]] { override def apply(context: JActorContext[Command], sig: Signal) = f(context, sig) @@ -349,32 +351,34 @@ class FullBehaviorSpec extends ScalaTestWithActorTestKit with Messages with Beco class ReceiveBehaviorSpec extends Messages with BecomeWithLifecycle with Stoppable { override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = behv(monitor, StateA) -> null private def behv(monitor: ActorRef[Event], state: State): Behavior[Command] = { - SBehaviors.receive[Command] { - case (context, GetSelf) => - monitor ! Self(context.self) - SBehaviors.same - case (_, Miss) => - monitor ! Missed - SBehaviors.unhandled - case (_, Ignore) => - monitor ! Ignored - SBehaviors.same - case (_, Ping) => - monitor ! Pong - behv(monitor, state) - case (_, Swap) => - monitor ! Swapped - behv(monitor, state.next) - case (_, GetState()) => - monitor ! state - SBehaviors.same - case (_, Stop) => SBehaviors.stopped - case (_, _: AuxPing) => SBehaviors.unhandled - } receiveSignal { - case (_, signal) => - monitor ! 
ReceivedSignal(signal) - SBehaviors.same - } + SBehaviors + .receive[Command] { + case (context, GetSelf) => + monitor ! Self(context.self) + SBehaviors.same + case (_, Miss) => + monitor ! Missed + SBehaviors.unhandled + case (_, Ignore) => + monitor ! Ignored + SBehaviors.same + case (_, Ping) => + monitor ! Pong + behv(monitor, state) + case (_, Swap) => + monitor ! Swapped + behv(monitor, state.next) + case (_, GetState()) => + monitor ! state + SBehaviors.same + case (_, Stop) => SBehaviors.stopped + case (_, _: AuxPing) => SBehaviors.unhandled + } + .receiveSignal { + case (_, signal) => + monitor ! ReceivedSignal(signal) + SBehaviors.same + } } } @@ -383,8 +387,8 @@ class ImmutableWithSignalScalaBehaviorSpec extends Messages with BecomeWithLifec override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = behv(monitor) -> null def behv(monitor: ActorRef[Event], state: State = StateA): Behavior[Command] = - SBehaviors.receive[Command] { - (context, message) => + SBehaviors + .receive[Command] { (context, message) => message match { case GetSelf => monitor ! Self(context.self) @@ -407,11 +411,12 @@ class ImmutableWithSignalScalaBehaviorSpec extends Messages with BecomeWithLifec case Stop => SBehaviors.stopped case _: AuxPing => SBehaviors.unhandled } - } receiveSignal { - case (_, sig) => - monitor ! ReceivedSignal(sig) - SBehaviors.same - } + } + .receiveSignal { + case (_, sig) => + monitor ! 
ReceivedSignal(sig) + SBehaviors.same + } } class ImmutableScalaBehaviorSpec extends Messages with Become with Stoppable { @@ -512,12 +517,16 @@ class InterceptScalaBehaviorSpec extends ImmutableWithSignalScalaBehaviorSpec wi override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = { val inbox = TestInbox[Either[Signal, Command]]("tapListener") val tap = new BehaviorInterceptor[Command, Command] { - override def aroundReceive(context: TypedActorContext[Command], message: Command, target: ReceiveTarget[Command]): Behavior[Command] = { + override def aroundReceive(context: TypedActorContext[Command], + message: Command, + target: ReceiveTarget[Command]): Behavior[Command] = { inbox.ref ! Right(message) target(context, message) } - override def aroundSignal(context: TypedActorContext[Command], signal: Signal, target: SignalTarget[Command]): Behavior[Command] = { + override def aroundSignal(context: TypedActorContext[Command], + signal: Signal, + target: SignalTarget[Command]): Behavior[Command] = { inbox.ref ! Left(signal) target(context, signal) } @@ -536,28 +545,29 @@ class ImmutableWithSignalJavaBehaviorSpec extends Messages with BecomeWithLifecy override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = behv(monitor) -> null def behv(monitor: ActorRef[Event], state: State = StateA): Behavior[Command] = JBehaviors.receive( - fc((context, message) => message match { - case GetSelf => - monitor ! Self(context.getSelf) - SBehaviors.same - case Miss => - monitor ! Missed - SBehaviors.unhandled - case Ignore => - monitor ! Ignored - SBehaviors.same - case Ping => - monitor ! Pong - behv(monitor, state) - case Swap => - monitor ! Swapped - behv(monitor, state.next) - case GetState() => - monitor ! state - SBehaviors.same - case Stop => SBehaviors.stopped - case _: AuxPing => SBehaviors.unhandled - }), + fc((context, message) => + message match { + case GetSelf => + monitor ! 
Self(context.getSelf) + SBehaviors.same + case Miss => + monitor ! Missed + SBehaviors.unhandled + case Ignore => + monitor ! Ignored + SBehaviors.same + case Ping => + monitor ! Pong + behv(monitor, state) + case Swap => + monitor ! Swapped + behv(monitor, state.next) + case GetState() => + monitor ! state + SBehaviors.same + case Stop => SBehaviors.stopped + case _: AuxPing => SBehaviors.unhandled + }), fs((_, sig) => { monitor ! ReceivedSignal(sig) SBehaviors.same @@ -625,12 +635,16 @@ class TapJavaBehaviorSpec extends ImmutableWithSignalJavaBehaviorSpec with Reuse override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = { val inbox = TestInbox[Either[Signal, Command]]("tapListener") val tap = new BehaviorInterceptor[Command, Command] { - override def aroundReceive(context: TypedActorContext[Command], message: Command, target: ReceiveTarget[Command]): Behavior[Command] = { + override def aroundReceive(context: TypedActorContext[Command], + message: Command, + target: ReceiveTarget[Command]): Behavior[Command] = { inbox.ref ! Right(message) target(context, message) } - override def aroundSignal(context: TypedActorContext[Command], signal: Signal, target: SignalTarget[Command]): Behavior[Command] = { + override def aroundSignal(context: TypedActorContext[Command], + signal: Signal, + target: SignalTarget[Command]): Behavior[Command] = { inbox.ref ! 
Left(signal) target(context, signal) } @@ -641,7 +655,6 @@ class TapJavaBehaviorSpec extends ImmutableWithSignalJavaBehaviorSpec with Reuse class RestarterJavaBehaviorSpec extends ImmutableWithSignalJavaBehaviorSpec with Reuse { override def behavior(monitor: ActorRef[Event]): (Behavior[Command], Aux) = { - JBehaviors.supervise(super.behavior(monitor)._1) - .onFailure(classOf[Exception], SupervisorStrategy.restart) -> null + JBehaviors.supervise(super.behavior(monitor)._1).onFailure(classOf[Exception], SupervisorStrategy.restart) -> null } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala index d4d3b66f93..c22c51dd62 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/DeferredSpec.scala @@ -22,15 +22,15 @@ object DeferredSpec { case object Started extends Event def target(monitor: ActorRef[Event]): Behavior[Command] = - Behaviors.receive((_, cmd) => cmd match { - case Ping => - monitor ! Pong - Behaviors.same - }) + Behaviors.receive((_, cmd) => + cmd match { + case Ping => + monitor ! Pong + Behaviors.same + }) } -class DeferredSpec extends ScalaTestWithActorTestKit( - """ +class DeferredSpec extends ScalaTestWithActorTestKit(""" akka.loggers = [akka.testkit.TestEventListener] """) with WordSpecLike { @@ -104,12 +104,14 @@ class DeferredSpec extends ScalaTestWithActorTestKit( "must un-defer underlying when wrapped by widen" in { val probe = TestProbe[Event]("evt") - val behv = Behaviors.setup[Command] { _ => - probe.ref ! Started - target(probe.ref) - }.widen[Command] { - case m => m - } + val behv = Behaviors + .setup[Command] { _ => + probe.ref ! 
Started + target(probe.ref) + } + .widen[Command] { + case m => m + } probe.expectNoMessage() // not yet val ref = spawn(behv) // it's supposed to be created immediately (not waiting for first message) @@ -138,7 +140,9 @@ class DeferredSpec extends ScalaTestWithActorTestKit( "must not allow setup(same)" in { val probe = TestProbe[Any]() val behv = Behaviors.setup[Command] { _ => - Behaviors.setup[Command] { _ => Behaviors.same } + Behaviors.setup[Command] { _ => + Behaviors.same + } } EventFilter[ActorInitializationException](occurrences = 1).intercept { val ref = spawn(behv) @@ -177,4 +181,3 @@ class DeferredStubbedSpec extends WordSpec with Matchers { } } - diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala index 3c2e45869a..f18879d3c4 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ExtensionsSpec.scala @@ -20,7 +20,7 @@ object DummyExtension1 extends ExtensionId[DummyExtension1] { def get(system: ActorSystem[_]): DummyExtension1 = apply(system) } class DummyExtension1Setup(factory: ActorSystem[_] => DummyExtension1) - extends AbstractExtensionSetup[DummyExtension1](DummyExtension1, factory) + extends AbstractExtensionSetup[DummyExtension1](DummyExtension1, factory) class DummyExtension1ViaSetup extends DummyExtension1 @@ -53,8 +53,7 @@ object InstanceCountingExtension extends ExtensionId[DummyExtension1] { } object ExtensionsSpec { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.actor.typed { library-extensions += "akka.actor.typed.InstanceCountingExtension" } @@ -65,54 +64,55 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with WordSpecLike { "The extensions subsystem" must { "return the same instance for the same id" in - withEmptyActorSystem("ExtensionsSpec01") { sys => - val instance1 
= sys.registerExtension(DummyExtension1) - val instance2 = sys.registerExtension(DummyExtension1) + withEmptyActorSystem("ExtensionsSpec01") { sys => + val instance1 = sys.registerExtension(DummyExtension1) + val instance2 = sys.registerExtension(DummyExtension1) - instance1 should be theSameInstanceAs instance2 + (instance1 should be).theSameInstanceAs(instance2) - val instance3 = DummyExtension1(sys) - instance3 should be theSameInstanceAs instance2 + val instance3 = DummyExtension1(sys) + (instance3 should be).theSameInstanceAs(instance2) - val instance4 = DummyExtension1.get(sys) - instance4 should be theSameInstanceAs instance3 - } + val instance4 = DummyExtension1.get(sys) + (instance4 should be).theSameInstanceAs(instance3) + } "return the same instance for the same id concurrently" in - withEmptyActorSystem("ExtensionsSpec02") { sys => - // not exactly water tight but better than nothing - import sys.executionContext - val futures = (0 to 1000).map(n => - Future { - sys.registerExtension(SlowExtension) - } - ) + withEmptyActorSystem("ExtensionsSpec02") { sys => + // not exactly water tight but better than nothing + import sys.executionContext + val futures = (0 to 1000).map(n => + Future { + sys.registerExtension(SlowExtension) + }) - val instances = Future.sequence(futures).futureValue + val instances = Future.sequence(futures).futureValue - instances.reduce { (a, b) => - a should be theSameInstanceAs b - b - } + instances.reduce { (a, b) => + (a should be).theSameInstanceAs(b) + b } + } "load extensions from the configuration" in - withEmptyActorSystem("ExtensionsSpec03", Some(ConfigFactory.parseString( - """ + withEmptyActorSystem("ExtensionsSpec03", + Some(ConfigFactory.parseString( + """ akka.actor.typed.extensions = ["akka.actor.typed.DummyExtension1$", "akka.actor.typed.SlowExtension$"] - """)) - ) { sys => - sys.hasExtension(DummyExtension1) should ===(true) - sys.extension(DummyExtension1) shouldBe a[DummyExtension1] + """))) { sys => + 
sys.hasExtension(DummyExtension1) should ===(true) + sys.extension(DummyExtension1) shouldBe a[DummyExtension1] - sys.hasExtension(SlowExtension) should ===(true) - sys.extension(SlowExtension) shouldBe a[SlowExtension] - } + sys.hasExtension(SlowExtension) should ===(true) + sys.extension(SlowExtension) shouldBe a[SlowExtension] + } "handle extensions that fail to initialize" in { def create(): Unit = { - ActorSystem[Any](Behavior.EmptyBehavior, "ExtensionsSpec04", ConfigFactory.parseString( - """ + ActorSystem[Any](Behavior.EmptyBehavior, + "ExtensionsSpec04", + ConfigFactory.parseString( + """ akka.actor.typed.extensions = ["akka.actor.typed.FailingToLoadExtension$"] """)) } @@ -127,47 +127,51 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with WordSpecLike { } "support multiple instances of the same type of extension (with different ids)" in - withEmptyActorSystem("ExtensionsSpec06") { sys => - val id1 = new MultiExtensionId(1) - val id2 = new MultiExtensionId(2) + withEmptyActorSystem("ExtensionsSpec06") { sys => + val id1 = new MultiExtensionId(1) + val id2 = new MultiExtensionId(2) - sys.registerExtension(id1).n should ===(1) - sys.registerExtension(id2).n should ===(2) - sys.registerExtension(id1).n should ===(1) - } + sys.registerExtension(id1).n should ===(1) + sys.registerExtension(id2).n should ===(2) + sys.registerExtension(id1).n should ===(1) + } "allow for auto-loading of library-extensions" in - withEmptyActorSystem("ExtensionsSpec06") { sys => - val listedExtensions = sys.settings.config.getStringList("akka.actor.typed.library-extensions") - listedExtensions.size should be > 0 - // could be initialized by other tests, so at least once - InstanceCountingExtension.createCount.get() should be > 0 - } + withEmptyActorSystem("ExtensionsSpec06") { sys => + val listedExtensions = sys.settings.config.getStringList("akka.actor.typed.library-extensions") + listedExtensions.size should be > 0 + // could be initialized by other tests, so at least 
once + InstanceCountingExtension.createCount.get() should be > 0 + } "fail the system if a library-extension cannot be loaded" in - intercept[RuntimeException] { - withEmptyActorSystem( - "ExtensionsSpec07", - Some(ConfigFactory.parseString("""akka.actor.typed.library-extensions += "akka.actor.typed.FailingToLoadExtension$"""")) - ) { _ => () } + intercept[RuntimeException] { + withEmptyActorSystem( + "ExtensionsSpec07", + Some( + ConfigFactory.parseString( + """akka.actor.typed.library-extensions += "akka.actor.typed.FailingToLoadExtension$""""))) { _ => + () } + } "fail the system if a library-extension is missing" in - intercept[RuntimeException] { - withEmptyActorSystem( - "ExtensionsSpec08", - Some(ConfigFactory.parseString("""akka.actor.typed.library-extensions += "akka.actor.typed.MissingExtension"""")) - ) { _ => () } + intercept[RuntimeException] { + withEmptyActorSystem("ExtensionsSpec08", + Some(ConfigFactory.parseString( + """akka.actor.typed.library-extensions += "akka.actor.typed.MissingExtension""""))) { _ => + () } + } "load an extension implemented in Java" in - withEmptyActorSystem("ExtensionsSpec09") { sys => - // no way to make apply work cleanly with extensions implemented in Java - val instance1 = ExtensionsTest.MyExtension.get(sys) - val instance2 = ExtensionsTest.MyExtension.get(sys) + withEmptyActorSystem("ExtensionsSpec09") { sys => + // no way to make apply work cleanly with extensions implemented in Java + val instance1 = ExtensionsTest.MyExtension.get(sys) + val instance2 = ExtensionsTest.MyExtension.get(sys) - instance1 should be theSameInstanceAs instance2 - } + (instance1 should be).theSameInstanceAs(instance2) + } "load registered typed extensions eagerly even for untyped system" in { import akka.actor.typed.scaladsl.adapter._ @@ -194,7 +198,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with WordSpecLike { val ext1 = DummyExtension1(untypedSystem.toTyped) val ext2 = DummyExtension1(untypedSystem.toTyped) - ext1 should 
be theSameInstanceAs ext2 + (ext1 should be).theSameInstanceAs(ext2) } finally { untypedSystem.terminate().futureValue @@ -202,24 +206,24 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with WordSpecLike { } "override extensions via ActorSystemSetup" in - withEmptyActorSystem("ExtensionsSpec10", Some(ConfigFactory.parseString( - """ + withEmptyActorSystem("ExtensionsSpec10", + Some(ConfigFactory.parseString( + """ akka.actor.typed.extensions = ["akka.actor.typed.DummyExtension1$", "akka.actor.typed.SlowExtension$"] """)), - Some(ActorSystemSetup(new DummyExtension1Setup(sys => new DummyExtension1ViaSetup))) - ) { sys => - sys.hasExtension(DummyExtension1) should ===(true) - sys.extension(DummyExtension1) shouldBe a[DummyExtension1ViaSetup] - DummyExtension1(sys) shouldBe a[DummyExtension1ViaSetup] - DummyExtension1(sys) shouldBe theSameInstanceAs(DummyExtension1(sys)) + Some(ActorSystemSetup(new DummyExtension1Setup(sys => new DummyExtension1ViaSetup)))) { sys => + sys.hasExtension(DummyExtension1) should ===(true) + sys.extension(DummyExtension1) shouldBe a[DummyExtension1ViaSetup] + DummyExtension1(sys) shouldBe a[DummyExtension1ViaSetup] + DummyExtension1(sys) shouldBe theSameInstanceAs(DummyExtension1(sys)) - sys.hasExtension(SlowExtension) should ===(true) - sys.extension(SlowExtension) shouldBe a[SlowExtension] - } + sys.hasExtension(SlowExtension) should ===(true) + sys.extension(SlowExtension) shouldBe a[SlowExtension] + } } def withEmptyActorSystem[T](name: String, config: Option[Config] = None, setup: Option[ActorSystemSetup] = None)( - f: ActorSystem[_] => T): T = { + f: ActorSystem[_] => T): T = { val bootstrap = config match { case Some(c) => BootstrapSetup(c) @@ -230,6 +234,7 @@ class ExtensionsSpec extends ScalaTestWithActorTestKit with WordSpecLike { case Some(s) => ActorSystem[Any](Behavior.EmptyBehavior, name, s.and(bootstrap)) } - try f(sys) finally sys.terminate().futureValue + try f(sys) + finally sys.terminate().futureValue } } diff 
--git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala index cdcaf1b028..5674156b8f 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/InterceptSpec.scala @@ -20,8 +20,7 @@ object InterceptSpec { case object MyPoisonPill } -class InterceptSpec extends ScalaTestWithActorTestKit( - """ +class InterceptSpec extends ScalaTestWithActorTestKit(""" akka.loggers = [akka.testkit.TestEventListener] """) with WordSpecLike { import BehaviorInterceptor._ @@ -32,14 +31,18 @@ class InterceptSpec extends ScalaTestWithActorTestKit( implicit val untypedSystem = system.toUntyped private def snitchingInterceptor(probe: ActorRef[String]) = new BehaviorInterceptor[String, String] { - override def aroundReceive(context: TypedActorContext[String], message: String, target: ReceiveTarget[String]): Behavior[String] = { + override def aroundReceive(context: TypedActorContext[String], + message: String, + target: ReceiveTarget[String]): Behavior[String] = { probe ! ("before " + message) val b = target(context, message) probe ! ("after " + message) b } - override def aroundSignal(context: TypedActorContext[String], signal: Signal, target: SignalTarget[String]): Behavior[String] = { + override def aroundSignal(context: TypedActorContext[String], + signal: Signal, + target: SignalTarget[String]): Behavior[String] = { target(context, signal) } @@ -52,13 +55,10 @@ class InterceptSpec extends ScalaTestWithActorTestKit( val probe = TestProbe[String]() val interceptor = snitchingInterceptor(probe.ref) - val ref: ActorRef[String] = spawn(Behaviors.intercept( - interceptor)( - Behaviors.receiveMessage[String] { m => - probe.ref ! s"actual behavior $m" - Behaviors.same - } - )) + val ref: ActorRef[String] = spawn(Behaviors.intercept(interceptor)(Behaviors.receiveMessage[String] { m => + probe.ref ! 
s"actual behavior $m" + Behaviors.same + })) ref ! "message" @@ -73,17 +73,10 @@ class InterceptSpec extends ScalaTestWithActorTestKit( val interceptor = snitchingInterceptor(probe.ref) def intercept(beh: Behavior[String]): Behavior[String] = - Behaviors.intercept( - interceptor)(beh) + Behaviors.intercept(interceptor)(beh) val beh: Behavior[String] = - intercept( - intercept( - Behaviors.receiveMessage(m => - Behaviors.same - ) - ) - ) + intercept(intercept(Behaviors.receiveMessage(m => Behaviors.same))) val ref = spawn(beh) @@ -98,12 +91,7 @@ class InterceptSpec extends ScalaTestWithActorTestKit( val interceptor = snitchingInterceptor(probe.ref) def next(count: Int): Behavior[String] = - Behaviors.intercept( - interceptor)( - Behaviors.receiveMessage(m => - next(count + 1) - ) - ) + Behaviors.intercept(interceptor)(Behaviors.receiveMessage(m => next(count + 1))) val ref = spawn(next(1)) @@ -130,13 +118,7 @@ class InterceptSpec extends ScalaTestWithActorTestKit( Behaviors.intercept(snitchingInterceptor(probe.ref))(beh) val beh: Behavior[String] = - intercept( - intercept( - Behaviors.receiveMessage(m => - Behaviors.same - ) - ) - ) + intercept(intercept(Behaviors.receiveMessage(m => Behaviors.same))) val ref = spawn(beh) @@ -154,11 +136,7 @@ class InterceptSpec extends ScalaTestWithActorTestKit( def next(count: Int): Behavior[String] = Behaviors.intercept( // a new instance every "recursion" - snitchingInterceptor(probe.ref))( - Behaviors.receiveMessage(m => - next(count + 1) - ) - ) + snitchingInterceptor(probe.ref))(Behaviors.receiveMessage(m => next(count + 1))) val ref = spawn(next(1)) @@ -176,14 +154,19 @@ class InterceptSpec extends ScalaTestWithActorTestKit( "allow an interceptor to replace started behavior" in { val interceptor = new BehaviorInterceptor[String, String] { - override def aroundStart(context: TypedActorContext[String], target: PreStartTarget[String]): Behavior[String] = { + override def aroundStart(context: TypedActorContext[String], + 
target: PreStartTarget[String]): Behavior[String] = { Behaviors.stopped } - def aroundReceive(context: TypedActorContext[String], message: String, target: ReceiveTarget[String]): Behavior[String] = + def aroundReceive(context: TypedActorContext[String], + message: String, + target: ReceiveTarget[String]): Behavior[String] = target(context, message) - def aroundSignal(context: TypedActorContext[String], signal: Signal, target: SignalTarget[String]): Behavior[String] = + def aroundSignal(context: TypedActorContext[String], + signal: Signal, + target: SignalTarget[String]): Behavior[String] = target(context, signal) } @@ -202,16 +185,14 @@ class InterceptSpec extends ScalaTestWithActorTestKit( val probe = TestProbe[String]() val interceptor = snitchingInterceptor(probe.ref) - val ref: ActorRef[String] = spawn(Behaviors.intercept(interceptor)( - Behaviors.setup { _ => - var count = 0 - Behaviors.receiveMessage[String] { m => - count += 1 - probe.ref ! s"actual behavior $m-$count" - Behaviors.same - } + val ref: ActorRef[String] = spawn(Behaviors.intercept(interceptor)(Behaviors.setup { _ => + var count = 0 + Behaviors.receiveMessage[String] { m => + count += 1 + probe.ref ! s"actual behavior $m-$count" + Behaviors.same } - )) + })) ref ! "a" probe.expectMessage("before a") @@ -229,16 +210,14 @@ class InterceptSpec extends ScalaTestWithActorTestKit( val interceptor = snitchingInterceptor(probe.ref) def next(count1: Int): Behavior[String] = { - Behaviors.intercept(interceptor)( - Behaviors.setup { _ => - var count2 = 0 - Behaviors.receiveMessage[String] { m => - count2 += 1 - probe.ref ! s"actual behavior $m-$count1-$count2" - next(count1 + 1) - } + Behaviors.intercept(interceptor)(Behaviors.setup { _ => + var count2 = 0 + Behaviors.receiveMessage[String] { m => + count2 += 1 + probe.ref ! 
s"actual behavior $m-$count1-$count2" + next(count1 + 1) } - ) + }) } val ref: ActorRef[String] = spawn(next(1)) @@ -264,8 +243,9 @@ class InterceptSpec extends ScalaTestWithActorTestKit( val interceptor = snitchingInterceptor(probe.ref) EventFilter[ActorInitializationException](occurrences = 1).intercept { - val ref = spawn(Behaviors.intercept(interceptor)( - Behaviors.setup[String] { _ => Behaviors.same[String] })) + val ref = spawn(Behaviors.intercept(interceptor)(Behaviors.setup[String] { _ => + Behaviors.same[String] + })) probe.expectTerminated(ref, probe.remainingOrDefault) } @@ -280,14 +260,18 @@ class InterceptSpec extends ScalaTestWithActorTestKit( } val poisonInterceptor = new BehaviorInterceptor[Any, Msg] { - override def aroundReceive(context: TypedActorContext[Any], message: Any, target: ReceiveTarget[Msg]): Behavior[Msg] = + override def aroundReceive(context: TypedActorContext[Any], + message: Any, + target: ReceiveTarget[Msg]): Behavior[Msg] = message match { case MyPoisonPill => Behaviors.stopped case m: Msg => target(context, m) case _ => Behaviors.unhandled } - override def aroundSignal(context: TypedActorContext[Any], signal: Signal, target: SignalTarget[Msg]): Behavior[Msg] = + override def aroundSignal(context: TypedActorContext[Any], + signal: Signal, + target: SignalTarget[Msg]): Behavior[Msg] = target.apply(context, signal) } @@ -318,12 +302,16 @@ class InterceptSpec extends ScalaTestWithActorTestKit( override def interceptMessageType = classOf[B] - override def aroundReceive(ctx: TypedActorContext[Message], msg: Message, target: ReceiveTarget[Message]): Behavior[Message] = { + override def aroundReceive(ctx: TypedActorContext[Message], + msg: Message, + target: ReceiveTarget[Message]): Behavior[Message] = { interceptProbe.ref ! 
msg target(ctx, msg) } - override def aroundSignal(ctx: TypedActorContext[Message], signal: Signal, target: SignalTarget[Message]): Behavior[Message] = + override def aroundSignal(ctx: TypedActorContext[Message], + signal: Signal, + target: SignalTarget[Message]): Behavior[Message] = target(ctx, signal) } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/LogMessagesSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/LogMessagesSpec.scala index f4f23e4cbb..c331a18c96 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/LogMessagesSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/LogMessagesSpec.scala @@ -78,15 +78,19 @@ class LogMessagesSpec extends ScalaTestWithActorTestKit(""" val behavior = Behaviors.withMdc[String](Map("mdc" -> true))(Behaviors.logMessages(Behaviors.empty)) val ref = spawn(behavior) - EventFilter.custom({ - case logEvent if logEvent.level == Logging.DebugLevel => - logEvent.message should ===("received message Hello") - logEvent.mdc should ===(Map("mdc" -> true)) - true - case other => system.log.error(s"Unexpected log event: {}", other); false - }, occurrences = 1).intercept { - ref ! "Hello" - } + EventFilter + .custom( + { + case logEvent if logEvent.level == Logging.DebugLevel => + logEvent.message should ===("received message Hello") + logEvent.mdc should ===(Map("mdc" -> true)) + true + case other => system.log.error(s"Unexpected log event: {}", other); false + }, + occurrences = 1) + .intercept { + ref ! 
"Hello" + } } } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MonitorSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MonitorSpec.scala index 055cf324f7..369ac26fa7 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MonitorSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/MonitorSpec.scala @@ -16,9 +16,7 @@ class MonitorSpec extends ScalaTestWithActorTestKit with WordSpecLike { "monitor messages" in { val probe = TestProbe[String]() - val beh: Behavior[String] = Behaviors.monitor(probe.ref, Behaviors.receiveMessage(message => - Behaviors.same - )) + val beh: Behavior[String] = Behaviors.monitor(probe.ref, Behaviors.receiveMessage(message => Behaviors.same)) val ref: ActorRef[String] = spawn(beh) ref ! "message" @@ -33,13 +31,7 @@ class MonitorSpec extends ScalaTestWithActorTestKit with WordSpecLike { Behaviors.monitor(probe.ref, beh) val beh: Behavior[String] = - monitor( - monitor( - Behaviors.receiveMessage(message => - Behaviors.same - ) - ) - ) + monitor(monitor(Behaviors.receiveMessage(message => Behaviors.same))) val ref: ActorRef[String] = spawn(beh) ref ! 
"message 1" diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/OrElseSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/OrElseSpec.scala index 74389ec3d5..a92b6fcfb0 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/OrElseSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/OrElseSpec.scala @@ -84,11 +84,13 @@ class OrElseSpec extends ScalaTestWithActorTestKit with WordSpecLike { "Behavior.orElse" must { "work for deferred behavior on the left" in { - val orElseDeferred = Behaviors.setup[Ping] { _ => - Behaviors.receiveMessage { _ => - Behaviors.unhandled + val orElseDeferred = Behaviors + .setup[Ping] { _ => + Behaviors.receiveMessage { _ => + Behaviors.unhandled + } } - }.orElse(ping(Map.empty)) + .orElse(ping(Map.empty)) val p = spawn(orElseDeferred) val probe = TestProbe[Pong] @@ -145,10 +147,10 @@ class OrElseSpec extends ScalaTestWithActorTestKit with WordSpecLike { val y = spawn(dealer(Set.empty)) - (0 to 10000) foreach { i => + (0 to 10000).foreach { i => y ! Add(i) } - (0 to 9999) foreach { i => + (0 to 9999).foreach { i => y ! 
Remove(i) } val probe = TestProbe[Set[Any]] @@ -158,4 +160,3 @@ class OrElseSpec extends ScalaTestWithActorTestKit with WordSpecLike { } } - diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/PropsSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/PropsSpec.scala index 9a6a5ef014..357931708c 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/PropsSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/PropsSpec.scala @@ -18,7 +18,8 @@ class PropsSpec extends WordSpec with Matchers { } "yield all configs of some type" in { - dispatcherFirst.allOf[DispatcherSelector] should ===(DispatcherSelector.default() :: DispatcherSelector.fromConfig("pool") :: Nil) + dispatcherFirst.allOf[DispatcherSelector] should ===( + DispatcherSelector.default() :: DispatcherSelector.fromConfig("pool") :: Nil) } } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SpawnProtocolSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SpawnProtocolSpec.scala index 6968ede558..40b664baac 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SpawnProtocolSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SpawnProtocolSpec.scala @@ -119,4 +119,3 @@ class StubbedSpawnProtocolSpec extends WordSpec with Matchers { } } - diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala index 3f975a12d2..db6a58b2f1 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/SupervisionSpec.scala @@ -42,7 +42,9 @@ object SupervisionSpec { class Exc2 extends Exc1("exc-2") class Exc3(message: String = "exc-3") extends RuntimeException(message) with NoStackTrace - def targetBehavior(monitor: ActorRef[Event], state: State = State(0, Map.empty), slowStop: Option[CountDownLatch] = 
None): Behavior[Command] = + def targetBehavior(monitor: ActorRef[Event], + state: State = State(0, Map.empty), + slowStop: Option[CountDownLatch] = None): Behavior[Command] = receive[Command] { (context, cmd) => cmd match { case Ping(n) => @@ -63,7 +65,7 @@ object SupervisionSpec { case Throw(e) => throw e } - } receiveSignal { + }.receiveSignal { case (_, sig) => if (sig == PostStop) slowStop.foreach(latch => latch.await(10, TimeUnit.SECONDS)) @@ -145,11 +147,8 @@ class StubbedSupervisionSpec extends WordSpec with Matchers { "support nesting to handle different exceptions" in { val inbox = TestInbox[Event]("evt") val behv = - supervise( - supervise( - targetBehavior(inbox.ref) - ).onFailure[Exc2](SupervisorStrategy.resume) - ).onFailure[Exc3](SupervisorStrategy.restart) + supervise(supervise(targetBehavior(inbox.ref)).onFailure[Exc2](SupervisorStrategy.resume)) + .onFailure[Exc3](SupervisorStrategy.restart) val testkit = BehaviorTestKit(behv) testkit.run(IncrementState) testkit.run(GetState) @@ -175,8 +174,7 @@ class StubbedSupervisionSpec extends WordSpec with Matchers { "not catch fatal error" in { val inbox = TestInbox[Event]() - val behv = Behaviors.supervise(targetBehavior(inbox.ref)) - .onFailure[Throwable](SupervisorStrategy.restart) + val behv = Behaviors.supervise(targetBehavior(inbox.ref)).onFailure[Throwable](SupervisorStrategy.restart) val testkit = BehaviorTestKit(behv) intercept[StackOverflowError] { testkit.run(Throw(new StackOverflowError)) @@ -224,8 +222,7 @@ class StubbedSupervisionSpec extends WordSpec with Matchers { "stop at first exception when restart retries limit is 0" in { val inbox = TestInbox[Event]("evt") val strategy = SupervisorStrategy.restart.withLimit(maxNrOfRetries = 0, withinTimeRange = 1.minute) - val behv = supervise(targetBehavior(inbox.ref)) - .onFailure[Exc1](strategy) + val behv = supervise(targetBehavior(inbox.ref)).onFailure[Exc1](strategy) val testkit = BehaviorTestKit(behv) intercept[Exc1] { testkit.run(Throw(new 
Exc1)) @@ -246,8 +243,7 @@ class StubbedSupervisionSpec extends WordSpec with Matchers { } } -class SupervisionSpec extends ScalaTestWithActorTestKit( - """ +class SupervisionSpec extends ScalaTestWithActorTestKit(""" akka.loggers = [akka.testkit.TestEventListener] akka.log-dead-letters = off """) with WordSpecLike { @@ -280,31 +276,32 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( class FailingDeferredTestSetup(failCount: Int, strategy: SupervisorStrategy) { val probe = TestProbe[AnyRef]("evt") val failCounter = new AtomicInteger(0) - def behv = supervise(setup[Command] { _ => - val count = failCounter.getAndIncrement() - if (count < failCount) { - probe.ref ! StartFailed - throw TestException(s"construction ${count} failed") - } else { - probe.ref ! Started - Behaviors.empty - } - }).onFailure[TestException](strategy) + def behv = + supervise(setup[Command] { _ => + val count = failCounter.getAndIncrement() + if (count < failCount) { + probe.ref ! StartFailed + throw TestException(s"construction ${count} failed") + } else { + probe.ref ! Started + Behaviors.empty + } + }).onFailure[TestException](strategy) } class FailingUnhandledTestSetup(strategy: SupervisorStrategy) { val probe = TestProbe[AnyRef]("evt") - def behv = supervise(setup[Command] { _ => - probe.ref ! StartFailed - throw new TestException("construction failed") - }).onFailure[IllegalArgumentException](strategy) + def behv = + supervise(setup[Command] { _ => + probe.ref ! StartFailed + throw new TestException("construction failed") + }).onFailure[IllegalArgumentException](strategy) } "A supervised actor" must { "receive message" in { val probe = TestProbe[Event]("evt") - val behv = Behaviors.supervise(targetBehavior(probe.ref)) - .onFailure[Throwable](SupervisorStrategy.restart) + val behv = Behaviors.supervise(targetBehavior(probe.ref)).onFailure[Throwable](SupervisorStrategy.restart) val ref = spawn(behv) ref ! 
Ping(1) probe.expectMessage(Pong(1)) @@ -312,8 +309,7 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "stop when strategy is stop" in { val probe = TestProbe[Event]("evt") - val behv = Behaviors.supervise(targetBehavior(probe.ref)) - .onFailure[Throwable](SupervisorStrategy.stop) + val behv = Behaviors.supervise(targetBehavior(probe.ref)).onFailure[Throwable](SupervisorStrategy.stop) val ref = spawn(behv) EventFilter[Exc3](occurrences = 1).intercept { ref ! Throw(new Exc3) @@ -336,10 +332,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "support nesting exceptions with different strategies" in { val probe = TestProbe[Event]("evt") val behv = - supervise( - supervise(targetBehavior(probe.ref)) - .onFailure[RuntimeException](SupervisorStrategy.stop) - ).onFailure[Exception](SupervisorStrategy.restart) + supervise(supervise(targetBehavior(probe.ref)).onFailure[RuntimeException](SupervisorStrategy.stop)) + .onFailure[Exception](SupervisorStrategy.restart) val ref = spawn(behv) @@ -359,8 +353,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( val behv = supervise( supervise(targetBehavior(probe.ref)) - .onFailure[IllegalArgumentException](SupervisorStrategy.restartWithBackoff(10.millis, 10.millis, 0.0)) - ).onFailure[IOException](SupervisorStrategy.restart) + .onFailure[IllegalArgumentException](SupervisorStrategy.restartWithBackoff(10.millis, 10.millis, 0.0))) + .onFailure[IOException](SupervisorStrategy.restart) val ref = spawn(behv) @@ -385,10 +379,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "support nesting exceptions with inner restart and outer backoff strategies" in { val probe = TestProbe[Event]("evt") val behv = - supervise( - supervise(targetBehavior(probe.ref)) - .onFailure[IllegalArgumentException](SupervisorStrategy.restart) - ).onFailure[IOException](SupervisorStrategy.restartWithBackoff(10.millis, 10.millis, 0.0)) + 
supervise(supervise(targetBehavior(probe.ref)).onFailure[IllegalArgumentException](SupervisorStrategy.restart)) + .onFailure[IOException](SupervisorStrategy.restartWithBackoff(10.millis, 10.millis, 0.0)) val ref = spawn(behv) @@ -422,8 +414,7 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "stop when unhandled exception" in { val probe = TestProbe[Event]("evt") - val behv = Behaviors.supervise(targetBehavior(probe.ref)) - .onFailure[Exc1](SupervisorStrategy.restart) + val behv = Behaviors.supervise(targetBehavior(probe.ref)).onFailure[Exc1](SupervisorStrategy.restart) val ref = spawn(behv) EventFilter[Exc3](occurrences = 1).intercept { ref ! Throw(new Exc3) @@ -433,8 +424,7 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "restart when handled exception" in { val probe = TestProbe[Event]("evt") - val behv = Behaviors.supervise(targetBehavior(probe.ref)) - .onFailure[Exc1](SupervisorStrategy.restart) + val behv = Behaviors.supervise(targetBehavior(probe.ref)).onFailure[Exc1](SupervisorStrategy.restart) val ref = spawn(behv) ref ! IncrementState ref ! GetState @@ -451,7 +441,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "stop when restart limit is hit" in { val probe = TestProbe[Event]("evt") val resetTimeout = 500.millis - val behv = Behaviors.supervise(targetBehavior(probe.ref)) + val behv = Behaviors + .supervise(targetBehavior(probe.ref)) .onFailure[Exc1](SupervisorStrategy.restart.withLimit(2, resetTimeout)) val ref = spawn(behv) ref ! IncrementState @@ -475,7 +466,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "reset fixed limit after timeout" in { val probe = TestProbe[Event]("evt") val resetTimeout = 500.millis - val behv = Behaviors.supervise(targetBehavior(probe.ref)) + val behv = Behaviors + .supervise(targetBehavior(probe.ref)) .onFailure[Exc1](SupervisorStrategy.restart.withLimit(2, resetTimeout)) val ref = spawn(behv) ref ! 
IncrementState @@ -507,8 +499,7 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( def testStopChildren(strategy: SupervisorStrategy): Unit = { val parentProbe = TestProbe[Event]("evt") - val behv = Behaviors.supervise(targetBehavior(parentProbe.ref)) - .onFailure[Exc1](strategy) + val behv = Behaviors.supervise(targetBehavior(parentProbe.ref)).onFailure[Exc1](strategy) val ref = spawn(behv) val anotherProbe = TestProbe[String]("another") @@ -546,14 +537,13 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( } "optionally NOT stop children when backoff" in { - testNotStopChildren(strategy = SupervisorStrategy.restartWithBackoff(10.millis, 10.millis, 0) - .withStopChildren(enabled = false)) + testNotStopChildren( + strategy = SupervisorStrategy.restartWithBackoff(10.millis, 10.millis, 0).withStopChildren(enabled = false)) } def testNotStopChildren(strategy: SupervisorStrategy): Unit = { val parentProbe = TestProbe[Event]("evt") - val behv = Behaviors.supervise(targetBehavior(parentProbe.ref)) - .onFailure[Exc1](strategy) + val behv = Behaviors.supervise(targetBehavior(parentProbe.ref)).onFailure[Exc1](strategy) val ref = spawn(behv) val childProbe = TestProbe[Event]("childEvt") @@ -576,14 +566,12 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( } "stop children when backoff second time during unstash" in { - testStopChildrenWhenExceptionFromUnstash( - SupervisorStrategy.restartWithBackoff(10.millis, 10.millis, 0)) + testStopChildrenWhenExceptionFromUnstash(SupervisorStrategy.restartWithBackoff(10.millis, 10.millis, 0)) } def testStopChildrenWhenExceptionFromUnstash(strategy: SupervisorStrategy): Unit = { val parentProbe = TestProbe[Event]("evt") - val behv = Behaviors.supervise(targetBehavior(parentProbe.ref)) - .onFailure[Exc1](strategy) + val behv = Behaviors.supervise(targetBehavior(parentProbe.ref)).onFailure[Exc1](strategy) val ref = spawn(behv) val childProbe = TestProbe[Event]("childEvt") @@ -633,19 +621,20 @@ class SupervisionSpec 
extends ScalaTestWithActorTestKit( val slowStop1 = new CountDownLatch(1) val slowStop2 = new CountDownLatch(1) val throwFromSetup = new AtomicBoolean(true) - val behv = Behaviors.supervise { - Behaviors.setup[Command] { ctx => - ctx.spawn(targetBehavior(child1Probe.ref, slowStop = Some(slowStop1)), "child1") - if (throwFromSetup.get()) { - // note that this second child waiting on slowStop2 will prevent a restart loop that could exhaust the - // limit before throwFromSetup is set back to false - ctx.spawn(targetBehavior(child2Probe.ref, slowStop = Some(slowStop2)), "child2") - throw TestException("exc from setup") - } + val behv = Behaviors + .supervise { + Behaviors.setup[Command] { ctx => + ctx.spawn(targetBehavior(child1Probe.ref, slowStop = Some(slowStop1)), "child1") + if (throwFromSetup.get()) { + // note that this second child waiting on slowStop2 will prevent a restart loop that could exhaust the + // limit before throwFromSetup is set back to false + ctx.spawn(targetBehavior(child2Probe.ref, slowStop = Some(slowStop2)), "child2") + throw TestException("exc from setup") + } - targetBehavior(parentProbe.ref) + targetBehavior(parentProbe.ref) + } } - } .onFailure[RuntimeException](strategy) EventFilter[TestException](occurrences = 1).intercept { @@ -676,19 +665,20 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( val slowStop1 = new CountDownLatch(1) val slowStop2 = new CountDownLatch(1) val throwFromSetup = new AtomicBoolean(false) - val behv = Behaviors.supervise { - Behaviors.setup[Command] { ctx => - ctx.spawn(targetBehavior(child1Probe.ref, slowStop = Some(slowStop1)), "child1") - if (throwFromSetup.get()) { - // note that this second child waiting on slowStop2 will prevent a restart loop that could exhaust the - // limit before throwFromSetup is set back to false - ctx.spawn(targetBehavior(child2Probe.ref, slowStop = Some(slowStop2)), "child2") - throw TestException("exc from setup") - } + val behv = Behaviors + .supervise { + 
Behaviors.setup[Command] { ctx => + ctx.spawn(targetBehavior(child1Probe.ref, slowStop = Some(slowStop1)), "child1") + if (throwFromSetup.get()) { + // note that this second child waiting on slowStop2 will prevent a restart loop that could exhaust the + // limit before throwFromSetup is set back to false + ctx.spawn(targetBehavior(child2Probe.ref, slowStop = Some(slowStop2)), "child2") + throw TestException("exc from setup") + } - targetBehavior(parentProbe.ref) + targetBehavior(parentProbe.ref) + } } - } .onFailure[RuntimeException](strategy) val ref = spawn(behv) @@ -733,10 +723,9 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "support nesting to handle different exceptions" in { val probe = TestProbe[Event]("evt") - val behv = Behaviors.supervise( - Behaviors.supervise(targetBehavior(probe.ref)) - .onFailure[Exc2](SupervisorStrategy.resume) - ).onFailure[Exc3](SupervisorStrategy.restart) + val behv = Behaviors + .supervise(Behaviors.supervise(targetBehavior(probe.ref)).onFailure[Exc2](SupervisorStrategy.resume)) + .onFailure[Exc3](SupervisorStrategy.restart) val ref = spawn(behv) ref ! IncrementState ref ! GetState @@ -769,12 +758,13 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( val probe = TestProbe[Event]("evt") val startedProbe = TestProbe[Event]("started") val minBackoff = 1.seconds - val strategy = SupervisorStrategy - .restartWithBackoff(minBackoff, minBackoff, 0.0).withStashCapacity(2) - val behv = Behaviors.supervise(Behaviors.setup[Command] { _ => - startedProbe.ref ! Started - targetBehavior(probe.ref) - }).onFailure[Exception](strategy) + val strategy = SupervisorStrategy.restartWithBackoff(minBackoff, minBackoff, 0.0).withStashCapacity(2) + val behv = Behaviors + .supervise(Behaviors.setup[Command] { _ => + startedProbe.ref ! 
Started + targetBehavior(probe.ref) + }) + .onFailure[Exception](strategy) val droppedMessagesProbe = TestProbe[Dropped]() system.toUntyped.eventStream.subscribe(droppedMessagesProbe.ref.toUntyped, classOf[Dropped]) @@ -802,10 +792,12 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( .restartWithBackoff(minBackoff, 10.seconds, 0.0) .withResetBackoffAfter(10.seconds) .withStashCapacity(0) - val behv = Behaviors.supervise(Behaviors.setup[Command] { _ => - startedProbe.ref ! Started - targetBehavior(probe.ref) - }).onFailure[Exception](strategy) + val behv = Behaviors + .supervise(Behaviors.setup[Command] { _ => + startedProbe.ref ! Started + targetBehavior(probe.ref) + }) + .onFailure[Exception](strategy) val ref = spawn(behv) EventFilter[Exc1](occurrences = 1).intercept { @@ -847,15 +839,17 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( .withResetBackoffAfter(10.seconds) val alreadyStarted = new AtomicBoolean(false) - val behv = Behaviors.supervise(Behaviors.setup[Command] { _ => - if (alreadyStarted.get()) throw TestException("failure to restart") - alreadyStarted.set(true) - startedProbe.ref ! Started + val behv = Behaviors + .supervise(Behaviors.setup[Command] { _ => + if (alreadyStarted.get()) throw TestException("failure to restart") + alreadyStarted.set(true) + startedProbe.ref ! 
Started - Behaviors.receiveMessagePartial { - case Throw(boom) => throw boom - } - }).onFailure[Exception](strategy) + Behaviors.receiveMessagePartial { + case Throw(boom) => throw boom + } + }) + .onFailure[Exception](strategy) val ref = spawn(behv) EventFilter[Exc1](occurrences = 1).intercept { @@ -870,7 +864,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "reset exponential backoff count after reset timeout" in { val probe = TestProbe[Event]("evt") val minBackoff = 1.seconds - val strategy = SupervisorStrategy.restartWithBackoff(minBackoff, 10.seconds, 0.0) + val strategy = SupervisorStrategy + .restartWithBackoff(minBackoff, 10.seconds, 0.0) .withResetBackoffAfter(100.millis) .withStashCapacity(0) val behv = supervise(targetBehavior(probe.ref)).onFailure[Exc1](strategy) @@ -915,7 +910,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( } "fail instead of restart when deferred factory throws" in new FailingDeferredTestSetup( - failCount = 1, strategy = SupervisorStrategy.restart) { + failCount = 1, + strategy = SupervisorStrategy.restart) { EventFilter[ActorInitializationException](occurrences = 1).intercept { spawn(behv) @@ -932,8 +928,7 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "fail to resume when deferred factory throws" in new FailingDeferredTestSetup( failCount = 1, - strategy = SupervisorStrategy.resume - ) { + strategy = SupervisorStrategy.resume) { EventFilter[TestException](occurrences = 1).intercept { EventFilter[ActorInitializationException](occurrences = 1).intercept { spawn(behv) @@ -943,8 +938,7 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "restart with exponential backoff when deferred factory throws" in new FailingDeferredTestSetup( failCount = 1, - strategy = SupervisorStrategy.restartWithBackoff(minBackoff = 100.millis.dilated, maxBackoff = 1.second, 0) - ) { + strategy = SupervisorStrategy.restartWithBackoff(minBackoff = 100.millis.dilated, maxBackoff = 1.second, 0)) { 
EventFilter[TestException](occurrences = 1).intercept { spawn(behv) @@ -967,8 +961,7 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "restart.withLimit when deferred factory throws" in new FailingDeferredTestSetup( failCount = 1, - strategy = SupervisorStrategy.restart.withLimit(3, 1.second) - ) { + strategy = SupervisorStrategy.restart.withLimit(3, 1.second)) { EventFilter[TestException](occurrences = 1).intercept { spawn(behv) @@ -980,8 +973,7 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "fail after more than limit in restart.withLimit when deferred factory throws" in new FailingDeferredTestSetup( failCount = 20, - strategy = SupervisorStrategy.restart.withLimit(2, 1.second) - ) { + strategy = SupervisorStrategy.restart.withLimit(2, 1.second)) { EventFilter[ActorInitializationException](occurrences = 1).intercept { EventFilter[TestException](occurrences = 2).intercept { @@ -1024,8 +1016,7 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( supervise[String](setup { _ => probe.ref ! Started Behaviors.empty[String] - }).onFailure[RuntimeException](strategy) - )).onFailure[Exception](strategy) + }).onFailure[RuntimeException](strategy))).onFailure[Exception](strategy) spawn(beh) probe.expectMessage(Started) @@ -1037,23 +1028,19 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( case "boom" => throw TestException("boom indeed") case "switch" => supervise[String]( - supervise[String]( - supervise[String]( - supervise[String]( - supervise[String]( - Behaviors.receiveMessage { - case "boom" => throw TestException("boom indeed") - case "ping" => - probe.ref ! "pong" - Behaviors.same - case "give me stacktrace" => - probe.ref ! 
new RuntimeException().getStackTrace.toVector - Behaviors.stopped - }).onFailure[RuntimeException](SupervisorStrategy.resume) - ).onFailure[RuntimeException](SupervisorStrategy.restartWithBackoff(1.second, 10.seconds, 23D)) - ).onFailure[RuntimeException](SupervisorStrategy.restart.withLimit(23, 10.seconds)) - ).onFailure[IllegalArgumentException](SupervisorStrategy.restart) - ).onFailure[RuntimeException](SupervisorStrategy.restart) + supervise[String](supervise[String](supervise[String](supervise[String](Behaviors.receiveMessage { + case "boom" => throw TestException("boom indeed") + case "ping" => + probe.ref ! "pong" + Behaviors.same + case "give me stacktrace" => + probe.ref ! new RuntimeException().getStackTrace.toVector + Behaviors.stopped + }).onFailure[RuntimeException](SupervisorStrategy.resume)) + .onFailure[RuntimeException](SupervisorStrategy.restartWithBackoff(1.second, 10.seconds, 23d))) + .onFailure[RuntimeException](SupervisorStrategy.restart.withLimit(23, 10.seconds))) + .onFailure[IllegalArgumentException](SupervisorStrategy.restart)) + .onFailure[RuntimeException](SupervisorStrategy.restart) }).onFailure[RuntimeException](SupervisorStrategy.stop) val actor = spawn(behv) @@ -1079,34 +1066,32 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( // irrelevant for test case but needed to use intercept in the pyramid of doom below val whateverInterceptor = new BehaviorInterceptor[String, String] { // identity intercept - override def aroundReceive(context: TypedActorContext[String], message: String, target: ReceiveTarget[String]): Behavior[String] = + override def aroundReceive(context: TypedActorContext[String], + message: String, + target: ReceiveTarget[String]): Behavior[String] = target(context, message) - override def aroundSignal(context: TypedActorContext[String], signal: Signal, target: SignalTarget[String]): Behavior[String] = + override def aroundSignal(context: TypedActorContext[String], + signal: Signal, + target: 
SignalTarget[String]): Behavior[String] = target(context, signal) } val behv = supervise[String](Behaviors.receiveMessage { case "boom" => throw TestException("boom indeed") case "switch" => - supervise[String]( - setup(_ => - supervise[String]( - Behaviors.intercept(whateverInterceptor)( - supervise[String]( - Behaviors.receiveMessage { - case "boom" => throw TestException("boom indeed") - case "ping" => - probe.ref ! "pong" - Behaviors.same - case "give me stacktrace" => - probe.ref ! new RuntimeException().getStackTrace.toVector - Behaviors.stopped - }).onFailure[RuntimeException](SupervisorStrategy.resume) - ) - ).onFailure[IllegalArgumentException](SupervisorStrategy.restart.withLimit(23, 10.seconds)) - ) - ).onFailure[RuntimeException](SupervisorStrategy.restart) + supervise[String](setup(_ => + supervise[String](Behaviors.intercept(whateverInterceptor)(supervise[String](Behaviors.receiveMessage { + case "boom" => throw TestException("boom indeed") + case "ping" => + probe.ref ! "pong" + Behaviors.same + case "give me stacktrace" => + probe.ref ! new RuntimeException().getStackTrace.toVector + Behaviors.stopped + }).onFailure[RuntimeException](SupervisorStrategy.resume))) + .onFailure[IllegalArgumentException](SupervisorStrategy.restart.withLimit(23, 10.seconds)))) + .onFailure[RuntimeException](SupervisorStrategy.restart) }).onFailure[RuntimeException](SupervisorStrategy.stop) val actor = spawn(behv) @@ -1127,35 +1112,32 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "replace backoff supervision duplicate when behavior is created in a setup" in { val probe = TestProbe[AnyRef]("probeMcProbeFace") val restartCount = new AtomicInteger(0) - val behv = supervise[String]( - Behaviors.setup { _ => - - // a bit superficial, but just to be complete - if (restartCount.incrementAndGet() == 1) { - probe.ref ! 
"started 1" - Behaviors.receiveMessage { + val behv = supervise[String](Behaviors.setup { _ => + // a bit superficial, but just to be complete + if (restartCount.incrementAndGet() == 1) { + probe.ref ! "started 1" + Behaviors.receiveMessage { + case "boom" => + probe.ref ! "crashing 1" + throw TestException("boom indeed") + case "ping" => + probe.ref ! "pong 1" + Behaviors.same + } + } else { + probe.ref ! "started 2" + Behaviors + .supervise[String](Behaviors.receiveMessage { case "boom" => - probe.ref ! "crashing 1" + probe.ref ! "crashing 2" throw TestException("boom indeed") case "ping" => - probe.ref ! "pong 1" + probe.ref ! "pong 2" Behaviors.same - } - } else { - probe.ref ! "started 2" - Behaviors.supervise[String]( - Behaviors.receiveMessage { - case "boom" => - probe.ref ! "crashing 2" - throw TestException("boom indeed") - case "ping" => - probe.ref ! "pong 2" - Behaviors.same - } - ).onFailure[TestException](SupervisorStrategy.resume) - } + }) + .onFailure[TestException](SupervisorStrategy.resume) } - ).onFailure(SupervisorStrategy.restartWithBackoff(100.millis, 1.second, 0)) + }).onFailure(SupervisorStrategy.restartWithBackoff(100.millis, 1.second, 0)) val ref = spawn(behv) probe.expectMessage("started 1") @@ -1181,25 +1163,28 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "be able to recover from a DeathPactException" in { val probe = TestProbe[AnyRef]() - val actor = spawn(Behaviors.supervise(Behaviors.setup[String] { context => - val child = context.spawnAnonymous(Behaviors.receive[String] { (context, message) => - message match { - case "boom" => - probe.ref ! context.self - Behaviors.stopped - } - }) - context.watch(child) + val actor = spawn( + Behaviors + .supervise(Behaviors.setup[String] { context => + val child = context.spawnAnonymous(Behaviors.receive[String] { (context, message) => + message match { + case "boom" => + probe.ref ! 
context.self + Behaviors.stopped + } + }) + context.watch(child) - Behaviors.receiveMessage { - case "boom" => - child ! "boom" - Behaviors.same - case "ping" => - probe.ref ! "pong" - Behaviors.same - } - }).onFailure[DeathPactException](SupervisorStrategy.restart)) + Behaviors.receiveMessage { + case "boom" => + child ! "boom" + Behaviors.same + case "ping" => + probe.ref ! "pong" + Behaviors.same + } + }) + .onFailure[DeathPactException](SupervisorStrategy.restart)) EventFilter[DeathPactException](occurrences = 1).intercept { actor ! "boom" @@ -1212,33 +1197,22 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( } - val allStrategies = Seq( - SupervisorStrategy.stop, - SupervisorStrategy.restart, - SupervisorStrategy.resume, - SupervisorStrategy.restartWithBackoff(1.millis, 100.millis, 2D), - SupervisorStrategy.restart.withLimit(1, 100.millis) - ) + val allStrategies = Seq(SupervisorStrategy.stop, + SupervisorStrategy.restart, + SupervisorStrategy.resume, + SupervisorStrategy.restartWithBackoff(1.millis, 100.millis, 2d), + SupervisorStrategy.restart.withLimit(1, 100.millis)) allStrategies.foreach { strategy => - s"Supervision with the strategy $strategy" should { "that is initially stopped should be stopped" in { - val actor = spawn( - Behaviors.supervise(Behaviors.stopped[Command]) - .onFailure(strategy) - ) + val actor = spawn(Behaviors.supervise(Behaviors.stopped[Command]).onFailure(strategy)) createTestProbe().expectTerminated(actor, 3.second) } "that is stopped after setup should be stopped" in { - val actor = spawn( - Behaviors.supervise[Command]( - Behaviors.setup(_ => - Behaviors.stopped) - ).onFailure(strategy) - ) + val actor = spawn(Behaviors.supervise[Command](Behaviors.setup(_ => Behaviors.stopped)).onFailure(strategy)) createTestProbe().expectTerminated(actor, 3.second) } @@ -1247,8 +1221,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( "that is stopped after restart should be stopped" in { val stopInSetup = new 
AtomicBoolean(false) val actor = spawn( - Behaviors.supervise[String]( - Behaviors.setup { _ => + Behaviors + .supervise[String](Behaviors.setup { _ => if (stopInSetup.get()) { Behaviors.stopped } else { @@ -1257,8 +1231,8 @@ class SupervisionSpec extends ScalaTestWithActorTestKit( case "boom" => throw TestException("boom") } } - }).onFailure[TestException](strategy) - ) + }) + .onFailure[TestException](strategy)) EventFilter[TestException](occurrences = 1).intercept { actor ! "boom" diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TimerSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TimerSpec.scala index 2450281e5b..7845231387 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TimerSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/TimerSpec.scala @@ -16,8 +16,7 @@ import akka.actor.typed.scaladsl.TimerScheduler import akka.testkit.{ EventFilter, TimingTest } import org.scalatest.WordSpecLike -class TimerSpec extends ScalaTestWithActorTestKit( - """ +class TimerSpec extends ScalaTestWithActorTestKit(""" akka.loggers = [ akka.testkit.TestEventListener ] """) with WordSpecLike { @@ -51,36 +50,38 @@ class TimerSpec extends ScalaTestWithActorTestKit( target(monitor, timer, nextCount) } - Behaviors.receive[Command] { (context, cmd) => - cmd match { - case Tick(n) => - monitor ! Tock(n) - Behaviors.same - case Bump => - bump() - case SlowThenBump(latch) => - latch.await(10, TimeUnit.SECONDS) - bump() - case End => - Behaviors.stopped - case Cancel => - timer.cancel("T") - monitor ! Cancelled - Behaviors.same - case Throw(e) => - throw e - case SlowThenThrow(latch, e) => - latch.await(10, TimeUnit.SECONDS) - throw e + Behaviors + .receive[Command] { (context, cmd) => + cmd match { + case Tick(n) => + monitor ! 
Tock(n) + Behaviors.same + case Bump => + bump() + case SlowThenBump(latch) => + latch.await(10, TimeUnit.SECONDS) + bump() + case End => + Behaviors.stopped + case Cancel => + timer.cancel("T") + monitor ! Cancelled + Behaviors.same + case Throw(e) => + throw e + case SlowThenThrow(latch, e) => + latch.await(10, TimeUnit.SECONDS) + throw e + } + } + .receiveSignal { + case (context, PreRestart) => + monitor ! GotPreRestart(timer.isTimerActive("T")) + Behaviors.same + case (context, PostStop) => + monitor ! GotPostStop(timer.isTimerActive("T")) + Behaviors.same } - } receiveSignal { - case (context, PreRestart) => - monitor ! GotPreRestart(timer.isTimerActive("T")) - Behaviors.same - case (context, PostStop) => - monitor ! GotPostStop(timer.isTimerActive("T")) - Behaviors.same - } } "A timer" must { @@ -149,7 +150,7 @@ class TimerSpec extends ScalaTestWithActorTestKit( ref ! Cancel probe.fishForMessage(3.seconds) { // we don't know that we will see exactly one tock - case _: Tock => FishingOutcomes.continue + case _: Tock => FishingOutcomes.continue // but we know that after we saw Cancelled we won't see any more case Cancelled => FishingOutcomes.complete case message => FishingOutcomes.fail(s"unexpected message: $message") @@ -163,10 +164,12 @@ class TimerSpec extends ScalaTestWithActorTestKit( "discard timers from old incarnation after restart, alt 1" taggedAs TimingTest in { val probe = TestProbe[Event]("evt") val startCounter = new AtomicInteger(0) - val behv = Behaviors.supervise(Behaviors.withTimers[Command] { timer => - timer.startPeriodicTimer("T", Tick(startCounter.incrementAndGet()), interval) - target(probe.ref, timer, 1) - }).onFailure[Exception](SupervisorStrategy.restart) + val behv = Behaviors + .supervise(Behaviors.withTimers[Command] { timer => + timer.startPeriodicTimer("T", Tick(startCounter.incrementAndGet()), interval) + target(probe.ref, timer, 1) + }) + .onFailure[Exception](SupervisorStrategy.restart) val ref = spawn(behv) 
probe.expectMessage(Tock(1)) @@ -188,10 +191,12 @@ class TimerSpec extends ScalaTestWithActorTestKit( "discard timers from old incarnation after restart, alt 2" taggedAs TimingTest in { val probe = TestProbe[Event]("evt") - val behv = Behaviors.supervise(Behaviors.withTimers[Command] { timer => - timer.startPeriodicTimer("T", Tick(1), interval) - target(probe.ref, timer, 1) - }).onFailure[Exception](SupervisorStrategy.restart) + val behv = Behaviors + .supervise(Behaviors.withTimers[Command] { timer => + timer.startPeriodicTimer("T", Tick(1), interval) + target(probe.ref, timer, 1) + }) + .onFailure[Exception](SupervisorStrategy.restart) val ref = spawn(behv) probe.expectMessage(Tock(1)) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala index 8fe972b171..9b12181d67 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WatchSpec.scala @@ -54,18 +54,20 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe val watchProbe = TestProbe[Done]() val watcher = spawn( - Behaviors.supervise( - Behaviors.receive[StartWatching] { - case (context, StartWatching(watchee)) => - context.watch(watchee) - watchProbe.ref ! Done - Behaviors.same - }.receiveSignal { - case (_, t: Terminated) => - receivedTerminationSignal.success(t) - Behaviors.stopped - } - ).onFailure[Throwable](SupervisorStrategy.stop)) + Behaviors + .supervise(Behaviors + .receive[StartWatching] { + case (context, StartWatching(watchee)) => + context.watch(watchee) + watchProbe.ref ! 
Done + Behaviors.same + } + .receiveSignal { + case (_, t: Terminated) => + receivedTerminationSignal.success(t) + Behaviors.stopped + }) + .onFailure[Throwable](SupervisorStrategy.stop)) } "Actor monitoring" must { @@ -85,24 +87,26 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe "notify a parent of child termination because of failure" in { val probe = TestProbe[Any]() val ex = new TestException("boom") - val parent = spawn(Behaviors.setup[Any] { context => - val child = context.spawn(Behaviors.receive[Any]((context, message) => - throw ex - ), "child") - context.watch(child) + val parent = spawn( + Behaviors.setup[Any] { context => + val child = context.spawn(Behaviors.receive[Any]((context, message) => throw ex), "child") + context.watch(child) - Behaviors.receive[Any] { (context, message) => - child ! message - Behaviors.same - }.receiveSignal { - case (_, t: ChildFailed) => - probe.ref ! ChildHasFailed(t) - Behaviors.same - case (_, t: Terminated) => - probe.ref ! HasTerminated(t) - Behaviors.same - } - }, "supervised-child-parent") + Behaviors + .receive[Any] { (context, message) => + child ! message + Behaviors.same + } + .receiveSignal { + case (_, t: ChildFailed) => + probe.ref ! ChildHasFailed(t) + Behaviors.same + case (_, t: Terminated) => + probe.ref ! HasTerminated(t) + Behaviors.same + } + }, + "supervised-child-parent") EventFilter[TestException](occurrences = 1).intercept { parent ! 
"boom" @@ -114,22 +118,27 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe val probe = TestProbe[Any]() val ex = new TestException("boom") val behavior = Behaviors.setup[Any] { context => - val child = context.spawn(Behaviors.supervise(Behaviors.receive[Any]((context, message) => { - throw ex - })).onFailure[Throwable](SupervisorStrategy.stop), "child") + val child = context.spawn(Behaviors + .supervise(Behaviors.receive[Any]((context, message) => { + throw ex + })) + .onFailure[Throwable](SupervisorStrategy.stop), + "child") context.watch(child) - Behaviors.receive[Any] { (context, message) => - child ! message - Behaviors.same - }.receiveSignal { - case (_, t: ChildFailed) => - probe.ref ! ChildHasFailed(t) + Behaviors + .receive[Any] { (context, message) => + child ! message Behaviors.same - case (_, t: Terminated) => - probe.ref ! HasTerminated(t) - Behaviors.same - } + } + .receiveSignal { + case (_, t: ChildFailed) => + probe.ref ! ChildHasFailed(t) + Behaviors.same + case (_, t: Terminated) => + probe.ref ! HasTerminated(t) + Behaviors.same + } } val parent = spawn(behavior, "parent") @@ -143,31 +152,34 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe case class Failed(t: Terminated) // we need to wrap it as it is handled specially val probe = TestProbe[Any]() val ex = new TestException("boom") - val grossoBosso = spawn(Behaviors.setup[Any] { context => - val middleManagement = context.spawn(Behaviors.setup[Any] { context => - val sixPackJoe = context.spawn(Behaviors.receive[Any]((context, message) => - throw ex - ), "joe") - context.watch(sixPackJoe) + val grossoBosso = + spawn( + Behaviors.setup[Any] { context => + val middleManagement = context.spawn(Behaviors.setup[Any] { context => + val sixPackJoe = context.spawn(Behaviors.receive[Any]((context, message) => throw ex), "joe") + context.watch(sixPackJoe) - Behaviors.receive[Any] { (context, message) => - sixPackJoe ! 
message - Behaviors.same - } // no handling of terminated, even though we watched!!! - }, "middle-management") + Behaviors.receive[Any] { (context, message) => + sixPackJoe ! message + Behaviors.same + } // no handling of terminated, even though we watched!!! + }, "middle-management") - context.watch(middleManagement) + context.watch(middleManagement) - Behaviors.receive[Any] { (context, message) => - middleManagement ! message - Behaviors.same - }.receiveSignal { - case (_, t: Terminated) => - probe.ref ! Failed(t) - Behaviors.stopped - } + Behaviors + .receive[Any] { (context, message) => + middleManagement ! message + Behaviors.same + } + .receiveSignal { + case (_, t: Terminated) => + probe.ref ! Failed(t) + Behaviors.stopped + } - }, "grosso-bosso") + }, + "grosso-bosso") EventFilter[TestException](occurrences = 1).intercept { EventFilter[DeathPactException](occurrences = 1).intercept { @@ -193,8 +205,8 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe val watchProbe = TestProbe[Done]() val watcher = spawn( - Behaviors.supervise( - Behaviors.receive[Message] { + Behaviors + .supervise(Behaviors.receive[Message] { case (context, StartWatchingWith(watchee, message)) => context.watchWith(watchee, message) watchProbe.ref ! Done @@ -202,8 +214,8 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe case (_, message) => receivedTerminationSignal.success(message) Behaviors.stopped - }).onFailure[Throwable](SupervisorStrategy.stop) - ) + }) + .onFailure[Throwable](SupervisorStrategy.stop)) } "get notified of actor termination with a custom message" in new WatchWithSetup { watcher ! 
StartWatchingWith(terminator, CustomTerminationMessage) @@ -229,8 +241,8 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe val watchProbe = TestProbe[Done]() val watcher = spawn( - Behaviors.supervise( - Behaviors.receive[Message] { + Behaviors + .supervise(Behaviors.receive[Message] { case (context, StartWatching(watchee)) => context.watch(watchee) Behaviors.same @@ -242,8 +254,8 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe case (_, message) => receivedTerminationSignal.success(message) Behaviors.stopped - }).onFailure[Throwable](SupervisorStrategy.stop) - ) + }) + .onFailure[Throwable](SupervisorStrategy.stop)) watcher ! StartWatching(terminator) watcher ! StartWatchingWith(terminator, CustomTerminationMessage) @@ -259,8 +271,8 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe val watchProbe = TestProbe[Done]() val watcher = spawn( - Behaviors.supervise( - Behaviors.receive[Message] { + Behaviors + .supervise(Behaviors.receive[Message] { case (context, StartWatchingWith(watchee, message)) => context.unwatch(watchee) context.watchWith(watchee, message) @@ -269,8 +281,8 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe case (_, message) => receivedTerminationSignal.success(message) Behaviors.stopped - }).onFailure[Throwable](SupervisorStrategy.stop) - ) + }) + .onFailure[Throwable](SupervisorStrategy.stop)) watcher ! StartWatchingWith(terminator, CustomTerminationMessage) watcher ! 
StartWatchingWith(terminator, CustomTerminationMessage2) @@ -285,22 +297,23 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe private val stopProbe = TestProbe[Done]() val watcher = spawn( - Behaviors.supervise( - Behaviors.receive[Message] { - case (context, StartWatchingWith(watchee, message)) => - context.watchWith(watchee, message) - Behaviors.same - case (context, StartWatching(watchee)) => - context.watch(watchee) - Behaviors.same - case (_, _) => - Behaviors.stopped - }.receiveSignal { - case (_, PostStop) => - Behaviors.stopped - } - ).onFailure[Throwable](SupervisorStrategy.stop) - ) + Behaviors + .supervise(Behaviors + .receive[Message] { + case (context, StartWatchingWith(watchee, message)) => + context.watchWith(watchee, message) + Behaviors.same + case (context, StartWatching(watchee)) => + context.watch(watchee) + Behaviors.same + case (_, _) => + Behaviors.stopped + } + .receiveSignal { + case (_, PostStop) => + Behaviors.stopped + }) + .onFailure[Throwable](SupervisorStrategy.stop)) def expectStopped(): Unit = stopProbe.expectTerminated(watcher, 1.second) } @@ -308,9 +321,10 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe "fail when watch is used after watchWith on same subject" in new ErrorTestSetup { watcher ! StartWatchingWith(terminator, CustomTerminationMessage) - EventFilter[IllegalStateException](pattern = ".*termination message was not overwritten.*", occurrences = 1) intercept { - watcher ! StartWatching(terminator) - } + EventFilter[IllegalStateException](pattern = ".*termination message was not overwritten.*", occurrences = 1) + .intercept { + watcher ! StartWatching(terminator) + } // supervisor should have stopped the actor expectStopped() } @@ -318,18 +332,20 @@ class WatchSpec extends ScalaTestWithActorTestKit(WatchSpec.config) with WordSpe "fail when watchWitch is used after watchWith with different termination message" in new ErrorTestSetup { watcher ! 
StartWatchingWith(terminator, CustomTerminationMessage) - EventFilter[IllegalStateException](pattern = ".*termination message was not overwritten.*", occurrences = 1) intercept { - watcher ! StartWatchingWith(terminator, CustomTerminationMessage2) - } + EventFilter[IllegalStateException](pattern = ".*termination message was not overwritten.*", occurrences = 1) + .intercept { + watcher ! StartWatchingWith(terminator, CustomTerminationMessage2) + } // supervisor should have stopped the actor expectStopped() } "fail when watchWith is used after watch on same subject" in new ErrorTestSetup { watcher ! StartWatching(terminator) - EventFilter[IllegalStateException](pattern = ".*termination message was not overwritten.*", occurrences = 1) intercept { - watcher ! StartWatchingWith(terminator, CustomTerminationMessage) - } + EventFilter[IllegalStateException](pattern = ".*termination message was not overwritten.*", occurrences = 1) + .intercept { + watcher ! StartWatchingWith(terminator, CustomTerminationMessage) + } // supervisor should have stopped the actor expectStopped() } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WidenSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WidenSpec.scala index f0de3378cc..e1e6945476 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WidenSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/WidenSpec.scala @@ -16,20 +16,21 @@ import org.scalatest.WordSpecLike import scala.concurrent.duration._ -class WidenSpec extends ScalaTestWithActorTestKit( - """ +class WidenSpec extends ScalaTestWithActorTestKit(""" akka.loggers = [akka.testkit.TestEventListener] """) with WordSpecLike { implicit val untypedSystem = system.toUntyped def intToString(probe: ActorRef[String]): Behavior[Int] = { - Behaviors.receiveMessage[String] { message => - probe ! 
message - Behaviors.same - }.widen[Int] { - case n if n != 13 => n.toString - } + Behaviors + .receiveMessage[String] { message => + probe ! message + Behaviors.same + } + .widen[Int] { + case n if n != 13 => n.toString + } } "Widen" should { @@ -70,14 +71,10 @@ class WidenSpec extends ScalaTestWithActorTestKit( behavior.widen(transform) val beh = - widen( - widen( - Behaviors.receiveMessage[String] { message => - probe.ref ! message - Behaviors.same - } - ) - ) + widen(widen(Behaviors.receiveMessage[String] { message => + probe.ref ! message + Behaviors.same + })) val ref = spawn(beh) ref ! "42" @@ -100,12 +97,10 @@ class WidenSpec extends ScalaTestWithActorTestKit( behavior.widen(transform) def next: Behavior[String] = - widen( - Behaviors.receiveMessage[String] { message => - probe.ref ! message - next - } - ) + widen(Behaviors.receiveMessage[String] { message => + probe.ref ! message + next + }) val ref = spawn(next) @@ -128,15 +123,9 @@ class WidenSpec extends ScalaTestWithActorTestKit( } EventFilter[ActorInitializationException](occurrences = 1).intercept { - val ref = spawn( - widen( - widen( - Behaviors.receiveMessage[String] { message => - Behaviors.same - } - ) - ) - ) + val ref = spawn(widen(widen(Behaviors.receiveMessage[String] { message => + Behaviors.same + }))) probe.expectTerminated(ref, 3.seconds) } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorRefSerializationSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorRefSerializationSpec.scala index 99fe47256b..7338de2922 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorRefSerializationSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorRefSerializationSpec.scala @@ -13,8 +13,7 @@ import com.typesafe.config.ConfigFactory import org.scalatest.WordSpecLike object ActorRefSerializationSpec { - def config = ConfigFactory.parseString( - """ + def config = 
ConfigFactory.parseString(""" akka.actor { serialize-messages = off allow-java-serialization = true diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala index 8901c796cc..ea78401c0c 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/ActorSystemSpec.scala @@ -16,8 +16,7 @@ import scala.concurrent.duration._ import scala.concurrent.{ Future, Promise } import scala.util.control.NonFatal -class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll - with ScalaFutures with Eventually { +class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll with ScalaFutures with Eventually { override implicit val patienceConfig = PatienceConfig(1.second) def system[T](behavior: Behavior[T], name: String) = ActorSystem(behavior, name) @@ -25,7 +24,8 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll case class Probe(message: String, replyTo: ActorRef[String]) - def withSystem[T](name: String, behavior: Behavior[T], doTerminate: Boolean = true)(block: ActorSystem[T] => Unit): Terminated = { + def withSystem[T](name: String, behavior: Behavior[T], doTerminate: Boolean = true)( + block: ActorSystem[T] => Unit): Terminated = { val sys = system(behavior, s"$suite-$name") try { block(sys) @@ -39,16 +39,16 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll "An ActorSystem" must { "start the guardian actor and terminate when it terminates" in { - val t = withSystem( - "a", - Behaviors.receive[Probe] { case (_, p) => p.replyTo ! p.message; Behaviors.stopped }, doTerminate = false) { sys => - val inbox = TestInbox[String]("a") - sys ! 
Probe("hello", inbox.ref) - eventually { - inbox.hasMessages should ===(true) - } - inbox.receiveAll() should ===("hello" :: Nil) + val t = withSystem("a", + Behaviors.receive[Probe] { case (_, p) => p.replyTo ! p.message; Behaviors.stopped }, + doTerminate = false) { sys => + val inbox = TestInbox[String]("a") + sys ! Probe("hello", inbox.ref) + eventually { + inbox.hasMessages should ===(true) } + inbox.receiveAll() should ===("hello" :: Nil) + } val p = t.ref.path p.name should ===("/") p.address.system should ===(suite + "-a") @@ -68,15 +68,16 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll "terminate the guardian actor" in { val inbox = TestInbox[String]("terminate") - val sys = system( - Behaviors.receive[Probe] { - case (_, _) => Behaviors.unhandled - } receiveSignal { - case (_, PostStop) => - inbox.ref ! "done" - Behaviors.same - }, - "terminate") + val sys = system(Behaviors + .receive[Probe] { + case (_, _) => Behaviors.unhandled + } + .receiveSignal { + case (_, PostStop) => + inbox.ref ! 
"done" + Behaviors.same + }, + "terminate") sys.terminate().futureValue inbox.receiveAll() should ===("done" :: Nil) } @@ -102,9 +103,11 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll "have a working thread factory" in { withSystem("thread", Behaviors.empty[String]) { sys => val p = Promise[Int] - sys.threadFactory.newThread(new Runnable { - def run(): Unit = p.success(42) - }).start() + sys.threadFactory + .newThread(new Runnable { + def run(): Unit = p.success(42) + }) + .start() p.future.futureValue should ===(42) } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/receptionist/LocalReceptionistSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/receptionist/LocalReceptionistSpec.scala index 79f8f98e79..6de9f0731e 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/receptionist/LocalReceptionistSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/receptionist/LocalReceptionistSpec.scala @@ -128,7 +128,7 @@ class LocalReceptionistBehaviorSpec extends WordSpec with Matchers { import LocalReceptionistSpec._ def assertEmpty(inboxes: TestInbox[_]*): Unit = { - inboxes foreach (i => withClue(s"inbox $i had messages")(i.hasMessages should be(false))) + inboxes.foreach(i => withClue(s"inbox $i had messages")(i.hasMessages should be(false))) } "A local receptionist behavior" must { diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializationSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializationSpec.scala index cc0f6a29e3..d069bc8c8d 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializationSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializationSpec.scala @@ -11,7 +11,9 @@ import 
akka.serialization.SerializationExtension import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit import org.scalatest.WordSpecLike -class ServiceKeySerializationSpec extends ScalaTestWithActorTestKit(ActorRefSerializationSpec.config) with WordSpecLike { +class ServiceKeySerializationSpec + extends ScalaTestWithActorTestKit(ActorRefSerializationSpec.config) + with WordSpecLike { val serialization = SerializationExtension(system.toUntyped) diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/receptionist/ReceptionistApiSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/receptionist/ReceptionistApiSpec.scala index b71fb5b5ff..fdcc2988f0 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/receptionist/ReceptionistApiSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/receptionist/ReceptionistApiSpec.scala @@ -83,4 +83,3 @@ object ReceptionistApiSpec { } } - diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala index cfcfea09a0..ae9ed41dfb 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextAskSpec.scala @@ -18,8 +18,7 @@ import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit import org.scalatest.WordSpecLike object ActorContextAskSpec { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.loggers = ["akka.testkit.TestEventListener"] ping-pong-dispatcher { executor = thread-pool-executor @@ -50,7 +49,6 @@ class ActorContextAskSpec extends ScalaTestWithActorTestKit(ActorContextAskSpec. 
val probe = TestProbe[Pong]() val snitch = Behaviors.setup[Pong] { context => - // Timeout comes from TypedAkkaSpec context.ask(pingPong)(Ping) { @@ -84,8 +82,7 @@ class ActorContextAskSpec extends ScalaTestWithActorTestKit(ActorContextAskSpec. case Ping(respondTo) => respondTo ! Pong Behaviors.same - } - )) + })) val snitch = Behaviors.setup[AnyRef] { context => context.ask(pingPong)(Ping) { @@ -93,16 +90,18 @@ class ActorContextAskSpec extends ScalaTestWithActorTestKit(ActorContextAskSpec. case Failure(x) => x } - Behaviors.receive[AnyRef] { - case (_, message) => - probe.ref ! message - Behaviors.same - }.receiveSignal { + Behaviors + .receive[AnyRef] { + case (_, message) => + probe.ref ! message + Behaviors.same + } + .receiveSignal { - case (_, PostStop) => - probe.ref ! "stopped" - Behaviors.same - } + case (_, PostStop) => + probe.ref ! "stopped" + Behaviors.same + } } EventFilter[NotImplementedError](occurrences = 1, start = "Pong").intercept { @@ -116,7 +115,6 @@ class ActorContextAskSpec extends ScalaTestWithActorTestKit(ActorContextAskSpec. "deal with timeouts in ask" in { val probe = TestProbe[AnyRef]() val snitch = Behaviors.setup[AnyRef] { context => - context.ask[String, String](system.deadLetters)(ref => "boo") { case Success(m) => m case Failure(x) => x @@ -142,7 +140,6 @@ class ActorContextAskSpec extends ScalaTestWithActorTestKit(ActorContextAskSpec. 
val target = spawn(Behaviors.ignore[String]) val probe = TestProbe[AnyRef]() val snitch = Behaviors.setup[AnyRef] { context => - context.ask[String, String](target)(_ => "bar") { case Success(m) => m case Failure(x) => x diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextPipeToSelfSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextPipeToSelfSpec.scala index 79bfeb87d4..76809c4d5c 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextPipeToSelfSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorContextPipeToSelfSpec.scala @@ -14,8 +14,7 @@ import com.typesafe.config.ConfigFactory import org.scalatest.WordSpecLike object ActorContextPipeToSelfSpec { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" |pipe-to-self-spec-dispatcher { | executor = thread-pool-executor | type = PinnedDispatcher @@ -23,8 +22,9 @@ object ActorContextPipeToSelfSpec { """.stripMargin) } -final class ActorContextPipeToSelfSpec extends ScalaTestWithActorTestKit(ActorContextPipeToSelfSpec.config) - with WordSpecLike { +final class ActorContextPipeToSelfSpec + extends ScalaTestWithActorTestKit(ActorContextPipeToSelfSpec.config) + with WordSpecLike { "The Scala DSL ActorContext pipeToSelf" must { "handle success" in { responseFrom(Future.successful("hi")) should ===("ok: hi") } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala index b90b1a429e..ded025773f 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/ActorLoggingSpec.scala @@ -53,26 +53,31 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" "Logging in a typed actor" must { "be conveniently 
available from the context" in { - val actor = EventFilter.info("Started", source = "akka://ActorLoggingSpec/user/the-actor", occurrences = 1).intercept { - spawn(Behaviors.setup[String] { context => - context.log.info("Started") + val actor = + EventFilter.info("Started", source = "akka://ActorLoggingSpec/user/the-actor", occurrences = 1).intercept { + spawn( + Behaviors.setup[String] { context => + context.log.info("Started") - Behaviors.receive { (context, message) => - context.log.info("got message {}", message) - Behaviors.same - } - }, "the-actor") - } + Behaviors.receive { (context, message) => + context.log.info("got message {}", message) + Behaviors.same + } + }, + "the-actor") + } - EventFilter.info("got message Hello", source = "akka://ActorLoggingSpec/user/the-actor", occurrences = 1).intercept { - actor ! "Hello" - } + EventFilter + .info("got message Hello", source = "akka://ActorLoggingSpec/user/the-actor", occurrences = 1) + .intercept { + actor ! "Hello" + } } "contain the class name where the first log was called" in { val eventFilter = EventFilter.custom({ case l: LogEvent if l.logClass == classOf[ActorLoggingSpec] => true - case l: LogEvent if isScala211 => + case l: LogEvent if isScala211 => // TODO remove in Akka 2.6 when we drop Scala 2.11 // the class with 2.11 is like // ActorLoggingSpec$$anonfun$1$$anonfun$apply$mcV$sp$26$$anonfun$apply$6$$anonfun$apply$7 @@ -97,7 +102,7 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" "contain the object class name where the first log was called" in { val eventFilter = EventFilter.custom({ case l: LogEvent if l.logClass == WhereTheBehaviorIsDefined.getClass => true - case l: LogEvent if isScala211 => + case l: LogEvent if isScala211 => // TODO remove in Akka 2.6 when we drop Scala 2.11 // the class with 2.11 is like // WhereTheBehaviorIsDefined$$anonfun$behavior$1 @@ -129,27 +134,31 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" val eventFilter = EventFilter.custom({ case 
l: LogEvent => l.logClass == classOf[SomeClass] && - l.logSource == "who-knows-where-it-came-from" && - l.mdc == Map("mdc" -> true) // mdc should be kept + l.logSource == "who-knows-where-it-came-from" && + l.mdc == Map("mdc" -> true) // mdc should be kept }, occurrences = 1) eventFilter.intercept { - spawn(Behaviors.setup[String] { context => - val log = context.log.withMdc(Map("mdc" -> true)) - .withLoggerClass(classOf[SomeClass]) - .withLogSource("who-knows-where-it-came-from") - log.info("Started") + spawn( + Behaviors.setup[String] { context => + val log = context.log + .withMdc(Map("mdc" -> true)) + .withLoggerClass(classOf[SomeClass]) + .withLogSource("who-knows-where-it-came-from") + log.info("Started") - Behaviors.empty - }, "the-actor-with-custom-class") + Behaviors.empty + }, + "the-actor-with-custom-class") } } "pass markers to the log" in { - EventFilter.custom({ - case event: LogEventWithMarker if event.marker == marker => true - }, occurrences = 9).intercept( - spawn(Behaviors.setup[Any] { context => + EventFilter + .custom({ + case event: LogEventWithMarker if event.marker == marker => true + }, occurrences = 9) + .intercept(spawn(Behaviors.setup[Any] { context => context.log.debug(marker, "whatever") context.log.info(marker, "whatever") context.log.warning(marker, "whatever") @@ -159,127 +168,128 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" context.log.log(level, marker, "whatever") }) Behaviors.stopped - }) - ) + })) } "pass cause with warning" in { - EventFilter.custom({ - case event: LogEventWithCause if event.cause == cause => true - }, occurrences = 2).intercept( - spawn(Behaviors.setup[Any] { context => + EventFilter + .custom({ + case event: LogEventWithCause if event.cause == cause => true + }, occurrences = 2) + .intercept(spawn(Behaviors.setup[Any] { context => context.log.warning(cause, "whatever") context.log.warning(marker, cause, "whatever") Behaviors.stopped - }) - ) + })) } "provide a whole bunch of logging 
overloads" in { // Not the best test but at least it exercises every log overload ;) - EventFilter.custom({ - case _ => true // any is fine, we're just after the right count of statements reaching the listener - }, occurrences = 120).intercept { - spawn(Behaviors.setup[String] { context => - context.log.debug("message") - context.log.debug("{}", "arg1") - context.log.debug("{} {}", "arg1", "arg2") - context.log.debug("{} {} {}", "arg1", "arg2", "arg3") - context.log.debug("{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.debug("{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - context.log.debug(marker, "message") - context.log.debug(marker, "{}", "arg1") - context.log.debug(marker, "{} {}", "arg1", "arg2") - context.log.debug(marker, "{} {} {}", "arg1", "arg2", "arg3") - context.log.debug(marker, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.debug(marker, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + EventFilter + .custom({ + case _ => true // any is fine, we're just after the right count of statements reaching the listener + }, occurrences = 120) + .intercept { + spawn(Behaviors.setup[String] { context => + context.log.debug("message") + context.log.debug("{}", "arg1") + context.log.debug("{} {}", "arg1", "arg2") + context.log.debug("{} {} {}", "arg1", "arg2", "arg3") + context.log.debug("{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.debug("{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.debug(marker, "message") + context.log.debug(marker, "{}", "arg1") + context.log.debug(marker, "{} {}", "arg1", "arg2") + context.log.debug(marker, "{} {} {}", "arg1", "arg2", "arg3") + context.log.debug(marker, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.debug(marker, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - context.log.info("message") - context.log.info("{}", "arg1") - context.log.info("{} {}", "arg1", "arg2") - 
context.log.info("{} {} {}", "arg1", "arg2", "arg3") - context.log.info("{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.info("{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - context.log.info(marker, "message") - context.log.info(marker, "{}", "arg1") - context.log.info(marker, "{} {}", "arg1", "arg2") - context.log.info(marker, "{} {} {}", "arg1", "arg2", "arg3") - context.log.info(marker, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.info(marker, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.info("message") + context.log.info("{}", "arg1") + context.log.info("{} {}", "arg1", "arg2") + context.log.info("{} {} {}", "arg1", "arg2", "arg3") + context.log.info("{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.info("{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.info(marker, "message") + context.log.info(marker, "{}", "arg1") + context.log.info(marker, "{} {}", "arg1", "arg2") + context.log.info(marker, "{} {} {}", "arg1", "arg2", "arg3") + context.log.info(marker, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.info(marker, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - context.log.warning("message") - context.log.warning("{}", "arg1") - context.log.warning("{} {}", "arg1", "arg2") - context.log.warning("{} {} {}", "arg1", "arg2", "arg3") - context.log.warning("{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.warning("{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - context.log.warning(marker, "message") - context.log.warning(marker, "{}", "arg1") - context.log.warning(marker, "{} {}", "arg1", "arg2") - context.log.warning(marker, "{} {} {}", "arg1", "arg2", "arg3") - context.log.warning(marker, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.warning(marker, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.warning("message") + 
context.log.warning("{}", "arg1") + context.log.warning("{} {}", "arg1", "arg2") + context.log.warning("{} {} {}", "arg1", "arg2", "arg3") + context.log.warning("{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.warning("{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.warning(marker, "message") + context.log.warning(marker, "{}", "arg1") + context.log.warning(marker, "{} {}", "arg1", "arg2") + context.log.warning(marker, "{} {} {}", "arg1", "arg2", "arg3") + context.log.warning(marker, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.warning(marker, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - context.log.warning(cause, "message") - context.log.warning(cause, "{}", "arg1") - context.log.warning(cause, "{} {}", "arg1", "arg2") - context.log.warning(cause, "{} {} {}", "arg1", "arg2", "arg3") - context.log.warning(cause, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.warning(cause, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - context.log.warning(marker, cause, "message") - context.log.warning(marker, cause, "{}", "arg1") - context.log.warning(marker, cause, "{} {}", "arg1", "arg2") - context.log.warning(marker, cause, "{} {} {}", "arg1", "arg2", "arg3") - context.log.warning(marker, cause, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.warning(marker, cause, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.warning(cause, "message") + context.log.warning(cause, "{}", "arg1") + context.log.warning(cause, "{} {}", "arg1", "arg2") + context.log.warning(cause, "{} {} {}", "arg1", "arg2", "arg3") + context.log.warning(cause, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.warning(cause, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.warning(marker, cause, "message") + context.log.warning(marker, cause, "{}", "arg1") + context.log.warning(marker, cause, "{} {}", "arg1", "arg2") + 
context.log.warning(marker, cause, "{} {} {}", "arg1", "arg2", "arg3") + context.log.warning(marker, cause, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.warning(marker, cause, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - context.log.error("message") - context.log.error("{}", "arg1") - context.log.error("{} {}", "arg1", "arg2") - context.log.error("{} {} {}", "arg1", "arg2", "arg3") - context.log.error("{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.error("{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - context.log.error(marker, "message") - context.log.error(marker, "{}", "arg1") - context.log.error(marker, "{} {}", "arg1", "arg2") - context.log.error(marker, "{} {} {}", "arg1", "arg2", "arg3") - context.log.error(marker, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.error(marker, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.error("message") + context.log.error("{}", "arg1") + context.log.error("{} {}", "arg1", "arg2") + context.log.error("{} {} {}", "arg1", "arg2", "arg3") + context.log.error("{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.error("{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.error(marker, "message") + context.log.error(marker, "{}", "arg1") + context.log.error(marker, "{} {}", "arg1", "arg2") + context.log.error(marker, "{} {} {}", "arg1", "arg2", "arg3") + context.log.error(marker, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.error(marker, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - context.log.error(cause, "message") - context.log.error(cause, "{}", "arg1") - context.log.error(cause, "{} {}", "arg1", "arg2") - context.log.error(cause, "{} {} {}", "arg1", "arg2", "arg3") - context.log.error(cause, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.error(cause, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - 
context.log.error(marker, cause, "message") - context.log.error(marker, cause, "{}", "arg1") - context.log.error(marker, cause, "{} {}", "arg1", "arg2") - context.log.error(marker, cause, "{} {} {}", "arg1", "arg2", "arg3") - context.log.error(marker, cause, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.error(marker, cause, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.error(cause, "message") + context.log.error(cause, "{}", "arg1") + context.log.error(cause, "{} {}", "arg1", "arg2") + context.log.error(cause, "{} {} {}", "arg1", "arg2", "arg3") + context.log.error(cause, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.error(cause, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.error(marker, cause, "message") + context.log.error(marker, cause, "{}", "arg1") + context.log.error(marker, cause, "{} {}", "arg1", "arg2") + context.log.error(marker, cause, "{} {} {}", "arg1", "arg2", "arg3") + context.log.error(marker, cause, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.error(marker, cause, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - Logging.AllLogLevels.foreach(level => { - context.log.log(level, "message") - context.log.log(level, "{}", "arg1") - context.log.log(level, "{} {}", "arg1", "arg2") - context.log.log(level, "{} {} {}", "arg1", "arg2", "arg3") - context.log.log(level, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.log(level, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + Logging.AllLogLevels.foreach(level => { + context.log.log(level, "message") + context.log.log(level, "{}", "arg1") + context.log.log(level, "{} {}", "arg1", "arg2") + context.log.log(level, "{} {} {}", "arg1", "arg2", "arg3") + context.log.log(level, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.log(level, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) - context.log.log(level, marker, "message") - 
context.log.log(level, marker, "{}", "arg1") - context.log.log(level, marker, "{} {}", "arg1", "arg2") - context.log.log(level, marker, "{} {} {}", "arg1", "arg2", "arg3") - context.log.log(level, marker, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") - context.log.log(level, marker, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + context.log.log(level, marker, "message") + context.log.log(level, marker, "{}", "arg1") + context.log.log(level, marker, "{} {}", "arg1", "arg2") + context.log.log(level, marker, "{} {} {}", "arg1", "arg2", "arg3") + context.log.log(level, marker, "{} {} {} {}", "arg1", "arg2", "arg3", "arg4") + context.log.log(level, marker, "{} {} {} {} {}", Array("arg1", "arg2", "arg3", "arg4", "arg5")) + }) + + Behaviors.stopped }) - - Behaviors.stopped - }) - } + } } } @@ -292,58 +302,65 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" "Logging with MDC for a typed actor" must { "provide the MDC values in the log" in { - val behaviors = Behaviors.withMdc[Protocol]( - Map("static" -> 1), - // FIXME why u no infer the type here Scala?? - (message: Protocol) => - if (message.transactionId == 1) - Map( - "txId" -> message.transactionId, - "first" -> true - ) - else Map("txId" -> message.transactionId) - ) { - Behaviors.setup { context => - context.log.info("Starting") - Behaviors.receiveMessage { _ => - context.log.info("Got message!") - Behaviors.same - } + val behaviors = Behaviors.withMdc[Protocol](Map("static" -> 1), + // FIXME why u no infer the type here Scala?? 
+ (message: Protocol) => + if (message.transactionId == 1) + Map("txId" -> message.transactionId, "first" -> true) + else Map("txId" -> message.transactionId)) { + Behaviors.setup { context => + context.log.info("Starting") + Behaviors.receiveMessage { _ => + context.log.info("Got message!") + Behaviors.same } } + } // mdc on defer is empty (thread and timestamp MDC is added by logger backend) - val ref = EventFilter.custom({ - case logEvent if logEvent.level == Logging.InfoLevel => - logEvent.message should ===("Starting") - logEvent.mdc shouldBe empty - true - case other => system.log.error(s"Unexpected log event: {}", other); false - }, occurrences = 1).intercept { - spawn(behaviors) - } + val ref = EventFilter + .custom( + { + case logEvent if logEvent.level == Logging.InfoLevel => + logEvent.message should ===("Starting") + logEvent.mdc shouldBe empty + true + case other => system.log.error(s"Unexpected log event: {}", other); false + }, + occurrences = 1) + .intercept { + spawn(behaviors) + } // mdc on message - EventFilter.custom({ - case logEvent if logEvent.level == Logging.InfoLevel => - logEvent.message should ===("Got message!") - logEvent.mdc should ===(Map("static" -> 1, "txId" -> 1L, "first" -> true)) - true - case other => system.log.error(s"Unexpected log event: {}", other); false - }, occurrences = 1).intercept { - ref ! Message(1, "first") - } + EventFilter + .custom( + { + case logEvent if logEvent.level == Logging.InfoLevel => + logEvent.message should ===("Got message!") + logEvent.mdc should ===(Map("static" -> 1, "txId" -> 1L, "first" -> true)) + true + case other => system.log.error(s"Unexpected log event: {}", other); false + }, + occurrences = 1) + .intercept { + ref ! 
Message(1, "first") + } // mdc does not leak between messages - EventFilter.custom({ - case logEvent if logEvent.level == Logging.InfoLevel => - logEvent.message should ===("Got message!") - logEvent.mdc should ===(Map("static" -> 1, "txId" -> 2L)) - true - case other => system.log.error(s"Unexpected log event: {}", other); false - }, occurrences = 1).intercept { - ref ! Message(2, "second") - } + EventFilter + .custom( + { + case logEvent if logEvent.level == Logging.InfoLevel => + logEvent.message should ===("Got message!") + logEvent.mdc should ===(Map("static" -> 1, "txId" -> 2L)) + true + case other => system.log.error(s"Unexpected log event: {}", other); false + }, + occurrences = 1) + .intercept { + ref ! Message(2, "second") + } } "use the outermost initial mdc" in { @@ -359,15 +376,19 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" } val ref = spawn(behavior) - EventFilter.custom({ - case logEvent if logEvent.level == Logging.InfoLevel => - logEvent.message should ===("message") - logEvent.mdc should ===(Map("outermost" -> true)) - true - case other => system.log.error(s"Unexpected log event: {}", other); false - }, occurrences = 1).intercept { - ref ! "message" - } + EventFilter + .custom( + { + case logEvent if logEvent.level == Logging.InfoLevel => + logEvent.message should ===("message") + logEvent.mdc should ===(Map("outermost" -> true)) + true + case other => system.log.error(s"Unexpected log event: {}", other); false + }, + occurrences = 1) + .intercept { + ref ! 
"message" + } } "keep being applied when behavior changes to other behavior" in { @@ -383,27 +404,35 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" } val ref = spawn(Behaviors.withMdc(Map("hasMdc" -> true))(behavior)) - EventFilter.custom({ - case logEvent if logEvent.level == Logging.InfoLevel => - logEvent.message should ===("message") - logEvent.mdc should ===(Map("hasMdc" -> true)) - true - case other => system.log.error(s"Unexpected log event: {}", other); false - }, occurrences = 1).intercept { - ref ! "message" - } + EventFilter + .custom( + { + case logEvent if logEvent.level == Logging.InfoLevel => + logEvent.message should ===("message") + logEvent.mdc should ===(Map("hasMdc" -> true)) + true + case other => system.log.error(s"Unexpected log event: {}", other); false + }, + occurrences = 1) + .intercept { + ref ! "message" + } ref ! "new-behavior" - EventFilter.custom({ - case logEvent if logEvent.level == Logging.InfoLevel => - logEvent.message should ===("message") - logEvent.mdc should ===(Map("hasMdc" -> true)) // original mdc should stay - true - case other => system.log.error(s"Unexpected log event: {}", other); false - }, occurrences = 1).intercept { - ref ! "message" - } + EventFilter + .custom( + { + case logEvent if logEvent.level == Logging.InfoLevel => + logEvent.message should ===("message") + logEvent.mdc should ===(Map("hasMdc" -> true)) // original mdc should stay + true + case other => system.log.error(s"Unexpected log event: {}", other); false + }, + occurrences = 1) + .intercept { + ref ! "message" + } } @@ -424,59 +453,73 @@ class ActorLoggingSpec extends ScalaTestWithActorTestKit(""" } val ref = spawn(behavior) - EventFilter.custom({ - case logEvent if logEvent.level == Logging.InfoLevel => - logEvent.message should ===("message") - logEvent.mdc should ===(Map("mdc-version" -> 1)) - true - case other => system.log.error(s"Unexpected log event: {}", other); false - }, occurrences = 1).intercept { - ref ! 
"message" - } + EventFilter + .custom( + { + case logEvent if logEvent.level == Logging.InfoLevel => + logEvent.message should ===("message") + logEvent.mdc should ===(Map("mdc-version" -> 1)) + true + case other => system.log.error(s"Unexpected log event: {}", other); false + }, + occurrences = 1) + .intercept { + ref ! "message" + } ref ! "new-mdc" - EventFilter.custom({ - case logEvent if logEvent.level == Logging.InfoLevel => - logEvent.message should ===("message") - logEvent.mdc should ===(Map("mdc-version" -> 2)) // mdc should have been replaced - true - case other => system.log.error(s"Unexpected log event: {}", other); false - }, occurrences = 1).intercept { - ref ! "message" - } + EventFilter + .custom( + { + case logEvent if logEvent.level == Logging.InfoLevel => + logEvent.message should ===("message") + logEvent.mdc should ===(Map("mdc-version" -> 2)) // mdc should have been replaced + true + case other => system.log.error(s"Unexpected log event: {}", other); false + }, + occurrences = 1) + .intercept { + ref ! 
"message" + } } "provide a withMdc decorator" in { - val behavior = Behaviors.withMdc[Protocol](Map("mdc" -> "outer"))( - Behaviors.setup { context => - Behaviors.receiveMessage { _ => - context.log.withMdc(Map("mdc" -> "inner")).info("Got message log.withMDC!") - // after log.withMdc so we know it didn't change the outer mdc - context.log.info("Got message behavior.withMdc!") - Behaviors.same - } + val behavior = Behaviors.withMdc[Protocol](Map("mdc" -> "outer"))(Behaviors.setup { context => + Behaviors.receiveMessage { _ => + context.log.withMdc(Map("mdc" -> "inner")).info("Got message log.withMDC!") + // after log.withMdc so we know it didn't change the outer mdc + context.log.info("Got message behavior.withMdc!") + Behaviors.same } - ) + }) // mdc on message val ref = spawn(behavior) - EventFilter.custom({ - case logEvent if logEvent.level == Logging.InfoLevel => - logEvent.message should ===("Got message behavior.withMdc!") - logEvent.mdc should ===(Map("mdc" -> "outer")) - true - case other => system.log.error(s"Unexpected log event: {}", other); false - }, occurrences = 1).intercept { - EventFilter.custom({ - case logEvent if logEvent.level == Logging.InfoLevel => - logEvent.message should ===("Got message log.withMDC!") - logEvent.mdc should ===(Map("mdc" -> "inner")) - true - case other => system.log.error(s"Unexpected log event: {}", other); false - }, occurrences = 1).intercept { - ref ! 
Message(1, "first") + EventFilter + .custom( + { + case logEvent if logEvent.level == Logging.InfoLevel => + logEvent.message should ===("Got message behavior.withMdc!") + logEvent.mdc should ===(Map("mdc" -> "outer")) + true + case other => system.log.error(s"Unexpected log event: {}", other); false + }, + occurrences = 1) + .intercept { + EventFilter + .custom( + { + case logEvent if logEvent.level == Logging.InfoLevel => + logEvent.message should ===("Got message log.withMDC!") + logEvent.mdc should ===(Map("mdc" -> "inner")) + true + case other => system.log.error(s"Unexpected log event: {}", other); false + }, + occurrences = 1) + .intercept { + ref ! Message(1, "first") + } } - } } } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala index 145d58f091..bd35d8d231 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/MessageAdapterSpec.scala @@ -17,8 +17,7 @@ import com.typesafe.config.ConfigFactory import org.scalatest.WordSpecLike object MessageAdapterSpec { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.loggers = ["akka.testkit.TestEventListener"] akka.log-dead-letters = off ping-pong-dispatcher { @@ -53,14 +52,13 @@ class MessageAdapterSpec extends ScalaTestWithActorTestKit(MessageAdapterSpec.co val probe = TestProbe[AnotherPong]() val snitch = Behaviors.setup[AnotherPong] { context => - - val replyTo = context.messageAdapter[Response](_ => - AnotherPong(context.self.path.name, Thread.currentThread().getName)) + val replyTo = + context.messageAdapter[Response](_ => AnotherPong(context.self.path.name, Thread.currentThread().getName)) pingPong ! 
Ping(replyTo) // also verify the internal spawnMessageAdapter - val replyTo2: ActorRef[Response] = context.spawnMessageAdapter(_ => - AnotherPong(context.self.path.name, Thread.currentThread().getName)) + val replyTo2: ActorRef[Response] = + context.spawnMessageAdapter(_ => AnotherPong(context.self.path.name, Thread.currentThread().getName)) pingPong ! Ping(replyTo2) Behaviors.receiveMessage { anotherPong => @@ -103,7 +101,6 @@ class MessageAdapterSpec extends ScalaTestWithActorTestKit(MessageAdapterSpec.co val probe = TestProbe[Wrapped]() val snitch = Behaviors.setup[Wrapped] { context => - context.messageAdapter[Response](pong => Wrapped(qualifier = "wrong", pong)) // this is replaced val replyTo1: ActorRef[Response] = context.messageAdapter(pong => Wrapped(qualifier = "1", pong)) val replyTo2 = context.messageAdapter[Pong2](pong => Wrapped(qualifier = "2", pong)) @@ -145,7 +142,6 @@ class MessageAdapterSpec extends ScalaTestWithActorTestKit(MessageAdapterSpec.co val probe = TestProbe[Wrapped]() val snitch = Behaviors.setup[Wrapped] { context => - val replyTo1 = context.messageAdapter[Pong1](pong => Wrapped(qualifier = "1", pong)) pingPong ! Ping1(replyTo1) // doing something terribly wrong @@ -181,7 +177,6 @@ class MessageAdapterSpec extends ScalaTestWithActorTestKit(MessageAdapterSpec.co val probe = TestProbe[Any]() val snitch = Behaviors.setup[Wrapped] { context => - var count = 0 val replyTo = context.messageAdapter[Pong] { pong => count += 1 @@ -192,14 +187,16 @@ class MessageAdapterSpec extends ScalaTestWithActorTestKit(MessageAdapterSpec.co pingPong ! Ping(replyTo) } - Behaviors.receiveMessage[Wrapped] { wrapped => - probe.ref ! wrapped - Behaviors.same - }.receiveSignal { - case (_, PostStop) => - probe.ref ! "stopped" + Behaviors + .receiveMessage[Wrapped] { wrapped => + probe.ref ! wrapped Behaviors.same - } + } + .receiveSignal { + case (_, PostStop) => + probe.ref ! 
"stopped" + Behaviors.same + } } EventFilter.warning(pattern = ".*received dead letter.*", occurrences = 1).intercept { @@ -228,7 +225,6 @@ class MessageAdapterSpec extends ScalaTestWithActorTestKit(MessageAdapterSpec.co val probe = TestProbe[Any]() val snitch = Behaviors.setup[Wrapped] { context => - val replyTo = context.messageAdapter[Pong] { pong => Wrapped(pong) } @@ -236,17 +232,20 @@ class MessageAdapterSpec extends ScalaTestWithActorTestKit(MessageAdapterSpec.co pingPong ! Ping(replyTo) } - def behv(count: Int): Behavior[Wrapped] = Behaviors.receiveMessage[Wrapped] { wrapped => - probe.ref ! count - if (count == 3) { - throw new TestException("boom") - } - behv(count + 1) - }.receiveSignal { - case (_, PostStop) => - probe.ref ! "stopped" - Behaviors.same - } + def behv(count: Int): Behavior[Wrapped] = + Behaviors + .receiveMessage[Wrapped] { wrapped => + probe.ref ! count + if (count == 3) { + throw new TestException("boom") + } + behv(count + 1) + } + .receiveSignal { + case (_, PostStop) => + probe.ref ! 
"stopped" + Behaviors.same + } behv(count = 1) } diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashBufferSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashBufferSpec.scala index 516d0fea8f..49cfda9374 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashBufferSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashBufferSpec.scala @@ -159,4 +159,3 @@ class StashBufferSpec extends WordSpec with Matchers { } } - diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashSpec.scala index 5826b56f70..39e8518917 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StashSpec.scala @@ -267,40 +267,39 @@ class UnstashingSpec extends ScalaTestWithActorTestKit(""" Behaviors.same } - private def stashingBehavior( - probe: ActorRef[String], - withSlowStoppingChild: Option[CountDownLatch] = None) = { + private def stashingBehavior(probe: ActorRef[String], withSlowStoppingChild: Option[CountDownLatch] = None) = { Behaviors.setup[String] { ctx => - withSlowStoppingChild.foreach(latch => ctx.spawnAnonymous(slowStoppingChild(latch))) val stash = StashBuffer[String](10) def unstashing(n: Int): Behavior[String] = - Behaviors.receiveMessage[String] { - case "stash" => - probe.ref ! s"unstashing-$n" - unstashing(n + 1) - case "stash-fail" => - probe.ref ! s"stash-fail-$n" - throw TestException("unstash-fail") - case "get-current" => - probe.ref ! s"current-$n" - Behaviors.same - case "get-stash-size" => - probe.ref ! s"stash-size-${stash.size}" - Behaviors.same - case "unstash" => - // when testing resume - stash.unstashAll(ctx, unstashing(n)) - }.receiveSignal { - case (_, PreRestart) => - probe.ref ! 
s"pre-restart-$n" - Behaviors.same - case (_, PostStop) => - probe.ref ! s"post-stop-$n" - Behaviors.same - } + Behaviors + .receiveMessage[String] { + case "stash" => + probe.ref ! s"unstashing-$n" + unstashing(n + 1) + case "stash-fail" => + probe.ref ! s"stash-fail-$n" + throw TestException("unstash-fail") + case "get-current" => + probe.ref ! s"current-$n" + Behaviors.same + case "get-stash-size" => + probe.ref ! s"stash-size-${stash.size}" + Behaviors.same + case "unstash" => + // when testing resume + stash.unstashAll(ctx, unstashing(n)) + } + .receiveSignal { + case (_, PreRestart) => + probe.ref ! s"pre-restart-$n" + Behaviors.same + case (_, PostStop) => + probe.ref ! s"post-stop-$n" + Behaviors.same + } Behaviors.receiveMessage[String] { case msg if msg.startsWith("stash") => @@ -368,16 +367,19 @@ class UnstashingSpec extends ScalaTestWithActorTestKit(""" val probe = TestProbe[String]() // unstashing is inside setup - val ref = spawn(Behaviors.supervise(Behaviors.receivePartial[String] { - case (ctx, "unstash") => - val stash = StashBuffer[String](10) - stash.stash("one") - stash.unstashAll(ctx, Behavior.same) + val ref = spawn( + Behaviors + .supervise(Behaviors.receivePartial[String] { + case (ctx, "unstash") => + val stash = StashBuffer[String](10) + stash.stash("one") + stash.unstashAll(ctx, Behavior.same) - case (_, msg) => - probe.ref ! msg - Behaviors.same - }).onFailure[TestException](SupervisorStrategy.stop)) + case (_, msg) => + probe.ref ! msg + Behaviors.same + }) + .onFailure[TestException](SupervisorStrategy.stop)) ref ! 
"unstash" probe.expectMessage("one") @@ -388,16 +390,19 @@ class UnstashingSpec extends ScalaTestWithActorTestKit(""" "work with supervised intermediate Behaviors.same" in { val probe = TestProbe[String]() // unstashing is inside setup - val ref = spawn(Behaviors.supervise(Behaviors.receivePartial[String] { - case (ctx, "unstash") => - val stash = StashBuffer[String](10) - stash.stash("one") - stash.stash("two") - stash.unstashAll(ctx, Behaviors.receiveMessage { msg => - probe.ref ! msg - Behaviors.same + val ref = spawn( + Behaviors + .supervise(Behaviors.receivePartial[String] { + case (ctx, "unstash") => + val stash = StashBuffer[String](10) + stash.stash("one") + stash.stash("two") + stash.unstashAll(ctx, Behaviors.receiveMessage { msg => + probe.ref ! msg + Behaviors.same + }) }) - }).onFailure[TestException](SupervisorStrategy.stop)) + .onFailure[TestException](SupervisorStrategy.stop)) ref ! "unstash" probe.expectMessage("one") @@ -406,22 +411,18 @@ class UnstashingSpec extends ScalaTestWithActorTestKit(""" probe.expectMessage("three") } - def testPostStop( - probe: TestProbe[String], - ref: ActorRef[String] - ): Unit = { + def testPostStop(probe: TestProbe[String], ref: ActorRef[String]): Unit = { ref ! "stash" ref ! "stash" ref ! "stash-fail" ref ! "stash" - EventFilter[TestException](start = "unstash-fail", occurrences = 1) - .intercept { - ref ! "unstash" - probe.expectMessage("unstashing-0") - probe.expectMessage("unstashing-1") - probe.expectMessage("stash-fail-2") - probe.expectMessage("post-stop-2") - } + EventFilter[TestException](start = "unstash-fail", occurrences = 1).intercept { + ref ! 
"unstash" + probe.expectMessage("unstashing-0") + probe.expectMessage("unstashing-1") + probe.expectMessage("stash-fail-2") + probe.expectMessage("post-stop-2") + } } "signal PostStop to the latest unstashed behavior on failure" in { @@ -433,47 +434,39 @@ class UnstashingSpec extends ScalaTestWithActorTestKit(""" "signal PostStop to the latest unstashed behavior on failure with stop supervision" in { val probe = TestProbe[String]() val ref = - spawn(Behaviors.supervise(stashingBehavior(probe.ref)) - .onFailure[TestException](SupervisorStrategy.stop)) + spawn(Behaviors.supervise(stashingBehavior(probe.ref)).onFailure[TestException](SupervisorStrategy.stop)) testPostStop(probe, ref) } - def testPreRestart( - probe: TestProbe[String], - childLatch: Option[CountDownLatch], - ref: ActorRef[String] - ): Unit = { + def testPreRestart(probe: TestProbe[String], childLatch: Option[CountDownLatch], ref: ActorRef[String]): Unit = { ref ! "stash" ref ! "stash" ref ! "stash-fail" ref ! "stash" - EventFilter[TestException]( - start = "Supervisor RestartSupervisor saw failure: unstash-fail", - occurrences = 1 - ).intercept { - ref ! "unstash" - // when childLatch is defined this be stashed in the internal stash of the RestartSupervisor - // because it's waiting for child to stop - ref ! "get-current" + EventFilter[TestException](start = "Supervisor RestartSupervisor saw failure: unstash-fail", occurrences = 1) + .intercept { + ref ! "unstash" + // when childLatch is defined this be stashed in the internal stash of the RestartSupervisor + // because it's waiting for child to stop + ref ! 
"get-current" - probe.expectMessage("unstashing-0") - probe.expectMessage("unstashing-1") - probe.expectMessage("stash-fail-2") - probe.expectMessage("pre-restart-2") + probe.expectMessage("unstashing-0") + probe.expectMessage("unstashing-1") + probe.expectMessage("stash-fail-2") + probe.expectMessage("pre-restart-2") - childLatch.foreach(_.countDown()) - probe.expectMessage("current-00") + childLatch.foreach(_.countDown()) + probe.expectMessage("current-00") - ref ! "get-stash-size" - probe.expectMessage("stash-size-0") - } + ref ! "get-stash-size" + probe.expectMessage("stash-size-0") + } } "signal PreRestart to the latest unstashed behavior on failure with restart supervision" in { val probe = TestProbe[String]() val ref = - spawn(Behaviors.supervise(stashingBehavior(probe.ref)) - .onFailure[TestException](SupervisorStrategy.restart)) + spawn(Behaviors.supervise(stashingBehavior(probe.ref)).onFailure[TestException](SupervisorStrategy.restart)) testPreRestart(probe, None, ref) // one more time to ensure that the restart strategy is kept @@ -484,8 +477,10 @@ class UnstashingSpec extends ScalaTestWithActorTestKit(""" val probe = TestProbe[String]() val childLatch = new CountDownLatch(1) val ref = - spawn(Behaviors.supervise(stashingBehavior(probe.ref, Some(childLatch))) - .onFailure[TestException](SupervisorStrategy.restart)) + spawn( + Behaviors + .supervise(stashingBehavior(probe.ref, Some(childLatch))) + .onFailure[TestException](SupervisorStrategy.restart)) testPreRestart(probe, Some(childLatch), ref) } @@ -493,8 +488,10 @@ class UnstashingSpec extends ScalaTestWithActorTestKit(""" "signal PreRestart to the latest unstashed behavior on failure with backoff supervision" in { val probe = TestProbe[String]() val ref = - spawn(Behaviors.supervise(stashingBehavior(probe.ref)) - .onFailure[TestException](SupervisorStrategy.restartWithBackoff(100.millis, 100.millis, 0.0))) + spawn( + Behaviors + .supervise(stashingBehavior(probe.ref)) + 
.onFailure[TestException](SupervisorStrategy.restartWithBackoff(100.millis, 100.millis, 0.0))) testPreRestart(probe, None, ref) @@ -506,8 +503,10 @@ class UnstashingSpec extends ScalaTestWithActorTestKit(""" val probe = TestProbe[String]() val childLatch = new CountDownLatch(1) val ref = - spawn(Behaviors.supervise(stashingBehavior(probe.ref, Some(childLatch))) - .onFailure[TestException](SupervisorStrategy.restartWithBackoff(100.millis, 100.millis, 0.0))) + spawn( + Behaviors + .supervise(stashingBehavior(probe.ref, Some(childLatch))) + .onFailure[TestException](SupervisorStrategy.restartWithBackoff(100.millis, 100.millis, 0.0))) testPreRestart(probe, Some(childLatch), ref) } @@ -515,8 +514,7 @@ class UnstashingSpec extends ScalaTestWithActorTestKit(""" "handle resume correctly on failure unstashing" in { val probe = TestProbe[String]() val ref = - spawn(Behaviors.supervise(stashingBehavior(probe.ref)) - .onFailure[TestException](SupervisorStrategy.resume)) + spawn(Behaviors.supervise(stashingBehavior(probe.ref)).onFailure[TestException](SupervisorStrategy.resume)) ref ! "stash" ref ! "stash" @@ -526,17 +524,18 @@ class UnstashingSpec extends ScalaTestWithActorTestKit(""" ref ! "stash" ref ! "stash-fail" ref ! "stash" - EventFilter[TestException](start = "Supervisor ResumeSupervisor saw failure: unstash-fail", occurrences = 1).intercept { - ref ! "unstash" - ref ! "get-current" + EventFilter[TestException](start = "Supervisor ResumeSupervisor saw failure: unstash-fail", occurrences = 1) + .intercept { + ref ! "unstash" + ref ! "get-current" - probe.expectMessage("unstashing-0") - probe.expectMessage("unstashing-1") - probe.expectMessage("stash-fail-2") - probe.expectMessage("current-2") - ref ! "get-stash-size" - probe.expectMessage("stash-size-5") - } + probe.expectMessage("unstashing-0") + probe.expectMessage("unstashing-1") + probe.expectMessage("stash-fail-2") + probe.expectMessage("current-2") + ref ! 
"get-stash-size" + probe.expectMessage("stash-size-5") + } ref ! "unstash" ref ! "get-current" diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala index cb467bdf45..a405693bbf 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/StopSpec.scala @@ -37,21 +37,23 @@ class StopSpec extends ScalaTestWithActorTestKit with WordSpecLike { "execute the post stop when wrapped" in { val sawSignal = Promise[Done]() val ref = spawn(Behaviors.setup[AnyRef] { _ => - Behaviors.intercept( - new BehaviorInterceptor[AnyRef, AnyRef] { - override def aroundReceive(context: typed.TypedActorContext[AnyRef], message: AnyRef, target: ReceiveTarget[AnyRef]): Behavior[AnyRef] = { - target(context, message) - } - - override def aroundSignal(context: typed.TypedActorContext[AnyRef], signal: Signal, target: SignalTarget[AnyRef]): Behavior[AnyRef] = { - target(context, signal) - } + Behaviors.intercept(new BehaviorInterceptor[AnyRef, AnyRef] { + override def aroundReceive(context: typed.TypedActorContext[AnyRef], + message: AnyRef, + target: ReceiveTarget[AnyRef]): Behavior[AnyRef] = { + target(context, message) } - )(Behaviors.stopped[AnyRef](Behaviors.receiveSignal[AnyRef] { - case (context, PostStop) => - sawSignal.success(Done) - Behaviors.empty - })) + + override def aroundSignal(context: typed.TypedActorContext[AnyRef], + signal: Signal, + target: SignalTarget[AnyRef]): Behavior[AnyRef] = { + target(context, signal) + } + })(Behaviors.stopped[AnyRef](Behaviors.receiveSignal[AnyRef] { + case (context, PostStop) => + sawSignal.success(Done) + Behaviors.empty + })) }) ref ! 
"stopit" sawSignal.future.futureValue should ===(Done) @@ -78,8 +80,7 @@ class StopSpec extends ScalaTestWithActorTestKit with WordSpecLike { // illegal: Behaviors.setup[String] { _ => throw TestException("boom!") - } - ) + }) } ex.getMessage should include("Behavior used as `postStop` behavior in Stopped(...) was a deferred one ") diff --git a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala index d48b2c7e15..eda729910a 100644 --- a/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/akka/actor/typed/scaladsl/adapter/AdapterSpec.scala @@ -38,8 +38,8 @@ object AdapterSpec { } def typed1(ref: untyped.ActorRef, probe: ActorRef[String]): Behavior[String] = - Behaviors.receive[String] { - (context, message) => + Behaviors + .receive[String] { (context, message) => message match { case "send" => val replyTo = context.self.toUntyped @@ -67,11 +67,12 @@ object AdapterSpec { context.stop(child) Behaviors.same } - } receiveSignal { - case (context, Terminated(ref)) => - probe ! "terminated" - Behaviors.same - } + } + .receiveSignal { + case (context, Terminated(ref)) => + probe ! 
"terminated" + Behaviors.same + } def unhappyTyped(msg: String): Behavior[String] = Behaviors.setup[String] { ctx => val child = ctx.spawnAnonymous(Behaviors.receiveMessage[String] { _ => @@ -160,8 +161,7 @@ object AdapterSpec { } -class AdapterSpec extends AkkaSpec( - """ +class AdapterSpec extends AkkaSpec(""" akka.loggers = [akka.testkit.TestEventListener] """) { import AdapterSpec._ @@ -171,14 +171,15 @@ class AdapterSpec extends AkkaSpec( val typed1 = system.toTyped val typed2 = system.toTyped - typed1 should be theSameInstanceAs typed2 + (typed1 should be).theSameInstanceAs(typed2) } "not crash if guardian is stopped" in { for { _ <- 0 to 10 } { var system: akka.actor.typed.ActorSystem[NotUsed] = null try { - system = ActorSystem.create(Behaviors.setup[NotUsed](_ => Behavior.stopped[NotUsed]), "AdapterSpec-stopping-guardian") + system = ActorSystem.create(Behaviors.setup[NotUsed](_ => Behavior.stopped[NotUsed]), + "AdapterSpec-stopping-guardian") } finally if (system != null) shutdown(system.toUntyped) } } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala index b149e8589c..da9d2099fc 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/DispatchersDocSpec.scala @@ -17,8 +17,7 @@ import org.scalatest.WordSpecLike object DispatchersDocSpec { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" //#config your-dispatcher { type = Dispatcher @@ -41,7 +40,6 @@ object DispatchersDocSpec { val yourBehavior: Behavior[String] = Behaviors.same val example = Behaviors.receive[Any] { (context, message) => - //#spawn-dispatcher import akka.actor.typed.DispatcherSelector @@ -71,7 +69,8 @@ class DispatchersDocSpec extends ScalaTestWithActorTestKit(DispatchersDocSpec.co withBlocking ! 
WhichDispatcher(probe.ref) probe.receiveMessage().id shouldEqual "akka.actor.default-blocking-io-dispatcher" - val withCustom = actor.ask(Spawn(giveMeYourDispatcher, "default", DispatcherSelector.fromConfig("your-dispatcher"))).futureValue + val withCustom = + actor.ask(Spawn(giveMeYourDispatcher, "default", DispatcherSelector.fromConfig("your-dispatcher"))).futureValue withCustom ! WhichDispatcher(probe.ref) probe.receiveMessage().id shouldEqual "your-dispatcher" } diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FaultToleranceDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FaultToleranceDocSpec.scala index 0ec8450810..9dba554b6a 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FaultToleranceDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/FaultToleranceDocSpec.scala @@ -9,8 +9,7 @@ import akka.actor.typed.{ DeathPactException, SupervisorStrategy } import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit import org.scalatest.WordSpecLike -class FaultToleranceDocSpec extends ScalaTestWithActorTestKit( - """ +class FaultToleranceDocSpec extends ScalaTestWithActorTestKit(""" # silenced to not put noise in test logs akka.loglevel = off """) with WordSpecLike { @@ -45,19 +44,21 @@ class FaultToleranceDocSpec extends ScalaTestWithActorTestKit( } } - val bossBehavior = Behaviors.supervise(Behaviors.setup[Message] { context => - context.log.info("Boss starting up") - val middleManagement = context.spawn(middleManagementBehavior, "middle-management") - context.watch(middleManagement) + val bossBehavior = Behaviors + .supervise(Behaviors.setup[Message] { context => + context.log.info("Boss starting up") + val middleManagement = context.spawn(middleManagementBehavior, "middle-management") + context.watch(middleManagement) - // here we don't handle Terminated at all which means that - // when middle management fails with a DeathWatchException - // this actor will also fail - 
Behaviors.receiveMessage[Message] { message => - middleManagement ! message - Behaviors.same - } - }).onFailure[DeathPactException](SupervisorStrategy.restart) + // here we don't handle Terminated at all which means that + // when middle management fails with a DeathWatchException + // this actor will also fail + Behaviors.receiveMessage[Message] { message => + middleManagement ! message + Behaviors.same + } + }) + .onFailure[DeathPactException](SupervisorStrategy.restart) // (spawn comes from the testkit) val boss = spawn(bossBehavior, "upper-management") diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala index b9147b63be..346fcc1672 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/GracefulStopDocSpec.scala @@ -28,29 +28,31 @@ object GracefulStopDocSpec { // Predefined cleanup operation def cleanup(log: Logger): Unit = log.info("Cleaning up!") - val mcpa = Behaviors.receive[JobControlLanguage] { (context, message) => - message match { - case SpawnJob(jobName) => - context.log.info("Spawning job {}!", jobName) - context.spawn(Job.job(jobName), name = jobName) - Behaviors.same - case GracefulShutdown => - context.log.info("Initiating graceful shutdown...") - // perform graceful stop, executing cleanup before final system termination - // behavior executing cleanup is passed as a parameter to Actor.stopped - Behaviors.stopped { - Behaviors.receiveSignal { - case (context, PostStop) => - cleanup(context.system.log) - Behaviors.same + val mcpa = Behaviors + .receive[JobControlLanguage] { (context, message) => + message match { + case SpawnJob(jobName) => + context.log.info("Spawning job {}!", jobName) + context.spawn(Job.job(jobName), name = jobName) + Behaviors.same + case GracefulShutdown => + context.log.info("Initiating graceful 
shutdown...") + // perform graceful stop, executing cleanup before final system termination + // behavior executing cleanup is passed as a parameter to Actor.stopped + Behaviors.stopped { + Behaviors.receiveSignal { + case (context, PostStop) => + cleanup(context.system.log) + Behaviors.same + } } - } + } + } + .receiveSignal { + case (context, PostStop) => + context.log.info("MCPA stopped") + Behaviors.same } - }.receiveSignal { - case (context, PostStop) => - context.log.info("MCPA stopped") - Behaviors.same - } } //#master-actor diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala index 9c02340fd4..704e7ffe74 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/InteractionPatternsSpec.scala @@ -82,11 +82,7 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with WordSpecLik object Backend { sealed trait Request - final case class StartTranslationJob( - taskId: Int, - site: URI, - replyTo: ActorRef[Response] - ) extends Request + final case class StartTranslationJob(taskId: Int, site: URI, replyTo: ActorRef[Response]) extends Request sealed trait Response final case class JobStarted(taskId: Int) extends Response @@ -105,27 +101,26 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with WordSpecLik val backendResponseMapper: ActorRef[Backend.Response] = context.messageAdapter(rsp => WrappedBackendResponse(rsp)) - def active( - inProgress: Map[Int, ActorRef[URI]], - count: Int): Behavior[Command] = { + def active(inProgress: Map[Int, ActorRef[URI]], count: Int): Behavior[Command] = { Behaviors.receiveMessage[Command] { case Translate(site, replyTo) => val taskId = count + 1 backend ! 
Backend.StartTranslationJob(taskId, site, backendResponseMapper) active(inProgress.updated(taskId, replyTo), taskId) - case wrapped: WrappedBackendResponse => wrapped.response match { - case Backend.JobStarted(taskId) => - context.log.info("Started {}", taskId) - Behaviors.same - case Backend.JobProgress(taskId, progress) => - context.log.info("Progress {}: {}", taskId, progress) - Behaviors.same - case Backend.JobCompleted(taskId, result) => - context.log.info("Completed {}: {}", taskId, result) - inProgress(taskId) ! result - active(inProgress - taskId, count) - } + case wrapped: WrappedBackendResponse => + wrapped.response match { + case Backend.JobStarted(taskId) => + context.log.info("Started {}", taskId) + Behaviors.same + case Backend.JobProgress(taskId, progress) => + context.log.info("Progress {}: {}", taskId, progress) + Behaviors.same + case Backend.JobCompleted(taskId, result) => + context.log.info("Completed {}: {}", taskId, result) + inProgress(taskId) ! result + active(inProgress - taskId, count) + } } } @@ -166,16 +161,21 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with WordSpecLik Behaviors.withTimers(timers => idle(timers, target, after, maxSize)) } - def idle(timers: TimerScheduler[Msg], target: ActorRef[Batch], - after: FiniteDuration, maxSize: Int): Behavior[Msg] = { + def idle(timers: TimerScheduler[Msg], + target: ActorRef[Batch], + after: FiniteDuration, + maxSize: Int): Behavior[Msg] = { Behaviors.receiveMessage[Msg] { message => timers.startSingleTimer(TimerKey, Timeout, after) active(Vector(message), timers, target, after, maxSize) } } - def active(buffer: Vector[Msg], timers: TimerScheduler[Msg], - target: ActorRef[Batch], after: FiniteDuration, maxSize: Int): Behavior[Msg] = { + def active(buffer: Vector[Msg], + timers: TimerScheduler[Msg], + target: ActorRef[Batch], + after: FiniteDuration, + maxSize: Int): Behavior[Msg] = { Behaviors.receiveMessage[Msg] { case Timeout => target ! 
Batch(buffer) @@ -217,7 +217,6 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with WordSpecLik case class AdaptedResponse(message: String) extends DaveMessage def daveBehavior(hal: ActorRef[HalCommand]) = Behaviors.setup[DaveMessage] { context => - // asking someone requires a timeout, if the timeout hits without response // the ask is failed with a TimeoutException implicit val timeout: Timeout = 3.seconds @@ -299,44 +298,45 @@ class InteractionPatternsSpec extends ScalaTestWithActorTestKit with WordSpecLik } // per session actor behavior - def prepareToLeaveHome( - whoIsLeaving: String, - respondTo: ActorRef[ReadyToLeaveHome], - keyCabinet: ActorRef[GetKeys], - drawer: ActorRef[GetWallet]): Behavior[NotUsed] = + def prepareToLeaveHome(whoIsLeaving: String, + respondTo: ActorRef[ReadyToLeaveHome], + keyCabinet: ActorRef[GetKeys], + drawer: ActorRef[GetWallet]): Behavior[NotUsed] = // we don't _really_ care about the actor protocol here as nobody will send us // messages except for responses to our queries, so we just accept any kind of message // but narrow that to more limited types then we interact - Behaviors.setup[AnyRef] { context => - var wallet: Option[Wallet] = None - var keys: Option[Keys] = None + Behaviors + .setup[AnyRef] { context => + var wallet: Option[Wallet] = None + var keys: Option[Keys] = None - // we narrow the ActorRef type to any subtype of the actual type we accept - keyCabinet ! GetKeys(whoIsLeaving, context.self.narrow[Keys]) - drawer ! GetWallet(whoIsLeaving, context.self.narrow[Wallet]) + // we narrow the ActorRef type to any subtype of the actual type we accept + keyCabinet ! GetKeys(whoIsLeaving, context.self.narrow[Keys]) + drawer ! GetWallet(whoIsLeaving, context.self.narrow[Wallet]) - def nextBehavior: Behavior[AnyRef] = - (keys, wallet) match { - case (Some(w), Some(k)) => - // we got both, "session" is completed! - respondTo ! 
ReadyToLeaveHome(whoIsLeaving, w, k) - Behavior.stopped + def nextBehavior: Behavior[AnyRef] = + (keys, wallet) match { + case (Some(w), Some(k)) => + // we got both, "session" is completed! + respondTo ! ReadyToLeaveHome(whoIsLeaving, w, k) + Behavior.stopped + case _ => + Behavior.same + } + + Behaviors.receiveMessage { + case w: Wallet => + wallet = Some(w) + nextBehavior + case k: Keys => + keys = Some(k) + nextBehavior case _ => - Behavior.same + Behaviors.unhandled } - - Behaviors.receiveMessage { - case w: Wallet => - wallet = Some(w) - nextBehavior - case k: Keys => - keys = Some(k) - nextBehavior - case _ => - Behaviors.unhandled } - }.narrow[NotUsed] // we don't let anyone else know we accept anything + .narrow[NotUsed] // we don't let anyone else know we accept anything // #per-session-child val requestor = TestProbe[ReadyToLeaveHome]() diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala index ef135a6e19..8e39948798 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/IntroSpec.scala @@ -28,12 +28,12 @@ object IntroSpec { final case class Greeted(whom: String, from: ActorRef[Greet]) val greeter: Behavior[Greet] = Behaviors.receive { (context, message) => - //#fiddle_code + //#fiddle_code context.log.info("Hello {}!", message.whom) - //#fiddle_code - //#hello-world-actor + //#fiddle_code + //#hello-world-actor println(s"Hello ${message.whom}!") - //#hello-world-actor + //#hello-world-actor message.replyTo ! 
Greeted(message.whom, context.self) Behaviors.same } @@ -46,12 +46,12 @@ object IntroSpec { def bot(greetingCounter: Int, max: Int): Behavior[HelloWorld.Greeted] = Behaviors.receive { (context, message) => val n = greetingCounter + 1 - //#fiddle_code + //#fiddle_code context.log.info("Greeting {} for {}", n, message.whom) - //#fiddle_code - //#hello-world-bot + //#fiddle_code + //#hello-world-bot println(s"Greeting ${n} for ${message.whom}") - //#hello-world-bot + //#hello-world-bot if (n == max) { Behaviors.stopped } else { @@ -107,12 +107,10 @@ object IntroSpec { object ChatRoom { //#chatroom-protocol sealed trait RoomCommand - final case class GetSession(screenName: String, replyTo: ActorRef[SessionEvent]) - extends RoomCommand + final case class GetSession(screenName: String, replyTo: ActorRef[SessionEvent]) extends RoomCommand //#chatroom-protocol //#chatroom-behavior - private final case class PublishSessionMessage(screenName: String, message: String) - extends RoomCommand + private final case class PublishSessionMessage(screenName: String, message: String) extends RoomCommand //#chatroom-behavior //#chatroom-protocol @@ -135,22 +133,20 @@ object IntroSpec { message match { case GetSession(screenName, client) => // create a child actor for further interaction with the client - val ses = context.spawn( - session(context.self, screenName, client), - name = URLEncoder.encode(screenName, StandardCharsets.UTF_8.name)) + val ses = context.spawn(session(context.self, screenName, client), + name = URLEncoder.encode(screenName, StandardCharsets.UTF_8.name)) client ! SessionGranted(ses) chatRoom(ses :: sessions) case PublishSessionMessage(screenName, message) => val notification = NotifyClient(MessagePosted(screenName, message)) - sessions foreach (_ ! notification) + sessions.foreach(_ ! 
notification) Behaviors.same } } - private def session( - room: ActorRef[PublishSessionMessage], - screenName: String, - client: ActorRef[SessionEvent]): Behavior[SessionCommand] = + private def session(room: ActorRef[PublishSessionMessage], + screenName: String, + client: ActorRef[SessionEvent]): Behavior[SessionCommand] = Behaviors.receive { (context, message) => message match { case PostMessage(message) => diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala index 2e8db0507b..21c16c9d14 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/OOIntroSpec.scala @@ -9,7 +9,7 @@ import java.net.URLEncoder import java.nio.charset.StandardCharsets import akka.actor.typed._ -import akka.actor.typed.scaladsl.{ ActorContext, Behaviors, AbstractBehavior } +import akka.actor.typed.scaladsl.{ AbstractBehavior, ActorContext, Behaviors } import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit import org.scalatest.WordSpecLike @@ -21,12 +21,10 @@ object OOIntroSpec { object ChatRoom { //#chatroom-protocol sealed trait RoomCommand - final case class GetSession(screenName: String, replyTo: ActorRef[SessionEvent]) - extends RoomCommand + final case class GetSession(screenName: String, replyTo: ActorRef[SessionEvent]) extends RoomCommand //#chatroom-protocol //#chatroom-behavior - private final case class PublishSessionMessage(screenName: String, message: String) - extends RoomCommand + private final case class PublishSessionMessage(screenName: String, message: String) extends RoomCommand //#chatroom-behavior //#chatroom-protocol @@ -51,24 +49,22 @@ object OOIntroSpec { message match { case GetSession(screenName, client) => // create a child actor for further interaction with the client - val ses = context.spawn( - session(context.self, screenName, client), - name = 
URLEncoder.encode(screenName, StandardCharsets.UTF_8.name)) + val ses = context.spawn(session(context.self, screenName, client), + name = URLEncoder.encode(screenName, StandardCharsets.UTF_8.name)) client ! SessionGranted(ses) sessions = ses :: sessions this case PublishSessionMessage(screenName, message) => val notification = NotifyClient(MessagePosted(screenName, message)) - sessions foreach (_ ! notification) + sessions.foreach(_ ! notification) this } } } - private def session( - room: ActorRef[PublishSessionMessage], - screenName: String, - client: ActorRef[SessionEvent]): Behavior[SessionCommand] = + private def session(room: ActorRef[PublishSessionMessage], + screenName: String, + client: ActorRef[SessionEvent]): Behavior[SessionCommand] = Behaviors.receiveMessage { case PostMessage(message) => // from client, publish to others via the room @@ -115,15 +111,17 @@ class OOIntroSpec extends ScalaTestWithActorTestKit with WordSpecLike { val gabblerRef = context.spawn(gabbler, "gabbler") context.watch(gabblerRef) - Behaviors.receiveMessagePartial[String] { - case "go" => - chatRoom ! GetSession("ol’ Gabbler", gabblerRef) - Behaviors.same - } receiveSignal { - case (_, Terminated(_)) => - println("Stopping guardian") - Behaviors.stopped - } + Behaviors + .receiveMessagePartial[String] { + case "go" => + chatRoom ! 
GetSession("ol’ Gabbler", gabblerRef) + Behaviors.same + } + .receiveSignal { + case (_, Terminated(_)) => + println("Stopping guardian") + Behaviors.stopped + } } val system = ActorSystem(main, "ChatRoomDemo") diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala index 7c167e29fa..7daa3536eb 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/RouterSpec.scala @@ -48,8 +48,7 @@ class RouterSpec extends ScalaTestWithActorTestKit with WordSpecLike { spawn(Behaviors.setup[Unit] { ctx => // #pool // make sure the workers are restarted if they fail - val supervisedWorker = Behaviors.supervise(Worker.behavior) - .onFailure[Exception](SupervisorStrategy.restart) + val supervisedWorker = Behaviors.supervise(Worker.behavior).onFailure[Exception](SupervisorStrategy.restart) val pool = Routers.pool(poolSize = 4)(supervisedWorker) val router = ctx.spawn(pool, "worker-pool") diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StashDocSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StashDocSpec.scala index b875571cfb..92770f8b16 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StashDocSpec.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/StashDocSpec.scala @@ -34,7 +34,6 @@ object StashDocSpec { def behavior(id: String, db: DB): Behavior[Command] = Behaviors.setup[Command] { context => - val buffer = StashBuffer[Command](capacity = 100) def init(): Behavior[Command] = diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingUntypedSpec.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingUntypedSpec.scala index 635f06c1c2..9e7c2a8a7d 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingUntypedSpec.scala +++ 
b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/coexistence/TypedWatchingUntypedSpec.scala @@ -37,16 +37,18 @@ object TypedWatchingUntypedSpec { // illustrating how to pass sender, toUntyped is an implicit extension method untyped.tell(Typed.Ping(context.self), context.self.toUntyped) - Behaviors.receivePartial[Command] { - case (context, Pong) => - // it's not possible to get the sender, that must be sent in message - // context.stop is an implicit extension method - context.stop(untyped) - Behaviors.same - } receiveSignal { - case (_, akka.actor.typed.Terminated(_)) => - Behaviors.stopped - } + Behaviors + .receivePartial[Command] { + case (context, Pong) => + // it's not possible to get the sender, that must be sent in message + // context.stop is an implicit extension method + context.stop(untyped) + Behaviors.same + } + .receiveSignal { + case (_, akka.actor.typed.Terminated(_)) => + Behaviors.stopped + } } } //#typed diff --git a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala index 98593fe316..7c6ec91e62 100644 --- a/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala +++ b/akka-actor-typed-tests/src/test/scala/docs/akka/typed/supervision/SupervisionCompileOnly.scala @@ -16,25 +16,23 @@ object SupervisionCompileOnly { val behavior = Behaviors.empty[String] //#restart - Behaviors.supervise(behavior) - .onFailure[IllegalStateException](SupervisorStrategy.restart) + Behaviors.supervise(behavior).onFailure[IllegalStateException](SupervisorStrategy.restart) //#restart //#resume - Behaviors.supervise(behavior) - .onFailure[IllegalStateException](SupervisorStrategy.resume) + Behaviors.supervise(behavior).onFailure[IllegalStateException](SupervisorStrategy.resume) //#resume //#restart-limit - Behaviors.supervise(behavior) - 
.onFailure[IllegalStateException](SupervisorStrategy.restart.withLimit( - maxNrOfRetries = 10, withinTimeRange = 10.seconds - )) + Behaviors + .supervise(behavior) + .onFailure[IllegalStateException]( + SupervisorStrategy.restart.withLimit(maxNrOfRetries = 10, withinTimeRange = 10.seconds)) //#restart-limit //#multiple - Behaviors.supervise(Behaviors.supervise(behavior) - .onFailure[IllegalStateException](SupervisorStrategy.restart)) + Behaviors + .supervise(Behaviors.supervise(behavior).onFailure[IllegalStateException](SupervisorStrategy.restart)) .onFailure[IllegalArgumentException](SupervisorStrategy.stop) //#multiple @@ -61,20 +59,22 @@ object SupervisionCompileOnly { Behaviors.receiveMessage(msg => child(size + msg.length)) def parent: Behavior[String] = { - Behaviors.supervise[String] { - Behaviors.setup { ctx => - val child1 = ctx.spawn(child(0), "child1") - val child2 = ctx.spawn(child(0), "child2") + Behaviors + .supervise[String] { + Behaviors.setup { ctx => + val child1 = ctx.spawn(child(0), "child1") + val child2 = ctx.spawn(child(0), "child2") - Behaviors.receiveMessage[String] { msg => - // there might be bugs here... - val parts = msg.split(" ") - child1 ! parts(0) - child2 ! parts(1) - Behaviors.same + Behaviors.receiveMessage[String] { msg => + // there might be bugs here... + val parts = msg.split(" ") + child1 ! parts(0) + child2 ! parts(1) + Behaviors.same + } } } - }.onFailure(SupervisorStrategy.restart) + .onFailure(SupervisorStrategy.restart) } //#restart-stop-children @@ -85,15 +85,17 @@ object SupervisionCompileOnly { val child2 = ctx.spawn(child(0), "child2") // supervision strategy inside the setup to not recreate children on restart - Behaviors.supervise { - Behaviors.receiveMessage[String] { msg => - // there might be bugs here... - val parts = msg.split(" ") - child1 ! parts(0) - child2 ! parts(1) - Behaviors.same + Behaviors + .supervise { + Behaviors.receiveMessage[String] { msg => + // there might be bugs here... 
+ val parts = msg.split(" ") + child1 ! parts(0) + child2 ! parts(1) + Behaviors.same + } } - }.onFailure(SupervisorStrategy.restart.withStopChildren(false)) + .onFailure(SupervisorStrategy.restart.withStopChildren(false)) } } //#restart-keep-children diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRef.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRef.scala index f91c525e23..554d222fbc 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRef.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRef.scala @@ -22,7 +22,9 @@ import akka.actor.typed.internal.InternalRecipientRef * Not for user extension */ @DoNotInherit -trait ActorRef[-T] extends RecipientRef[T] with java.lang.Comparable[ActorRef[_]] with java.io.Serializable { this: InternalRecipientRef[T] => +trait ActorRef[-T] extends RecipientRef[T] with java.lang.Comparable[ActorRef[_]] with java.io.Serializable { + this: InternalRecipientRef[T] => + /** * Send a message to the Actor referenced by this ActorRef using *at-most-once* * messaging semantics. @@ -57,6 +59,7 @@ trait ActorRef[-T] extends RecipientRef[T] with java.lang.Comparable[ActorRef[_] object ActorRef { implicit final class ActorRefOps[-T](val ref: ActorRef[T]) extends AnyVal { + /** * Send a message to the Actor referenced by this ActorRef using *at-most-once* * messaging semantics. @@ -99,7 +102,7 @@ private[akka] final case class SerializedActorRef[T] private (address: String) { case null => throw new IllegalStateException( "Trying to deserialize a serialized typed ActorRef without an ActorSystem in scope." + - " Use 'akka.serialization.Serialization.currentSystem.withValue(system) { ... }'") + " Use 'akka.serialization.Serialization.currentSystem.withValue(system) { ... 
}'") case someSystem => val resolver = ActorRefResolver(someSystem.toTyped) resolver.resolveActorRef(address) @@ -112,6 +115,7 @@ private[akka] final case class SerializedActorRef[T] private (address: String) { * - not watchable */ trait RecipientRef[-T] { this: InternalRecipientRef[T] => + /** * Send a message to the destination referenced by this `RecipientRef` using *at-most-once* * messaging semantics. @@ -122,6 +126,7 @@ trait RecipientRef[-T] { this: InternalRecipientRef[T] => object RecipientRef { implicit final class RecipientRefOps[-T](val ref: RecipientRef[T]) extends AnyVal { + /** * Send a message to the destination referenced by this `RecipientRef` using *at-most-once* * messaging semantics. diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRefResolver.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRefResolver.scala index 8c8ee66156..ceb401d066 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRefResolver.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorRefResolver.scala @@ -70,4 +70,4 @@ object ActorRefResolverSetup { * for tests that need to replace extension with stub/mock implementations. 
*/ final class ActorRefResolverSetup(createExtension: java.util.function.Function[ActorSystem[_], ActorRefResolver]) - extends ExtensionSetup[ActorRefResolver](ActorRefResolver, createExtension) + extends ExtensionSetup[ActorRefResolver](ActorRefResolver, createExtension) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala index ab1788b827..e2c47f4810 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/ActorSystem.scala @@ -37,6 +37,7 @@ import com.typesafe.config.ConfigFactory @DoNotInherit @ApiMayChange abstract class ActorSystem[-T] extends ActorRef[T] with Extensions { this: InternalRecipientRef[T] => + /** * The name of this actor system, used to distinguish multiple ones within * the same JVM & class loader. @@ -143,7 +144,8 @@ abstract class ActorSystem[-T] extends ActorRef[T] with Extensions { this: Inter * to which messages can immediately be sent by using the `ActorRef.apply` * method. */ - def systemActorOf[U](behavior: Behavior[U], name: String, props: Props = Props.empty)(implicit timeout: Timeout): Future[ActorRef[U]] + def systemActorOf[U](behavior: Behavior[U], name: String, props: Props = Props.empty)( + implicit timeout: Timeout): Future[ActorRef[U]] /** * Return a reference to this system’s [[akka.actor.typed.receptionist.Receptionist]]. 
@@ -157,26 +159,23 @@ object ActorSystem { /** * Scala API: Create an ActorSystem */ - def apply[T]( - guardianBehavior: Behavior[T], - name: String - ): ActorSystem[T] = createInternal(name, guardianBehavior, Props.empty, ActorSystemSetup.create(BootstrapSetup())) + def apply[T](guardianBehavior: Behavior[T], name: String): ActorSystem[T] = + createInternal(name, guardianBehavior, Props.empty, ActorSystemSetup.create(BootstrapSetup())) /** * Scala API: Create an ActorSystem */ - def apply[T]( - guardianBehavior: Behavior[T], - name: String, - config: Config - ): ActorSystem[T] = + def apply[T](guardianBehavior: Behavior[T], name: String, config: Config): ActorSystem[T] = createInternal(name, guardianBehavior, Props.empty, ActorSystemSetup.create(BootstrapSetup(config))) /** * Scala API: Creates a new actor system with the specified name and settings * The core actor system settings are defined in [[BootstrapSetup]] */ - def apply[T](guardianBehavior: Behavior[T], name: String, setup: ActorSystemSetup, guardianProps: Props = Props.empty): ActorSystem[T] = { + def apply[T](guardianBehavior: Behavior[T], + name: String, + setup: ActorSystemSetup, + guardianProps: Props = Props.empty): ActorSystem[T] = { createInternal(name, guardianBehavior, guardianProps, setup) } @@ -218,9 +217,10 @@ object ActorSystem { * which runs Akka Typed [[Behavior]] on an emulation layer. In this * system typed and untyped actors can coexist. 
*/ - private def createInternal[T](name: String, guardianBehavior: Behavior[T], + private def createInternal[T](name: String, + guardianBehavior: Behavior[T], guardianProps: Props, - setup: ActorSystemSetup): ActorSystem[T] = { + setup: ActorSystemSetup): ActorSystem[T] = { Behavior.validateAsInitial(guardianBehavior) require(Behavior.isAlive(guardianBehavior)) @@ -230,8 +230,13 @@ object ActorSystem { val appConfig = bootstrapSettings.flatMap(_.config).getOrElse(ConfigFactory.load(cl)) val executionContext = bootstrapSettings.flatMap(_.defaultExecutionContext) - val system = new untyped.ActorSystemImpl(name, appConfig, cl, executionContext, - Some(PropsAdapter(() => guardianBehavior, guardianProps, isGuardian = true)), setup) + val system = new untyped.ActorSystemImpl(name, + appConfig, + cl, + executionContext, + Some( + PropsAdapter(() => guardianBehavior, guardianProps, isGuardian = true)), + setup) system.start() system.guardian ! GuardianActorAdapter.Start @@ -251,11 +256,12 @@ object ActorSystem { * This class is immutable. 
*/ final class Settings(val config: Config, val untypedSettings: untyped.ActorSystem.Settings, val name: String) { - def this(classLoader: ClassLoader, config: Config, name: String) = this({ - val cfg = config.withFallback(ConfigFactory.defaultReference(classLoader)) - cfg.checkValid(ConfigFactory.defaultReference(classLoader), "akka") - cfg - }, new untyped.ActorSystem.Settings(classLoader, config, name), name) + def this(classLoader: ClassLoader, config: Config, name: String) = + this({ + val cfg = config.withFallback(ConfigFactory.defaultReference(classLoader)) + cfg.checkValid(ConfigFactory.defaultReference(classLoader), "akka") + cfg + }, new untyped.ActorSystem.Settings(classLoader, config, name), name) def this(settings: untyped.ActorSystem.Settings) = this(settings.config, settings, settings.name) @@ -268,6 +274,6 @@ final class Settings(val config: Config, val untypedSettings: untyped.ActorSyste private val typedConfig = config.getConfig("akka.actor.typed") - val RestartStashCapacity: Int = typedConfig.getInt("restart-stash-capacity") - .requiring(_ >= 0, "restart-stash-capacity must be >= 0") + val RestartStashCapacity: Int = + typedConfig.getInt("restart-stash-capacity").requiring(_ >= 0, "restart-stash-capacity must be >= 0") } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala index 085dd5c7bd..a17c2b2dc9 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Behavior.scala @@ -37,6 +37,7 @@ import akka.actor.typed.scaladsl.{ ActorContext => SAC } @ApiMayChange @DoNotInherit abstract class Behavior[T] { behavior => + /** * Narrow the type of this Behavior, which is always a safe operation. 
This * method is necessary to implement the contravariant nature of Behavior @@ -76,6 +77,7 @@ abstract class Behavior[T] { behavior => * an extensible behavior but should instead use the [[BehaviorInterceptor]] */ abstract class ExtensibleBehavior[T] extends Behavior[T] { + /** * Process an incoming message and return the next behavior. * @@ -114,6 +116,7 @@ abstract class ExtensibleBehavior[T] extends Behavior[T] { object Behavior { final implicit class BehaviorDecorators[T](val behavior: Behavior[T]) extends AnyVal { + /** * Widen the wrapped Behavior by placing a funnel in front of it: the supplied * PartialFunction decides which message to pull in (those that it is defined @@ -219,7 +222,8 @@ object Behavior { /** * INTERNAL API */ - @InternalApi private[akka] val unhandledSignal: PartialFunction[(TypedActorContext[Nothing], Signal), Behavior[Nothing]] = { + @InternalApi private[akka] val unhandledSignal + : PartialFunction[(TypedActorContext[Nothing], Signal), Behavior[Nothing]] = { case (_, _) => UnhandledBehavior } @@ -231,6 +235,7 @@ object Behavior { private[akka] abstract class DeferredBehavior[T] extends Behavior[T] { def apply(ctx: TypedActorContext[T]): Behavior[T] } + /** INTERNAL API */ @InternalApi private[akka] object DeferredBehavior { @@ -268,7 +273,8 @@ object Behavior { private final def validatePostStop(postStop: OptionVal[Behavior[T]]): Unit = { postStop match { case OptionVal.Some(b: DeferredBehavior[_]) => - throw new IllegalArgumentException(s"Behavior used as `postStop` behavior in Stopped(...) was a deferred one [${b.toString}], which is not supported (it would never be evaluated).") + throw new IllegalArgumentException( + s"Behavior used as `postStop` behavior in Stopped(...) 
was a deferred one [${b.toString}], which is not supported (it would never be evaluated).") case _ => // all good } } @@ -305,7 +311,8 @@ object Behavior { */ @InternalApi @tailrec - private[akka] def wrap[T, U](currentBehavior: Behavior[_], nextBehavior: Behavior[T], ctx: TypedActorContext[T])(f: Behavior[T] => Behavior[U]): Behavior[U] = + private[akka] def wrap[T, U](currentBehavior: Behavior[_], nextBehavior: Behavior[T], ctx: TypedActorContext[T])( + f: Behavior[T] => Behavior[U]): Behavior[U] = nextBehavior match { case SameBehavior | `currentBehavior` => same case UnhandledBehavior => unhandled @@ -321,7 +328,7 @@ object Behavior { def start[T](behavior: Behavior[T], ctx: TypedActorContext[T]): Behavior[T] = { // TODO can this be made @tailrec? behavior match { - case innerDeferred: DeferredBehavior[T] => start(innerDeferred(ctx), ctx) + case innerDeferred: DeferredBehavior[T] => start(innerDeferred(ctx), ctx) case wrapped: WrappingBehavior[T, Any] @unchecked => // make sure that a deferred behavior wrapped inside some other behavior is also started val startedInner = start(wrapped.nestedBehavior, ctx.asInstanceOf[TypedActorContext[Any]]) @@ -346,7 +353,7 @@ object Behavior { case d: DeferredBehavior[T] => throw new IllegalArgumentException( "Cannot verify behavior existence when there are deferred in the behavior stack, " + - s"Behavior.start the stack first. This is probably a bug, please create an issue. $d") + s"Behavior.start the stack first. This is probably a bug, please create an issue. 
$d") case _ => false } @@ -401,7 +408,7 @@ object Behavior { // we need to throw here to allow supervision of deathpact exception signal match { case Terminated(ref) if result == UnhandledBehavior => throw DeathPactException(ref) - case _ => result + case _ => result } } @@ -410,11 +417,12 @@ object Behavior { case null => throw new InvalidMessageException("[null] is not an allowed behavior") case SameBehavior | UnhandledBehavior => throw new IllegalArgumentException(s"cannot execute with [$behavior] as behavior") - case d: DeferredBehavior[_] => throw new IllegalArgumentException(s"deferred [$d] should not be passed to interpreter") - case IgnoreBehavior => Behavior.same[T] - case s: StoppedBehavior[T] => s - case f: FailedBehavior => f - case EmptyBehavior => Behavior.unhandled[T] + case d: DeferredBehavior[_] => + throw new IllegalArgumentException(s"deferred [$d] should not be passed to interpreter") + case IgnoreBehavior => Behavior.same[T] + case s: StoppedBehavior[T] => s + case f: FailedBehavior => f + case EmptyBehavior => Behavior.unhandled[T] case ext: ExtensibleBehavior[T] => val possiblyDeferredResult = msg match { case signal: Signal => ext.receiveSignal(ctx, signal) @@ -425,4 +433,3 @@ object Behavior { } } - diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Dispatchers.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Dispatchers.scala index ff612dded2..1c2ecd13ce 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Dispatchers.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Dispatchers.scala @@ -7,6 +7,7 @@ package akka.actor.typed import scala.concurrent.ExecutionContextExecutor object Dispatchers { + /** * The id of the default dispatcher, also the full key of the * configuration of the default dispatcher. 
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala index bc60a00776..978a8069a8 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Extensions.scala @@ -140,6 +140,7 @@ trait Extensions { * of the payload, if is in the process of registration from another Thread of execution */ def registerExtension[T <: Extension](ext: ExtensionId[T]): T + /** * Returns the payload that is associated with the provided extension * throws an IllegalStateException if it is not registered. @@ -161,10 +162,9 @@ trait Extensions { * implementation of the extension. Intended for tests that need to replace * extension with stub/mock implementations. */ -abstract class ExtensionSetup[T <: Extension]( - val extId: ExtensionId[T], - val createExtension: java.util.function.Function[ActorSystem[_], T]) - extends Setup +abstract class ExtensionSetup[T <: Extension](val extId: ExtensionId[T], + val createExtension: java.util.function.Function[ActorSystem[_], T]) + extends Setup /** * Scala 2.11 API: Each extension typically provide a concrete `ExtensionSetup` that can be used in @@ -173,6 +173,6 @@ abstract class ExtensionSetup[T <: Extension]( * extension with stub/mock implementations. 
*/ abstract class AbstractExtensionSetup[T <: Extension](extId: ExtensionId[T], createExtension: ActorSystem[_] => T) - extends ExtensionSetup[T](extId, new java.util.function.Function[ActorSystem[_], T] { - override def apply(sys: ActorSystem[_]): T = createExtension.apply(sys) - }) // TODO can be simplified when compiled only with Scala >= 2.12 + extends ExtensionSetup[T](extId, new java.util.function.Function[ActorSystem[_], T] { + override def apply(sys: ActorSystem[_]): T = createExtension.apply(sys) + }) // TODO can be simplified when compiled only with Scala >= 2.12 diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Logger.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Logger.scala index 3afb7cf4ad..0f194939d3 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Logger.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Logger.scala @@ -49,6 +49,7 @@ object LogMarker { */ @DoNotInherit abstract sealed class LogOptions { + /** * User control whether messages are logged or not. This is useful when you want to have an application configuration * to control when to log messages. @@ -68,6 +69,7 @@ abstract sealed class LogOptions { def enabled: Boolean def level: LogLevel def logger: Option[Logger] + /** Java API */ def getLogger: Optional[Logger] } @@ -76,12 +78,14 @@ abstract sealed class LogOptions { * Factories for log options */ object LogOptions { + /** * INTERNAL API */ @InternalApi private[akka] final case class LogOptionsImpl(enabled: Boolean, level: LogLevel, logger: Option[Logger]) - extends LogOptions { + extends LogOptions { + /** * User control whether messages are logged or not. This is useful when you want to have an application configuration * to control when to log messages. @@ -230,6 +234,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def error(message: String): Unit + /** * Message template with 1 replacement argument. 
* @@ -239,18 +244,21 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def error(template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * * @see [[Logger]] */ def error(template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. * * @see [[Logger]] */ def error(template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * @@ -266,6 +274,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def error(cause: Throwable, message: String): Unit + /** * Message template with 1 replacement argument. * @@ -275,18 +284,21 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def error(cause: Throwable, template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * * @see [[Logger]] */ def error(cause: Throwable, template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. * * @see [[Logger]] */ def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * @@ -302,6 +314,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def error(marker: LogMarker, cause: Throwable, message: String): Unit + /** * Message template with 1 replacement argument. * @@ -311,24 +324,28 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * * @see [[Logger]] */ def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. 
* * @see [[Logger]] */ def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * * @see [[Logger]] */ def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit + /** * Log message at error level, without providing the exception that caused the error. * @@ -337,6 +354,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def error(marker: LogMarker, message: String): Unit + /** * Message template with 1 replacement argument. * @@ -348,6 +366,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def error(marker: LogMarker, template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * @@ -356,6 +375,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def error(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. * @@ -364,6 +384,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def error(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * @@ -379,6 +400,7 @@ abstract class Logger private[akka] () { * Log message at warning level. */ def warning(message: String): Unit + /** * Message template with 1 replacement argument. * @@ -388,18 +410,21 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def warning(template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * * @see [[Logger]] */ def warning(template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. 
* * @see [[Logger]] */ def warning(template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * @@ -411,6 +436,7 @@ abstract class Logger private[akka] () { * Log message at warning level. */ def warning(cause: Throwable, message: String): Unit + /** * Message template with 1 replacement argument. * @@ -420,16 +446,19 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def warning(cause: Throwable, template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * @see [[Logger]] */ def warning(cause: Throwable, template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. * @see [[Logger]] */ def warning(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * @see [[Logger]] @@ -443,6 +472,7 @@ abstract class Logger private[akka] () { * The marker argument can be picked up by various logging frameworks such as slf4j to mark this log statement as "special". */ def warning(marker: LogMarker, message: String): Unit + /** * Message template with 1 replacement argument. * @@ -454,6 +484,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def warning(marker: LogMarker, template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * @@ -462,6 +493,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def warning(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. * @@ -470,6 +502,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def warning(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. 
For more parameters see the single replacement version of this method. * @@ -484,6 +517,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def warning(marker: LogMarker, cause: Throwable, message: String): Unit + /** * Message template with 1 replacement argument. * @@ -495,6 +529,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def warning(marker: LogMarker, cause: Throwable, template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * @@ -503,6 +538,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def warning(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. * @@ -511,6 +547,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def warning(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * @@ -528,6 +565,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def info(message: String): Unit + /** * Message template with 1 replacement argument. * @@ -537,18 +575,21 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def info(template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * * @see [[Logger]] */ def info(template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. * * @see [[Logger]] */ def info(template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * @@ -566,6 +607,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def info(marker: LogMarker, message: String): Unit + /** * Message template with 1 replacement argument. 
* @@ -577,6 +619,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def info(marker: LogMarker, template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * @@ -585,6 +628,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def info(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. * @@ -593,6 +637,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def info(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * @@ -610,6 +655,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def debug(message: String): Unit + /** * Message template with 1 replacement argument. * @@ -619,18 +665,21 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def debug(template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * * @see [[Logger]] */ def debug(template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. * * @see [[Logger]] */ def debug(template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * @@ -648,6 +697,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def debug(marker: LogMarker, message: String): Unit + /** * Message template with 1 replacement argument. * @@ -659,6 +709,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def debug(marker: LogMarker, template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. 
* @@ -667,6 +718,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def debug(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. * @@ -675,6 +727,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def debug(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * @@ -692,6 +745,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def log(level: LogLevel, message: String): Unit + /** * Message template with 1 replacement argument. * @@ -701,18 +755,21 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def log(level: LogLevel, template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * * @see [[Logger]] */ def log(level: LogLevel, template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. * * @see [[Logger]] */ def log(level: LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * @@ -730,6 +787,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def log(level: LogLevel, marker: LogMarker, message: String): Unit + /** * Message template with 1 replacement argument. * @@ -741,6 +799,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def log(level: LogLevel, marker: LogMarker, template: String, arg1: Any): Unit + /** * Message template with 2 replacement arguments. * @@ -749,6 +808,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def log(level: LogLevel, marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit + /** * Message template with 3 replacement arguments. 
* @@ -757,6 +817,7 @@ abstract class Logger private[akka] () { * @see [[Logger]] */ def log(level: LogLevel, marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit + /** * Message template with 4 replacement arguments. For more parameters see the single replacement version of this method. * diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/MessageAndSignals.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/MessageAndSignals.scala index 4023f54210..3298fc304c 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/MessageAndSignals.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/MessageAndSignals.scala @@ -11,6 +11,7 @@ import akka.annotation.DoNotInherit * dropped due to overfull queues or routers with no routees. */ final case class Dropped(msg: Any, recipient: ActorRef[Nothing]) { + /** Java API */ def getRecipient(): ActorRef[Void] = recipient.asInstanceOf[ActorRef[Void]] } @@ -18,7 +19,9 @@ final case class Dropped(msg: Any, recipient: ActorRef[Nothing]) { /** * Exception that an actor fails with if it does not handle a Terminated message. 
*/ -final case class DeathPactException(ref: ActorRef[Nothing]) extends RuntimeException(s"death pact with $ref was triggered") { +final case class DeathPactException(ref: ActorRef[Nothing]) + extends RuntimeException(s"death pact with $ref was triggered") { + /** Java API */ def getRef(): ActorRef[Void] = ref.asInstanceOf[ActorRef[Void]] } @@ -77,6 +80,7 @@ object Terminated { */ @DoNotInherit sealed class Terminated(val ref: ActorRef[Nothing]) extends Signal { + /** Java API: The actor that was watched and got terminated */ def getRef(): ActorRef[Void] = ref.asInstanceOf[ActorRef[Void]] diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/Props.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/Props.scala index e027cfdcd6..80d4abe017 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/Props.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/Props.scala @@ -35,6 +35,7 @@ object Props { @DoNotInherit @ApiMayChange abstract class Props private[akka] () extends Product with Serializable { + /** * Reference to the tail of this Props list. 
* @@ -99,7 +100,7 @@ abstract class Props private[akka] () extends Product with Serializable { @tailrec def select(d: Props, acc: List[Props]): List[Props] = d match { case EmptyProps => acc.reverse - case _: T => select(d.next, (d withNext EmptyProps) :: acc) + case _: T => select(d.next, (d.withNext(EmptyProps)) :: acc) case _ => select(d.next, acc) } select(this, Nil) @@ -119,7 +120,7 @@ abstract class Props private[akka] () extends Product with Serializable { } @tailrec def link(l: List[Props], acc: Props): Props = l match { - case d :: ds => link(ds, d withNext acc) + case d :: ds => link(ds, d.withNext(acc)) case Nil => acc } link(select(this, Nil), EmptyProps) @@ -192,6 +193,7 @@ private[akka] sealed case class DispatcherDefault(next: Props) extends Dispatche object DispatcherDefault { // this is hidden in order to avoid having people match on this object private val empty = DispatcherDefault(EmptyProps) + /** * Retrieve an instance for this configuration node with empty `next` reference. 
*/ @@ -206,6 +208,7 @@ object DispatcherDefault { * INTERNAL API */ @InternalApi -private[akka] final case class DispatcherFromConfig(path: String, next: Props = Props.empty) extends DispatcherSelector { +private[akka] final case class DispatcherFromConfig(path: String, next: Props = Props.empty) + extends DispatcherSelector { override def withNext(next: Props): Props = copy(next = next) } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/SpawnProtocol.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/SpawnProtocol.scala index 6e92190e98..a8bdaf056b 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/SpawnProtocol.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/SpawnProtocol.scala @@ -11,6 +11,7 @@ import akka.actor.typed.scaladsl.Behaviors object SpawnProtocol { object Spawn { + /** * Special factory to make using Spawn with ask easier */ @@ -36,7 +37,7 @@ object SpawnProtocol { * `InvalidActorNameException`, but it's better to use unique names to begin with. */ final case class Spawn[T](behavior: Behavior[T], name: String, props: Props, replyTo: ActorRef[ActorRef[T]]) - extends SpawnProtocol + extends SpawnProtocol /** * Behavior implementing the [[SpawnProtocol]]. diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala index fb14fab454..ac69297a38 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/SupervisorStrategy.scala @@ -65,10 +65,9 @@ object SupervisorStrategy { * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. * In order to skip this additional delay pass in `0`. 
*/ - def restartWithBackoff( - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double): BackoffSupervisorStrategy = + def restartWithBackoff(minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double): BackoffSupervisorStrategy = Backoff(minBackoff, maxBackoff, randomFactor, resetBackoffAfter = minBackoff) /** @@ -98,10 +97,9 @@ object SupervisorStrategy { * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. * In order to skip this additional delay pass in `0`. */ - def restartWithBackoff( - minBackoff: java.time.Duration, - maxBackoff: java.time.Duration, - randomFactor: Double): BackoffSupervisorStrategy = + def restartWithBackoff(minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double): BackoffSupervisorStrategy = restartWithBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor) /** @@ -135,12 +133,13 @@ object SupervisorStrategy { /** * INTERNAL API */ - @InternalApi private[akka] final case class Restart( - maxRestarts: Int, - withinTimeRange: FiniteDuration, - loggingEnabled: Boolean = true, - stopChildren: Boolean = true, - stashCapacity: Int = -1) extends RestartSupervisorStrategy with RestartOrBackoff { + @InternalApi private[akka] final case class Restart(maxRestarts: Int, + withinTimeRange: FiniteDuration, + loggingEnabled: Boolean = true, + stopChildren: Boolean = true, + stashCapacity: Int = -1) + extends RestartSupervisorStrategy + with RestartOrBackoff { override def withLimit(maxNrOfRetries: Int, withinTimeRange: FiniteDuration): RestartSupervisorStrategy = copy(maxNrOfRetries, withinTimeRange) @@ -162,15 +161,16 @@ object SupervisorStrategy { /** * INTERNAL API */ - @InternalApi private[akka] final case class Backoff( - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - resetBackoffAfter: FiniteDuration, - loggingEnabled: Boolean = true, - maxRestarts: Int = -1, - stopChildren: Boolean = true, - 
stashCapacity: Int = -1) extends BackoffSupervisorStrategy with RestartOrBackoff { + @InternalApi private[akka] final case class Backoff(minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + resetBackoffAfter: FiniteDuration, + loggingEnabled: Boolean = true, + maxRestarts: Int = -1, + stopChildren: Boolean = true, + stashCapacity: Int = -1) + extends BackoffSupervisorStrategy + with RestartOrBackoff { override def withResetBackoffAfter(timeout: FiniteDuration): BackoffSupervisorStrategy = copy(resetBackoffAfter = timeout) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/TypedActorContext.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/TypedActorContext.scala index 977849b494..11c8f3cfda 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/TypedActorContext.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/TypedActorContext.scala @@ -28,4 +28,3 @@ trait TypedActorContext[T] { */ def asScala: scaladsl.ActorContext[T] } - diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala index 42082aadcf..133f2615af 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorContextImpl.scala @@ -24,7 +24,10 @@ import akka.util.JavaDurationConverters._ /** * INTERNAL API */ -@InternalApi private[akka] trait ActorContextImpl[T] extends TypedActorContext[T] with javadsl.ActorContext[T] with scaladsl.ActorContext[T] { +@InternalApi private[akka] trait ActorContextImpl[T] + extends TypedActorContext[T] + with javadsl.ActorContext[T] + with scaladsl.ActorContext[T] { private var messageAdapterRef: OptionVal[ActorRef[Any]] = OptionVal.None private var _messageAdapters: List[(Class[_], Any => T)] = Nil @@ -81,13 +84,18 @@ import akka.util.JavaDurationConverters._ spawnAnonymous(behavior, 
Props.empty) // Scala API impl - override def ask[Req, Res](target: RecipientRef[Req])(createRequest: ActorRef[Res] => Req)(mapResponse: Try[Res] => T)(implicit responseTimeout: Timeout, classTag: ClassTag[Res]): Unit = { + override def ask[Req, Res](target: RecipientRef[Req])(createRequest: ActorRef[Res] => Req)( + mapResponse: Try[Res] => T)(implicit responseTimeout: Timeout, classTag: ClassTag[Res]): Unit = { import akka.actor.typed.scaladsl.AskPattern._ pipeToSelf((target.ask(createRequest))(responseTimeout, system.scheduler))(mapResponse) } // Java API impl - def ask[Req, Res](resClass: Class[Res], target: RecipientRef[Req], responseTimeout: Duration, createRequest: JFunction[ActorRef[Res], Req], applyToResponse: BiFunction[Res, Throwable, T]): Unit = { + def ask[Req, Res](resClass: Class[Res], + target: RecipientRef[Req], + responseTimeout: Duration, + createRequest: JFunction[ActorRef[Res], Req], + applyToResponse: BiFunction[Res, Throwable, T]): Unit = { import akka.actor.typed.javadsl.AskPattern val message = new akka.japi.function.Function[ActorRef[Res], Req] { def apply(ref: ActorRef[Res]): Req = createRequest(ref) @@ -105,7 +113,8 @@ import akka.util.JavaDurationConverters._ future.whenComplete(new BiConsumer[Value, Throwable] { def accept(value: Value, ex: Throwable): Unit = { if (value != null) self.unsafeUpcast ! AdaptMessage(value, applyToResult.apply(_: Value, null)) - if (ex != null) self.unsafeUpcast ! AdaptMessage(ex, applyToResult.apply(null.asInstanceOf[Value], _: Throwable)) + if (ex != null) + self.unsafeUpcast ! 
AdaptMessage(ex, applyToResult.apply(null.asInstanceOf[Value], _: Throwable)) } }) } @@ -137,9 +146,10 @@ import akka.util.JavaDurationConverters._ _messageAdapters.filterNot { case (cls, _) => cls == messageClass } val ref = messageAdapterRef match { case OptionVal.Some(ref) => ref.asInstanceOf[ActorRef[U]] - case OptionVal.None => + case OptionVal.None => // AdaptMessage is not really a T, but that is erased - val ref = internalSpawnMessageAdapter[Any](msg => AdaptWithRegisteredMessageAdapter(msg).asInstanceOf[T], "adapter") + val ref = + internalSpawnMessageAdapter[Any](msg => AdaptWithRegisteredMessageAdapter(msg).asInstanceOf[T], "adapter") messageAdapterRef = OptionVal.Some(ref) ref } @@ -151,4 +161,3 @@ import akka.util.JavaDurationConverters._ */ @InternalApi private[akka] def messageAdapters: List[(Class[_], Any => T)] = _messageAdapters } - diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorRefImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorRefImpl.scala index 3702869441..b752018045 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorRefImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ActorRefImpl.scala @@ -25,7 +25,7 @@ private[akka] trait ActorRefImpl[-T] extends ActorRef[T] { this: InternalRecipie * Comparison takes path and the unique id of the actor cell into account. 
*/ final override def compareTo(other: ActorRef[_]) = { - val x = this.path compareTo other.path + val x = this.path.compareTo(other.path) if (x == 0) if (this.path.uid < other.path.uid) -1 else if (this.path.uid == other.path.uid) 0 else 1 else x } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala index 70434d82ea..2e09ab3195 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/BehaviorImpl.scala @@ -23,13 +23,14 @@ import akka.actor.typed.scaladsl.{ ActorContext => SAC } def widened[O, I](behavior: Behavior[I], matcher: PartialFunction[O, I]): Behavior[O] = intercept(WidenedInterceptor(matcher))(behavior) - class ReceiveBehavior[T]( - val onMessage: (SAC[T], T) => Behavior[T], - onSignal: PartialFunction[(SAC[T], Signal), Behavior[T]] = Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]]) - extends ExtensibleBehavior[T] { + class ReceiveBehavior[T](val onMessage: (SAC[T], T) => Behavior[T], + onSignal: PartialFunction[(SAC[T], Signal), Behavior[T]] = + Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]]) + extends ExtensibleBehavior[T] { override def receiveSignal(ctx: AC[T], msg: Signal): Behavior[T] = - onSignal.applyOrElse((ctx.asScala, msg), Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]]) + onSignal.applyOrElse((ctx.asScala, msg), + Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]]) override def receive(ctx: AC[T], msg: T) = onMessage(ctx.asScala, msg) @@ -41,15 +42,16 @@ import akka.actor.typed.scaladsl.{ ActorContext => SAC } * We implement it separately in order to be able to avoid wrapping each function in * another function which drops the context parameter. 
*/ - class ReceiveMessageBehavior[T]( - val onMessage: T => Behavior[T], - onSignal: PartialFunction[(SAC[T], Signal), Behavior[T]] = Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]]) - extends ExtensibleBehavior[T] { + class ReceiveMessageBehavior[T](val onMessage: T => Behavior[T], + onSignal: PartialFunction[(SAC[T], Signal), Behavior[T]] = Behavior.unhandledSignal + .asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]]) + extends ExtensibleBehavior[T] { override def receive(ctx: AC[T], msg: T) = onMessage(msg) override def receiveSignal(ctx: AC[T], msg: Signal): Behavior[T] = - onSignal.applyOrElse((ctx.asScala, msg), Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]]) + onSignal.applyOrElse((ctx.asScala, msg), + Behavior.unhandledSignal.asInstanceOf[PartialFunction[(SAC[T], Signal), Behavior[T]]]) override def toString = s"ReceiveMessage(${LineNumbers(onMessage)})" } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala index 7783cd885b..007f349f51 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/ExtensionsImpl.scala @@ -38,8 +38,9 @@ trait ExtensionsImpl extends Extensions { self: ActorSystem[_] => settings.config.getStringList(key).asScala.foreach { extensionIdFQCN => // it is either a Scala object or it is a Java class with a static singleton accessor - val idTry = dynamicAccess.getObjectFor[AnyRef](extensionIdFQCN) - .recoverWith { case _ => idFromJavaSingletonAccessor(extensionIdFQCN) } + val idTry = dynamicAccess.getObjectFor[AnyRef](extensionIdFQCN).recoverWith { + case _ => idFromJavaSingletonAccessor(extensionIdFQCN) + } idTry match { case Success(id: ExtensionId[_]) => registerExtension(id) @@ -47,19 +48,21 @@ trait ExtensionsImpl extends Extensions 
{ self: ActorSystem[_] => if (!throwOnLoadFail) log.error("[{}] is not an 'ExtensionId', skipping...", extensionIdFQCN) else throw new RuntimeException(s"[$extensionIdFQCN] is not an 'ExtensionId'") case Failure(problem) => - if (!throwOnLoadFail) log.error(problem, "While trying to load extension [{}], skipping...", extensionIdFQCN) + if (!throwOnLoadFail) + log.error(problem, "While trying to load extension [{}], skipping...", extensionIdFQCN) else throw new RuntimeException(s"While trying to load extension [$extensionIdFQCN]", problem) } } } def idFromJavaSingletonAccessor(extensionIdFQCN: String): Try[ExtensionId[Extension]] = - dynamicAccess.getClassFor[ExtensionId[Extension]](extensionIdFQCN).flatMap[ExtensionId[Extension]] { clazz: Class[_] => - Try { + dynamicAccess.getClassFor[ExtensionId[Extension]](extensionIdFQCN).flatMap[ExtensionId[Extension]] { + clazz: Class[_] => + Try { - val singletonAccessor = clazz.getDeclaredMethod("getInstance") - singletonAccessor.invoke(null).asInstanceOf[ExtensionId[Extension]] - } + val singletonAccessor = clazz.getDeclaredMethod("getInstance") + singletonAccessor.invoke(null).asInstanceOf[ExtensionId[Extension]] + } } loadExtensions("akka.actor.typed.library-extensions", throwOnLoadFail = true) @@ -82,27 +85,30 @@ trait ExtensionsImpl extends Extensions { self: ActorSystem[_] => private def createExtensionInstance[T <: Extension](ext: ExtensionId[T]): T = { val inProcessOfRegistration = new CountDownLatch(1) extensions.putIfAbsent(ext, inProcessOfRegistration) match { // Signal that registration is in process - case null => try { // Signal was successfully sent - // Create and initialize the extension, first look for ExtensionSetup - val instance = self.settings.setup.setups.collectFirst { - case (_, extSetup: ExtensionSetup[_]) if extSetup.extId == ext => extSetup.createExtension(self) - }.getOrElse(ext.createExtension(self)) - instance match { - case null => throw new IllegalStateException(s"Extension instance created 
as 'null' for extension [$ext]") - case instance: T @unchecked => - // Replace our in process signal with the initialized extension - extensions.replace(ext, inProcessOfRegistration, instance) - instance + case null => + try { // Signal was successfully sent + // Create and initialize the extension, first look for ExtensionSetup + val instance = self.settings.setup.setups + .collectFirst { + case (_, extSetup: ExtensionSetup[_]) if extSetup.extId == ext => extSetup.createExtension(self) + } + .getOrElse(ext.createExtension(self)) + instance match { + case null => throw new IllegalStateException(s"Extension instance created as 'null' for extension [$ext]") + case instance: T @unchecked => + // Replace our in process signal with the initialized extension + extensions.replace(ext, inProcessOfRegistration, instance) + instance + } + } catch { + case t: Throwable => + //In case shit hits the fan, remove the inProcess signal and escalate to caller + extensions.replace(ext, inProcessOfRegistration, t) + throw t + } finally { + //Always notify listeners of the inProcess signal + inProcessOfRegistration.countDown() } - } catch { - case t: Throwable => - //In case shit hits the fan, remove the inProcess signal and escalate to caller - extensions.replace(ext, inProcessOfRegistration, t) - throw t - } finally { - //Always notify listeners of the inProcess signal - inProcessOfRegistration.countDown() - } case _ => //Someone else is in process of registering an extension for this Extension, retry registerExtension(ext) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala index aade13f494..68b7ce0711 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/InterceptorImpl.scala @@ -33,8 +33,10 @@ private[akka] object InterceptorImpl { * INTERNAL API */ 
@InternalApi -private[akka] final class InterceptorImpl[O, I](val interceptor: BehaviorInterceptor[O, I], val nestedBehavior: Behavior[I]) - extends ExtensibleBehavior[O] with WrappingBehavior[O, I] { +private[akka] final class InterceptorImpl[O, I](val interceptor: BehaviorInterceptor[O, I], + val nestedBehavior: Behavior[I]) + extends ExtensibleBehavior[O] + with WrappingBehavior[O, I] { import BehaviorInterceptor._ @@ -88,7 +90,9 @@ private[akka] final class InterceptorImpl[O, I](val interceptor: BehaviorInterce } else { // returned behavior could be nested in setups, so we need to start before we deduplicate val duplicateInterceptExists = Behavior.existsInStack(started) { - case i: InterceptorImpl[O, I] if interceptor.isSame(i.interceptor.asInstanceOf[BehaviorInterceptor[Any, Any]]) => true + case i: InterceptorImpl[O, I] + if interceptor.isSame(i.interceptor.asInstanceOf[BehaviorInterceptor[Any, Any]]) => + true case _ => false } @@ -169,17 +173,19 @@ private[akka] object WidenedInterceptor { * INTERNAL API */ @InternalApi -private[akka] final case class WidenedInterceptor[O, I](matcher: PartialFunction[O, I]) extends BehaviorInterceptor[O, I] { +private[akka] final case class WidenedInterceptor[O, I](matcher: PartialFunction[O, I]) + extends BehaviorInterceptor[O, I] { import WidenedInterceptor._ import BehaviorInterceptor._ override def isSame(other: BehaviorInterceptor[Any, Any]): Boolean = other match { // If they use the same pf instance we can allow it, to have one way to workaround defining // "recursive" narrowed behaviors. - case WidenedInterceptor(`matcher`) => true + case WidenedInterceptor(`matcher`) => true case WidenedInterceptor(otherMatcher) => // there is no safe way to allow this - throw new IllegalStateException("Widen can only be used one time in the same behavior stack. " + + throw new IllegalStateException( + "Widen can only be used one time in the same behavior stack. 
" + s"One defined in ${LineNumbers(matcher)}, and another in ${LineNumbers(otherMatcher)}") case _ => false } @@ -187,8 +193,8 @@ private[akka] final case class WidenedInterceptor[O, I](matcher: PartialFunction def aroundReceive(ctx: TypedActorContext[O], msg: O, target: ReceiveTarget[I]): Behavior[I] = { // widen would wrap the TimerMessage, which would be wrong, see issue #25318 msg match { - case t: TimerMsg => throw new IllegalArgumentException( - s"Timers and widen can't be used together, [${t.key}]. See issue #25318") + case t: TimerMsg => + throw new IllegalArgumentException(s"Timers and widen can't be used together, [${t.key}]. See issue #25318") case _ => () } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/MiscMessageSerializer.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/MiscMessageSerializer.scala index 0fdef18ab5..b1ae772d99 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/MiscMessageSerializer.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/MiscMessageSerializer.scala @@ -13,7 +13,9 @@ import akka.annotation.InternalApi import akka.serialization.{ BaseSerializer, SerializerWithStringManifest } @InternalApi -class MiscMessageSerializer(val system: akka.actor.ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer { +class MiscMessageSerializer(val system: akka.actor.ExtendedActorSystem) + extends SerializerWithStringManifest + with BaseSerializer { // Serializers are initialized early on. `toTyped` might then try to initialize the untyped ActorSystemAdapter extension. 
private lazy val resolver = ActorRefResolver(system.toTyped) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala index 5c42453407..511bbe9af1 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/PoisonPill.scala @@ -31,10 +31,14 @@ import akka.annotation.InternalApi * and process stashed messages before stopping. */ @InternalApi private[akka] final class PoisonPillInterceptor[M] extends BehaviorInterceptor[M, M] { - override def aroundReceive(ctx: TypedActorContext[M], msg: M, target: BehaviorInterceptor.ReceiveTarget[M]): Behavior[M] = + override def aroundReceive(ctx: TypedActorContext[M], + msg: M, + target: BehaviorInterceptor.ReceiveTarget[M]): Behavior[M] = target(ctx, msg) - override def aroundSignal(ctx: TypedActorContext[M], signal: Signal, target: BehaviorInterceptor.SignalTarget[M]): Behavior[M] = { + override def aroundSignal(ctx: TypedActorContext[M], + signal: Signal, + target: BehaviorInterceptor.SignalTarget[M]): Behavior[M] = { signal match { case p: PoisonPill => val next = target(ctx, p) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala index e5704734e5..24242af848 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/StashBufferImpl.scala @@ -33,11 +33,11 @@ import akka.util.ConstantFun /** * INTERNAL API */ -@InternalApi private[akka] final class StashBufferImpl[T] private ( - val capacity: Int, - private var _first: StashBufferImpl.Node[T], - private var _last: StashBufferImpl.Node[T]) - extends javadsl.StashBuffer[T] with scaladsl.StashBuffer[T] { +@InternalApi private[akka] final class StashBufferImpl[T] 
private (val capacity: Int, + private var _first: StashBufferImpl.Node[T], + private var _last: StashBufferImpl.Node[T]) + extends javadsl.StashBuffer[T] + with scaladsl.StashBuffer[T] { import StashBufferImpl.Node @@ -54,7 +54,8 @@ import akka.util.ConstantFun override def stash(message: T): StashBufferImpl[T] = { if (message == null) throw new NullPointerException if (isFull) - throw new javadsl.StashOverflowException(s"Couldn't add [${message.getClass.getName}] " + + throw new javadsl.StashOverflowException( + s"Couldn't add [${message.getClass.getName}] " + s"because stash with capacity [$capacity] is full") val node = new Node(null, message) @@ -99,8 +100,10 @@ import akka.util.ConstantFun override def unstashAll(ctx: javadsl.ActorContext[T], behavior: Behavior[T]): Behavior[T] = unstashAll(ctx.asScala, behavior) - override def unstash(ctx: scaladsl.ActorContext[T], behavior: Behavior[T], - numberOfMessages: Int, wrap: T => T): Behavior[T] = { + override def unstash(ctx: scaladsl.ActorContext[T], + behavior: Behavior[T], + numberOfMessages: Int, + wrap: T => T): Behavior[T] = { if (isEmpty) behavior // optimization else { @@ -112,7 +115,9 @@ import akka.util.ConstantFun } } - private def interpretUnstashedMessages(behavior: Behavior[T], ctx: TypedActorContext[T], messages: Iterator[T]): Behavior[T] = { + private def interpretUnstashedMessages(behavior: Behavior[T], + ctx: TypedActorContext[T], + messages: Iterator[T]): Behavior[T] = { @tailrec def interpretOne(b: Behavior[T]): Behavior[T] = { val b2 = Behavior.start(b, ctx) if (!Behavior.isAlive(b2) || !messages.hasNext) b2 @@ -133,8 +138,10 @@ import akka.util.ConstantFun interpretOne(Behavior.start(behavior, ctx)) } - override def unstash(ctx: javadsl.ActorContext[T], behavior: Behavior[T], - numberOfMessages: Int, wrap: JFunction[T, T]): Behavior[T] = + override def unstash(ctx: javadsl.ActorContext[T], + behavior: Behavior[T], + numberOfMessages: Int, + wrap: JFunction[T, T]): Behavior[T] = 
unstash(ctx.asScala, behavior, numberOfMessages, x => wrap.apply(x)) override def toString: String = @@ -160,4 +167,4 @@ import akka.util.ConstantFun * to emit the PreRestart and PostStop to the right behavior and install the latest behavior for resume strategy. */ @InternalApi private[akka] final case class UnstashException[T](cause: Throwable, behavior: Behavior[T]) - extends RuntimeException(s"[$cause] when unstashing in [$behavior]", cause) + extends RuntimeException(s"[$cause] when unstashing in [$behavior]", cause) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala index 08d2914a56..91f2e20a26 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/Supervision.scala @@ -45,7 +45,9 @@ import akka.util.unused * INTERNAL API */ @InternalApi -private abstract class AbstractSupervisor[O, I, Thr <: Throwable](strategy: SupervisorStrategy)(implicit ev: ClassTag[Thr]) extends BehaviorInterceptor[O, I] { +private abstract class AbstractSupervisor[O, I, Thr <: Throwable](strategy: SupervisorStrategy)( + implicit ev: ClassTag[Thr]) + extends BehaviorInterceptor[O, I] { private val throwableClass = implicitly[ClassTag[Thr]].runtimeClass @@ -55,7 +57,7 @@ private abstract class AbstractSupervisor[O, I, Thr <: Throwable](strategy: Supe override def isSame(other: BehaviorInterceptor[Any, Any]): Boolean = { other match { case as: AbstractSupervisor[_, _, Thr] if throwableClass == as.throwableClass => true - case _ => false + case _ => false } } @@ -93,7 +95,8 @@ private abstract class AbstractSupervisor[O, I, Thr <: Throwable](strategy: Supe /** * For cases where O == I for BehaviorInterceptor. 
*/ -private abstract class SimpleSupervisor[T, Thr <: Throwable: ClassTag](ss: SupervisorStrategy) extends AbstractSupervisor[T, T, Thr](ss) { +private abstract class SimpleSupervisor[T, Thr <: Throwable: ClassTag](ss: SupervisorStrategy) + extends AbstractSupervisor[T, T, Thr](ss) { override def aroundReceive(ctx: TypedActorContext[T], msg: T, target: ReceiveTarget[T]): Behavior[T] = { try { @@ -116,7 +119,7 @@ private abstract class SimpleSupervisor[T, Thr <: Throwable: ClassTag](ss: Super } private class StopSupervisor[T, Thr <: Throwable: ClassTag](@unused initial: Behavior[T], strategy: Stop) - extends SimpleSupervisor[T, Thr](strategy) { + extends SimpleSupervisor[T, Thr](strategy) { override def handleException(ctx: TypedActorContext[T]): Catcher[Behavior[T]] = { case NonFatal(t) if isInstanceOfTheThrowableClass(t) => @@ -137,14 +140,14 @@ private class ResumeSupervisor[T, Thr <: Throwable: ClassTag](ss: Resume) extend } private object RestartSupervisor { + /** * Calculates an exponential back off delay. 
*/ - def calculateDelay( - restartCount: Int, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double): FiniteDuration = { + def calculateDelay(restartCount: Int, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double): FiniteDuration = { val rnd = 1.0 + ThreadLocalRandom.current().nextDouble() * randomFactor if (restartCount >= 30) // Duration overflow protection (> 100 years) maxBackoff @@ -156,11 +159,12 @@ private object RestartSupervisor { } final case class ScheduledRestart(owner: RestartSupervisor[_, _, _ <: Throwable]) extends DeadLetterSuppression - final case class ResetRestartCount(current: Int, owner: RestartSupervisor[_, _, _ <: Throwable]) extends DeadLetterSuppression + final case class ResetRestartCount(current: Int, owner: RestartSupervisor[_, _, _ <: Throwable]) + extends DeadLetterSuppression } private class RestartSupervisor[O, T, Thr <: Throwable: ClassTag](initial: Behavior[T], strategy: RestartOrBackoff) - extends AbstractSupervisor[O, T, Thr](strategy) { + extends AbstractSupervisor[O, T, Thr](strategy) { import RestartSupervisor._ private var restartingInProgress: OptionVal[(StashBuffer[Any], Set[ActorRef[Nothing]])] = OptionVal.None @@ -246,7 +250,8 @@ private class RestartSupervisor[O, T, Thr <: Throwable: ClassTag](initial: Behav } } - override protected def handleExceptionOnStart(ctx: TypedActorContext[O], @unused target: PreStartTarget[T]): Catcher[Behavior[T]] = { + override protected def handleExceptionOnStart(ctx: TypedActorContext[O], + @unused target: PreStartTarget[T]): Catcher[Behavior[T]] = { case NonFatal(t) if isInstanceOfTheThrowableClass(t) => strategy match { case _: Restart => @@ -262,13 +267,15 @@ private class RestartSupervisor[O, T, Thr <: Throwable: ClassTag](initial: Behav } } - override protected def handleSignalException(ctx: TypedActorContext[O], target: SignalTarget[T]): Catcher[Behavior[T]] = { + override protected def handleSignalException(ctx: 
TypedActorContext[O], + target: SignalTarget[T]): Catcher[Behavior[T]] = { handleException(ctx, signalRestart = { case e: UnstashException[O] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) case _ => target(ctx, PreRestart) }) } - override protected def handleReceiveException(ctx: TypedActorContext[O], target: ReceiveTarget[T]): Catcher[Behavior[T]] = { + override protected def handleReceiveException(ctx: TypedActorContext[O], + target: ReceiveTarget[T]): Catcher[Behavior[T]] = { handleException(ctx, signalRestart = { case e: UnstashException[O] @unchecked => Behavior.interpretSignal(e.behavior, ctx, PreRestart) case _ => target.signalRestart(ctx) @@ -286,7 +293,8 @@ private class RestartSupervisor[O, T, Thr <: Throwable: ClassTag](initial: Behav } } else { - try signalRestart(t) catch { + try signalRestart(t) + catch { case NonFatal(ex) => ctx.asScala.log.error(ex, "failure during PreRestart") } @@ -310,7 +318,8 @@ private class RestartSupervisor[O, T, Thr <: Throwable: ClassTag](initial: Behav strategy match { case backoff: Backoff => - val restartDelay = calculateDelay(currentRestartCount, backoff.minBackoff, backoff.maxBackoff, backoff.randomFactor) + val restartDelay = + calculateDelay(currentRestartCount, backoff.minBackoff, backoff.maxBackoff, backoff.randomFactor) gotScheduledRestart = false ctx.asScala.scheduleOnce(restartDelay, ctx.asScala.self.unsafeUpcast[Any], ScheduledRestart(this)) Behaviors.empty @@ -326,8 +335,9 @@ private class RestartSupervisor[O, T, Thr <: Throwable: ClassTag](initial: Behav strategy match { case backoff: Backoff => gotScheduledRestart = false - ctx.asScala.scheduleOnce(backoff.resetBackoffAfter, ctx.asScala.self.unsafeUpcast[Any], - ResetRestartCount(restartCount, this)) + ctx.asScala.scheduleOnce(backoff.resetBackoffAfter, + ctx.asScala.self.unsafeUpcast[Any], + ResetRestartCount(restartCount, this)) case _: Restart => } @@ -357,7 +367,8 @@ private class RestartSupervisor[O, T, Thr <: Throwable: 
ClassTag](initial: Behav strategy match { case restart: Restart => val timeLeft = deadlineHasTimeLeft - val newDeadline = if (deadline.isDefined && timeLeft) deadline else OptionVal.Some(Deadline.now + restart.withinTimeRange) + val newDeadline = + if (deadline.isDefined && timeLeft) deadline else OptionVal.Some(Deadline.now + restart.withinTimeRange) restartCount = if (timeLeft) restartCount + 1 else 1 deadline = newDeadline case _: Backoff => @@ -366,4 +377,3 @@ private class RestartSupervisor[O, T, Thr <: Throwable: ClassTag](initial: Behav } } - diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala index e6792d55c6..2e42401615 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/SystemMessage.scala @@ -18,11 +18,13 @@ private[typed] object SystemMessageList { final val ENil: EarliestFirstSystemMessageList = new EarliestFirstSystemMessageList(null) @tailrec - private[internal] def sizeInner(head: SystemMessage, acc: Int): Int = if (head eq null) acc else sizeInner(head.next, acc + 1) + private[internal] def sizeInner(head: SystemMessage, acc: Int): Int = + if (head eq null) acc else sizeInner(head.next, acc + 1) @tailrec private[internal] def reverseInner(head: SystemMessage, acc: SystemMessage): SystemMessage = { - if (head eq null) acc else { + if (head eq null) acc + else { val next = head.next head.next = acc reverseInner(next, head) @@ -222,7 +224,8 @@ private[akka] final case class Unwatch(watchee: ActorRef[Nothing], watcher: Acto * INTERNAL API */ @SerialVersionUID(1L) -private[akka] final case class DeathWatchNotification(actor: ActorRef[Nothing], failureCause: Throwable) extends SystemMessage +private[akka] final case class DeathWatchNotification(actor: ActorRef[Nothing], failureCause: Throwable) + extends SystemMessage /** * INTERNAL API 
diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala index dd3459415b..209f5287ab 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/TimerSchedulerImpl.scala @@ -7,7 +7,7 @@ package internal import akka.actor.typed.ActorRef.ActorRefOps import akka.actor.typed.scaladsl.ActorContext -import akka.actor.{ Cancellable, NotInfluenceReceiveTimeout, typed } +import akka.actor.{ typed, Cancellable, NotInfluenceReceiveTimeout } import akka.annotation.InternalApi import akka.dispatch.ExecutionContexts import akka.util.JavaDurationConverters._ @@ -43,11 +43,12 @@ import scala.concurrent.duration.FiniteDuration * INTERNAL API */ @InternalApi private[akka] class TimerSchedulerImpl[T](ctx: ActorContext[T]) - extends scaladsl.TimerScheduler[T] with javadsl.TimerScheduler[T] { + extends scaladsl.TimerScheduler[T] + with javadsl.TimerScheduler[T] { import TimerSchedulerImpl._ private var timers: Map[Any, Timer[T]] = Map.empty - private val timerGen = Iterator from 1 + private val timerGen = Iterator.from(1) override def startPeriodicTimer(key: Any, msg: T, interval: FiniteDuration): Unit = startTimer(key, msg, interval, repeat = true) @@ -131,9 +132,10 @@ import scala.concurrent.duration.FiniteDuration OptionVal.Some(t.msg) } else { // it was from an old timer that was enqueued in mailbox before canceled - log.debug( - "Received timer [{}] from old generation [{}], expected generation [{}], discarding", - timerMsg.key, timerMsg.generation, t.generation) + log.debug("Received timer [{}] from old generation [{}], expected generation [{}], discarding", + timerMsg.key, + timerMsg.generation, + t.generation) OptionVal.none // message should be ignored } } diff --git 
a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala index 2819376665..f991fefd98 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WithMdcBehaviorInterceptor.scala @@ -5,7 +5,7 @@ package akka.actor.typed.internal import akka.actor.typed.internal.adapter.AbstractLogger -import akka.actor.typed.{ TypedActorContext, Behavior, BehaviorInterceptor, Signal } +import akka.actor.typed.{ Behavior, BehaviorInterceptor, Signal, TypedActorContext } import akka.annotation.InternalApi import scala.collection.immutable.HashMap @@ -16,10 +16,9 @@ import scala.collection.immutable.HashMap @InternalApi private[akka] object WithMdcBehaviorInterceptor { val noMdcPerMessage = (_: Any) => Map.empty[String, Any] - def apply[T]( - staticMdc: Map[String, Any], - mdcForMessage: T => Map[String, Any], - behavior: Behavior[T]): Behavior[T] = { + def apply[T](staticMdc: Map[String, Any], + mdcForMessage: T => Map[String, Any], + behavior: Behavior[T]): Behavior[T] = { val interceptor = new WithMdcBehaviorInterceptor[T](staticMdc, mdcForMessage) BehaviorImpl.intercept(interceptor)(behavior) @@ -32,9 +31,9 @@ import scala.collection.immutable.HashMap * * INTERNAL API */ -@InternalApi private[akka] final class WithMdcBehaviorInterceptor[T] private ( - staticMdc: Map[String, Any], - mdcForMessage: T => Map[String, Any]) extends BehaviorInterceptor[T, T] { +@InternalApi private[akka] final class WithMdcBehaviorInterceptor[T] private (staticMdc: Map[String, Any], + mdcForMessage: T => Map[String, Any]) + extends BehaviorInterceptor[T, T] { import BehaviorInterceptor._ diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WrappingBehavior.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WrappingBehavior.scala index 
c4f4bb6dac..5432349a97 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WrappingBehavior.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/WrappingBehavior.scala @@ -21,10 +21,12 @@ import akka.annotation.InternalApi @DoNotInherit @InternalApi private[akka] trait WrappingBehavior[O, I] { + /** * @return The behavior that is wrapped by this behavior */ def nestedBehavior: Behavior[I] + /** * Replace the behavior that is wrapped by this behavior with a new nested behavior * @return a new instance of this wrapping behavior with `newNested` as nestedBehavior diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala index c1ee4248ec..e38e4c6f72 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorAdapter.scala @@ -37,7 +37,9 @@ import akka.util.OptionVal /** * INTERNAL API */ -@InternalApi private[typed] class ActorAdapter[T](_initialBehavior: Behavior[T]) extends untyped.Actor with untyped.ActorLogging { +@InternalApi private[typed] class ActorAdapter[T](_initialBehavior: Behavior[T]) + extends untyped.Actor + with untyped.ActorLogging { import Behavior._ protected var behavior: Behavior[T] = _initialBehavior @@ -170,10 +172,11 @@ import akka.util.OptionVal case ex => recordChildFailure(ex) val logMessage = ex match { - case e: ActorInitializationException if e.getCause ne null => e.getCause match { - case ex: InvocationTargetException if ex.getCause ne null => ex.getCause.getMessage - case ex => ex.getMessage - } + case e: ActorInitializationException if e.getCause ne null => + e.getCause match { + case ex: InvocationTargetException if ex.getCause ne null => ex.getCause.getMessage + case ex => ex.getMessage + } case e => e.getMessage } // log at Error as that is what the supervision 
strategy would have done. @@ -222,10 +225,11 @@ import akka.util.OptionVal case null => // skip PostStop case _: DeferredBehavior[_] => // Do not undefer a DeferredBehavior as that may cause creation side-effects, which we do not want on termination. - case s: StoppedBehavior[_] => s.postStop match { - case OptionVal.Some(postStop) => Behavior.interpretSignal(postStop, ctx, PostStop) - case OptionVal.None => // no postStop behavior defined - } + case s: StoppedBehavior[_] => + s.postStop match { + case OptionVal.Some(postStop) => Behavior.interpretSignal(postStop, ctx, PostStop) + case OptionVal.None => // no postStop behavior defined + } case b => Behavior.interpretSignal(b, ctx, PostStop) } @@ -275,6 +279,7 @@ private[typed] class GuardianActorAdapter[T](_initialBehavior: Behavior[T]) exte super.postStop() } } + /** * INTERNAL API */ diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala index 9d27561c07..740a936c1f 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorContextAdapter.scala @@ -18,7 +18,9 @@ import scala.concurrent.duration._ /** * INTERNAL API. Wrapping an [[akka.actor.ActorContext]] as an [[TypedActorContext]]. 
*/ -@InternalApi private[akka] final class ActorContextAdapter[T](val untypedContext: untyped.ActorContext, adapter: ActorAdapter[T]) extends ActorContextImpl[T] { +@InternalApi private[akka] final class ActorContextAdapter[T](val untypedContext: untyped.ActorContext, + adapter: ActorAdapter[T]) + extends ActorContextImpl[T] { import ActorRefAdapter.toUntyped @@ -52,14 +54,14 @@ import scala.concurrent.duration._ } else if (self == child) { throw new IllegalArgumentException( "Only direct children of an actor can be stopped through the actor context, " + - s"but you tried to stop [$self] by passing its ActorRef to the `stop` method. " + - "Stopping self has to be expressed as explicitly returning a Stop Behavior " + - "with `Behaviors.stopped`.") + s"but you tried to stop [$self] by passing its ActorRef to the `stop` method. " + + "Stopping self has to be expressed as explicitly returning a Stop Behavior " + + "with `Behaviors.stopped`.") } else { throw new IllegalArgumentException( "Only direct children of an actor can be stopped through the actor context, " + - s"but [$child] is not a child of [$self]. Stopping other actors has to be expressed as " + - "an explicit stop message that the actor accepts.") + s"but [$child] is not a child of [$self]. 
Stopping other actors has to be expressed as " + + "an explicit stop message that the actor accepts.") } override def watch[U](other: ActorRef[U]): Unit = { untypedContext.watch(toUntyped(other)) } @@ -89,7 +91,8 @@ import scala.concurrent.duration._ private def initLoggerWithClass(logClass: Class[_]): LoggerAdapterImpl = { val logSource = self.path.toString val system = untypedContext.system.asInstanceOf[ExtendedActorSystem] - val logger = new LoggerAdapterImpl(system.eventStream, logClass, logSource, LoggingFilterWithMarker.wrap(system.logFilter)) + val logger = + new LoggerAdapterImpl(system.eventStream, logClass, logSource, LoggingFilterWithMarker.wrap(system.logFilter)) actorLogger = OptionVal.Some(logger) logger } @@ -117,7 +120,8 @@ import scala.concurrent.duration._ context match { case adapter: ActorContextAdapter[_] => adapter.untypedContext case _ => - throw new UnsupportedOperationException("only adapted untyped ActorContext permissible " + + throw new UnsupportedOperationException( + "only adapted untyped ActorContext permissible " + s"($context of class ${context.getClass.getName})") } @@ -127,7 +131,8 @@ import scala.concurrent.duration._ context match { case c: TypedActorContext[_] => toUntypedImp(c) case _ => - throw new UnsupportedOperationException("unknown ActorContext type " + + throw new UnsupportedOperationException( + "unknown ActorContext type " + s"($context of class ${context.getClass.getName})") } @@ -135,7 +140,8 @@ import scala.concurrent.duration._ context match { case c: TypedActorContext[_] => toUntypedImp(c) case _ => - throw new UnsupportedOperationException("unknown ActorContext type " + + throw new UnsupportedOperationException( + "unknown ActorContext type " + s"($context of class ${context.getClass.getName})") } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefAdapter.scala index 
eaea2af89b..ff6e11b3e1 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorRefAdapter.scala @@ -16,7 +16,9 @@ import akka.dispatch.sysmsg * INTERNAL API */ @InternalApi private[typed] class ActorRefAdapter[-T](val untypedRef: untyped.InternalActorRef) - extends ActorRef[T] with internal.ActorRefImpl[T] with internal.InternalRecipientRef[T] { + extends ActorRef[T] + with internal.ActorRefImpl[T] + with internal.InternalRecipientRef[T] { override def path: untyped.ActorPath = untypedRef.path @@ -48,7 +50,8 @@ private[akka] object ActorRefAdapter { case adapter: ActorRefAdapter[_] => adapter.untypedRef case system: ActorSystemAdapter[_] => system.untypedSystem.guardian case _ => - throw new UnsupportedOperationException("only adapted untyped ActorRefs permissible " + + throw new UnsupportedOperationException( + "only adapted untyped ActorRefs permissible " + s"($ref of class ${ref.getClass.getName})") } @@ -56,12 +59,12 @@ private[akka] object ActorRefAdapter { signal match { case internal.Create() => throw new IllegalStateException("WAT? 
No, seriously.") case internal.Terminate() => untypedRef.stop() - case internal.Watch(watchee, watcher) => untypedRef.sendSystemMessage( - sysmsg.Watch( - toUntyped(watchee), - toUntyped(watcher))) - case internal.Unwatch(watchee, watcher) => untypedRef.sendSystemMessage(sysmsg.Unwatch(toUntyped(watchee), toUntyped(watcher))) - case internal.DeathWatchNotification(ref, _) => untypedRef.sendSystemMessage(sysmsg.DeathWatchNotification(toUntyped(ref), true, false)) - case internal.NoMessage => // just to suppress the warning + case internal.Watch(watchee, watcher) => + untypedRef.sendSystemMessage(sysmsg.Watch(toUntyped(watchee), toUntyped(watcher))) + case internal.Unwatch(watchee, watcher) => + untypedRef.sendSystemMessage(sysmsg.Unwatch(toUntyped(watchee), toUntyped(watcher))) + case internal.DeathWatchNotification(ref, _) => + untypedRef.sendSystemMessage(sysmsg.DeathWatchNotification(toUntyped(ref), true, false)) + case internal.NoMessage => // just to suppress the warning } } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorSystemAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorSystemAdapter.scala index be93d6e644..21b82a699f 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorSystemAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/ActorSystemAdapter.scala @@ -31,7 +31,11 @@ import akka.event.LoggingFilterWithMarker * most circumstances. 
*/ @InternalApi private[akka] class ActorSystemAdapter[-T](val untypedSystem: untyped.ActorSystemImpl) - extends ActorSystem[T] with ActorRef[T] with internal.ActorRefImpl[T] with internal.InternalRecipientRef[T] with ExtensionsImpl { + extends ActorSystem[T] + with ActorRef[T] + with internal.ActorRefImpl[T] + with internal.InternalRecipientRef[T] + with ExtensionsImpl { untypedSystem.assertInitialized() @@ -53,7 +57,8 @@ import akka.event.LoggingFilterWithMarker // impl InternalRecipientRef def isTerminated: Boolean = whenTerminated.isCompleted - final override val path: untyped.ActorPath = untyped.RootActorPath(untyped.Address("akka", untypedSystem.name)) / "user" + final override val path + : untyped.ActorPath = untyped.RootActorPath(untyped.Address("akka", untypedSystem.name)) / "user" override def toString: String = untypedSystem.toString @@ -69,7 +74,10 @@ import akka.event.LoggingFilterWithMarker } override def dynamicAccess: untyped.DynamicAccess = untypedSystem.dynamicAccess implicit override def executionContext: scala.concurrent.ExecutionContextExecutor = untypedSystem.dispatcher - override val log: Logger = new LoggerAdapterImpl(untypedSystem.eventStream, getClass, name, LoggingFilterWithMarker.wrap(untypedSystem.logFilter)) + override val log: Logger = new LoggerAdapterImpl(untypedSystem.eventStream, + getClass, + name, + LoggingFilterWithMarker.wrap(untypedSystem.logFilter)) override def logConfiguration(): Unit = untypedSystem.logConfiguration() override def name: String = untypedSystem.name override def scheduler: akka.actor.Scheduler = untypedSystem.scheduler @@ -88,7 +96,8 @@ import akka.event.LoggingFilterWithMarker override lazy val getWhenTerminated: CompletionStage[akka.actor.typed.Terminated] = FutureConverters.toJava(whenTerminated) - def systemActorOf[U](behavior: Behavior[U], name: String, props: Props)(implicit timeout: Timeout): Future[ActorRef[U]] = { + def systemActorOf[U](behavior: Behavior[U], name: String, props: Props)( + implicit 
timeout: Timeout): Future[ActorRef[U]] = { val ref = untypedSystem.systemActorOf(PropsAdapter(() => behavior, props), name) Future.successful(ActorRefAdapter(ref)) } @@ -130,7 +139,9 @@ private[akka] object ActorSystemAdapter { def toUntyped[U](sys: ActorSystem[_]): untyped.ActorSystem = sys match { case adapter: ActorSystemAdapter[_] => adapter.untypedSystem - case _ => throw new UnsupportedOperationException("only adapted untyped ActorSystem permissible " + - s"($sys of class ${sys.getClass.getName})") + case _ => + throw new UnsupportedOperationException( + "only adapted untyped ActorSystem permissible " + + s"($sys of class ${sys.getClass.getName})") } } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/LoggerAdapterImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/LoggerAdapterImpl.scala index 4ac224dc4c..3eb2632b7c 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/LoggerAdapterImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/LoggerAdapterImpl.scala @@ -85,8 +85,15 @@ private[akka] abstract class AbstractLogger extends Logger { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3), OptionVal.Some(cause), OptionVal.Some(marker)) } - override def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { - if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3, arg4), OptionVal.Some(cause), OptionVal.Some(marker)) + override def error(marker: LogMarker, + cause: Throwable, + template: String, + arg1: Any, + arg2: Any, + arg3: Any, + arg4: Any): Unit = { + if (isErrorEnabled) + notifyError(format(template, arg1, arg2, arg3, arg4), OptionVal.Some(cause), OptionVal.Some(marker)) } override def error(marker: LogMarker, message: String): Unit = { @@ -158,11 +165,19 @@ private[akka] abstract class AbstractLogger extends Logger { } override def warning(marker: LogMarker, 
cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { - if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3), OptionVal.Some(cause), OptionVal.Some(marker)) + if (isWarningEnabled) + notifyWarning(format(template, arg1, arg2, arg3), OptionVal.Some(cause), OptionVal.Some(marker)) } - override def warning(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { - if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3, arg4), OptionVal.Some(cause), OptionVal.Some(marker)) + override def warning(marker: LogMarker, + cause: Throwable, + template: String, + arg1: Any, + arg2: Any, + arg3: Any, + arg4: Any): Unit = { + if (isWarningEnabled) + notifyWarning(format(template, arg1, arg2, arg3, arg4), OptionVal.Some(cause), OptionVal.Some(marker)) } override def warning(marker: LogMarker, cause: Throwable, message: String): Unit = { @@ -186,7 +201,8 @@ private[akka] abstract class AbstractLogger extends Logger { } override def warning(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { - if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3, arg4), OptionVal.None, OptionVal.Some(marker)) + if (isWarningEnabled) + notifyWarning(format(template, arg1, arg2, arg3, arg4), OptionVal.None, OptionVal.Some(marker)) } override def info(message: String): Unit = { @@ -305,7 +321,13 @@ private[akka] abstract class AbstractLogger extends Logger { if (isLevelEnabled(level)) notify(level, format(template, arg1, arg2, arg3), OptionVal.Some(marker)) } - override def log(level: LogLevel, marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { + override def log(level: LogLevel, + marker: LogMarker, + template: String, + arg1: Any, + arg2: Any, + arg3: Any, + arg4: Any): Unit = { if (isLevelEnabled(level)) notify(level, format(template, arg1, arg2, arg3, arg4), OptionVal.Some(marker)) } @@ -323,12 
+345,13 @@ private[akka] abstract class AbstractLogger extends Logger { */ private def format(t: String, arg1: Any): String = arg1 match { case a: Array[_] if !a.getClass.getComponentType.isPrimitive => formatArray(t, a: _*) - case a: Array[_] => formatArray(t, a.map(_.asInstanceOf[AnyRef]): _*) - case x => formatArray(t, x) + case a: Array[_] => formatArray(t, a.map(_.asInstanceOf[AnyRef]): _*) + case x => formatArray(t, x) } private def format(t: String, arg1: Any, arg2: Any): String = formatArray(t, arg1, arg2) private def format(t: String, arg1: Any, arg2: Any, arg3: Any): String = formatArray(t, arg1, arg2, arg3) - private def format(t: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): String = formatArray(t, arg1, arg2, arg3, arg4) + private def format(t: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): String = + formatArray(t, arg1, arg2, arg3, arg4) private def formatArray(t: String, arg: Any*): String = { val sb = new java.lang.StringBuilder(64) @@ -337,14 +360,11 @@ private[akka] abstract class AbstractLogger extends Logger { while (p < arg.length) { val index = t.indexOf("{}", startIndex) if (index == -1) { - sb.append(t.substring(startIndex, t.length)) - .append(" WARNING arguments left: ") - .append(arg.length - p) + sb.append(t.substring(startIndex, t.length)).append(" WARNING arguments left: ").append(arg.length - p) p = arg.length startIndex = t.length } else { - sb.append(t.substring(startIndex, index)) - .append(arg(p)) + sb.append(t.substring(startIndex, index)).append(arg(p)) startIndex = index + 2 p += 1 } @@ -358,17 +378,25 @@ private[akka] abstract class AbstractLogger extends Logger { * INTERNAL API */ @InternalApi -private[akka] final class LoggerAdapterImpl(bus: LoggingBus, logClass: Class[_], logSource: String, loggingFilter: LoggingFilterWithMarker) extends AbstractLogger { +private[akka] final class LoggerAdapterImpl(bus: LoggingBus, + logClass: Class[_], + logSource: String, + loggingFilter: LoggingFilterWithMarker) + extends 
AbstractLogger { override def isErrorEnabled = loggingFilter.isErrorEnabled(logClass, logSource) override def isWarningEnabled = loggingFilter.isWarningEnabled(logClass, logSource) override def isInfoEnabled = loggingFilter.isInfoEnabled(logClass, logSource) override def isDebugEnabled = loggingFilter.isDebugEnabled(logClass, logSource) - override def isErrorEnabled(marker: LogMarker): Boolean = loggingFilter.isErrorEnabled(logClass, logSource, marker.asInstanceOf[UntypedLM]) - override def isWarningEnabled(marker: LogMarker): Boolean = loggingFilter.isWarningEnabled(logClass, logSource, marker.asInstanceOf[UntypedLM]) - override def isInfoEnabled(marker: LogMarker): Boolean = loggingFilter.isInfoEnabled(logClass, logSource, marker.asInstanceOf[UntypedLM]) - override def isDebugEnabled(marker: LogMarker): Boolean = loggingFilter.isDebugEnabled(logClass, logSource, marker.asInstanceOf[UntypedLM]) + override def isErrorEnabled(marker: LogMarker): Boolean = + loggingFilter.isErrorEnabled(logClass, logSource, marker.asInstanceOf[UntypedLM]) + override def isWarningEnabled(marker: LogMarker): Boolean = + loggingFilter.isWarningEnabled(logClass, logSource, marker.asInstanceOf[UntypedLM]) + override def isInfoEnabled(marker: LogMarker): Boolean = + loggingFilter.isInfoEnabled(logClass, logSource, marker.asInstanceOf[UntypedLM]) + override def isDebugEnabled(marker: LogMarker): Boolean = + loggingFilter.isDebugEnabled(logClass, logSource, marker.asInstanceOf[UntypedLM]) override def withMdc(mdc: Map[String, Any]): Logger = { val mdcAdapter = new LoggerAdapterImpl(bus, logClass, logSource, loggingFilter) @@ -407,10 +435,11 @@ private[akka] final class LoggerAdapterImpl(bus: LoggingBus, logClass: Class[_], private[akka] def notifyWarning(message: String, cause: OptionVal[Throwable], marker: OptionVal[LogMarker]): Unit = { val warning = if (cause.isDefined) Warning(cause.get, logSource, logClass, message, mdc, marker.orNull.asInstanceOf[UntypedLM]) - else marker match { - 
case OptionVal.Some(m) => Warning(logSource, logClass, message, mdc, m.asInstanceOf[UntypedLM]) - case OptionVal.None => Warning(logSource, logClass, message, mdc) - } + else + marker match { + case OptionVal.Some(m) => Warning(logSource, logClass, message, mdc, m.asInstanceOf[UntypedLM]) + case OptionVal.None => Warning(logSource, logClass, message, mdc) + } bus.publish(warning) } @@ -431,4 +460,3 @@ private[akka] final class LoggerAdapterImpl(bus: LoggingBus, logClass: Class[_], } } - diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala index ececc7b5c1..9f8ebc9744 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/adapter/PropsAdapter.scala @@ -13,7 +13,9 @@ import akka.annotation.InternalApi * INTERNAL API */ @InternalApi private[akka] object PropsAdapter { - def apply[T](behavior: () => Behavior[T], deploy: Props = Props.empty, isGuardian: Boolean = false): akka.actor.Props = { + def apply[T](behavior: () => Behavior[T], + deploy: Props = Props.empty, + isGuardian: Boolean = false): akka.actor.Props = { val props = if (isGuardian) akka.actor.Props(new GuardianActorAdapter(behavior())) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala index f59845ced8..a7a3019e58 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/LocalReceptionist.scala @@ -38,22 +38,20 @@ private[akka] object LocalReceptionist extends ReceptionistBehaviorProvider { sealed trait InternalCommand final case class RegisteredActorTerminated[T](key: ServiceKey[T], ref: 
ActorRef[T]) extends InternalCommand - final case class SubscriberTerminated[T](key: ServiceKey[T], ref: ActorRef[ReceptionistMessages.Listing[T]]) extends InternalCommand + final case class SubscriberTerminated[T](key: ServiceKey[T], ref: ActorRef[ReceptionistMessages.Listing[T]]) + extends InternalCommand override def behavior: Behavior[Command] = Behaviors.setup { ctx => ctx.setLoggerClass(classOf[LocalReceptionist]) - behavior( - TypedMultiMap.empty[AbstractServiceKey, KV], - TypedMultiMap.empty[AbstractServiceKey, SubscriptionsKV] - ).narrow[Command] + behavior(TypedMultiMap.empty[AbstractServiceKey, KV], TypedMultiMap.empty[AbstractServiceKey, SubscriptionsKV]) + .narrow[Command] } - private def behavior( - serviceRegistry: LocalServiceRegistry, - subscriptions: SubscriptionRegistry): Behavior[Any] = { + private def behavior(serviceRegistry: LocalServiceRegistry, subscriptions: SubscriptionRegistry): Behavior[Any] = { // Helper to create new state - def next(newRegistry: LocalServiceRegistry = serviceRegistry, newSubscriptions: SubscriptionRegistry = subscriptions) = + def next(newRegistry: LocalServiceRegistry = serviceRegistry, + newSubscriptions: SubscriptionRegistry = subscriptions) = behavior(newRegistry, newSubscriptions) /* @@ -71,7 +69,8 @@ private[akka] object LocalReceptionist extends ReceptionistBehaviorProvider { }) // Helper that makes sure that subscribers are notified when an entry is changed - def updateRegistry(changedKeysHint: Set[AbstractServiceKey], f: LocalServiceRegistry => LocalServiceRegistry): Behavior[Any] = { + def updateRegistry(changedKeysHint: Set[AbstractServiceKey], + f: LocalServiceRegistry => LocalServiceRegistry): Behavior[Any] = { val newRegistry = f(serviceRegistry) def notifySubscribersFor[T](key: AbstractServiceKey): Unit = { @@ -79,12 +78,12 @@ private[akka] object LocalReceptionist extends ReceptionistBehaviorProvider { subscriptions.get(key).foreach(_ ! 
ReceptionistMessages.Listing(key.asServiceKey, newListing)) } - changedKeysHint foreach notifySubscribersFor + changedKeysHint.foreach(notifySubscribersFor) next(newRegistry = newRegistry) } def replyWithListing[T](key: ServiceKey[T], replyTo: ActorRef[Listing]): Unit = - replyTo ! ReceptionistMessages.Listing(key, serviceRegistry get key) + replyTo ! ReceptionistMessages.Listing(key, serviceRegistry.get(key)) def onCommand(ctx: ActorContext[Any], cmd: Command): Behavior[Any] = cmd match { case ReceptionistMessages.Register(key, serviceInstance, maybeReplyTo) => diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistMessages.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistMessages.scala index b3e3261214..4be59ccaac 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistMessages.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ReceptionistMessages.scala @@ -22,15 +22,17 @@ private[akka] object ReceptionistMessages { // of type erasure, more type safe factory methods for each message // is the user API below while still hiding the type parameter so that // users don't incorrectly match against it - final case class Register[T] private[akka] ( - key: ServiceKey[T], - serviceInstance: ActorRef[T], - replyTo: Option[ActorRef[Receptionist.Registered]]) extends Command + final case class Register[T] private[akka] (key: ServiceKey[T], + serviceInstance: ActorRef[T], + replyTo: Option[ActorRef[Receptionist.Registered]]) + extends Command - final case class Registered[T] private[akka] (key: ServiceKey[T], _serviceInstance: ActorRef[T]) extends Receptionist.Registered { + final case class Registered[T] private[akka] (key: ServiceKey[T], _serviceInstance: ActorRef[T]) + extends Receptionist.Registered { def isForKey(key: ServiceKey[_]): Boolean = key == this.key def serviceInstance[M](key: ServiceKey[M]): ActorRef[M] = { 
- if (key != this.key) throw new IllegalArgumentException(s"Wrong key [$key] used, must use listing key [${this.key}]") + if (key != this.key) + throw new IllegalArgumentException(s"Wrong key [$key] used, must use listing key [${this.key}]") _serviceInstance.asInstanceOf[ActorRef[M]] } @@ -40,12 +42,14 @@ private[akka] object ReceptionistMessages { final case class Find[T] private[akka] (key: ServiceKey[T], replyTo: ActorRef[Receptionist.Listing]) extends Command - final case class Listing[T] private[akka] (key: ServiceKey[T], _serviceInstances: Set[ActorRef[T]]) extends Receptionist.Listing { + final case class Listing[T] private[akka] (key: ServiceKey[T], _serviceInstances: Set[ActorRef[T]]) + extends Receptionist.Listing { def isForKey(key: ServiceKey[_]): Boolean = key == this.key def serviceInstances[M](key: ServiceKey[M]): Set[ActorRef[M]] = { - if (key != this.key) throw new IllegalArgumentException(s"Wrong key [$key] used, must use listing key [${this.key}]") + if (key != this.key) + throw new IllegalArgumentException(s"Wrong key [$key] used, must use listing key [${this.key}]") _serviceInstances.asInstanceOf[Set[ActorRef[M]]] } @@ -53,6 +57,7 @@ private[akka] object ReceptionistMessages { serviceInstances(key).asJava } - final case class Subscribe[T] private[akka] (key: ServiceKey[T], subscriber: ActorRef[Receptionist.Listing]) extends Command + final case class Subscribe[T] private[akka] (key: ServiceKey[T], subscriber: ActorRef[Receptionist.Listing]) + extends Command } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializer.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializer.scala index 25ab098e29..a09dc1263c 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializer.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/receptionist/ServiceKeySerializer.scala @@ -14,7 +14,9 @@ import 
akka.serialization.{ BaseSerializer, SerializerWithStringManifest } * Internal API */ @InternalApi -final class ServiceKeySerializer(val system: akka.actor.ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer { +final class ServiceKeySerializer(val system: akka.actor.ExtendedActorSystem) + extends SerializerWithStringManifest + with BaseSerializer { def manifest(o: AnyRef): String = o match { case key: DefaultServiceKey[_] => key.typeName case _ => diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala index a16f8282bf..9abd3e9fbd 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/GroupRouterImpl.scala @@ -17,11 +17,11 @@ import akka.annotation.InternalApi * INTERNAL API */ @InternalApi -private[akka] final case class GroupRouterBuilder[T] private[akka] ( - key: ServiceKey[T], - logicFactory: () => RoutingLogic[T] = () => new RoutingLogics.RandomLogic[T]() -) extends javadsl.GroupRouter[T] - with scaladsl.GroupRouter[T] { +private[akka] final case class GroupRouterBuilder[T] private[akka] (key: ServiceKey[T], + logicFactory: () => RoutingLogic[T] = () => + new RoutingLogics.RandomLogic[T]()) + extends javadsl.GroupRouter[T] + with scaladsl.GroupRouter[T] { // deferred creation of the actual router def apply(ctx: TypedActorContext[T]): Behavior[T] = new GroupRouterImpl[T](ctx.asScala, key, logicFactory()) @@ -36,11 +36,8 @@ private[akka] final case class GroupRouterBuilder[T] private[akka] ( * INTERNAL API */ @InternalApi -private final class GroupRouterImpl[T]( - ctx: ActorContext[T], - serviceKey: ServiceKey[T], - routingLogic: RoutingLogic[T] -) extends AbstractBehavior[T] { +private final class GroupRouterImpl[T](ctx: ActorContext[T], serviceKey: ServiceKey[T], routingLogic: 
RoutingLogic[T]) + extends AbstractBehavior[T] { // casting trix to avoid having to wrap incoming messages - note that this will cause problems if intercepting // messages to a router diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala index 2dcfc40551..7d2264deba 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/PoolRouterImpl.scala @@ -13,16 +13,17 @@ import akka.annotation.InternalApi * INTERNAL API */ @InternalApi -private[akka] final case class PoolRouterBuilder[T]( - poolSize: Int, - behavior: Behavior[T], - logicFactory: () => RoutingLogic[T] = () => new RoutingLogics.RoundRobinLogic[T] -) extends javadsl.PoolRouter[T] - with scaladsl.PoolRouter[T] { +private[akka] final case class PoolRouterBuilder[T](poolSize: Int, + behavior: Behavior[T], + logicFactory: () => RoutingLogic[T] = () => + new RoutingLogics.RoundRobinLogic[T]) + extends javadsl.PoolRouter[T] + with scaladsl.PoolRouter[T] { if (poolSize < 1) throw new IllegalArgumentException(s"pool size must be positive, was $poolSize") // deferred creation of the actual router - def apply(ctx: TypedActorContext[T]): Behavior[T] = new PoolRouterImpl[T](ctx.asScala, poolSize, behavior, logicFactory()) + def apply(ctx: TypedActorContext[T]): Behavior[T] = + new PoolRouterImpl[T](ctx.asScala, poolSize, behavior, logicFactory()) def withRandomRouting(): PoolRouterBuilder[T] = copy(logicFactory = () => new RoutingLogics.RandomLogic[T]()) @@ -35,12 +36,11 @@ private[akka] final case class PoolRouterBuilder[T]( * INTERNAL API */ @InternalApi -private final class PoolRouterImpl[T]( - ctx: ActorContext[T], - poolSize: Int, - behavior: Behavior[T], - logic: RoutingLogic[T] -) extends AbstractBehavior[T] { +private final class PoolRouterImpl[T](ctx: ActorContext[T], 
+ poolSize: Int, + behavior: Behavior[T], + logic: RoutingLogic[T]) + extends AbstractBehavior[T] { (1 to poolSize).foreach { _ => val child = ctx.spawnAnonymous(behavior) @@ -76,4 +76,3 @@ private final class PoolRouterImpl[T]( } } - diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala index 73fa2d5ce1..c79ca914fc 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/internal/routing/RoutingLogic.scala @@ -15,6 +15,7 @@ import akka.dispatch.forkjoin.ThreadLocalRandom */ @InternalApi sealed private[akka] trait RoutingLogic[T] { + /** * @param routees available routees, will contain at least one element. Must not be mutated by select logic. */ @@ -61,8 +62,8 @@ private[akka] object RoutingLogics { val firstDiffIndex = { var idx = 0 while (idx < currentRoutees.length && - idx < sortedNewRoutees.length && - currentRoutees(idx) == sortedNewRoutees(idx)) { + idx < sortedNewRoutees.length && + currentRoutees(idx) == sortedNewRoutees(idx)) { idx += 1 } idx diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ActorContext.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ActorContext.scala index d6ed1b5939..cce21eb5b7 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ActorContext.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ActorContext.scala @@ -281,12 +281,11 @@ trait ActorContext[T] extends TypedActorContext[T] { * @tparam Req The request protocol, what the other actor accepts * @tparam Res The response protocol, what the other actor sends back */ - def ask[Req, Res]( - resClass: Class[Res], - target: RecipientRef[Req], - responseTimeout: Duration, - createRequest: java.util.function.Function[ActorRef[Res], Req], - applyToResponse: BiFunction[Res, Throwable, T]): Unit 
+ def ask[Req, Res](resClass: Class[Res], + target: RecipientRef[Req], + responseTimeout: Duration, + createRequest: java.util.function.Function[ActorRef[Res], Req], + applyToResponse: BiFunction[Res, Throwable, T]): Unit /** * Sends the result of the given `CompletionStage` to this Actor (“`self`”), after adapted it with diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala index ad85dabe8a..c9821a4e42 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/AskPattern.scala @@ -30,6 +30,9 @@ import scala.compat.java8.FutureConverters._ * */ object AskPattern { - def ask[T, U](actor: RecipientRef[T], message: JFunction[ActorRef[U], T], timeout: Duration, scheduler: Scheduler): CompletionStage[U] = + def ask[T, U](actor: RecipientRef[T], + message: JFunction[ActorRef[U], T], + timeout: Duration, + scheduler: Scheduler): CompletionStage[U] = (actor.ask(message.apply)(timeout.asScala, scheduler)).toJava } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala index d7c42ceed1..77bec5dbed 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/BehaviorBuilder.scala @@ -25,10 +25,7 @@ import akka.util.OptionVal * * @tparam T the common superclass of all supported messages. 
*/ -final class BehaviorBuilder[T] private ( - messageHandlers: List[Case[T, T]], - signalHandlers: List[Case[T, Signal]] -) { +final class BehaviorBuilder[T] private (messageHandlers: List[Case[T, T]], signalHandlers: List[Case[T, Signal]]) { /** * Build a Behavior from the current state of the builder @@ -55,11 +52,10 @@ final class BehaviorBuilder[T] private ( * @tparam M type of message to match * @return a new behavior builder with the specified handling appended */ - def onMessage[M <: T](`type`: Class[M], test: JPredicate[M], handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = - withMessage( - OptionVal.Some(`type`), - OptionVal.Some((t: T) => test.test(t.asInstanceOf[M])), - handler) + def onMessage[M <: T](`type`: Class[M], + test: JPredicate[M], + handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = + withMessage(OptionVal.Some(`type`), OptionVal.Some((t: T) => test.test(t.asInstanceOf[M])), handler) /** * Add a new case to the message handling without compile time type check. @@ -71,9 +67,9 @@ final class BehaviorBuilder[T] private ( * @param handler action to apply when the type matches * @return a new behavior builder with the specified handling appended */ - def onMessageUnchecked[M <: T](`type`: Class[_ <: T], handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = - withMessage[M]( - OptionVal.Some(`type`.asInstanceOf[Class[M]]), OptionVal.None, handler) + def onMessageUnchecked[M <: T](`type`: Class[_ <: T], + handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = + withMessage[M](OptionVal.Some(`type`.asInstanceOf[Class[M]]), OptionVal.None, handler) /** * Add a new case to the message handling matching equal messages. 
@@ -83,12 +79,11 @@ final class BehaviorBuilder[T] private ( * @return a new behavior builder with the specified handling appended */ def onMessageEquals(msg: T, handler: JFunction[ActorContext[T], Behavior[T]]): BehaviorBuilder[T] = - withMessage[T]( - OptionVal.Some(msg.getClass.asInstanceOf[Class[T]]), - OptionVal.Some(_.equals(msg)), - new JFunction2[ActorContext[T], T, Behavior[T]] { - override def apply(ctx: ActorContext[T], msg: T): Behavior[T] = handler.apply(ctx) - }) + withMessage[T](OptionVal.Some(msg.getClass.asInstanceOf[Class[T]]), + OptionVal.Some(_.equals(msg)), + new JFunction2[ActorContext[T], T, Behavior[T]] { + override def apply(ctx: ActorContext[T], msg: T): Behavior[T] = handler.apply(ctx) + }) /** * Add a new case to the message handling matching any message. Subsequent `onMessage` clauses will @@ -108,11 +103,9 @@ final class BehaviorBuilder[T] private ( * @tparam M type of signal to match * @return a new behavior builder with the specified handling appended */ - def onSignal[M <: Signal](`type`: Class[M], handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = - withSignal( - `type`, - OptionVal.None, - handler.asInstanceOf[JFunction2[ActorContext[T], Signal, Behavior[T]]]) + def onSignal[M <: Signal](`type`: Class[M], + handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = + withSignal(`type`, OptionVal.None, handler.asInstanceOf[JFunction2[ActorContext[T], Signal, Behavior[T]]]) /** * Add a new predicated case to the signal handling. 
@@ -123,12 +116,12 @@ final class BehaviorBuilder[T] private ( * @tparam M type of signal to match * @return a new behavior builder with the specified handling appended */ - def onSignal[M <: Signal](`type`: Class[M], test: JPredicate[M], handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = - withSignal( - `type`, - OptionVal.Some((t: Signal) => test.test(t.asInstanceOf[M])), - handler.asInstanceOf[JFunction2[ActorContext[T], Signal, Behavior[T]]] - ) + def onSignal[M <: Signal](`type`: Class[M], + test: JPredicate[M], + handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = + withSignal(`type`, + OptionVal.Some((t: Signal) => test.test(t.asInstanceOf[M])), + handler.asInstanceOf[JFunction2[ActorContext[T], Signal, Behavior[T]]]) /** * Add a new case to the signal handling matching equal signals. @@ -138,29 +131,24 @@ final class BehaviorBuilder[T] private ( * @return a new behavior builder with the specified handling appended */ def onSignalEquals(signal: Signal, handler: Function[ActorContext[T], Behavior[T]]): BehaviorBuilder[T] = - withSignal( - signal.getClass, - OptionVal.Some(_.equals(signal)), - new JFunction2[ActorContext[T], Signal, Behavior[T]] { - override def apply(ctx: ActorContext[T], signal: Signal): Behavior[T] = { - handler.apply(ctx) - } - }) + withSignal(signal.getClass, OptionVal.Some(_.equals(signal)), new JFunction2[ActorContext[T], Signal, Behavior[T]] { + override def apply(ctx: ActorContext[T], signal: Signal): Behavior[T] = { + handler.apply(ctx) + } + }) - private def withMessage[M <: T](clazz: OptionVal[Class[M]], test: OptionVal[M => Boolean], handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = { - val newCase = Case( - clazz, - test, - handler - ) + private def withMessage[M <: T](clazz: OptionVal[Class[M]], + test: OptionVal[M => Boolean], + handler: JFunction2[ActorContext[T], M, Behavior[T]]): BehaviorBuilder[T] = { + val newCase = Case(clazz, test, handler) new 
BehaviorBuilder[T](newCase.asInstanceOf[Case[T, T]] +: messageHandlers, signalHandlers) } - private def withSignal[M <: Signal](`type`: Class[M], test: OptionVal[Signal => Boolean], handler: JFunction2[ActorContext[T], Signal, Behavior[T]]): BehaviorBuilder[T] = { - new BehaviorBuilder[T]( - messageHandlers, - Case(OptionVal.Some(`type`), test, handler).asInstanceOf[Case[T, Signal]] +: signalHandlers - ) + private def withSignal[M <: Signal](`type`: Class[M], + test: OptionVal[Signal => Boolean], + handler: JFunction2[ActorContext[T], Signal, Behavior[T]]): BehaviorBuilder[T] = { + new BehaviorBuilder[T](messageHandlers, + Case(OptionVal.Some(`type`), test, handler).asInstanceOf[Case[T, Signal]] +: signalHandlers) } } @@ -171,7 +159,9 @@ object BehaviorBuilder { // used for both matching signals and messages so we throw away types after they are enforced by the builder API above /** INTERNAL API */ @InternalApi - private[javadsl] final case class Case[BT, MT](`type`: OptionVal[Class[_ <: MT]], test: OptionVal[MT => Boolean], handler: JFunction2[ActorContext[BT], MT, Behavior[BT]]) + private[javadsl] final case class Case[BT, MT](`type`: OptionVal[Class[_ <: MT]], + test: OptionVal[MT => Boolean], + handler: JFunction2[ActorContext[BT], MT, Behavior[BT]]) /** * @return new empty immutable behavior builder. 
@@ -185,14 +175,13 @@ object BehaviorBuilder { * INTERNAL API */ @InternalApi -private final class BuiltBehavior[T]( - messageHandlers: List[Case[T, T]], - signalHandlers: List[Case[T, Signal]] -) extends ExtensibleBehavior[T] { +private final class BuiltBehavior[T](messageHandlers: List[Case[T, T]], signalHandlers: List[Case[T, Signal]]) + extends ExtensibleBehavior[T] { override def receive(ctx: TypedActorContext[T], msg: T): Behavior[T] = receive(ctx.asJava, msg, messageHandlers) - override def receiveSignal(ctx: TypedActorContext[T], msg: Signal): Behavior[T] = receive(ctx.asJava, msg, signalHandlers) + override def receiveSignal(ctx: TypedActorContext[T], msg: Signal): Behavior[T] = + receive(ctx.asJava, msg, signalHandlers) @tailrec private def receive[M](ctx: ActorContext[T], msg: M, handlers: List[Case[T, M]]): Behavior[T] = diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala index 9c45575c84..7246500a24 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/Behaviors.scala @@ -136,12 +136,11 @@ object Behaviors { * that can potentially be different from this one. State is maintained by returning * a new behavior that holds the new immutable state. 
*/ - def receive[T]( - onMessage: JapiFunction2[ActorContext[T], T, Behavior[T]], - onSignal: JapiFunction2[ActorContext[T], Signal, Behavior[T]]): Behavior[T] = { - new BehaviorImpl.ReceiveBehavior( - (ctx, msg) => onMessage.apply(ctx.asJava, msg), - { case (ctx, sig) => onSignal.apply(ctx.asJava, sig) }) + def receive[T](onMessage: JapiFunction2[ActorContext[T], T, Behavior[T]], + onSignal: JapiFunction2[ActorContext[T], Signal, Behavior[T]]): Behavior[T] = { + new BehaviorImpl.ReceiveBehavior((ctx, msg) => onMessage.apply(ctx.asJava, msg), { + case (ctx, sig) => onSignal.apply(ctx.asJava, sig) + }) } /** @@ -229,6 +228,7 @@ object Behaviors { new Supervise[T](wrapped) final class Supervise[T] private[akka] (wrapped: Behavior[T]) { + /** * Specify the [[SupervisorStrategy]] to be invoked when the wrapped behavior throws. * @@ -297,8 +297,8 @@ object Behaviors { * * See also [[akka.actor.typed.Logger.withMdc]] */ - def withMdc[T]( - mdcForMessage: akka.japi.function.Function[T, java.util.Map[String, Any]], behavior: Behavior[T]): Behavior[T] = + def withMdc[T](mdcForMessage: akka.japi.function.Function[T, java.util.Map[String, Any]], + behavior: Behavior[T]): Behavior[T] = withMdc(Collections.emptyMap[String, Any], mdcForMessage, behavior) /** @@ -330,10 +330,9 @@ object Behaviors { * * See also [[akka.actor.typed.Logger.withMdc]] */ - def withMdc[T]( - staticMdc: java.util.Map[String, Any], - mdcForMessage: akka.japi.function.Function[T, java.util.Map[String, Any]], - behavior: Behavior[T]): Behavior[T] = { + def withMdc[T](staticMdc: java.util.Map[String, Any], + mdcForMessage: akka.japi.function.Function[T, java.util.Map[String, Any]], + behavior: Behavior[T]): Behavior[T] = { def asScalaMap(m: java.util.Map[String, Any]): Map[String, Any] = { if (m == null || m.isEmpty) Map.empty[String, Any] @@ -342,14 +341,11 @@ object Behaviors { val mdcForMessageFun: T => Map[String, Any] = if (mdcForMessage == null) Map.empty - else { - message => 
asScalaMap(mdcForMessage.apply(message)) + else { message => + asScalaMap(mdcForMessage.apply(message)) } - WithMdcBehaviorInterceptor[T]( - asScalaMap(staticMdc), - mdcForMessageFun, - behavior) + WithMdcBehaviorInterceptor[T](asScalaMap(staticMdc), mdcForMessageFun, behavior) } } diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala index 23637ad6c8..7890ca9e44 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/javadsl/ReceiveBuilder.scala @@ -20,10 +20,8 @@ import akka.util.OptionVal * * @tparam T the common superclass of all supported messages. */ -final class ReceiveBuilder[T] private ( - private var messageHandlers: List[ReceiveBuilder.Case[T, T]], - private var signalHandlers: List[ReceiveBuilder.Case[T, Signal]] -) { +final class ReceiveBuilder[T] private (private var messageHandlers: List[ReceiveBuilder.Case[T, T]], + private var signalHandlers: List[ReceiveBuilder.Case[T, Signal]]) { import ReceiveBuilder.Case @@ -110,7 +108,9 @@ final class ReceiveBuilder[T] private ( * @tparam M type of signal to match * @return this behavior builder */ - def onSignal[M <: Signal](`type`: Class[M], test: JPredicate[M], handler: JFunction[M, Behavior[T]]): ReceiveBuilder[T] = + def onSignal[M <: Signal](`type`: Class[M], + test: JPredicate[M], + handler: JFunction[M, Behavior[T]]): ReceiveBuilder[T] = withSignal(`type`, OptionVal.Some(test), handler) /** @@ -127,24 +127,31 @@ final class ReceiveBuilder[T] private ( override def apply(param: Signal): Behavior[T] = handler.create() }) - private def withMessage[M <: T](`type`: OptionVal[Class[M]], test: OptionVal[JPredicate[M]], handler: JFunction[M, Behavior[T]]): ReceiveBuilder[T] = { + private def withMessage[M <: T](`type`: OptionVal[Class[M]], + test: OptionVal[JPredicate[M]], + handler: JFunction[M, 
Behavior[T]]): ReceiveBuilder[T] = { messageHandlers = Case[T, M](`type`, test, handler).asInstanceOf[Case[T, T]] +: messageHandlers this } - private def withSignal[M <: Signal](`type`: Class[M], test: OptionVal[JPredicate[M]], handler: JFunction[M, Behavior[T]]): ReceiveBuilder[T] = { + private def withSignal[M <: Signal](`type`: Class[M], + test: OptionVal[JPredicate[M]], + handler: JFunction[M, Behavior[T]]): ReceiveBuilder[T] = { signalHandlers = Case[T, M](OptionVal.Some(`type`), test, handler).asInstanceOf[Case[T, Signal]] +: signalHandlers this } } object ReceiveBuilder { + /** Create a new mutable receive builder */ def create[T]: ReceiveBuilder[T] = new ReceiveBuilder[T](Nil, Nil) /** INTERNAL API */ @InternalApi - private[javadsl] final case class Case[BT, MT](`type`: OptionVal[Class[_ <: MT]], test: OptionVal[JPredicate[MT]], handler: JFunction[MT, Behavior[BT]]) + private[javadsl] final case class Case[BT, MT](`type`: OptionVal[Class[_ <: MT]], + test: OptionVal[JPredicate[MT]], + handler: JFunction[MT, Behavior[BT]]) } @@ -154,10 +161,9 @@ object ReceiveBuilder { * INTERNAL API */ @InternalApi -private final class BuiltReceive[T]( - messageHandlers: List[ReceiveBuilder.Case[T, T]], - signalHandlers: List[ReceiveBuilder.Case[T, Signal]] -) extends Receive[T] { +private final class BuiltReceive[T](messageHandlers: List[ReceiveBuilder.Case[T, T]], + signalHandlers: List[ReceiveBuilder.Case[T, Signal]]) + extends Receive[T] { import ReceiveBuilder.Case override def receiveMessage(msg: T): Behavior[T] = receive[T](msg, messageHandlers) @@ -168,7 +174,8 @@ private final class BuiltReceive[T]( private def receive[M](msg: M, handlers: List[Case[T, M]]): Behavior[T] = handlers match { case Case(cls, predicate, handler) :: tail => - if ((cls.isEmpty || cls.get.isAssignableFrom(msg.getClass)) && (predicate.isEmpty || predicate.get.test(msg))) handler(msg) + if ((cls.isEmpty || cls.get.isAssignableFrom(msg.getClass)) && (predicate.isEmpty || 
predicate.get.test(msg))) + handler(msg) else receive[M](msg, tail) case _ => Behaviors.unhandled diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala index 91a4343764..a8b2463256 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/receptionist/Receptionist.scala @@ -43,8 +43,10 @@ abstract class Receptionist extends Extension { .recover { case e => throw new RuntimeException("ClusterReceptionist could not be loaded dynamically. Make sure you have " + - "'akka-cluster-typed' in the classpath.", e) - }.get + "'akka-cluster-typed' in the classpath.", + e) + } + .get } else LocalReceptionist import akka.actor.typed.scaladsl.adapter._ @@ -53,6 +55,7 @@ abstract class Receptionist extends Extension { } object ServiceKey { + /** * Scala API: Creates a service key. The given ID should uniquely define a service with a given protocol. 
*/ @@ -135,16 +138,19 @@ object Receptionist extends ExtensionId[Receptionist] { */ def apply[T](key: ServiceKey[T], service: ActorRef[T]): Command = new ReceptionistMessages.Register[T](key, service, None) + /** * Create a Register with an actor that will get an ack that the service was registered */ def apply[T](key: ServiceKey[T], service: ActorRef[T], replyTo: ActorRef[Registered]): Command = new ReceptionistMessages.Register[T](key, service, Some(replyTo)) } + /** * Java API: A Register message without Ack that the service was registered */ def register[T](key: ServiceKey[T], service: ActorRef[T]): Command = Register(key, service) + /** * Java API: A Register message with Ack that the service was registered */ @@ -165,14 +171,17 @@ object Receptionist extends ExtensionId[Receptionist] { /** Scala API */ def key: ServiceKey[_] + /** Java API */ def getKey: ServiceKey[_] = key + /** * Scala API * * Also, see [[ServiceKey.Listing]] for more convenient pattern matching */ def serviceInstance[T](key: ServiceKey[T]): ActorRef[T] + /** Java API */ def getServiceInstance[T](key: ServiceKey[T]): ActorRef[T] } @@ -181,6 +190,7 @@ object Receptionist extends ExtensionId[Receptionist] { * Sent by the receptionist, available here for easier testing */ object Registered { + /** * Scala API */ @@ -188,6 +198,7 @@ object Receptionist extends ExtensionId[Receptionist] { new ReceptionistMessages.Registered(key, serviceInstance) } + /** * Java API: Sent by the receptionist, available here for easier testing */ @@ -202,6 +213,7 @@ object Receptionist extends ExtensionId[Receptionist] { * with the termination of the subscriber. */ object Subscribe { + /** * Scala API: */ @@ -224,6 +236,7 @@ object Receptionist extends ExtensionId[Receptionist] { * protocol at one point in time. 
*/ object Find { + /** Scala API: */ def apply[T](key: ServiceKey[T], replyTo: ActorRef[Listing]): Command = new ReceptionistMessages.Find(key, replyTo) @@ -250,8 +263,10 @@ object Receptionist extends ExtensionId[Receptionist] { */ @DoNotInherit trait Listing { + /** Scala API */ def key: ServiceKey[_] + /** Java API */ def getKey: ServiceKey[_] = key @@ -273,6 +288,7 @@ object Receptionist extends ExtensionId[Receptionist] { * Sent by the receptionist, available here for easier testing */ object Listing { + /** Scala API: */ def apply[T](key: ServiceKey[T], serviceInstances: Set[ActorRef[T]]): Listing = new ReceptionistMessages.Listing[T](key, serviceInstances) @@ -301,4 +317,4 @@ object ReceptionistSetup { * for tests that need to replace extension with stub/mock implementations. */ final class ReceptionistSetup(createExtension: java.util.function.Function[ActorSystem[_], Receptionist]) - extends ExtensionSetup[Receptionist](Receptionist, createExtension) + extends ExtensionSetup[Receptionist](Receptionist, createExtension) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/ActorContext.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/ActorContext.scala index fd9825a80f..25eb404017 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/ActorContext.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/ActorContext.scala @@ -287,7 +287,8 @@ trait ActorContext[T] extends TypedActorContext[T] { * @tparam Req The request protocol, what the other actor accepts * @tparam Res The response protocol, what the other actor sends back */ - def ask[Req, Res](target: RecipientRef[Req])(createRequest: ActorRef[Res] => Req)(mapResponse: Try[Res] => T)(implicit responseTimeout: Timeout, classTag: ClassTag[Res]): Unit + def ask[Req, Res](target: RecipientRef[Req])(createRequest: ActorRef[Res] => Req)( + mapResponse: Try[Res] => T)(implicit responseTimeout: Timeout, classTag: ClassTag[Res]): Unit /** * Sends the 
result of the given `Future` to this Actor (“`self`”), after adapted it with diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AskPattern.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AskPattern.scala index a3b67a530c..45b27af458 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AskPattern.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/AskPattern.scala @@ -13,7 +13,7 @@ import akka.actor.typed.ActorRef import akka.actor.typed.internal.{ adapter => adapt } import akka.annotation.InternalApi import akka.pattern.PromiseActorRef -import akka.util.{ Timeout, unused } +import akka.util.{ unused, Timeout } import akka.actor.typed.RecipientRef import akka.actor.typed.internal.InternalRecipientRef @@ -28,6 +28,7 @@ object AskPattern { * See [[ask]] */ implicit final class Askable[T](val ref: RecipientRef[T]) extends AnyVal { + /** * The ask-pattern implements the initiator side of a request–reply protocol. * The `?` operator is pronounced as "ask" (and a convenience symbolic operation @@ -95,8 +96,9 @@ object AskPattern { // because it might be needed when we move to a 'native' typed runtime, see #24219 ref match { case a: InternalRecipientRef[_] => askUntyped(a, timeout, replyTo) - case a => throw new IllegalStateException( - "Only expect references to be RecipientRef, ActorRefAdapter or ActorSystemAdapter until " + + case a => + throw new IllegalStateException( + "Only expect references to be RecipientRef, ActorRefAdapter or ActorSystemAdapter until " + "native system is implemented: " + a.getClass) } } @@ -109,13 +111,14 @@ object AskPattern { // Note: _promiseRef mustn't have a type pattern, since it can be null private[this] val (_ref: ActorRef[U], _future: Future[U], _promiseRef) = if (target.isTerminated) - ( - adapt.ActorRefAdapter[U](target.provider.deadLetters), - Future.failed[U](new TimeoutException(s"Recipient[$target] had already been terminated.")), null) + 
(adapt.ActorRefAdapter[U](target.provider.deadLetters), + Future.failed[U](new TimeoutException(s"Recipient[$target] had already been terminated.")), + null) else if (timeout.duration.length <= 0) - ( - adapt.ActorRefAdapter[U](target.provider.deadLetters), - Future.failed[U](new IllegalArgumentException(s"Timeout length must be positive, question not sent to [$target]")), null) + (adapt.ActorRefAdapter[U](target.provider.deadLetters), + Future.failed[U]( + new IllegalArgumentException(s"Timeout length must be positive, question not sent to [$target]")), + null) else { // messageClassName "unknown' is set later, after applying the message factory val a = PromiseActorRef(target.provider, timeout, target, "unknown", onTimeout = onTimeout) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala index dc0e928130..c19b19a8d7 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/Behaviors.scala @@ -8,7 +8,7 @@ package scaladsl import akka.annotation.{ ApiMayChange, DoNotInherit, InternalApi } import akka.actor.typed.internal._ -import scala.reflect.{ ClassTag, classTag } +import scala.reflect.{ classTag, ClassTag } /** * Factories for [[akka.actor.typed.Behavior]]. @@ -202,6 +202,7 @@ object Behaviors { private final val ThrowableClassTag = ClassTag(classOf[Throwable]) final class Supervise[T] private[akka] (val wrapped: Behavior[T]) extends AnyVal { + /** Specify the [[SupervisorStrategy]] to be invoked when the wrapped behavior throws. 
*/ def onFailure[Thr <: Throwable: ClassTag](strategy: SupervisorStrategy): Behavior[T] = { val tag = classTag[Thr] @@ -261,7 +262,8 @@ object Behaviors { * * See also [[akka.actor.typed.Logger.withMdc]] */ - def withMdc[T](staticMdc: Map[String, Any], mdcForMessage: T => Map[String, Any])(behavior: Behavior[T]): Behavior[T] = + def withMdc[T](staticMdc: Map[String, Any], mdcForMessage: T => Map[String, Any])( + behavior: Behavior[T]): Behavior[T] = WithMdcBehaviorInterceptor[T](staticMdc, mdcForMessage, behavior) /** @@ -275,14 +277,16 @@ object Behaviors { @InternalApi private[akka] final class ReceiveImpl[T](onMessage: (ActorContext[T], T) => Behavior[T]) - extends BehaviorImpl.ReceiveBehavior[T](onMessage) with Receive[T] { + extends BehaviorImpl.ReceiveBehavior[T](onMessage) + with Receive[T] { override def receiveSignal(onSignal: PartialFunction[(ActorContext[T], Signal), Behavior[T]]): Behavior[T] = new BehaviorImpl.ReceiveBehavior(onMessage, onSignal) } @InternalApi private[akka] final class ReceiveMessageImpl[T](onMessage: T => Behavior[T]) - extends BehaviorImpl.ReceiveMessageBehavior[T](onMessage) with Receive[T] { + extends BehaviorImpl.ReceiveMessageBehavior[T](onMessage) + with Receive[T] { override def receiveSignal(onSignal: PartialFunction[(ActorContext[T], Signal), Behavior[T]]): Behavior[T] = new BehaviorImpl.ReceiveMessageBehavior[T](onMessage, onSignal) diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/StashBuffer.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/StashBuffer.scala index e0d80659da..17b44d5cba 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/StashBuffer.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/StashBuffer.scala @@ -29,6 +29,7 @@ object StashBuffer { * Not for user extension. */ @DoNotInherit trait StashBuffer[T] { + /** * Check if the message buffer is empty. 
* diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/AdapterExtension.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/AdapterExtension.scala index 706ede4717..c711769b7f 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/AdapterExtension.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/AdapterExtension.scala @@ -16,6 +16,7 @@ import akka.actor.typed.internal.adapter.ActorSystemAdapter @InternalApi private[akka] class AdapterExtension(sys: akka.actor.ActorSystem) extends akka.actor.Extension { val adapter = ActorSystemAdapter(sys) } + /** * Internal API */ diff --git a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala index ced5740ebe..9813a9d17a 100644 --- a/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala +++ b/akka-actor-typed/src/main/scala/akka/actor/typed/scaladsl/adapter/package.scala @@ -58,7 +58,9 @@ package object adapter { /** * INTERNAL API */ - @InternalApi private[akka] def internalSystemActorOf[U](behavior: Behavior[U], name: String, props: Props): ActorRef[U] = { + @InternalApi private[akka] def internalSystemActorOf[U](behavior: Behavior[U], + name: String, + props: Props): ActorRef[U] = { toUntyped.asInstanceOf[ExtendedActorSystem].systemActorOf(PropsAdapter(behavior, props), name) } } diff --git a/akka-actor/src/main/scala-2.11/akka/compat/Future.scala b/akka-actor/src/main/scala-2.11/akka/compat/Future.scala index 2ff28a041d..2b0f1c60a0 100644 --- a/akka-actor/src/main/scala-2.11/akka/compat/Future.scala +++ b/akka-actor/src/main/scala-2.11/akka/compat/Future.scala @@ -16,12 +16,15 @@ import scala.concurrent.{ ExecutionContext, Future => SFuture } * Remove these classes as soon as support for Scala 2.11 is dropped! 
*/ @InternalApi private[akka] object Future { - def fold[T, R](futures: TraversableOnce[SFuture[T]])(zero: R)(op: (R, T) => R)(implicit executor: ExecutionContext): SFuture[R] = + def fold[T, R](futures: TraversableOnce[SFuture[T]])(zero: R)(op: (R, T) => R)( + implicit executor: ExecutionContext): SFuture[R] = SFuture.fold[T, R](futures)(zero)(op)(executor) - def reduce[T, R >: T](futures: TraversableOnce[SFuture[T]])(op: (R, T) => R)(implicit executor: ExecutionContext): SFuture[R] = + def reduce[T, R >: T](futures: TraversableOnce[SFuture[T]])(op: (R, T) => R)( + implicit executor: ExecutionContext): SFuture[R] = SFuture.reduce[T, R](futures)(op)(executor) - def find[T](futures: TraversableOnce[SFuture[T]])(p: T => Boolean)(implicit executor: ExecutionContext): SFuture[Option[T]] = + def find[T](futures: TraversableOnce[SFuture[T]])(p: T => Boolean)( + implicit executor: ExecutionContext): SFuture[Option[T]] = SFuture.find[T](futures)(p)(executor) } diff --git a/akka-actor/src/main/scala-2.11/akka/util/ByteIterator.scala b/akka-actor/src/main/scala-2.11/akka/util/ByteIterator.scala index a0120fdc0c..f877dfec65 100644 --- a/akka-actor/src/main/scala-2.11/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala-2.11/akka/util/ByteIterator.scala @@ -25,7 +25,8 @@ object ByteIterator { val empty: ByteArrayIterator = apply(Array.emptyByteArray) } - class ByteArrayIterator private (private var array: Array[Byte], private var from: Int, private var until: Int) extends ByteIterator { + class ByteArrayIterator private (private var array: Array[Byte], private var from: Int, private var until: Int) + extends ByteIterator { iterator => @inline final def len: Int = until - from @@ -36,7 +37,9 @@ object ByteIterator { final def next(): Byte = { if (!hasNext) EmptyImmutableSeq.iterator.next() - else { val i = from; from = from + 1; array(i) } + else { + val i = from; from = from + 1; array(i) + } } def clear(): Unit = { this.array = Array.emptyByteArray; from = 0; until = 
from } @@ -47,19 +50,20 @@ object ByteIterator { case that: ByteIterator => if (that.isEmpty) this else if (this.isEmpty) that - else that match { - case that: ByteArrayIterator => - if ((this.array eq that.array) && (this.until == that.from)) { - this.until = that.until - that.clear() - this - } else { - val result = MultiByteArrayIterator(List(this, that)) - this.clear() - result - } - case that: MultiByteArrayIterator => this ++: that - } + else + that match { + case that: ByteArrayIterator => + if ((this.array eq that.array) && (this.until == that.from)) { + this.until = that.until + that.clear() + this + } else { + val result = MultiByteArrayIterator(List(this, that)) + this.clear() + result + } + case that: MultiByteArrayIterator => this ++: that + } case _ => super.++(that) } @@ -85,7 +89,11 @@ object ByteIterator { final override def dropWhile(p: Byte => Boolean): this.type = { var stop = false while (!stop && hasNext) { - if (p(array(from))) { from = from + 1 } else { stop = true } + if (p(array(from))) { + from = from + 1 + } else { + stop = true + } } this } @@ -113,20 +121,25 @@ object ByteIterator { private def wrappedByteBuffer: ByteBuffer = ByteBuffer.wrap(array, from, len).asReadOnlyBuffer - def getShorts(xs: Array[Short], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asShortBuffer.get(xs, offset, n); drop(2 * n) } + def getShorts(xs: Array[Short], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asShortBuffer.get(xs, offset, n); drop(2 * n) + } - def getInts(xs: Array[Int], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asIntBuffer.get(xs, offset, n); drop(4 * n) } + def getInts(xs: Array[Int], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asIntBuffer.get(xs, offset, n); drop(4 * n) + } - def getLongs(xs: Array[Long], 
offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asLongBuffer.get(xs, offset, n); drop(8 * n) } + def getLongs(xs: Array[Long], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asLongBuffer.get(xs, offset, n); drop(8 * n) + } - def getFloats(xs: Array[Float], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asFloatBuffer.get(xs, offset, n); drop(4 * n) } + def getFloats(xs: Array[Float], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asFloatBuffer.get(xs, offset, n); drop(4 * n) + } - def getDoubles(xs: Array[Double], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asDoubleBuffer.get(xs, offset, n); drop(8 * n) } + def getDoubles(xs: Array[Double], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asDoubleBuffer.get(xs, offset, n); drop(8 * n) + } def copyToBuffer(buffer: ByteBuffer): Int = { val copyLength = math.min(buffer.remaining, len) @@ -300,7 +313,7 @@ object ByteIterator { } override def foreach[@specialized U](f: Byte => U): Unit = { - iterators foreach { _ foreach f } + iterators.foreach { _.foreach(f) } clear() } @@ -313,8 +326,10 @@ object ByteIterator { } } - @tailrec protected final def getToArray[A](xs: Array[A], offset: Int, n: Int, elemSize: Int)(getSingle: => A)(getMult: (Array[A], Int, Int) => Unit): this.type = - if (n <= 0) this else { + @tailrec protected final def getToArray[A](xs: Array[A], offset: Int, n: Int, elemSize: Int)(getSingle: => A)( + getMult: (Array[A], Int, Int) => Unit): this.type = + if (n <= 0) this + else { if (isEmpty) EmptyImmutableSeq.iterator.next() val nDone = if (current.len >= elemSize) { val nCurrent = math.min(n, current.len / elemSize) @@ -387,7 +402,6 @@ object ByteIterator { /** * An 
iterator over a ByteString. */ - abstract class ByteIterator extends BufferedIterator[Byte] { def len: Int @@ -397,24 +411,28 @@ abstract class ByteIterator extends BufferedIterator[Byte] { protected def clear(): Unit - def ++(that: TraversableOnce[Byte]): ByteIterator = if (that.isEmpty) this else ByteIterator.ByteArrayIterator(that.toArray) + def ++(that: TraversableOnce[Byte]): ByteIterator = + if (that.isEmpty) this else ByteIterator.ByteArrayIterator(that.toArray) // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. - override def clone: ByteIterator = throw new UnsupportedOperationException("Method clone is not implemented in ByteIterator") + override def clone: ByteIterator = + throw new UnsupportedOperationException("Method clone is not implemented in ByteIterator") override def duplicate: (ByteIterator, ByteIterator) = (this, clone) // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. - override def take(n: Int): this.type = throw new UnsupportedOperationException("Method take is not implemented in ByteIterator") + override def take(n: Int): this.type = + throw new UnsupportedOperationException("Method take is not implemented in ByteIterator") // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. 
- override def drop(n: Int): this.type = throw new UnsupportedOperationException("Method drop is not implemented in ByteIterator") + override def drop(n: Int): this.type = + throw new UnsupportedOperationException("Method drop is not implemented in ByteIterator") override def slice(from: Int, until: Int): this.type = { if (from > 0) drop(from).take(until - from) @@ -424,12 +442,14 @@ abstract class ByteIterator extends BufferedIterator[Byte] { // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. - override def takeWhile(p: Byte => Boolean): this.type = throw new UnsupportedOperationException("Method takeWhile is not implemented in ByteIterator") + override def takeWhile(p: Byte => Boolean): this.type = + throw new UnsupportedOperationException("Method takeWhile is not implemented in ByteIterator") // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. 
- override def dropWhile(p: Byte => Boolean): this.type = throw new UnsupportedOperationException("Method dropWhile is not implemented in ByteIterator") + override def dropWhile(p: Byte => Boolean): this.type = + throw new UnsupportedOperationException("Method dropWhile is not implemented in ByteIterator") override def span(p: Byte => Boolean): (ByteIterator, ByteIterator) = { val that = clone @@ -442,7 +462,11 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def indexWhere(p: Byte => Boolean, from: Int): Int = { var index = from var found = false - while (!found && hasNext) if (p(next())) { found = true } else { index += 1 } + while (!found && hasNext) if (p(next())) { + found = true + } else { + index += 1 + } if (found) index else -1 } @@ -461,7 +485,9 @@ abstract class ByteIterator extends BufferedIterator[Byte] { override def foldLeft[@specialized B](z: B)(op: (B, Byte) => B): B = { var acc = z - foreach { byte => acc = op(acc, byte) } + foreach { byte => + acc = op(acc, byte) + } acc } @@ -493,14 +519,14 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getInt(implicit byteOrder: ByteOrder): Int = { if (byteOrder == ByteOrder.BIG_ENDIAN) ((next() & 0xff) << 24 - | (next() & 0xff) << 16 - | (next() & 0xff) << 8 - | (next() & 0xff) << 0) + | (next() & 0xff) << 16 + | (next() & 0xff) << 8 + | (next() & 0xff) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) ((next() & 0xff) << 0 - | (next() & 0xff) << 8 - | (next() & 0xff) << 16 - | (next() & 0xff) << 24) + | (next() & 0xff) << 8 + | (next() & 0xff) << 16 + | (next() & 0xff) << 24) else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -510,22 +536,22 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getLong(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) ((next().toLong & 0xff) << 56 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 32 - | (next().toLong & 
0xff) << 24 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 0) + | (next().toLong & 0xff) << 48 + | (next().toLong & 0xff) << 40 + | (next().toLong & 0xff) << 32 + | (next().toLong & 0xff) << 24 + | (next().toLong & 0xff) << 16 + | (next().toLong & 0xff) << 8 + | (next().toLong & 0xff) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) ((next().toLong & 0xff) << 0 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 24 - | (next().toLong & 0xff) << 32 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 56) + | (next().toLong & 0xff) << 8 + | (next().toLong & 0xff) << 16 + | (next().toLong & 0xff) << 24 + | (next().toLong & 0xff) << 32 + | (next().toLong & 0xff) << 40 + | (next().toLong & 0xff) << 48 + | (next().toLong & 0xff) << 56) else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -536,11 +562,11 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getLongPart(n: Int)(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) { var x = 0L - (1 to n) foreach (_ => x = (x << 8) | (next() & 0xff)) + (1 to n).foreach(_ => x = (x << 8) | (next() & 0xff)) x } else if (byteOrder == ByteOrder.LITTLE_ENDIAN) { var x = 0L - (0 until n) foreach (i => x |= (next() & 0xff) << 8 * i) + (0 until n).foreach(i => x |= (next() & 0xff) << 8 * i) x } else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } diff --git a/akka-actor/src/main/scala-2.12/akka/compat/Future.scala b/akka-actor/src/main/scala-2.12/akka/compat/Future.scala index cba195a5cb..b7d9f947aa 100644 --- a/akka-actor/src/main/scala-2.12/akka/compat/Future.scala +++ b/akka-actor/src/main/scala-2.12/akka/compat/Future.scala @@ -18,22 +18,27 @@ import akka.util.ccompat._ * Remove these classes as soon as support for Scala 2.11 is dropped! 
*/ @InternalApi private[akka] object Future { - def fold[T, R](futures: IterableOnce[SFuture[T]])(zero: R)(op: (R, T) => R)(implicit executor: ExecutionContext): SFuture[R] = + def fold[T, R](futures: IterableOnce[SFuture[T]])(zero: R)(op: (R, T) => R)( + implicit executor: ExecutionContext): SFuture[R] = SFuture.fold[T, R](futures)(zero)(op)(executor) - def fold[T, R](futures: immutable.Iterable[SFuture[T]])(zero: R)(op: (R, T) => R)(implicit executor: ExecutionContext): SFuture[R] = + def fold[T, R](futures: immutable.Iterable[SFuture[T]])(zero: R)(op: (R, T) => R)( + implicit executor: ExecutionContext): SFuture[R] = SFuture.foldLeft[T, R](futures)(zero)(op)(executor) - def reduce[T, R >: T](futures: IterableOnce[SFuture[T]])(op: (R, T) => R)(implicit executor: ExecutionContext): SFuture[R] = + def reduce[T, R >: T](futures: IterableOnce[SFuture[T]])(op: (R, T) => R)( + implicit executor: ExecutionContext): SFuture[R] = SFuture.reduce[T, R](futures)(op)(executor) - def reduce[T, R >: T](futures: immutable.Iterable[SFuture[T]])(op: (R, T) => R)(implicit executor: ExecutionContext): SFuture[R] = + def reduce[T, R >: T](futures: immutable.Iterable[SFuture[T]])(op: (R, T) => R)( + implicit executor: ExecutionContext): SFuture[R] = SFuture.reduceLeft[T, R](futures)(op)(executor) - def find[T](futures: IterableOnce[SFuture[T]])(p: T => Boolean)(implicit executor: ExecutionContext): SFuture[Option[T]] = + def find[T](futures: IterableOnce[SFuture[T]])(p: T => Boolean)( + implicit executor: ExecutionContext): SFuture[Option[T]] = SFuture.find[T](futures)(p)(executor) - def find[T](futures: immutable.Iterable[SFuture[T]])(p: T => Boolean)(implicit executor: ExecutionContext): SFuture[Option[T]] = + def find[T](futures: immutable.Iterable[SFuture[T]])(p: T => Boolean)( + implicit executor: ExecutionContext): SFuture[Option[T]] = SFuture.find[T](futures)(p)(executor) } - diff --git a/akka-actor/src/main/scala-2.12/akka/util/ByteIterator.scala 
b/akka-actor/src/main/scala-2.12/akka/util/ByteIterator.scala index 8041f8ff4d..9849bc7523 100644 --- a/akka-actor/src/main/scala-2.12/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala-2.12/akka/util/ByteIterator.scala @@ -25,7 +25,8 @@ object ByteIterator { val empty: ByteArrayIterator = apply(Array.emptyByteArray) } - class ByteArrayIterator private (private var array: Array[Byte], private var from: Int, private var until: Int) extends ByteIterator { + class ByteArrayIterator private (private var array: Array[Byte], private var from: Int, private var until: Int) + extends ByteIterator { iterator => @inline final def len: Int = until - from @@ -36,7 +37,9 @@ object ByteIterator { final def next(): Byte = { if (!hasNext) EmptyImmutableSeq.iterator.next() - else { val i = from; from = from + 1; array(i) } + else { + val i = from; from = from + 1; array(i) + } } def clear(): Unit = { this.array = Array.emptyByteArray; from = 0; until = from } @@ -47,19 +50,20 @@ object ByteIterator { case that: ByteIterator => if (that.isEmpty) this else if (this.isEmpty) that - else that match { - case that: ByteArrayIterator => - if ((this.array eq that.array) && (this.until == that.from)) { - this.until = that.until - that.clear() - this - } else { - val result = MultiByteArrayIterator(List(this, that)) - this.clear() - result - } - case that: MultiByteArrayIterator => this ++: that - } + else + that match { + case that: ByteArrayIterator => + if ((this.array eq that.array) && (this.until == that.from)) { + this.until = that.until + that.clear() + this + } else { + val result = MultiByteArrayIterator(List(this, that)) + this.clear() + result + } + case that: MultiByteArrayIterator => this ++: that + } case _ => super.++(that) } @@ -85,7 +89,11 @@ object ByteIterator { final override def dropWhile(p: Byte => Boolean): this.type = { var stop = false while (!stop && hasNext) { - if (p(array(from))) { from = from + 1 } else { stop = true } + if (p(array(from))) { + from = 
from + 1 + } else { + stop = true + } } this } @@ -113,20 +121,25 @@ object ByteIterator { private def wrappedByteBuffer: ByteBuffer = ByteBuffer.wrap(array, from, len).asReadOnlyBuffer - def getShorts(xs: Array[Short], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asShortBuffer.get(xs, offset, n); drop(2 * n) } + def getShorts(xs: Array[Short], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asShortBuffer.get(xs, offset, n); drop(2 * n) + } - def getInts(xs: Array[Int], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asIntBuffer.get(xs, offset, n); drop(4 * n) } + def getInts(xs: Array[Int], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asIntBuffer.get(xs, offset, n); drop(4 * n) + } - def getLongs(xs: Array[Long], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asLongBuffer.get(xs, offset, n); drop(8 * n) } + def getLongs(xs: Array[Long], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asLongBuffer.get(xs, offset, n); drop(8 * n) + } - def getFloats(xs: Array[Float], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asFloatBuffer.get(xs, offset, n); drop(4 * n) } + def getFloats(xs: Array[Float], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asFloatBuffer.get(xs, offset, n); drop(4 * n) + } - def getDoubles(xs: Array[Double], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asDoubleBuffer.get(xs, offset, n); drop(8 * n) } + def getDoubles(xs: Array[Double], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + 
wrappedByteBuffer.order(byteOrder).asDoubleBuffer.get(xs, offset, n); drop(8 * n) + } def copyToBuffer(buffer: ByteBuffer): Int = { val copyLength = math.min(buffer.remaining, len) @@ -300,7 +313,7 @@ object ByteIterator { } override def foreach[@specialized U](f: Byte => U): Unit = { - iterators foreach { _ foreach f } + iterators.foreach { _.foreach(f) } clear() } @@ -313,8 +326,10 @@ object ByteIterator { } } - @tailrec protected final def getToArray[A](xs: Array[A], offset: Int, n: Int, elemSize: Int)(getSingle: => A)(getMult: (Array[A], Int, Int) => Unit): this.type = - if (n <= 0) this else { + @tailrec protected final def getToArray[A](xs: Array[A], offset: Int, n: Int, elemSize: Int)(getSingle: => A)( + getMult: (Array[A], Int, Int) => Unit): this.type = + if (n <= 0) this + else { if (isEmpty) EmptyImmutableSeq.iterator.next() val nDone = if (current.len >= elemSize) { val nCurrent = math.min(n, current.len / elemSize) @@ -387,7 +402,6 @@ object ByteIterator { /** * An iterator over a ByteString. */ - abstract class ByteIterator extends BufferedIterator[Byte] { def len: Int @@ -397,24 +411,28 @@ abstract class ByteIterator extends BufferedIterator[Byte] { protected def clear(): Unit - def ++(that: TraversableOnce[Byte]): ByteIterator = if (that.isEmpty) this else ByteIterator.ByteArrayIterator(that.toArray) + def ++(that: TraversableOnce[Byte]): ByteIterator = + if (that.isEmpty) this else ByteIterator.ByteArrayIterator(that.toArray) // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. 
- override def clone: ByteIterator = throw new UnsupportedOperationException("Method clone is not implemented in ByteIterator") + override def clone: ByteIterator = + throw new UnsupportedOperationException("Method clone is not implemented in ByteIterator") override def duplicate: (ByteIterator, ByteIterator) = (this, clone) // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. - override def take(n: Int): this.type = throw new UnsupportedOperationException("Method take is not implemented in ByteIterator") + override def take(n: Int): this.type = + throw new UnsupportedOperationException("Method take is not implemented in ByteIterator") // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. - override def drop(n: Int): this.type = throw new UnsupportedOperationException("Method drop is not implemented in ByteIterator") + override def drop(n: Int): this.type = + throw new UnsupportedOperationException("Method drop is not implemented in ByteIterator") override def slice(from: Int, until: Int): this.type = { if (from > 0) drop(from).take(until - from) @@ -424,12 +442,14 @@ abstract class ByteIterator extends BufferedIterator[Byte] { // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. - override def takeWhile(p: Byte => Boolean): this.type = throw new UnsupportedOperationException("Method takeWhile is not implemented in ByteIterator") + override def takeWhile(p: Byte => Boolean): this.type = + throw new UnsupportedOperationException("Method takeWhile is not implemented in ByteIterator") // *must* be overridden by derived classes. 
This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. - override def dropWhile(p: Byte => Boolean): this.type = throw new UnsupportedOperationException("Method dropWhile is not implemented in ByteIterator") + override def dropWhile(p: Byte => Boolean): this.type = + throw new UnsupportedOperationException("Method dropWhile is not implemented in ByteIterator") override def span(p: Byte => Boolean): (ByteIterator, ByteIterator) = { val that = clone @@ -442,7 +462,11 @@ abstract class ByteIterator extends BufferedIterator[Byte] { override def indexWhere(p: Byte => Boolean, from: Int): Int = { var index = from var found = false - while (!found && hasNext) if (p(next())) { found = true } else { index += 1 } + while (!found && hasNext) if (p(next())) { + found = true + } else { + index += 1 + } if (found) index else -1 } @@ -461,7 +485,9 @@ abstract class ByteIterator extends BufferedIterator[Byte] { override def foldLeft[@specialized B](z: B)(op: (B, Byte) => B): B = { var acc = z - foreach { byte => acc = op(acc, byte) } + foreach { byte => + acc = op(acc, byte) + } acc } @@ -493,14 +519,14 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getInt(implicit byteOrder: ByteOrder): Int = { if (byteOrder == ByteOrder.BIG_ENDIAN) ((next() & 0xff) << 24 - | (next() & 0xff) << 16 - | (next() & 0xff) << 8 - | (next() & 0xff) << 0) + | (next() & 0xff) << 16 + | (next() & 0xff) << 8 + | (next() & 0xff) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) ((next() & 0xff) << 0 - | (next() & 0xff) << 8 - | (next() & 0xff) << 16 - | (next() & 0xff) << 24) + | (next() & 0xff) << 8 + | (next() & 0xff) << 16 + | (next() & 0xff) << 24) else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -510,22 +536,22 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getLong(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) ((next().toLong 
& 0xff) << 56 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 32 - | (next().toLong & 0xff) << 24 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 0) + | (next().toLong & 0xff) << 48 + | (next().toLong & 0xff) << 40 + | (next().toLong & 0xff) << 32 + | (next().toLong & 0xff) << 24 + | (next().toLong & 0xff) << 16 + | (next().toLong & 0xff) << 8 + | (next().toLong & 0xff) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) ((next().toLong & 0xff) << 0 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 24 - | (next().toLong & 0xff) << 32 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 56) + | (next().toLong & 0xff) << 8 + | (next().toLong & 0xff) << 16 + | (next().toLong & 0xff) << 24 + | (next().toLong & 0xff) << 32 + | (next().toLong & 0xff) << 40 + | (next().toLong & 0xff) << 48 + | (next().toLong & 0xff) << 56) else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -536,11 +562,11 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getLongPart(n: Int)(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) { var x = 0L - (1 to n) foreach (_ => x = (x << 8) | (next() & 0xff)) + (1 to n).foreach(_ => x = (x << 8) | (next() & 0xff)) x } else if (byteOrder == ByteOrder.LITTLE_ENDIAN) { var x = 0L - (0 until n) foreach (i => x |= (next() & 0xff) << 8 * i) + (0 until n).foreach(i => x |= (next() & 0xff) << 8 * i) x } else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } diff --git a/akka-actor/src/main/scala-2.13+/akka/util/ByteIterator.scala b/akka-actor/src/main/scala-2.13+/akka/util/ByteIterator.scala index 450a098aac..5229ffdd12 100644 --- a/akka-actor/src/main/scala-2.13+/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala-2.13+/akka/util/ByteIterator.scala @@ -26,7 +26,8 @@ object 
ByteIterator { val empty: ByteArrayIterator = apply(Array.emptyByteArray) } - class ByteArrayIterator private (private var array: Array[Byte], private var from: Int, private var until: Int) extends ByteIterator { + class ByteArrayIterator private (private var array: Array[Byte], private var from: Int, private var until: Int) + extends ByteIterator { iterator => @inline final def len: Int = until - from @@ -37,7 +38,9 @@ object ByteIterator { final def next(): Byte = { if (!hasNext) EmptyImmutableSeq.iterator.next() - else { val i = from; from = from + 1; array(i) } + else { + val i = from; from = from + 1; array(i) + } } def clear(): Unit = { this.array = Array.emptyByteArray; from = 0; until = from } @@ -48,19 +51,20 @@ object ByteIterator { case that: ByteIterator => if (that.isEmpty) this else if (this.isEmpty) that - else that match { - case that: ByteArrayIterator => - if ((this.array eq that.array) && (this.until == that.from)) { - this.until = that.until - that.clear() - this - } else { - val result = MultiByteArrayIterator(List(this, that)) - this.clear() - result - } - case that: MultiByteArrayIterator => this ++: that - } + else + that match { + case that: ByteArrayIterator => + if ((this.array eq that.array) && (this.until == that.from)) { + this.until = that.until + that.clear() + this + } else { + val result = MultiByteArrayIterator(List(this, that)) + this.clear() + result + } + case that: MultiByteArrayIterator => this ++: that + } case _ => super.++(that) } @@ -86,7 +90,11 @@ object ByteIterator { final override def dropWhile(p: Byte => Boolean): this.type = { var stop = false while (!stop && hasNext) { - if (p(array(from))) { from = from + 1 } else { stop = true } + if (p(array(from))) { + from = from + 1 + } else { + stop = true + } } this } @@ -116,20 +124,25 @@ object ByteIterator { private def wrappedByteBuffer: ByteBuffer = ByteBuffer.wrap(array, from, len).asReadOnlyBuffer - def getShorts(xs: Array[Short], offset: Int, n: Int)(implicit 
byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asShortBuffer.get(xs, offset, n); drop(2 * n) } + def getShorts(xs: Array[Short], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asShortBuffer.get(xs, offset, n); drop(2 * n) + } - def getInts(xs: Array[Int], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asIntBuffer.get(xs, offset, n); drop(4 * n) } + def getInts(xs: Array[Int], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asIntBuffer.get(xs, offset, n); drop(4 * n) + } - def getLongs(xs: Array[Long], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asLongBuffer.get(xs, offset, n); drop(8 * n) } + def getLongs(xs: Array[Long], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asLongBuffer.get(xs, offset, n); drop(8 * n) + } - def getFloats(xs: Array[Float], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asFloatBuffer.get(xs, offset, n); drop(4 * n) } + def getFloats(xs: Array[Float], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asFloatBuffer.get(xs, offset, n); drop(4 * n) + } - def getDoubles(xs: Array[Double], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = - { wrappedByteBuffer.order(byteOrder).asDoubleBuffer.get(xs, offset, n); drop(8 * n) } + def getDoubles(xs: Array[Double], offset: Int, n: Int)(implicit byteOrder: ByteOrder): this.type = { + wrappedByteBuffer.order(byteOrder).asDoubleBuffer.get(xs, offset, n); drop(8 * n) + } def copyToBuffer(buffer: ByteBuffer): Int = { val copyLength = math.min(buffer.remaining, len) @@ -304,7 +317,7 @@ object ByteIterator { } override def foreach[@specialized U](f: Byte => U): Unit = { - 
iterators foreach { _ foreach f } + iterators.foreach { _.foreach(f) } clear() } @@ -317,8 +330,10 @@ object ByteIterator { } } - @tailrec protected final def getToArray[A](xs: Array[A], offset: Int, n: Int, elemSize: Int)(getSingle: => A)(getMult: (Array[A], Int, Int) => Unit): this.type = - if (n <= 0) this else { + @tailrec protected final def getToArray[A](xs: Array[A], offset: Int, n: Int, elemSize: Int)(getSingle: => A)( + getMult: (Array[A], Int, Int) => Unit): this.type = + if (n <= 0) this + else { if (isEmpty) EmptyImmutableSeq.iterator.next() val nDone = if (current.len >= elemSize) { val nCurrent = math.min(n, current.len / elemSize) @@ -400,24 +415,28 @@ abstract class ByteIterator extends BufferedIterator[Byte] { protected def clear(): Unit - def ++(that: IterableOnce[Byte]): ByteIterator = if (that.isEmpty) this else ByteIterator.ByteArrayIterator(that.toArray) + def ++(that: IterableOnce[Byte]): ByteIterator = + if (that.isEmpty) this else ByteIterator.ByteArrayIterator(that.toArray) // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. - override def clone: ByteIterator = throw new UnsupportedOperationException("Method clone is not implemented in ByteIterator") + override def clone: ByteIterator = + throw new UnsupportedOperationException("Method clone is not implemented in ByteIterator") override def duplicate: (ByteIterator, ByteIterator) = (this, clone) // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. - override def take(n: Int): this.type = throw new UnsupportedOperationException("Method take is not implemented in ByteIterator") + override def take(n: Int): this.type = + throw new UnsupportedOperationException("Method take is not implemented in ByteIterator") // *must* be overridden by derived classes. 
This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. - override def drop(n: Int): this.type = throw new UnsupportedOperationException("Method drop is not implemented in ByteIterator") + override def drop(n: Int): this.type = + throw new UnsupportedOperationException("Method drop is not implemented in ByteIterator") override def slice(from: Int, until: Int): this.type = { if (from > 0) drop(from).take(until - from) @@ -427,12 +446,14 @@ abstract class ByteIterator extends BufferedIterator[Byte] { // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. - override def takeWhile(p: Byte => Boolean): this.type = throw new UnsupportedOperationException("Method takeWhile is not implemented in ByteIterator") + override def takeWhile(p: Byte => Boolean): this.type = + throw new UnsupportedOperationException("Method takeWhile is not implemented in ByteIterator") // *must* be overridden by derived classes. This construction is necessary // to specialize the return type, as the method is already implemented in // the parent class. 
- override def dropWhile(p: Byte => Boolean): this.type = throw new UnsupportedOperationException("Method dropWhile is not implemented in ByteIterator") + override def dropWhile(p: Byte => Boolean): this.type = + throw new UnsupportedOperationException("Method dropWhile is not implemented in ByteIterator") override def span(p: Byte => Boolean): (ByteIterator, ByteIterator) = { val that = clone @@ -444,7 +465,11 @@ abstract class ByteIterator extends BufferedIterator[Byte] { override def indexWhere(p: Byte => Boolean, from: Int = 0): Int = { var index = from var found = false - while (!found && hasNext) if (p(next())) { found = true } else { index += 1 } + while (!found && hasNext) if (p(next())) { + found = true + } else { + index += 1 + } if (found) index else -1 } @@ -463,7 +488,9 @@ abstract class ByteIterator extends BufferedIterator[Byte] { override def foldLeft[@specialized B](z: B)(op: (B, Byte) => B): B = { var acc = z - foreach { byte => acc = op(acc, byte) } + foreach { byte => + acc = op(acc, byte) + } acc } @@ -495,14 +522,14 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getInt(implicit byteOrder: ByteOrder): Int = { if (byteOrder == ByteOrder.BIG_ENDIAN) ((next() & 0xff) << 24 - | (next() & 0xff) << 16 - | (next() & 0xff) << 8 - | (next() & 0xff) << 0) + | (next() & 0xff) << 16 + | (next() & 0xff) << 8 + | (next() & 0xff) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) ((next() & 0xff) << 0 - | (next() & 0xff) << 8 - | (next() & 0xff) << 16 - | (next() & 0xff) << 24) + | (next() & 0xff) << 8 + | (next() & 0xff) << 16 + | (next() & 0xff) << 24) else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -512,22 +539,22 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getLong(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) ((next().toLong & 0xff) << 56 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 32 - | 
(next().toLong & 0xff) << 24 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 0) + | (next().toLong & 0xff) << 48 + | (next().toLong & 0xff) << 40 + | (next().toLong & 0xff) << 32 + | (next().toLong & 0xff) << 24 + | (next().toLong & 0xff) << 16 + | (next().toLong & 0xff) << 8 + | (next().toLong & 0xff) << 0) else if (byteOrder == ByteOrder.LITTLE_ENDIAN) ((next().toLong & 0xff) << 0 - | (next().toLong & 0xff) << 8 - | (next().toLong & 0xff) << 16 - | (next().toLong & 0xff) << 24 - | (next().toLong & 0xff) << 32 - | (next().toLong & 0xff) << 40 - | (next().toLong & 0xff) << 48 - | (next().toLong & 0xff) << 56) + | (next().toLong & 0xff) << 8 + | (next().toLong & 0xff) << 16 + | (next().toLong & 0xff) << 24 + | (next().toLong & 0xff) << 32 + | (next().toLong & 0xff) << 40 + | (next().toLong & 0xff) << 48 + | (next().toLong & 0xff) << 56) else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } @@ -538,11 +565,11 @@ abstract class ByteIterator extends BufferedIterator[Byte] { def getLongPart(n: Int)(implicit byteOrder: ByteOrder): Long = { if (byteOrder == ByteOrder.BIG_ENDIAN) { var x = 0L - (1 to n) foreach (_ => x = (x << 8) | (next() & 0xff)) + (1 to n).foreach(_ => x = (x << 8) | (next() & 0xff)) x } else if (byteOrder == ByteOrder.LITTLE_ENDIAN) { var x = 0L - (0 until n) foreach (i => x |= (next() & 0xff) << 8 * i) + (0 until n).foreach(i => x |= (next() & 0xff) << 8 * i) x } else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } diff --git a/akka-actor/src/main/scala-2.13+/akka/util/ByteString.scala b/akka-actor/src/main/scala-2.13+/akka/util/ByteString.scala index 12b323163f..2d7dbdd138 100644 --- a/akka-actor/src/main/scala-2.13+/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala-2.13+/akka/util/ByteString.scala @@ -12,7 +12,7 @@ import java.lang.{ Iterable => JIterable } import scala.annotation.{ tailrec, varargs } import scala.collection.mutable.{ Builder, 
WrappedArray } -import scala.collection.{ mutable, immutable } +import scala.collection.{ immutable, mutable } import scala.collection.immutable.{ IndexedSeq, IndexedSeqOps, StrictOptimizedSeqOps, VectorBuilder } import scala.collection.generic.CanBuildFrom import scala.reflect.ClassTag @@ -273,7 +273,9 @@ object ByteString { /** * An unfragmented ByteString. */ - final class ByteString1 private (private val bytes: Array[Byte], private val startIndex: Int, val length: Int) extends ByteString with Serializable { + final class ByteString1 private (private val bytes: Array[Byte], private val startIndex: Int, val length: Int) + extends ByteString + with Serializable { private def this(bytes: Array[Byte]) = this(bytes, 0, bytes.length) @@ -360,14 +362,15 @@ object ByteString { def ++(that: ByteString): ByteString = { if (that.isEmpty) this else if (this.isEmpty) that - else that match { - case b: ByteString1C => ByteStrings(this, b.toByteString1) - case b: ByteString1 => - if ((bytes eq b.bytes) && (startIndex + length == b.startIndex)) - new ByteString1(bytes, startIndex, length + b.length) - else ByteStrings(this, b) - case bs: ByteStrings => ByteStrings(this, bs) - } + else + that match { + case b: ByteString1C => ByteStrings(this, b.toByteString1) + case b: ByteString1 => + if ((bytes eq b.bytes) && (startIndex + length == b.startIndex)) + new ByteString1(bytes, startIndex, length + b.length) + else ByteStrings(this, b) + case bs: ByteStrings => ByteStrings(this, bs) + } } override def indexOf[B >: Byte](elem: B): Int = indexOf(elem, 0) @@ -388,7 +391,8 @@ object ByteString { } private[akka] object ByteStrings extends Companion { - def apply(bytestrings: Vector[ByteString1]): ByteString = new ByteStrings(bytestrings, (0 /: bytestrings)(_ + _.length)) + def apply(bytestrings: Vector[ByteString1]): ByteString = + new ByteStrings(bytestrings, (0 /: bytestrings)(_ + _.length)) def apply(bytestrings: Vector[ByteString1], length: Int): ByteString = new 
ByteStrings(bytestrings, length) @@ -424,7 +428,8 @@ object ByteString { def compare(b1: ByteString, b2: ByteString): Int = if (b1.isEmpty) if (b2.isEmpty) 0 else 2 - else if (b2.isEmpty) 1 else 3 + else if (b2.isEmpty) 1 + else 3 val SerializationIdentity = 2.toByte @@ -449,7 +454,9 @@ object ByteString { /** * A ByteString with 2 or more fragments. */ - final class ByteStrings private (private[akka] val bytestrings: Vector[ByteString1], val length: Int) extends ByteString with Serializable { + final class ByteStrings private (private[akka] val bytestrings: Vector[ByteString1], val length: Int) + extends ByteString + with Serializable { if (bytestrings.isEmpty) throw new IllegalArgumentException("bytestrings must not be empty") if (bytestrings.head.isEmpty) throw new IllegalArgumentException("bytestrings.head must not be empty") @@ -467,16 +474,17 @@ object ByteString { /** Avoid `iterator` in performance sensitive code, call ops directly on ByteString instead */ override def iterator: ByteIterator.MultiByteArrayIterator = - ByteIterator.MultiByteArrayIterator(bytestrings.toStream map { _.iterator }) + ByteIterator.MultiByteArrayIterator(bytestrings.toStream.map { _.iterator }) def ++(that: ByteString): ByteString = { if (that.isEmpty) this else if (this.isEmpty) that - else that match { - case b: ByteString1C => ByteStrings(this, b.toByteString1) - case b: ByteString1 => ByteStrings(this, b) - case bs: ByteStrings => ByteStrings(this, bs) - } + else + that match { + case b: ByteString1C => ByteStrings(this, b.toByteString1) + case b: ByteString1 => ByteStrings(this, b) + case bs: ByteStrings => ByteStrings(this, bs) + } } private[akka] def byteStringCompanion = ByteStrings @@ -496,7 +504,7 @@ object ByteString { else { val ar = new Array[Byte](length) var pos = 0 - bytestrings foreach { b => + bytestrings.foreach { b => b.copyToArray(ar, pos, b.length) pos += b.length } @@ -506,7 +514,7 @@ object ByteString { def asByteBuffer: ByteBuffer = compact.asByteBuffer - 
def asByteBuffers: scala.collection.immutable.Iterable[ByteBuffer] = bytestrings map { _.asByteBuffer } + def asByteBuffers: scala.collection.immutable.Iterable[ByteBuffer] = bytestrings.map { _.asByteBuffer } def decodeString(charset: String): String = compact.decodeString(charset) @@ -551,7 +559,9 @@ object ByteString { else if (remainingToDrop == 0) new ByteStrings(bytestrings.dropRight(fullDrops), length - n) else - new ByteStrings(bytestrings.dropRight(fullDrops + 1) :+ bytestrings(byteStringsSize - fullDrops - 1).dropRight1(remainingToDrop), length - n) + new ByteStrings(bytestrings.dropRight(fullDrops + 1) :+ bytestrings(byteStringsSize - fullDrops - 1) + .dropRight1(remainingToDrop), + length - n) } else { dropRightWithFullDropsAndRemainig(fullDrops + 1, remainingToDrop - bs.length) } @@ -639,9 +649,10 @@ object ByteString { } private[akka] object Companion { - private val companionMap = Seq(ByteString1, ByteString1C, ByteStrings). - map(x => x.SerializationIdentity -> x).toMap. 
- withDefault(x => throw new IllegalArgumentException("Invalid serialization id " + x)) + private val companionMap = Seq(ByteString1, ByteString1C, ByteStrings) + .map(x => x.SerializationIdentity -> x) + .toMap + .withDefault(x => throw new IllegalArgumentException("Invalid serialization id " + x)) def apply(from: Byte): Companion = companionMap(from) } @@ -661,9 +672,9 @@ object ByteString { * TODO: Add performance characteristics */ sealed abstract class ByteString - extends IndexedSeq[Byte] - with IndexedSeqOps[Byte, IndexedSeq, ByteString] - with StrictOptimizedSeqOps[Byte, IndexedSeq, ByteString] { + extends IndexedSeq[Byte] + with IndexedSeqOps[Byte, IndexedSeq, ByteString] + with StrictOptimizedSeqOps[Byte, IndexedSeq, ByteString] { override protected def fromSpecific(coll: IterableOnce[Byte]): ByteString = ByteString(coll) override protected def newSpecificBuilder: mutable.Builder[Byte, ByteString] = ByteString.newBuilder @@ -685,7 +696,8 @@ sealed abstract class ByteString // a parent trait. // // Avoid `iterator` in performance sensitive code, call ops directly on ByteString instead - override def iterator: ByteIterator = throw new UnsupportedOperationException("Method iterator is not implemented in ByteString") + override def iterator: ByteIterator = + throw new UnsupportedOperationException("Method iterator is not implemented in ByteString") override def head: Byte = apply(0) override def tail: ByteString = drop(1) @@ -693,23 +705,28 @@ sealed abstract class ByteString override def init: ByteString = dropRight(1) // *must* be overridden by derived classes. 
- override def take(n: Int): ByteString = throw new UnsupportedOperationException("Method take is not implemented in ByteString") + override def take(n: Int): ByteString = + throw new UnsupportedOperationException("Method take is not implemented in ByteString") override def takeRight(n: Int): ByteString = slice(length - n, length) // these methods are optimized in derived classes utilising the maximum knowlage about data layout available to them: // *must* be overridden by derived classes. - override def slice(from: Int, until: Int): ByteString = throw new UnsupportedOperationException("Method slice is not implemented in ByteString") + override def slice(from: Int, until: Int): ByteString = + throw new UnsupportedOperationException("Method slice is not implemented in ByteString") // *must* be overridden by derived classes. - override def drop(n: Int): ByteString = throw new UnsupportedOperationException("Method drop is not implemented in ByteString") + override def drop(n: Int): ByteString = + throw new UnsupportedOperationException("Method drop is not implemented in ByteString") // *must* be overridden by derived classes. 
- override def dropRight(n: Int): ByteString = throw new UnsupportedOperationException("Method dropRight is not implemented in ByteString") + override def dropRight(n: Int): ByteString = + throw new UnsupportedOperationException("Method dropRight is not implemented in ByteString") override def takeWhile(p: Byte => Boolean): ByteString = iterator.takeWhile(p).toByteString override def dropWhile(p: Byte => Boolean): ByteString = iterator.dropWhile(p).toByteString - override def span(p: Byte => Boolean): (ByteString, ByteString) = - { val (a, b) = iterator.span(p); (a.toByteString, b.toByteString) } + override def span(p: Byte => Boolean): (ByteString, ByteString) = { + val (a, b) = iterator.span(p); (a.toByteString, b.toByteString) + } override def splitAt(n: Int): (ByteString, ByteString) = (take(n), drop(n)) @@ -723,9 +740,7 @@ sealed abstract class ByteString throw new IllegalArgumentException(s"size=$size must be positive") } - Iterator.iterate(this)(_.drop(size)) - .takeWhile(_.nonEmpty) - .map(_.take(size)) + Iterator.iterate(this)(_.drop(size)).takeWhile(_.nonEmpty).map(_.take(size)) } override def toString(): String = { @@ -747,7 +762,7 @@ sealed abstract class ByteString // override def copyToArray[B >: Byte](xs: Array[B], start: Int, len: Int): Unit = // iterator.copyToArray(xs, start, len) - override def foreach[@specialized U](f: Byte => U): Unit = iterator foreach f + override def foreach[@specialized U](f: Byte => U): Unit = iterator.foreach(f) private[akka] def writeToOutputStream(os: ObjectOutputStream): Unit @@ -769,7 +784,8 @@ sealed abstract class ByteString * @return the number of bytes actually copied */ // *must* be overridden by derived classes. 
- def copyToBuffer(buffer: ByteBuffer): Int = throw new UnsupportedOperationException("Method copyToBuffer is not implemented in ByteString") + def copyToBuffer(buffer: ByteBuffer): Int = + throw new UnsupportedOperationException("Method copyToBuffer is not implemented in ByteString") /** * Create a new ByteString with all contents compacted into a single, @@ -834,12 +850,13 @@ sealed abstract class ByteString /** * map method that will automatically cast Int back into Byte. */ - final def mapI(f: Byte => Int): ByteString = map(f andThen (_.toByte)) + final def mapI(f: Byte => Int): ByteString = map(f.andThen(_.toByte)) def map[A](f: Byte => Byte): ByteString = fromSpecific(super.map(f)) } object CompactByteString { + /** * Creates a new CompactByteString by copying a byte array. */ @@ -938,7 +955,7 @@ sealed abstract class CompactByteString extends ByteString with Serializable { final class ByteStringBuilder extends Builder[Byte, ByteString] { builder => - import ByteString.{ ByteString1C, ByteString1, ByteStrings } + import ByteString.{ ByteString1, ByteString1C, ByteStrings } private var _length: Int = 0 private val _builder: VectorBuilder[ByteString1] = new VectorBuilder[ByteString1]() private var _temp: Array[Byte] = _ @@ -1122,9 +1139,13 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { fillArray(n) { (target, offset) => if (byteOrder == ByteOrder.BIG_ENDIAN) { val start = n * 8 - 8 - (0 until n) foreach { i => target(offset + i) = (x >>> start - 8 * i).toByte } + (0 until n).foreach { i => + target(offset + i) = (x >>> start - 8 * i).toByte + } } else if (byteOrder == ByteOrder.LITTLE_ENDIAN) { - (0 until n) foreach { i => target(offset + i) = (x >>> 8 * i).toByte } + (0 until n).foreach { i => + target(offset + i) = (x >>> 8 * i).toByte + } } else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } } diff --git a/akka-actor/src/main/scala-2.13+/akka/util/ccompat/imm/package.scala 
b/akka-actor/src/main/scala-2.13+/akka/util/ccompat/imm/package.scala index 963f63c620..a7f35ad750 100644 --- a/akka-actor/src/main/scala-2.13+/akka/util/ccompat/imm/package.scala +++ b/akka-actor/src/main/scala-2.13+/akka/util/ccompat/imm/package.scala @@ -6,5 +6,4 @@ package akka.util.ccompat import scala.collection.immutable -package object imm { -} +package object imm {} diff --git a/akka-actor/src/main/scala-2.13-/akka/util/ByteString.scala b/akka-actor/src/main/scala-2.13-/akka/util/ByteString.scala index da834ac606..1823ec0828 100644 --- a/akka-actor/src/main/scala-2.13-/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala-2.13-/akka/util/ByteString.scala @@ -267,7 +267,9 @@ object ByteString { /** * An unfragmented ByteString. */ - final class ByteString1 private (private val bytes: Array[Byte], private val startIndex: Int, val length: Int) extends ByteString with Serializable { + final class ByteString1 private (private val bytes: Array[Byte], private val startIndex: Int, val length: Int) + extends ByteString + with Serializable { private def this(bytes: Array[Byte]) = this(bytes, 0, bytes.length) @@ -354,14 +356,15 @@ object ByteString { def ++(that: ByteString): ByteString = { if (that.isEmpty) this else if (this.isEmpty) that - else that match { - case b: ByteString1C => ByteStrings(this, b.toByteString1) - case b: ByteString1 => - if ((bytes eq b.bytes) && (startIndex + length == b.startIndex)) - new ByteString1(bytes, startIndex, length + b.length) - else ByteStrings(this, b) - case bs: ByteStrings => ByteStrings(this, bs) - } + else + that match { + case b: ByteString1C => ByteStrings(this, b.toByteString1) + case b: ByteString1 => + if ((bytes eq b.bytes) && (startIndex + length == b.startIndex)) + new ByteString1(bytes, startIndex, length + b.length) + else ByteStrings(this, b) + case bs: ByteStrings => ByteStrings(this, bs) + } } override def indexOf[B >: Byte](elem: B): Int = indexOf(elem, 0) @@ -382,7 +385,8 @@ object ByteString { } 
private[akka] object ByteStrings extends Companion { - def apply(bytestrings: Vector[ByteString1]): ByteString = new ByteStrings(bytestrings, (0 /: bytestrings)(_ + _.length)) + def apply(bytestrings: Vector[ByteString1]): ByteString = + new ByteStrings(bytestrings, (0 /: bytestrings)(_ + _.length)) def apply(bytestrings: Vector[ByteString1], length: Int): ByteString = new ByteStrings(bytestrings, length) @@ -418,7 +422,8 @@ object ByteString { def compare(b1: ByteString, b2: ByteString): Int = if (b1.isEmpty) if (b2.isEmpty) 0 else 2 - else if (b2.isEmpty) 1 else 3 + else if (b2.isEmpty) 1 + else 3 val SerializationIdentity = 2.toByte @@ -443,7 +448,9 @@ object ByteString { /** * A ByteString with 2 or more fragments. */ - final class ByteStrings private (private[akka] val bytestrings: Vector[ByteString1], val length: Int) extends ByteString with Serializable { + final class ByteStrings private (private[akka] val bytestrings: Vector[ByteString1], val length: Int) + extends ByteString + with Serializable { if (bytestrings.isEmpty) throw new IllegalArgumentException("bytestrings must not be empty") if (bytestrings.head.isEmpty) throw new IllegalArgumentException("bytestrings.head must not be empty") @@ -461,16 +468,17 @@ object ByteString { /** Avoid `iterator` in performance sensitive code, call ops directly on ByteString instead */ override def iterator: ByteIterator.MultiByteArrayIterator = - ByteIterator.MultiByteArrayIterator(bytestrings.toStream map { _.iterator }) + ByteIterator.MultiByteArrayIterator(bytestrings.toStream.map { _.iterator }) def ++(that: ByteString): ByteString = { if (that.isEmpty) this else if (this.isEmpty) that - else that match { - case b: ByteString1C => ByteStrings(this, b.toByteString1) - case b: ByteString1 => ByteStrings(this, b) - case bs: ByteStrings => ByteStrings(this, bs) - } + else + that match { + case b: ByteString1C => ByteStrings(this, b.toByteString1) + case b: ByteString1 => ByteStrings(this, b) + case bs: ByteStrings => 
ByteStrings(this, bs) + } } private[akka] def byteStringCompanion = ByteStrings @@ -490,7 +498,7 @@ object ByteString { else { val ar = new Array[Byte](length) var pos = 0 - bytestrings foreach { b => + bytestrings.foreach { b => b.copyToArray(ar, pos, b.length) pos += b.length } @@ -500,7 +508,7 @@ object ByteString { def asByteBuffer: ByteBuffer = compact.asByteBuffer - def asByteBuffers: scala.collection.immutable.Iterable[ByteBuffer] = bytestrings map { _.asByteBuffer } + def asByteBuffers: scala.collection.immutable.Iterable[ByteBuffer] = bytestrings.map { _.asByteBuffer } def decodeString(charset: String): String = compact.decodeString(charset) @@ -545,7 +553,9 @@ object ByteString { else if (remainingToDrop == 0) new ByteStrings(bytestrings.dropRight(fullDrops), length - n) else - new ByteStrings(bytestrings.dropRight(fullDrops + 1) :+ bytestrings(byteStringsSize - fullDrops - 1).dropRight1(remainingToDrop), length - n) + new ByteStrings(bytestrings.dropRight(fullDrops + 1) :+ bytestrings(byteStringsSize - fullDrops - 1) + .dropRight1(remainingToDrop), + length - n) } else { dropRightWithFullDropsAndRemainig(fullDrops + 1, remainingToDrop - bs.length) } @@ -633,9 +643,10 @@ object ByteString { } private[akka] object Companion { - private val companionMap = Seq(ByteString1, ByteString1C, ByteStrings). - map(x => x.SerializationIdentity -> x).toMap. - withDefault(x => throw new IllegalArgumentException("Invalid serialization id " + x)) + private val companionMap = Seq(ByteString1, ByteString1C, ByteStrings) + .map(x => x.SerializationIdentity -> x) + .toMap + .withDefault(x => throw new IllegalArgumentException("Invalid serialization id " + x)) def apply(from: Byte): Companion = companionMap(from) } @@ -668,7 +679,8 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz // a parent trait. 
// // Avoid `iterator` in performance sensitive code, call ops directly on ByteString instead - override def iterator: ByteIterator = throw new UnsupportedOperationException("Method iterator is not implemented in ByteString") + override def iterator: ByteIterator = + throw new UnsupportedOperationException("Method iterator is not implemented in ByteString") override def head: Byte = apply(0) override def tail: ByteString = drop(1) @@ -676,23 +688,28 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz override def init: ByteString = dropRight(1) // *must* be overridden by derived classes. - override def take(n: Int): ByteString = throw new UnsupportedOperationException("Method take is not implemented in ByteString") + override def take(n: Int): ByteString = + throw new UnsupportedOperationException("Method take is not implemented in ByteString") override def takeRight(n: Int): ByteString = slice(length - n, length) // these methods are optimized in derived classes utilising the maximum knowlage about data layout available to them: // *must* be overridden by derived classes. - override def slice(from: Int, until: Int): ByteString = throw new UnsupportedOperationException("Method slice is not implemented in ByteString") + override def slice(from: Int, until: Int): ByteString = + throw new UnsupportedOperationException("Method slice is not implemented in ByteString") // *must* be overridden by derived classes. - override def drop(n: Int): ByteString = throw new UnsupportedOperationException("Method drop is not implemented in ByteString") + override def drop(n: Int): ByteString = + throw new UnsupportedOperationException("Method drop is not implemented in ByteString") // *must* be overridden by derived classes. 
- override def dropRight(n: Int): ByteString = throw new UnsupportedOperationException("Method dropRight is not implemented in ByteString") + override def dropRight(n: Int): ByteString = + throw new UnsupportedOperationException("Method dropRight is not implemented in ByteString") override def takeWhile(p: Byte => Boolean): ByteString = iterator.takeWhile(p).toByteString override def dropWhile(p: Byte => Boolean): ByteString = iterator.dropWhile(p).toByteString - override def span(p: Byte => Boolean): (ByteString, ByteString) = - { val (a, b) = iterator.span(p); (a.toByteString, b.toByteString) } + override def span(p: Byte => Boolean): (ByteString, ByteString) = { + val (a, b) = iterator.span(p); (a.toByteString, b.toByteString) + } override def splitAt(n: Int): (ByteString, ByteString) = (take(n), drop(n)) @@ -706,9 +723,7 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz throw new IllegalArgumentException(s"size=$size must be positive") } - Iterator.iterate(this)(_.drop(size)) - .takeWhile(_.nonEmpty) - .map(_.take(size)) + Iterator.iterate(this)(_.drop(size)).takeWhile(_.nonEmpty).map(_.take(size)) } override def toString(): String = { @@ -730,7 +745,7 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz override def copyToArray[B >: Byte](xs: Array[B], start: Int, len: Int): Unit = iterator.copyToArray(xs, start, len) - override def foreach[@specialized U](f: Byte => U): Unit = iterator foreach f + override def foreach[@specialized U](f: Byte => U): Unit = iterator.foreach(f) private[akka] def writeToOutputStream(os: ObjectOutputStream): Unit @@ -752,8 +767,9 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz * @return the number of bytes actually copied */ // *must* be overridden by derived classes. 
- def copyToBuffer(buffer: ByteBuffer): Int = throw new UnsupportedOperationException( - s"Method copyToBuffer is not implemented in ByteString, failed for buffer $buffer") + def copyToBuffer(buffer: ByteBuffer): Int = + throw new UnsupportedOperationException( + s"Method copyToBuffer is not implemented in ByteString, failed for buffer $buffer") /** * Create a new ByteString with all contents compacted into a single, @@ -818,10 +834,11 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz /** * map method that will automatically cast Int back into Byte. */ - final def mapI(f: Byte => Int): ByteString = map(f andThen (_.toByte)) + final def mapI(f: Byte => Int): ByteString = map(f.andThen(_.toByte)) } object CompactByteString { + /** * Creates a new CompactByteString by copying a byte array. */ @@ -914,7 +931,7 @@ sealed abstract class CompactByteString extends ByteString with Serializable { final class ByteStringBuilder extends Builder[Byte, ByteString] { builder => - import ByteString.{ ByteString1C, ByteString1, ByteStrings } + import ByteString.{ ByteString1, ByteString1C, ByteStrings } private var _length: Int = 0 private val _builder: VectorBuilder[ByteString1] = new VectorBuilder[ByteString1]() private var _temp: Array[Byte] = _ @@ -1099,9 +1116,13 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { fillArray(n) { (target, offset) => if (byteOrder == ByteOrder.BIG_ENDIAN) { val start = n * 8 - 8 - (0 until n) foreach { i => target(offset + i) = (x >>> start - 8 * i).toByte } + (0 until n).foreach { i => + target(offset + i) = (x >>> start - 8 * i).toByte + } } else if (byteOrder == ByteOrder.LITTLE_ENDIAN) { - (0 until n) foreach { i => target(offset + i) = (x >>> 8 * i).toByte } + (0 until n).foreach { i => + target(offset + i) = (x >>> 8 * i).toByte + } } else throw new IllegalArgumentException("Unknown byte order " + byteOrder) } } diff --git a/akka-actor/src/main/scala-2.13-/akka/util/ccompat/package.scala 
b/akka-actor/src/main/scala-2.13-/akka/util/ccompat/package.scala index e6670db036..2b6be7e8d9 100644 --- a/akka-actor/src/main/scala-2.13-/akka/util/ccompat/package.scala +++ b/akka-actor/src/main/scala-2.13-/akka/util/ccompat/package.scala @@ -48,11 +48,12 @@ package object ccompat { } private[akka] implicit def genericCompanionToCBF[A, CC[X] <: GenTraversable[X]]( - fact: GenericCompanion[CC]): CanBuildFrom[Any, A, CC[A]] = + fact: GenericCompanion[CC]): CanBuildFrom[Any, A, CC[A]] = simpleCBF(fact.newBuilder[A]) - private[akka] implicit def sortedSetCompanionToCBF[A: Ordering, CC[X] <: c.SortedSet[X] with c.SortedSetLike[X, CC[X]]]( - fact: SortedSetFactory[CC]): CanBuildFrom[Any, A, CC[A]] = + private[akka] implicit def sortedSetCompanionToCBF[A: Ordering, + CC[X] <: c.SortedSet[X] with c.SortedSetLike[X, CC[X]]]( + fact: SortedSetFactory[CC]): CanBuildFrom[Any, A, CC[A]] = simpleCBF(fact.newBuilder[A]) private[ccompat] def build[T, CC](builder: m.Builder[T, CC], source: TraversableOnce[T]): CC = { diff --git a/akka-actor/src/main/scala-2.13/akka/compat/Future.scala b/akka-actor/src/main/scala-2.13/akka/compat/Future.scala index 047be51fa1..c3b4869f09 100644 --- a/akka-actor/src/main/scala-2.13/akka/compat/Future.scala +++ b/akka-actor/src/main/scala-2.13/akka/compat/Future.scala @@ -17,27 +17,33 @@ import scala.collection.immutable * Remove these classes as soon as support for Scala 2.11 is dropped! 
*/ @InternalApi private[akka] object Future { - def fold[T, R](futures: IterableOnce[SFuture[T]])(zero: R)(op: (R, T) => R)(implicit executor: ExecutionContext): SFuture[R] = { + def fold[T, R](futures: IterableOnce[SFuture[T]])(zero: R)(op: (R, T) => R)( + implicit executor: ExecutionContext): SFuture[R] = { // This will have performance implications since the elements are copied to a Vector SFuture.foldLeft[T, R](futures.to(immutable.Iterable))(zero)(op)(executor) } - def fold[T, R](futures: immutable.Iterable[SFuture[T]])(zero: R)(op: (R, T) => R)(implicit executor: ExecutionContext): SFuture[R] = + def fold[T, R](futures: immutable.Iterable[SFuture[T]])(zero: R)(op: (R, T) => R)( + implicit executor: ExecutionContext): SFuture[R] = SFuture.foldLeft[T, R](futures)(zero)(op)(executor) - def reduce[T, R >: T](futures: IterableOnce[SFuture[T]])(op: (R, T) => R)(implicit executor: ExecutionContext): SFuture[R] = { + def reduce[T, R >: T](futures: IterableOnce[SFuture[T]])(op: (R, T) => R)( + implicit executor: ExecutionContext): SFuture[R] = { // This will have performance implications since the elements are copied to a Vector SFuture.reduceLeft[T, R](futures.to(immutable.Iterable))(op)(executor) } - def reduce[T, R >: T](futures: immutable.Iterable[SFuture[T]])(op: (R, T) => R)(implicit executor: ExecutionContext): SFuture[R] = + def reduce[T, R >: T](futures: immutable.Iterable[SFuture[T]])(op: (R, T) => R)( + implicit executor: ExecutionContext): SFuture[R] = SFuture.reduceLeft[T, R](futures)(op)(executor) - def find[T](futures: IterableOnce[SFuture[T]])(p: T => Boolean)(implicit executor: ExecutionContext): SFuture[Option[T]] = { + def find[T](futures: IterableOnce[SFuture[T]])(p: T => Boolean)( + implicit executor: ExecutionContext): SFuture[Option[T]] = { // This will have performance implications since the elements are copied to a Vector SFuture.find[T](futures.to(immutable.Iterable))(p)(executor) } - def find[T](futures: immutable.Iterable[SFuture[T]])(p: T 
=> Boolean)(implicit executor: ExecutionContext): SFuture[Option[T]] = + def find[T](futures: immutable.Iterable[SFuture[T]])(p: T => Boolean)( + implicit executor: ExecutionContext): SFuture[Option[T]] = SFuture.find[T](futures)(p)(executor) } diff --git a/akka-actor/src/main/scala/akka/AkkaVersion.scala b/akka-actor/src/main/scala/akka/AkkaVersion.scala index a7ee6b9208..4cdc4e5396 100644 --- a/akka-actor/src/main/scala/akka/AkkaVersion.scala +++ b/akka-actor/src/main/scala/akka/AkkaVersion.scala @@ -9,6 +9,7 @@ import akka.annotation.InternalApi final class UnsupportedAkkaVersion private[akka] (msg: String) extends RuntimeException(msg) object AkkaVersion { + /** * Check that the version of Akka is a specific patch version or higher and throw an [[UnsupportedAkkaVersion]] * exception if the version requirement is not fulfilled. @@ -38,9 +39,10 @@ object AkkaVersion { if (mOrRc ne null) currentPatchStr.toInt - 1 else currentPatchStr.toInt if (requiredMajorStr.toInt != currentMajorStr.toInt || - requiredMinorStr.toInt > currentMinorStr.toInt || - (requiredMinorStr == currentMinorStr && requiredPatchStr.toInt > currentPatch)) - throw new UnsupportedAkkaVersion(s"Current version of Akka is [$currentVersion], but $libraryName requires version [$requiredVersion]") + requiredMinorStr.toInt > currentMinorStr.toInt || + (requiredMinorStr == currentMinorStr && requiredPatchStr.toInt > currentPatch)) + throw new UnsupportedAkkaVersion( + s"Current version of Akka is [$currentVersion], but $libraryName requires version [$requiredVersion]") case _ => throw new IllegalArgumentException(s"Required version string is invalid: [$requiredVersion]") } diff --git a/akka-actor/src/main/scala/akka/Done.scala b/akka-actor/src/main/scala/akka/Done.scala index e51bb5bf9d..eb2908604a 100644 --- a/akka-actor/src/main/scala/akka/Done.scala +++ b/akka-actor/src/main/scala/akka/Done.scala @@ -15,6 +15,7 @@ import akka.annotation.DoNotInherit @DoNotInherit sealed abstract class Done extends 
Serializable case object Done extends Done { + /** * Java API: the singleton instance */ diff --git a/akka-actor/src/main/scala/akka/Main.scala b/akka-actor/src/main/scala/akka/Main.scala index c8aefbc817..b3bc4b8086 100644 --- a/akka-actor/src/main/scala/akka/Main.scala +++ b/akka-actor/src/main/scala/akka/Main.scala @@ -39,7 +39,7 @@ object Main { } class Terminator(app: ActorRef) extends Actor with ActorLogging { - context watch app + context.watch(app) def receive = { case Terminated(_) => log.info("application supervisor has terminated, shutting down") diff --git a/akka-actor/src/main/scala/akka/NotUsed.scala b/akka-actor/src/main/scala/akka/NotUsed.scala index f1b7a1ddfb..3b5437831b 100644 --- a/akka-actor/src/main/scala/akka/NotUsed.scala +++ b/akka-actor/src/main/scala/akka/NotUsed.scala @@ -13,6 +13,7 @@ package akka sealed abstract class NotUsed case object NotUsed extends NotUsed { + /** * Java API: the singleton instance */ diff --git a/akka-actor/src/main/scala/akka/actor/AbstractActor.scala b/akka-actor/src/main/scala/akka/actor/AbstractActor.scala index 5fbe627ace..8264bd21b7 100644 --- a/akka-actor/src/main/scala/akka/actor/AbstractActor.scala +++ b/akka-actor/src/main/scala/akka/actor/AbstractActor.scala @@ -29,6 +29,7 @@ object AbstractActor { * extending `AbstractPartialFunction`. */ final class Receive(val onMessage: PartialFunction[Any, BoxedUnit]) { + /** * Composes this `Receive` with a fallback which gets applied * where this partial function is not defined. 
diff --git a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala index ce24be80ec..092f359113 100644 --- a/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala +++ b/akka-actor/src/main/scala/akka/actor/AbstractFSM.scala @@ -12,6 +12,7 @@ import scala.concurrent.duration.FiniteDuration * */ object AbstractFSM { + /** * A partial function value which does not match anything and can be used to * “reset” `whenUnhandled` and `onTermination` handlers. @@ -92,10 +93,9 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * @param stateTimeout default state timeout for this state * @param stateFunctionBuilder partial function builder describing response to input */ - final def when( - stateName: S, - stateTimeout: FiniteDuration, - stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit = + final def when(stateName: S, + stateTimeout: FiniteDuration, + stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit = super.when(stateName, stateTimeout)(stateFunctionBuilder.build()) /** @@ -108,10 +108,9 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * @param stateTimeout default state timeout for this state * @param stateFunctionBuilder partial function builder describing response to input */ - final def when( - stateName: S, - stateTimeout: java.time.Duration, - stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit = { + final def when(stateName: S, + stateTimeout: java.time.Duration, + stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit = { import JavaDurationConverters._ when(stateName, stateTimeout.asScala, stateFunctionBuilder) } @@ -200,7 +199,10 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEvent[ET, DT <: D](eventType: Class[ET], dataType: Class[DT], predicate: TypedPredicate2[ET, DT], apply: Apply2[ET, DT, State]): 
FSMStateFunctionBuilder[S, D] = + final def matchEvent[ET, DT <: D](eventType: Class[ET], + dataType: Class[DT], + predicate: TypedPredicate2[ET, DT], + apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D] = new FSMStateFunctionBuilder[S, D]().event(eventType, dataType, predicate, apply) /** @@ -213,7 +215,9 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEvent[ET, DT <: D](eventType: Class[ET], dataType: Class[DT], apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D] = + final def matchEvent[ET, DT <: D](eventType: Class[ET], + dataType: Class[DT], + apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D] = new FSMStateFunctionBuilder[S, D]().event(eventType, dataType, apply) /** @@ -226,7 +230,9 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEvent[ET](eventType: Class[ET], predicate: TypedPredicate2[ET, D], apply: Apply2[ET, D, State]): FSMStateFunctionBuilder[S, D] = + final def matchEvent[ET](eventType: Class[ET], + predicate: TypedPredicate2[ET, D], + apply: Apply2[ET, D, State]): FSMStateFunctionBuilder[S, D] = new FSMStateFunctionBuilder[S, D]().event(eventType, predicate, apply) /** @@ -250,7 +256,8 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEvent(predicate: TypedPredicate2[AnyRef, D], apply: Apply2[AnyRef, D, State]): FSMStateFunctionBuilder[S, D] = + final def matchEvent(predicate: TypedPredicate2[AnyRef, D], + apply: Apply2[AnyRef, D, State]): FSMStateFunctionBuilder[S, D] = new FSMStateFunctionBuilder[S, D]().event(predicate, apply) /** @@ -264,7 
+271,9 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEvent[DT <: D](eventMatches: JList[AnyRef], dataType: Class[DT], apply: Apply2[AnyRef, DT, State]): FSMStateFunctionBuilder[S, D] = + final def matchEvent[DT <: D](eventMatches: JList[AnyRef], + dataType: Class[DT], + apply: Apply2[AnyRef, DT, State]): FSMStateFunctionBuilder[S, D] = new FSMStateFunctionBuilder[S, D]().event(eventMatches, dataType, apply) /** @@ -290,7 +299,9 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEventEquals[E, DT <: D](event: E, dataType: Class[DT], apply: Apply2[E, DT, State]): FSMStateFunctionBuilder[S, D] = + final def matchEventEquals[E, DT <: D](event: E, + dataType: Class[DT], + apply: Apply2[E, DT, State]): FSMStateFunctionBuilder[S, D] = new FSMStateFunctionBuilder[S, D]().eventEquals(event, dataType, apply) /** @@ -376,7 +387,9 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * @param predicate a predicate that will be evaluated on the reason if the type matches * @return the builder with the case statement added */ - final def matchStop[RT <: Reason](reasonType: Class[RT], predicate: TypedPredicate[RT], apply: UnitApply3[RT, S, D]): FSMStopBuilder[S, D] = + final def matchStop[RT <: Reason](reasonType: Class[RT], + predicate: TypedPredicate[RT], + apply: UnitApply3[RT, S, D]): FSMStopBuilder[S, D] = new FSMStopBuilder[S, D]().stop(reasonType, predicate, apply) /** @@ -397,7 +410,9 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] { * @param apply an action to apply to the argument if the type and predicate matches * @return a builder with the case statement added */ - final def matchData[DT <: D](dataType: Class[DT], predicate: 
TypedPredicate[DT], apply: UnitApply[DT]): UnitPFBuilder[D] = + final def matchData[DT <: D](dataType: Class[DT], + predicate: TypedPredicate[DT], + apply: UnitApply[DT]): UnitPFBuilder[D] = UnitMatch.`match`(dataType, predicate, apply) /** diff --git a/akka-actor/src/main/scala/akka/actor/AbstractProps.scala b/akka-actor/src/main/scala/akka/actor/AbstractProps.scala index 5218337c2c..02d2028b94 100644 --- a/akka-actor/src/main/scala/akka/actor/AbstractProps.scala +++ b/akka-actor/src/main/scala/akka/actor/AbstractProps.scala @@ -28,7 +28,8 @@ private[akka] trait AbstractProps { * Java API: create a Props given a class and its constructor arguments. */ @varargs - def create(clazz: Class[_], args: AnyRef*): Props = new Props(deploy = Props.defaultDeploy, clazz = clazz, args = args.toList) + def create(clazz: Class[_], args: AnyRef*): Props = + new Props(deploy = Props.defaultDeploy, clazz = clazz, args = args.toList) /** * Create new Props from the given [[akka.japi.Creator]]. @@ -50,11 +51,12 @@ private[akka] trait AbstractProps { t.getActualTypeArguments.head match { case c: Class[_] => c // since T <: Actor case v: TypeVariable[_] => - v.getBounds collectFirst { case c: Class[_] if ac.isAssignableFrom(c) && c != ac => c } getOrElse ac + v.getBounds.collectFirst { case c: Class[_] if ac.isAssignableFrom(c) && c != ac => c }.getOrElse(ac) case x => throw new IllegalArgumentException(s"unsupported type found in Creator argument [$x]") } case c: Class[_] if (c == coc) => - throw new IllegalArgumentException("erased Creator types (e.g. lambdas) are unsupported, use Props.create(actorClass, creator) instead") + throw new IllegalArgumentException( + "erased Creator types (e.g. 
lambdas) are unsupported, use Props.create(actorClass, creator) instead") } create(classOf[CreatorConsumer], actorClass, creator) } diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 51698cb68e..1e300c1ad1 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -81,8 +81,7 @@ final case class ActorIdentity(correlationId: Any, ref: Option[ActorRef]) { if (ref.isDefined && ref.get == null) { throw new IllegalArgumentException( "ActorIdentity created with ref = Some(null) is not allowed, " + - "this could happen when serializing with Scala 2.12 and deserializing with Scala 2.11 which is not supported." - ) + "this could happen when serializing with Scala 2.12 and deserializing with Scala 2.11 which is not supported.") } /** @@ -228,8 +227,7 @@ final case class PreRestartException private[akka] (actor: ActorRef, (if (originalCause == null) "null" else originalCause.getClass) + ", " + (messageOption match { case Some(m: AnyRef) => m.getClass; case _ => "None" }) + ")", - cause - ) + cause) /** * A PostRestartException is thrown when constructor or postRestart() method @@ -244,8 +242,7 @@ final case class PostRestartException private[akka] (actor: ActorRef, cause: Thr extends ActorInitializationException( actor, "exception post restart (" + (if (originalCause == null) "null" else originalCause.getClass) + ")", - cause - ) + cause) /** * This is an extractor for retrieving the original cause (i.e. 
the first @@ -258,7 +255,7 @@ object OriginalRestartException { def unapply(ex: PostRestartException): Option[Throwable] = { @tailrec def rec(ex: PostRestartException): Option[Throwable] = ex match { case PostRestartException(_, _, e: PostRestartException) => rec(e) - case PostRestartException(_, _, e) => Some(e) + case PostRestartException(_, _, e) => Some(e) } rec(ex) } @@ -491,8 +488,7 @@ trait Actor { if ((contextStack.isEmpty) || (contextStack.head eq null)) throw ActorInitializationException( s"You cannot create an instance of [${getClass.getName}] explicitly using the constructor (new). " + - "You have to use one of the 'actorOf' factory methods to create a new actor. See the documentation." - ) + "You have to use one of the 'actorOf' factory methods to create a new actor. See the documentation.") val c = contextStack.head ActorCell.contextStack.set(null :: contextStack) c @@ -616,7 +612,7 @@ trait Actor { @throws(classOf[Exception]) // when changing this you MUST also change ActorDocTest //#lifecycle-hooks def preRestart(@unused reason: Throwable, @unused message: Option[Any]): Unit = { - context.children foreach { child => + context.children.foreach { child => context.unwatch(child) context.stop(child) } @@ -649,7 +645,7 @@ trait Actor { def unhandled(message: Any): Unit = { message match { case Terminated(dead) => throw DeathPactException(dead) - case _ => context.system.eventStream.publish(UnhandledMessage(message, sender(), self)) + case _ => context.system.eventStream.publish(UnhandledMessage(message, sender(), self)) } } } diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 8a7fb2c307..abbcf19e83 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -17,7 +17,7 @@ import akka.dispatch.{ Envelope, MessageDispatcher } import akka.dispatch.sysmsg._ import akka.event.Logging.{ Debug, Error, LogEvent } import 
akka.japi.Procedure -import akka.util.{ Reflect, unused } +import akka.util.{ unused, Reflect } import akka.annotation.InternalApi /** @@ -277,52 +277,64 @@ trait UntypedActorContext extends ActorContext { */ @InternalApi private[akka] trait Cell { + /** * The “self” reference which this Cell is attached to. */ def self: ActorRef + /** * The system within which this Cell lives. */ def system: ActorSystem + /** * The system internals where this Cell lives. */ def systemImpl: ActorSystemImpl + /** * Start the cell: enqueued message must not be processed before this has * been called. The usual action is to attach the mailbox to a dispatcher. */ def start(): this.type + /** * Recursively suspend this actor and all its children. Is only allowed to throw Fatal Throwables. */ def suspend(): Unit + /** * Recursively resume this actor and all its children. Is only allowed to throw Fatal Throwables. */ def resume(causedByFailure: Throwable): Unit + /** * Restart this actor (will recursively restart or stop all children). Is only allowed to throw Fatal Throwables. */ def restart(cause: Throwable): Unit + /** * Recursively terminate this actor and all its children. Is only allowed to throw Fatal Throwables. */ def stop(): Unit + /** * Returns “true” if the actor is locally known to be terminated, “false” if * alive or uncertain. */ private[akka] def isTerminated: Boolean + /** * The supervisor of this actor. */ def parent: InternalActorRef + /** * All children of this actor, including only reserved-names. */ def childrenRefs: ChildrenContainer + /** * Get the stats for the named child, if that exists. */ @@ -355,21 +367,25 @@ private[akka] trait Cell { * Is only allowed to throw Fatal Throwables. */ def sendSystemMessage(msg: SystemMessage): Unit + /** * Returns true if the actor is local, i.e. if it is actually scheduled * on a Thread in the current JVM when run. 
*/ def isLocal: Boolean + /** * If the actor isLocal, returns whether "user messages" are currently queued, * “false” otherwise. */ def hasMessages: Boolean + /** * If the actor isLocal, returns the number of "user messages" currently queued, * which may be a costly operation, 0 otherwise. */ def numberOfMessages: Int + /** * The props for this actor cell. */ @@ -427,17 +443,19 @@ private[akka] object ActorCell { * for! (waves hand) */ private[akka] class ActorCell( - val system: ActorSystemImpl, - val self: InternalActorRef, - final val props: Props, // Must be final so that it can be properly cleared in clearActorCellFields - val dispatcher: MessageDispatcher, - val parent: InternalActorRef) - extends UntypedActorContext with AbstractActor.ActorContext with Cell - with dungeon.ReceiveTimeout - with dungeon.Children - with dungeon.Dispatch - with dungeon.DeathWatch - with dungeon.FaultHandling { + val system: ActorSystemImpl, + val self: InternalActorRef, + final val props: Props, // Must be final so that it can be properly cleared in clearActorCellFields + val dispatcher: MessageDispatcher, + val parent: InternalActorRef) + extends UntypedActorContext + with AbstractActor.ActorContext + with Cell + with dungeon.ReceiveTimeout + with dungeon.Children + with dungeon.Dispatch + with dungeon.DeathWatch + with dungeon.FaultHandling { import ActorCell._ @@ -522,17 +540,17 @@ private[akka] class ActorCell( try { message match { case message: SystemMessage if shouldStash(message, currentState) => stash(message) - case f: Failed => handleFailure(f) - case DeathWatchNotification(a, ec, at) => watchedActorTerminated(a, ec, at) - case Create(failure) => create(failure) - case Watch(watchee, watcher) => addWatcher(watchee, watcher) - case Unwatch(watchee, watcher) => remWatcher(watchee, watcher) - case Recreate(cause) => faultRecreate(cause) - case Suspend() => faultSuspend() - case Resume(inRespToFailure) => faultResume(inRespToFailure) - case Terminate() => terminate() - 
case Supervise(child, async) => supervise(child, async) - case NoMessage => // only here to suppress warning + case f: Failed => handleFailure(f) + case DeathWatchNotification(a, ec, at) => watchedActorTerminated(a, ec, at) + case Create(failure) => create(failure) + case Watch(watchee, watcher) => addWatcher(watchee, watcher) + case Unwatch(watchee, watcher) => remWatcher(watchee, watcher) + case Recreate(cause) => faultRecreate(cause) + case Suspend() => faultSuspend() + case Resume(inRespToFailure) => faultResume(inRespToFailure) + case Terminate() => terminate() + case Supervise(child, async) => supervise(child, async) + case NoMessage => // only here to suppress warning } } catch handleNonFatalOrInterruptedException { e => handleInvokeFailure(Nil, e) @@ -648,14 +666,15 @@ private[akka] class ActorCell( } } - failure foreach { throw _ } + failure.foreach { throw _ } try { val created = newActor() actor = created created.aroundPreStart() checkReceiveTimeout() - if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(created), "started (" + created + ")")) + if (system.settings.DebugLifecycle) + publish(Debug(self.path.toString, clazz(created), "started (" + created + ")")) } catch { case e: InterruptedException => clearOutActorIfNonNull() @@ -664,12 +683,14 @@ private[akka] class ActorCell( case NonFatal(e) => clearOutActorIfNonNull() e match { - case i: InstantiationException => throw ActorInitializationException( - self, - """exception during creation, this problem is likely to occur because the class of the Actor you tried to create is either, + case i: InstantiationException => + throw ActorInitializationException( + self, + """exception during creation, this problem is likely to occur because the class of the Actor you tried to create is either, a non-static inner class (in which case make it a static inner class or use Props(new ...) or Props( new Creator ... ) or is missing an appropriate, reachable no-args constructor. 
- """, i.getCause) + """, + i.getCause) case x => throw ActorInitializationException(self, "exception during creation", x) } } @@ -681,8 +702,13 @@ private[akka] class ActorCell( initChild(child) match { case Some(_) => handleSupervise(child, async) - if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) - case None => publish(Error(self.path.toString, clazz(actor), "received Supervise from unregistered child " + child + ", this will not end well")) + if (system.settings.DebugLifecycle) + publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) + case None => + publish( + Error(self.path.toString, + clazz(actor), + "received Supervise from unregistered child " + child + ", this will not end well")) } } @@ -707,12 +733,15 @@ private[akka] class ActorCell( final protected def setActorFields(actorInstance: Actor, context: ActorContext, self: ActorRef): Unit = if (actorInstance ne null) { if (!Reflect.lookupAndSetField(actorInstance.getClass, actorInstance, "context", context) - || !Reflect.lookupAndSetField(actorInstance.getClass, actorInstance, "self", self)) - throw IllegalActorStateException(actorInstance.getClass + " is not an Actor since it have not mixed in the 'Actor' trait") + || !Reflect.lookupAndSetField(actorInstance.getClass, actorInstance, "self", self)) + throw IllegalActorStateException( + actorInstance.getClass + " is not an Actor since it have not mixed in the 'Actor' trait") } // logging is not the main purpose, and if it fails there’s nothing we can do - protected final def publish(e: LogEvent): Unit = try system.eventStream.publish(e) catch { case NonFatal(_) => } + protected final def publish(e: LogEvent): Unit = + try system.eventStream.publish(e) + catch { case NonFatal(_) => } protected final def clazz(o: AnyRef): Class[_] = if (o eq null) this.getClass else o.getClass } diff --git a/akka-actor/src/main/scala/akka/actor/ActorDSL.scala 
b/akka-actor/src/main/scala/akka/actor/ActorDSL.scala index 25317f2730..e026f02af0 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorDSL.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorDSL.scala @@ -69,7 +69,9 @@ import akka.util.JavaDurationConverters._ * * @deprecated Use the normal `actorOf` methods defined on `ActorSystem` and `ActorContext` to create Actors instead. */ -@deprecated("deprecated Use the normal `actorOf` methods defined on `ActorSystem` and `ActorContext` to create Actors instead.", since = "2.5.0") +@deprecated( + "deprecated Use the normal `actorOf` methods defined on `ActorSystem` and `ActorContext` to create Actors instead.", + since = "2.5.0") object ActorDSL extends dsl.Inbox with dsl.Creators { protected object Extension extends ExtensionId[Extension] with ExtensionIdProvider { @@ -87,13 +89,14 @@ object ActorDSL extends dsl.Inbox with dsl.Creators { protected class Extension(val system: ExtendedActorSystem) extends akka.actor.Extension with InboxExtension { private case class MkChild(props: Props, name: String) extends NoSerializationVerificationNeeded - private val boss = system.systemActorOf(Props( - new Actor { + private val boss = system + .systemActorOf(Props(new Actor { def receive = { case MkChild(props, name) => sender() ! context.actorOf(props, name) case any => sender() ! any } - }), "dsl").asInstanceOf[RepointableActorRef] + }), "dsl") + .asInstanceOf[RepointableActorRef] lazy val config = system.settings.config.getConfig("akka.actor.dsl") @@ -157,6 +160,7 @@ abstract class Inbox { } object Inbox { + /** * Create a new Inbox within the given system. 
*/ diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index daf65a6fc8..1a869d8801 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -51,6 +51,7 @@ object ActorPaths { } object ActorPath { + /** * Parse string as actor path; throws java.net.MalformedURLException if unable to do so. */ @@ -88,11 +89,11 @@ object ActorPath { case EmptyPathCode => throw InvalidActorNameException(s"Actor path element must not be empty $fullPathMsg") case invalidAt => - throw InvalidActorNameException( - s"""Invalid actor path element [$element]$fullPathMsg, illegal character [${element(invalidAt)}] at position: $invalidAt. """ + - """Actor paths MUST: """ + - """not start with `$`, """ + - s"""include only ASCII letters and can only contain these special characters: ${ActorPath.ValidSymbols}.""") + throw InvalidActorNameException(s"""Invalid actor path element [$element]$fullPathMsg, illegal character [${element( + invalidAt)}] at position: $invalidAt. 
""" + + """Actor paths MUST: """ + + """not start with `$`, """ + + s"""include only ASCII letters and can only contain these special characters: ${ActorPath.ValidSymbols}.""") } } @@ -106,25 +107,27 @@ object ActorPath { final def isValidPathElement(s: String): Boolean = findInvalidPathElementCharPosition(s) == ValidPathCode - private final def findInvalidPathElementCharPosition(s: String): Int = if (s.isEmpty) EmptyPathCode else { - def isValidChar(c: Char): Boolean = - (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (ValidSymbols.indexOf(c) != -1) + private final def findInvalidPathElementCharPosition(s: String): Int = + if (s.isEmpty) EmptyPathCode + else { + def isValidChar(c: Char): Boolean = + (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (ValidSymbols.indexOf(c) != -1) - def isHexChar(c: Char): Boolean = - (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') || (c >= '0' && c <= '9') + def isHexChar(c: Char): Boolean = + (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') || (c >= '0' && c <= '9') - val len = s.length - def validate(pos: Int): Int = - if (pos < len) - s.charAt(pos) match { - case c if isValidChar(c) => validate(pos + 1) - case '%' if pos + 2 < len && isHexChar(s.charAt(pos + 1)) && isHexChar(s.charAt(pos + 2)) => validate(pos + 3) - case _ => pos - } - else ValidPathCode + val len = s.length + def validate(pos: Int): Int = + if (pos < len) + s.charAt(pos) match { + case c if isValidChar(c) => validate(pos + 1) + case '%' if pos + 2 < len && isHexChar(s.charAt(pos + 1)) && isHexChar(s.charAt(pos + 2)) => + validate(pos + 3) + case _ => pos + } else ValidPathCode - if (len > 0 && s.charAt(0) != '$') validate(0) else 0 - } + if (len > 0 && s.charAt(0) != '$') validate(0) else 0 + } private[akka] final val emptyActorPath: immutable.Iterable[String] = List("") } @@ -148,6 +151,7 @@ object ActorPath { */ @SerialVersionUID(1L) sealed trait ActorPath extends Comparable[ActorPath] with 
Serializable { + /** * The Address under which this path can be reached; walks up the tree to * the RootActorPath. @@ -177,7 +181,8 @@ sealed trait ActorPath extends Comparable[ActorPath] with Serializable { /** * Recursively create a descendant’s path by appending all child names. */ - def /(child: Iterable[String]): ActorPath = child.foldLeft(this)((path, elem) => if (elem.isEmpty) path else path / elem) + def /(child: Iterable[String]): ActorPath = + child.foldLeft(this)((path, elem) => if (elem.isEmpty) path else path / elem) /** * Java API: Recursively create a descendant’s path by appending all child names. @@ -252,11 +257,10 @@ sealed trait ActorPath extends Comparable[ActorPath] with Serializable { */ @SerialVersionUID(1L) final case class RootActorPath(address: Address, name: String = "/") extends ActorPath { - require( - name.length == 1 || name.indexOf('/', 1) == -1, - "/ may only exist at the beginning of the root actors name, " + - "it is a path separator and is not legal in ActorPath names: [%s]" format name) - require(name.indexOf('#') == -1, "# is a fragment separator and is not legal in ActorPath names: [%s]" format name) + require(name.length == 1 || name.indexOf('/', 1) == -1, + ("/ may only exist at the beginning of the root actors name, " + + "it is a path separator and is not legal in ActorPath names: [%s]").format(name)) + require(name.indexOf('#') == -1, "# is a fragment separator and is not legal in ActorPath names: [%s]".format(name)) override def parent: ActorPath = this @@ -280,7 +284,8 @@ final case class RootActorPath(address: Address, name: String = "/") extends Act override def toSerializationFormatWithAddress(addr: Address): String = toStringWithAddress(addr) override def compareTo(other: ActorPath): Int = other match { - case r: RootActorPath => toString compareTo r.toString // FIXME make this cheaper by comparing address and name in isolation + case r: RootActorPath => + toString.compareTo(r.toString) // FIXME make this cheaper by 
comparing address and name in isolation case _: ChildActorPath => 1 } @@ -299,9 +304,13 @@ final case class RootActorPath(address: Address, name: String = "/") extends Act } @SerialVersionUID(1L) -final class ChildActorPath private[akka] (val parent: ActorPath, val name: String, override private[akka] val uid: Int) extends ActorPath { - if (name.indexOf('/') != -1) throw new IllegalArgumentException("/ is a path separator and is not legal in ActorPath names: [%s]" format name) - if (name.indexOf('#') != -1) throw new IllegalArgumentException("# is a fragment separator and is not legal in ActorPath names: [%s]" format name) +final class ChildActorPath private[akka] (val parent: ActorPath, val name: String, override private[akka] val uid: Int) + extends ActorPath { + if (name.indexOf('/') != -1) + throw new IllegalArgumentException("/ is a path separator and is not legal in ActorPath names: [%s]".format(name)) + if (name.indexOf('#') != -1) + throw new IllegalArgumentException( + "# is a fragment separator and is not legal in ActorPath names: [%s]".format(name)) def this(parent: ActorPath, name: String) = this(parent, name, ActorCell.undefinedUid) @@ -384,7 +393,10 @@ final class ChildActorPath private[akka] (val parent: ActorPath, val name: Strin * @param diff difference in offset for each child element, due to different address * @param rootString function to construct the root element string */ - private def buildToString(sb: JStringBuilder, length: Int, diff: Int, rootString: RootActorPath => String): JStringBuilder = { + private def buildToString(sb: JStringBuilder, + length: Int, + diff: Int, + rootString: RootActorPath => String): JStringBuilder = { @tailrec def rec(p: ActorPath): JStringBuilder = p match { case r: RootActorPath => @@ -412,8 +424,8 @@ final class ChildActorPath private[akka] (val parent: ActorPath, val name: Strin @tailrec def rec(left: ActorPath, right: ActorPath): Boolean = if (left eq right) true - else if (left.isInstanceOf[RootActorPath]) 
left equals right - else if (right.isInstanceOf[RootActorPath]) right equals left + else if (left.isInstanceOf[RootActorPath]) left.equals(right) + else if (right.isInstanceOf[RootActorPath]) right.equals(left) else left.name == right.name && rec(left.parent, right.parent) other match { @@ -439,10 +451,10 @@ final class ChildActorPath private[akka] (val parent: ActorPath, val name: Strin @tailrec def rec(left: ActorPath, right: ActorPath): Int = if (left eq right) 0 - else if (left.isInstanceOf[RootActorPath]) left compareTo right - else if (right.isInstanceOf[RootActorPath]) -(right compareTo left) + else if (left.isInstanceOf[RootActorPath]) left.compareTo(right) + else if (right.isInstanceOf[RootActorPath]) -right.compareTo(left) else { - val x = left.name compareTo right.name + val x = left.name.compareTo(right.name) if (x == 0) rec(left.parent, right.parent) else x } @@ -450,4 +462,3 @@ final class ChildActorPath private[akka] (val parent: ActorPath, val name: Strin rec(this, other) } } - diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index cd469765ae..7441264b05 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -112,7 +112,7 @@ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable * Comparison takes path and the unique id of the actor cell into account. 
*/ final def compareTo(other: ActorRef) = { - val x = this.path compareTo other.path + val x = this.path.compareTo(other.path) if (x == 0) if (this.path.uid < other.path.uid) -1 else if (this.path.uid == other.path.uid) 0 else 1 else x } @@ -299,14 +299,14 @@ private[akka] case object Nobody extends MinimalActorRef { * * INTERNAL API */ -private[akka] class LocalActorRef private[akka] ( - _system: ActorSystemImpl, - _props: Props, - _dispatcher: MessageDispatcher, - _mailboxType: MailboxType, - _supervisor: InternalActorRef, - override val path: ActorPath) - extends ActorRefWithCell with LocalRef { +private[akka] class LocalActorRef private[akka] (_system: ActorSystemImpl, + _props: Props, + _dispatcher: MessageDispatcher, + _mailboxType: MailboxType, + _supervisor: InternalActorRef, + override val path: ActorPath) + extends ActorRefWithCell + with LocalRef { /* * Safe publication of this class’s fields is guaranteed by mailbox.setActor() @@ -321,7 +321,11 @@ private[akka] class LocalActorRef private[akka] ( private val actorCell: ActorCell = newActorCell(_system, this, _props, _dispatcher, _supervisor) actorCell.init(sendSupervise = true, _mailboxType) - protected def newActorCell(system: ActorSystemImpl, ref: InternalActorRef, props: Props, dispatcher: MessageDispatcher, supervisor: InternalActorRef): ActorCell = + protected def newActorCell(system: ActorSystemImpl, + ref: InternalActorRef, + props: Props, + dispatcher: MessageDispatcher, + supervisor: InternalActorRef): ActorCell = new ActorCell(system, ref, props, dispatcher, supervisor) protected def actorContext: ActorContext = actorCell @@ -399,7 +403,8 @@ private[akka] class LocalActorRef private[akka] ( override def sendSystemMessage(message: SystemMessage): Unit = actorCell.sendSystemMessage(message) - override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = actorCell.sendMessage(message, sender) + override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = + 
actorCell.sendMessage(message, sender) override def restart(cause: Throwable): Unit = actorCell.restart(cause) @@ -424,7 +429,7 @@ private[akka] final case class SerializedActorRef private (path: String) { case null => throw new IllegalStateException( "Trying to deserialize a serialized ActorRef without an ActorSystem in scope." + - " Use 'akka.serialization.JavaSerializer.currentSystem.withValue(system) { ... }'") + " Use 'akka.serialization.JavaSerializer.currentSystem.withValue(system) { ... }'") case someSystem => someSystem.provider.resolveActorRef(path) } @@ -497,7 +502,8 @@ trait DeadLetterSuppression * It is possible to subscribe to suppressed dead letters on the ActorSystem's EventStream explicitly. */ @SerialVersionUID(1L) -final case class SuppressedDeadLetter(message: DeadLetterSuppression, sender: ActorRef, recipient: ActorRef) extends AllDeadLetters { +final case class SuppressedDeadLetter(message: DeadLetterSuppression, sender: ActorRef, recipient: ActorRef) + extends AllDeadLetters { require(sender ne null, "DeadLetter sender may not be null") require(recipient ne null, "DeadLetter recipient may not be null") } @@ -518,10 +524,10 @@ private[akka] object DeadLetterActorRef { * * INTERNAL API */ -private[akka] class EmptyLocalActorRef( - override val provider: ActorRefProvider, - override val path: ActorPath, - val eventStream: EventStream) extends MinimalActorRef { +private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, + override val path: ActorPath, + val eventStream: EventStream) + extends MinimalActorRef { @deprecated("Use context.watch(actor) and receive Terminated(actor)", "2.2") override private[akka] def isTerminated = true @@ -571,17 +577,16 @@ private[akka] class EmptyLocalActorRef( * * INTERNAL API */ -private[akka] class DeadLetterActorRef( - _provider: ActorRefProvider, - _path: ActorPath, - _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) { +private[akka] class 
DeadLetterActorRef(_provider: ActorRefProvider, _path: ActorPath, _eventStream: EventStream) + extends EmptyLocalActorRef(_provider, _path, _eventStream) { override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { case null => throw InvalidMessageException("Message is null") case Identify(messageId) => sender ! ActorIdentity(messageId, None) case d: DeadLetter => if (!specialHandle(d.message, d.sender)) eventStream.publish(d) - case _ => if (!specialHandle(message, sender)) - eventStream.publish(DeadLetter(message, if (sender eq Actor.noSender) provider.deadLetters else sender, this)) + case _ => + if (!specialHandle(message, sender)) + eventStream.publish(DeadLetter(message, if (sender eq Actor.noSender) provider.deadLetters else sender, this)) } override protected def specialHandle(msg: Any, sender: ActorRef): Boolean = msg match { @@ -602,11 +607,11 @@ private[akka] class DeadLetterActorRef( * * INTERNAL API */ -private[akka] class VirtualPathContainer( - override val provider: ActorRefProvider, - override val path: ActorPath, - override val getParent: InternalActorRef, - val log: MarkerLoggingAdapter) extends MinimalActorRef { +private[akka] class VirtualPathContainer(override val provider: ActorRefProvider, + override val path: ActorPath, + override val getParent: InternalActorRef, + val log: MarkerLoggingAdapter) + extends MinimalActorRef { private val children = new ConcurrentHashMap[String, InternalActorRef] @@ -618,8 +623,10 @@ private[akka] class VirtualPathContainer( case sel @ ActorSelectionMessage(msg, elements, wildcardFanOut) => { require(elements.nonEmpty) - def emptyRef = new EmptyLocalActorRef(provider, path / sel.elements.map(_.toString), - provider.systemGuardian.underlying.system.eventStream) + def emptyRef = + new EmptyLocalActorRef(provider, + path / sel.elements.map(_.toString), + provider.systemGuardian.underlying.system.eventStream) elements.head match { case SelectChildName(name) => @@ -645,7 +652,7 @@ 
private[akka] class VirtualPathContainer( def addChild(name: String, ref: InternalActorRef): Unit = { children.put(name, ref) match { case null => // okay - case old => + case old => // this can happen from RemoteSystemDaemon if a new child is created // before the old is removed from RemoteSystemDaemon children log.debug("{} replacing child {} ({} -> {})", path, name, old, ref) @@ -675,12 +682,13 @@ private[akka] class VirtualPathContainer( else { val n = name.next() if (n.isEmpty) this - else children.get(n) match { - case null => Nobody - case some => - if (name.isEmpty) some - else some.getChild(name) - } + else + children.get(n) match { + case null => Nobody + case some => + if (name.isEmpty) some + else some.getChild(name) + } } } @@ -709,11 +717,11 @@ private[akka] class VirtualPathContainer( * [[FunctionRef#unwatch]] must be called to avoid a resource leak, which is different * from an ordinary actor. */ -private[akka] final class FunctionRef( - override val path: ActorPath, - override val provider: ActorRefProvider, - system: ActorSystem, - f: (ActorRef, Any) => Unit) extends MinimalActorRef { +private[akka] final class FunctionRef(override val path: ActorPath, + override val provider: ActorRefProvider, + system: ActorSystem, + f: (ActorRef, Any) => Unit) + extends MinimalActorRef { override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = { message match { @@ -764,17 +772,19 @@ private[akka] final class FunctionRef( // outside of synchronized block if (toUnwatch.nonEmpty) - toUnwatch foreach unwatchWatched + toUnwatch.foreach(unwatchWatched) if (watchedBy.nonEmpty) { - watchedBy foreach sendTerminated(ifLocal = false) - watchedBy foreach sendTerminated(ifLocal = true) + watchedBy.foreach(sendTerminated(ifLocal = false)) + watchedBy.foreach(sendTerminated(ifLocal = true)) } } private def sendTerminated(ifLocal: Boolean)(watcher: ActorRef): Unit = if (watcher.asInstanceOf[ActorRefScope].isLocal == ifLocal) - 
watcher.asInstanceOf[InternalActorRef].sendSystemMessage(DeathWatchNotification(this, existenceConfirmed = true, addressTerminated = false)) + watcher + .asInstanceOf[InternalActorRef] + .sendSystemMessage(DeathWatchNotification(this, existenceConfirmed = true, addressTerminated = false)) private def addressTerminated(address: Address): Unit = { val toNotify = this.synchronized { @@ -816,9 +826,13 @@ private[akka] final class FunctionRef( } } } else if (!watcheeSelf && watcherSelf) { - publish(Logging.Warning(path.toString, classOf[FunctionRef], s"externally triggered watch from $watcher to $watchee is illegal on FunctionRef")) + publish( + Logging.Warning(path.toString, + classOf[FunctionRef], + s"externally triggered watch from $watcher to $watchee is illegal on FunctionRef")) } else { - publish(Logging.Error(path.toString, classOf[FunctionRef], s"BUG: illegal Watch($watchee,$watcher) for $this")) + publish( + Logging.Error(path.toString, classOf[FunctionRef], s"BUG: illegal Watch($watchee,$watcher) for $this")) } false } @@ -844,14 +858,20 @@ private[akka] final class FunctionRef( } } } else if (!watcheeSelf && watcherSelf) { - publish(Logging.Warning(path.toString, classOf[FunctionRef], s"externally triggered unwatch from $watcher to $watchee is illegal on FunctionRef")) + publish( + Logging.Warning(path.toString, + classOf[FunctionRef], + s"externally triggered unwatch from $watcher to $watchee is illegal on FunctionRef")) } else { - publish(Logging.Error(path.toString, classOf[FunctionRef], s"BUG: illegal Unwatch($watchee,$watcher) for $this")) + publish( + Logging.Error(path.toString, classOf[FunctionRef], s"BUG: illegal Unwatch($watchee,$watcher) for $this")) } } } - private def publish(e: Logging.LogEvent): Unit = try system.eventStream.publish(e) catch { case NonFatal(_) => } + private def publish(e: Logging.LogEvent): Unit = + try system.eventStream.publish(e) + catch { case NonFatal(_) => } /** * Have this FunctionRef watch the given Actor. 
@@ -918,7 +938,7 @@ private[akka] final class FunctionRef( // AddressTerminatedTopic update not needed block case _ => - def hasNonLocalAddress: Boolean = (watching exists isNonLocal) || (watchedByOrEmpty exists isNonLocal) + def hasNonLocalAddress: Boolean = (watching.exists(isNonLocal)) || (watchedByOrEmpty.exists(isNonLocal)) val had = hasNonLocalAddress val result = block diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 166eeac275..5fd9d54a35 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -111,15 +111,14 @@ import akka.util.OptionVal * but it should be overridable from external configuration; the lookup of * the latter can be suppressed by setting ``lookupDeploy`` to ``false``. */ - private[akka] def actorOf( - system: ActorSystemImpl, - props: Props, - supervisor: InternalActorRef, - path: ActorPath, - systemService: Boolean, - deploy: Option[Deploy], - lookupDeploy: Boolean, - async: Boolean): InternalActorRef + private[akka] def actorOf(system: ActorSystemImpl, + props: Props, + supervisor: InternalActorRef, + path: ActorPath, + systemService: Boolean, + deploy: Option[Deploy], + lookupDeploy: Boolean, + async: Boolean): InternalActorRef /** * INTERNAL API @@ -195,12 +194,15 @@ import akka.util.OptionVal * Interface implemented by ActorSystem and ActorContext, the only two places * from which you can get fresh actors. 
*/ -@implicitNotFound("implicit ActorRefFactory required: if outside of an Actor you need an implicit ActorSystem, inside of an actor this should be the implicit ActorContext") +@implicitNotFound( + "implicit ActorRefFactory required: if outside of an Actor you need an implicit ActorSystem, inside of an actor this should be the implicit ActorContext") trait ActorRefFactory { + /** * INTERNAL API */ protected def systemImpl: ActorSystemImpl + /** * INTERNAL API */ @@ -333,7 +335,8 @@ trait ActorRefFactory { * equal to references acquired with `actorOf`, `sender`, or `context.self`. */ @deprecated("use actorSelection instead of actorFor", "2.2") - private[akka] def actorFor(path: java.lang.Iterable[String]): ActorRef = provider.actorFor(lookupRoot, immutableSeq(path)) + private[akka] def actorFor(path: java.lang.Iterable[String]): ActorRef = + provider.actorFor(lookupRoot, immutableSeq(path)) /** * Construct an [[akka.actor.ActorSelection]] from the given path, which is @@ -383,6 +386,7 @@ private[akka] final case class StopChild(child: ActorRef) * INTERNAL API */ private[akka] object SystemGuardian { + /** * For the purpose of orderly shutdown it's possible * to register interest in the termination of systemGuardian @@ -401,8 +405,9 @@ private[akka] object LocalActorRefProvider { /* * Root and user guardian */ - private class Guardian(override val supervisorStrategy: SupervisorStrategy) extends Actor - with RequiresMessageQueue[UnboundedMessageQueueSemantics] { + private class Guardian(override val supervisorStrategy: SupervisorStrategy) + extends Actor + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { def receive = { case Terminated(_) => context.stop(self) @@ -417,7 +422,8 @@ private[akka] object LocalActorRefProvider { * System guardian */ private class SystemGuardian(override val supervisorStrategy: SupervisorStrategy, val guardian: ActorRef) - extends Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] { + extends Actor + with 
RequiresMessageQueue[UnboundedMessageQueueSemantics] { import SystemGuardian._ var terminationHooks = Set.empty[ActorRef] @@ -428,7 +434,7 @@ private[akka] object LocalActorRefProvider { // termination hooks, they will reply with TerminationHookDone // and when all are done the systemGuardian is stopped context.become(terminating) - terminationHooks foreach { _ ! TerminationHook } + terminationHooks.foreach { _ ! TerminationHook } stopWhenAllTerminationHooksDone() case Terminated(a) => // a registered, and watched termination hook terminated before @@ -437,7 +443,7 @@ private[akka] object LocalActorRefProvider { case StopChild(child) => context.stop(child) case RegisterTerminationHook if sender() != context.system.deadLetters => terminationHooks += sender() - context watch sender() + context.watch(sender()) } def terminating: Receive = { @@ -469,35 +475,30 @@ private[akka] object LocalActorRefProvider { * * Depending on this class is not supported, only the [[ActorRefProvider]] interface is supported. 
*/ -private[akka] class LocalActorRefProvider private[akka] ( - _systemName: String, - override val settings: ActorSystem.Settings, - val eventStream: EventStream, - val dynamicAccess: DynamicAccess, - override val deployer: Deployer, - _deadLetters: Option[ActorPath => InternalActorRef]) - extends ActorRefProvider { +private[akka] class LocalActorRefProvider private[akka] (_systemName: String, + override val settings: ActorSystem.Settings, + val eventStream: EventStream, + val dynamicAccess: DynamicAccess, + override val deployer: Deployer, + _deadLetters: Option[ActorPath => InternalActorRef]) + extends ActorRefProvider { // this is the constructor needed for reflectively instantiating the provider - def this( - _systemName: String, - settings: ActorSystem.Settings, - eventStream: EventStream, - dynamicAccess: DynamicAccess) = - this( - _systemName, - settings, - eventStream, - dynamicAccess, - new Deployer(settings, dynamicAccess), - None) + def this(_systemName: String, + settings: ActorSystem.Settings, + eventStream: EventStream, + dynamicAccess: DynamicAccess) = + this(_systemName, settings, eventStream, dynamicAccess, new Deployer(settings, dynamicAccess), None) override val rootPath: ActorPath = RootActorPath(Address("akka", _systemName)) - private[akka] val log: MarkerLoggingAdapter = Logging.withMarker(eventStream, getClass.getName + "(" + rootPath.address + ")") + private[akka] val log: MarkerLoggingAdapter = + Logging.withMarker(eventStream, getClass.getName + "(" + rootPath.address + ")") override val deadLetters: InternalActorRef = - _deadLetters.getOrElse((p: ActorPath) => new DeadLetterActorRef(this, p, eventStream)).apply(rootPath / "deadLetters") + _deadLetters + .getOrElse((p: ActorPath) => new DeadLetterActorRef(this, p, eventStream)) + .apply(rootPath / "deadLetters") private[this] final val terminationPromise: Promise[Terminated] = Promise[Terminated]() @@ -526,7 +527,8 @@ private[akka] class LocalActorRefProvider private[akka] ( def isWalking 
= causeOfTermination.future.isCompleted == false override def stop(): Unit = { - causeOfTermination.trySuccess(Terminated(provider.rootGuardian)(existenceConfirmed = true, addressTerminated = true)) //Idempotent + causeOfTermination.trySuccess( + Terminated(provider.rootGuardian)(existenceConfirmed = true, addressTerminated = true)) //Idempotent terminationPromise.tryCompleteWith(causeOfTermination.future) // Signal termination downstream, idempotent } @@ -575,7 +577,9 @@ private[akka] class LocalActorRefProvider private[akka] ( def registerExtraNames(_extras: Map[String, InternalActorRef]): Unit = extraNames ++= _extras private def guardianSupervisorStrategyConfigurator = - dynamicAccess.createInstanceFor[SupervisorStrategyConfigurator](settings.SupervisorStrategyClass, EmptyImmutableSeq).get + dynamicAccess + .createInstanceFor[SupervisorStrategyConfigurator](settings.SupervisorStrategyClass, EmptyImmutableSeq) + .get /** * Overridable supervision strategy to be used by the “/user” guardian. 
@@ -601,13 +605,12 @@ private[akka] class LocalActorRefProvider private[akka] ( private lazy val defaultMailbox = system.mailboxes.lookup(Mailboxes.DefaultMailboxId) override lazy val rootGuardian: LocalActorRef = - new LocalActorRef( - system, - Props(classOf[LocalActorRefProvider.Guardian], rootGuardianStrategy), - defaultDispatcher, - defaultMailbox, - theOneWhoWalksTheBubblesOfSpaceTime, - rootPath) { + new LocalActorRef(system, + Props(classOf[LocalActorRefProvider.Guardian], rootGuardianStrategy), + defaultDispatcher, + defaultMailbox, + theOneWhoWalksTheBubblesOfSpaceTime, + rootPath) { override def getParent: InternalActorRef = this override def getSingleChild(name: String): InternalActorRef = name match { case "temp" => tempContainer @@ -623,8 +626,13 @@ private[akka] class LocalActorRefProvider private[akka] ( override lazy val guardian: LocalActorRef = { val cell = rootGuardian.underlying cell.reserveChild("user") - val ref = new LocalActorRef(system, system.guardianProps.getOrElse(Props(classOf[LocalActorRefProvider.Guardian], guardianStrategy)), - defaultDispatcher, defaultMailbox, rootGuardian, rootPath / "user") + val ref = new LocalActorRef( + system, + system.guardianProps.getOrElse(Props(classOf[LocalActorRefProvider.Guardian], guardianStrategy)), + defaultDispatcher, + defaultMailbox, + rootGuardian, + rootPath / "user") cell.initChild(ref) ref.start() ref @@ -633,9 +641,12 @@ private[akka] class LocalActorRefProvider private[akka] ( override lazy val systemGuardian: LocalActorRef = { val cell = rootGuardian.underlying cell.reserveChild("system") - val ref = new LocalActorRef( - system, Props(classOf[LocalActorRefProvider.SystemGuardian], systemGuardianStrategy, guardian), - defaultDispatcher, defaultMailbox, rootGuardian, rootPath / "system") + val ref = new LocalActorRef(system, + Props(classOf[LocalActorRefProvider.SystemGuardian], systemGuardianStrategy, guardian), + defaultDispatcher, + defaultMailbox, + rootGuardian, + rootPath / "system") 
cell.initChild(ref) ref.start() ref @@ -689,12 +700,13 @@ private[akka] class LocalActorRefProvider private[akka] ( if (path.isEmpty) { log.debug("look-up of empty path sequence fails (per definition)") deadLetters - } else ref.getChild(path.iterator) match { - case Nobody => - log.debug("look-up of path sequence [/{}] failed", path.mkString("/")) - new EmptyLocalActorRef(system.provider, ref.path / path, eventStream) - case x => x - } + } else + ref.getChild(path.iterator) match { + case Nobody => + log.debug("look-up of path sequence [/{}] failed", path.mkString("/")) + new EmptyLocalActorRef(system.provider, ref.path / path, eventStream) + case x => x + } def resolveActorRef(path: String): ActorRef = path match { case ActorPathExtractor(address, elems) if address == rootPath.address => resolveActorRef(rootGuardian, elems) @@ -706,9 +718,9 @@ private[akka] class LocalActorRefProvider private[akka] ( def resolveActorRef(path: ActorPath): ActorRef = { if (path.root == rootPath) resolveActorRef(rootGuardian, path.elements) else { - log.debug( - "Resolve (deserialization) of foreign path [{}] doesn't match root path [{}], using deadLetters.", - path, rootPath) + log.debug("Resolve (deserialization) of foreign path [{}] doesn't match root path [{}], using deadLetters.", + path, + rootPath) deadLetters } } @@ -720,25 +732,33 @@ private[akka] class LocalActorRefProvider private[akka] ( if (pathElements.isEmpty) { log.debug("Resolve (deserialization) of empty path doesn't match an active actor, using deadLetters.") deadLetters - } else ref.getChild(pathElements.iterator) match { - case Nobody => - if (log.isDebugEnabled) - log.debug( - "Resolve (deserialization) of path [{}] doesn't match an active actor. 
" + - "It has probably been stopped, using deadLetters.", - pathElements.mkString("/")) - new EmptyLocalActorRef(system.provider, ref.path / pathElements, eventStream) - case x => x - } + } else + ref.getChild(pathElements.iterator) match { + case Nobody => + if (log.isDebugEnabled) + log.debug("Resolve (deserialization) of path [{}] doesn't match an active actor. " + + "It has probably been stopped, using deadLetters.", + pathElements.mkString("/")) + new EmptyLocalActorRef(system.provider, ref.path / pathElements, eventStream) + case x => x + } - def actorOf(system: ActorSystemImpl, props: Props, supervisor: InternalActorRef, path: ActorPath, - systemService: Boolean, deploy: Option[Deploy], lookupDeploy: Boolean, async: Boolean): InternalActorRef = { + def actorOf(system: ActorSystemImpl, + props: Props, + supervisor: InternalActorRef, + path: ActorPath, + systemService: Boolean, + deploy: Option[Deploy], + lookupDeploy: Boolean, + async: Boolean): InternalActorRef = { props.deploy.routerConfig match { case NoRouter => if (settings.DebugRouterMisconfiguration) { - deployer.lookup(path) foreach { d => + deployer.lookup(path).foreach { d => if (d.routerConfig != NoRouter) - log.warning("Configuration says that [{}] should be a router, but code disagrees. Remove the config or add a routerConfig to its Props.", path) + log.warning( + "Configuration says that [{}] should be a router, but code disagrees. 
Remove the config or add a routerConfig to its Props.", + path) } } @@ -762,16 +782,20 @@ private[akka] class LocalActorRefProvider private[akka] ( val dispatcher = system.dispatchers.lookup(props2.dispatcher) val mailboxType = system.mailboxes.getMailboxType(props2, dispatcher.configurator.config) - if (async) new RepointableActorRef(system, props2, dispatcher, mailboxType, supervisor, path).initialize(async) + if (async) + new RepointableActorRef(system, props2, dispatcher, mailboxType, supervisor, path).initialize(async) else new LocalActorRef(system, props2, dispatcher, mailboxType, supervisor, path) } catch { - case NonFatal(e) => throw new ConfigurationException( - s"configuration problem while creating [$path] with dispatcher [${props2.dispatcher}] and mailbox [${props2.mailbox}]", e) + case NonFatal(e) => + throw new ConfigurationException( + s"configuration problem while creating [$path] with dispatcher [${props2.dispatcher}] and mailbox [${props2.mailbox}]", + e) } case router => val lookup = if (lookupDeploy) deployer.lookup(path) else None - val r = router :: deploy.map(_.routerConfig).toList ::: lookup.map(_.routerConfig).toList reduce ((a, b) => b withFallback a) + val r = (router :: deploy.map(_.routerConfig).toList ::: lookup.map(_.routerConfig).toList).reduce((a, b) => + b.withFallback(a)) val p = props.withRouter(r) if (!system.dispatchers.hasDispatcher(p.dispatcher)) @@ -779,9 +803,9 @@ private[akka] class LocalActorRefProvider private[akka] ( if (!system.dispatchers.hasDispatcher(r.routerDispatcher)) throw new ConfigurationException(s"Dispatcher [${p.dispatcher}] not configured for router of $path") - val routerProps = Props( - p.deploy.copy(dispatcher = p.routerConfig.routerDispatcher), - classOf[RoutedActorCell.RouterActorCreator], Vector(p.routerConfig)) + val routerProps = Props(p.deploy.copy(dispatcher = p.routerConfig.routerDispatcher), + classOf[RoutedActorCell.RouterActorCreator], + Vector(p.routerConfig)) val routeeProps = 
p.withRouter(NoRouter) try { @@ -793,11 +817,14 @@ private[akka] class LocalActorRefProvider private[akka] ( val routeeDispatcher = system.dispatchers.lookup(p.dispatcher) system.mailboxes.getMailboxType(routeeProps, routeeDispatcher.configurator.config) - new RoutedActorRef(system, routerProps, routerDispatcher, routerMailbox, routeeProps, supervisor, path).initialize(async) + new RoutedActorRef(system, routerProps, routerDispatcher, routerMailbox, routeeProps, supervisor, path) + .initialize(async) } catch { - case NonFatal(e) => throw new ConfigurationException( - s"configuration problem while creating [$path] with router dispatcher [${routerProps.dispatcher}] and mailbox [${routerProps.mailbox}] " + - s"and routee dispatcher [${routeeProps.dispatcher}] and mailbox [${routeeProps.mailbox}]", e) + case NonFatal(e) => + throw new ConfigurationException( + s"configuration problem while creating [$path] with router dispatcher [${routerProps.dispatcher}] and mailbox [${routerProps.mailbox}] " + + s"and routee dispatcher [${routeeProps.dispatcher}] and mailbox [${routeeProps.mailbox}]", + e) } } } diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala index fd798c1848..6008b943e4 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala @@ -43,8 +43,9 @@ abstract class ActorSelection extends Serializable { * Pass [[ActorRef#noSender]] or `null` as sender if there is nobody to reply to */ def tell(msg: Any, sender: ActorRef): Unit = - ActorSelection.deliverSelection(anchor.asInstanceOf[InternalActorRef], sender, - ActorSelectionMessage(msg, path, wildcardFanOut = false)) + ActorSelection.deliverSelection(anchor.asInstanceOf[InternalActorRef], + sender, + ActorSelectionMessage(msg, path, wildcardFanOut = false)) /** * Forwards the message and passes the original sender actor as the sender. 
@@ -66,7 +67,7 @@ abstract class ActorSelection extends Serializable { def resolveOne()(implicit timeout: Timeout): Future[ActorRef] = { implicit val ec = ExecutionContexts.sameThreadExecutionContext val p = Promise[ActorRef]() - this.ask(Identify(None)) onComplete { + this.ask(Identify(None)).onComplete { case Success(ActorIdentity(_, Some(ref))) => p.success(ref) case _ => p.failure(ActorNotFound(this)) } @@ -204,12 +205,14 @@ object ActorSelection { * intention is to send messages frequently. */ def apply(anchorRef: ActorRef, elements: Iterable[String]): ActorSelection = { - val compiled: immutable.IndexedSeq[SelectionPathElement] = elements.iterator.collect({ - case x if !x.isEmpty => - if ((x.indexOf('?') != -1) || (x.indexOf('*') != -1)) SelectChildPattern(x) - else if (x == "..") SelectParent - else SelectChildName(x) - }).to(immutable.IndexedSeq) + val compiled: immutable.IndexedSeq[SelectionPathElement] = elements.iterator + .collect({ + case x if !x.isEmpty => + if ((x.indexOf('?') != -1) || (x.indexOf('*') != -1)) SelectChildPattern(x) + else if (x == "..") SelectParent + else SelectChildName(x) + }) + .to(immutable.IndexedSeq) new ActorSelection with ScalaActorSelection { override val anchor = anchorRef override val path = compiled @@ -231,9 +234,10 @@ object ActorSelection { @tailrec def rec(ref: InternalActorRef): Unit = { ref match { case refWithCell: ActorRefWithCell => - - def emptyRef = new EmptyLocalActorRef(refWithCell.provider, anchor.path / sel.elements.map(_.toString), - refWithCell.underlying.system.eventStream) + def emptyRef = + new EmptyLocalActorRef(refWithCell.provider, + anchor.path / sel.elements.map(_.toString), + refWithCell.underlying.system.eventStream) iter.next() match { case SelectParent => @@ -267,9 +271,8 @@ object ActorSelection { if (matchingChildren.isEmpty && !sel.wildcardFanOut) emptyRef.tell(sel, sender) else { - val m = sel.copy( - elements = iter.toVector, - wildcardFanOut = sel.wildcardFanOut || matchingChildren.size 
> 1) + val m = sel.copy(elements = iter.toVector, + wildcardFanOut = sel.wildcardFanOut || matchingChildren.size > 1) matchingChildren.foreach(c => deliverSelection(c.asInstanceOf[InternalActorRef], sender, m)) } } @@ -302,11 +305,11 @@ trait ScalaActorSelection { * message is delivered by traversing the various actor paths involved. */ @SerialVersionUID(2L) // it has protobuf serialization in akka-remote -private[akka] final case class ActorSelectionMessage( - msg: Any, - elements: immutable.Iterable[SelectionPathElement], - wildcardFanOut: Boolean) - extends AutoReceivedMessage with PossiblyHarmful { +private[akka] final case class ActorSelectionMessage(msg: Any, + elements: immutable.Iterable[SelectionPathElement], + wildcardFanOut: Boolean) + extends AutoReceivedMessage + with PossiblyHarmful { def identifyRequest: Option[Identify] = msg match { case x: Identify => Some(x) @@ -351,4 +354,3 @@ private[akka] case object SelectParent extends SelectionPathElement { */ @SerialVersionUID(1L) final case class ActorNotFound(selection: ActorSelection) extends RuntimeException("Actor not found for: " + selection) - diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 758758935b..2d96f5abf2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -45,7 +45,9 @@ object BootstrapSetup { * * @see [[BootstrapSetup]] for description of the properties */ - def apply(classLoader: Option[ClassLoader], config: Option[Config], defaultExecutionContext: Option[ExecutionContext]): BootstrapSetup = + def apply(classLoader: Option[ClassLoader], + config: Option[Config], + defaultExecutionContext: Option[ExecutionContext]): BootstrapSetup = new BootstrapSetup(classLoader, config, defaultExecutionContext) /** @@ -58,7 +60,9 @@ object BootstrapSetup { * * @see [[BootstrapSetup]] for description of the properties */ - def 
create(classLoader: Optional[ClassLoader], config: Optional[Config], defaultExecutionContext: Optional[ExecutionContext]): BootstrapSetup = + def create(classLoader: Optional[ClassLoader], + config: Optional[Config], + defaultExecutionContext: Optional[ExecutionContext]): BootstrapSetup = apply(classLoader.asScala, config.asScala, defaultExecutionContext.asScala) /** @@ -114,11 +118,11 @@ object ProviderSelection { * @param actorRefProvider Overrides the `akka.actor.provider` setting in config, can be `local` (default), `remote` or * `cluster`. It can also be a fully qualified class name of a provider. */ -final class BootstrapSetup private ( - val classLoader: Option[ClassLoader] = None, - val config: Option[Config] = None, - val defaultExecutionContext: Option[ExecutionContext] = None, - val actorRefProvider: Option[ProviderSelection] = None) extends Setup { +final class BootstrapSetup private (val classLoader: Option[ClassLoader] = None, + val config: Option[Config] = None, + val defaultExecutionContext: Option[ExecutionContext] = None, + val actorRefProvider: Option[ProviderSelection] = None) + extends Setup { def withClassloader(classLoader: ClassLoader): BootstrapSetup = new BootstrapSetup(Some(classLoader), config, defaultExecutionContext, actorRefProvider) @@ -148,7 +152,7 @@ object ActorSystem { case value => Some(value) } - val GlobalHome: Option[String] = SystemHome orElse EnvHome + val GlobalHome: Option[String] = SystemHome.orElse(EnvHome) /** * Creates a new ActorSystem with the name "default", @@ -213,7 +217,11 @@ object ActorSystem { * * @see The Typesafe Config Library API Documentation */ - def create(name: String, config: Config, classLoader: ClassLoader, defaultExecutionContext: ExecutionContext): ActorSystem = apply(name, Option(config), Option(classLoader), Option(defaultExecutionContext)) + def create(name: String, + config: Config, + classLoader: ClassLoader, + defaultExecutionContext: ExecutionContext): ActorSystem = + apply(name, 
Option(config), Option(classLoader), Option(defaultExecutionContext)) /** * Creates a new ActorSystem with the name "default", @@ -268,7 +276,8 @@ object ActorSystem { * * @see The Typesafe Config Library API Documentation */ - def apply(name: String, config: Config, classLoader: ClassLoader): ActorSystem = apply(name, Option(config), Option(classLoader), None) + def apply(name: String, config: Config, classLoader: ClassLoader): ActorSystem = + apply(name, Option(config), Option(classLoader), None) /** * Creates a new ActorSystem with the specified name, @@ -281,11 +290,10 @@ object ActorSystem { * * @see The Typesafe Config Library API Documentation */ - def apply( - name: String, - config: Option[Config] = None, - classLoader: Option[ClassLoader] = None, - defaultExecutionContext: Option[ExecutionContext] = None): ActorSystem = + def apply(name: String, + config: Option[Config] = None, + classLoader: Option[ClassLoader] = None, + defaultExecutionContext: Option[ExecutionContext] = None): ActorSystem = apply(name, ActorSystemSetup(BootstrapSetup(classLoader, config, defaultExecutionContext))) /** @@ -315,15 +323,17 @@ object ActorSystem { final val ConfigVersion: String = getString("akka.version") final val ProviderClass: String = - setup.get[BootstrapSetup] - .flatMap(_.actorRefProvider).map(_.identifier) + setup + .get[BootstrapSetup] + .flatMap(_.actorRefProvider) + .map(_.identifier) .getOrElse(getString("akka.actor.provider")) match { - case "local" => classOf[LocalActorRefProvider].getName - // these two cannot be referenced by class as they may not be on the classpath - case "remote" => "akka.remote.RemoteActorRefProvider" - case "cluster" => "akka.cluster.ClusterActorRefProvider" - case fqcn => fqcn - } + case "local" => classOf[LocalActorRefProvider].getName + // these two cannot be referenced by class as they may not be on the classpath + case "remote" => "akka.remote.RemoteActorRefProvider" + case "cluster" => "akka.cluster.ClusterActorRefProvider" + 
case fqcn => fqcn + } final val SupervisorStrategyClass: String = getString("akka.actor.guardian-supervisor-strategy") final val CreationTimeout: Timeout = Timeout(config.getMillisDuration("akka.actor.creation-timeout")) @@ -370,7 +380,8 @@ object ActorSystem { final val DefaultVirtualNodesFactor: Int = getInt("akka.actor.deployment.default.virtual-nodes-factor") if (ConfigVersion != Version) - throw new akka.ConfigurationException("Akka JAR version [" + Version + "] does not match the provided config version [" + ConfigVersion + "]") + throw new akka.ConfigurationException( + "Akka JAR version [" + Version + "] does not match the provided config version [" + ConfigVersion + "]") /** * Returns the String representation of the Config that this Settings is backed by @@ -667,18 +678,18 @@ abstract class ExtendedActorSystem extends ActorSystem { * Internal API */ @InternalApi -private[akka] class ActorSystemImpl( - val name: String, - applicationConfig: Config, - classLoader: ClassLoader, - defaultExecutionContext: Option[ExecutionContext], - val guardianProps: Option[Props], - setup: ActorSystemSetup) extends ExtendedActorSystem { +private[akka] class ActorSystemImpl(val name: String, + applicationConfig: Config, + classLoader: ClassLoader, + defaultExecutionContext: Option[ExecutionContext], + val guardianProps: Option[Props], + setup: ActorSystemSetup) + extends ExtendedActorSystem { if (!name.matches("""^[a-zA-Z0-9][a-zA-Z0-9-_]*$""")) throw new IllegalArgumentException( "invalid ActorSystem name [" + name + - "], must contain only word characters (i.e. [a-zA-Z0-9] plus non-leading '-' or '_')") + "], must contain only word characters (i.e. 
[a-zA-Z0-9] plus non-leading '-' or '_')") import ActorSystem._ @@ -689,7 +700,8 @@ private[akka] class ActorSystemImpl( new Thread.UncaughtExceptionHandler() { def uncaughtException(thread: Thread, cause: Throwable): Unit = { cause match { - case NonFatal(_) | _: InterruptedException | _: NotImplementedError | _: ControlThrowable => log.error(cause, "Uncaught error from thread [{}]", thread.getName) + case NonFatal(_) | _: InterruptedException | _: NotImplementedError | _: ControlThrowable => + log.error(cause, "Uncaught error from thread [{}]", thread.getName) case _ => if (cause.isInstanceOf[IncompatibleClassChangeError] && cause.getMessage.startsWith("akka")) System.err.println( @@ -727,7 +739,12 @@ private[akka] class ActorSystemImpl( System.err.flush() // Also log using the normal infrastructure - hope for the best: - markerLogging.error(LogMarker.Security, cause, "Uncaught error from thread [{}]: " + cause.getMessage + ", " + message + " ActorSystem[{}]", thread.getName, name) + markerLogging.error( + LogMarker.Security, + cause, + "Uncaught error from thread [{}]: " + cause.getMessage + ", " + message + " ActorSystem[{}]", + thread.getName, + name) } } @@ -747,16 +764,20 @@ private[akka] class ActorSystemImpl( protected def systemImpl: ActorSystemImpl = this - def systemActorOf(props: Props, name: String): ActorRef = systemGuardian.underlying.attachChild(props, name, systemService = true) + def systemActorOf(props: Props, name: String): ActorRef = + systemGuardian.underlying.attachChild(props, name, systemService = true) def actorOf(props: Props, name: String): ActorRef = if (guardianProps.isEmpty) guardian.underlying.attachChild(props, name, systemService = false) - else throw new UnsupportedOperationException( - s"cannot create top-level actor [$name] from the outside on ActorSystem with custom user guardian") + else + throw new UnsupportedOperationException( + s"cannot create top-level actor [$name] from the outside on ActorSystem with custom user 
guardian") def actorOf(props: Props): ActorRef = if (guardianProps.isEmpty) guardian.underlying.attachChild(props, systemService = false) - else throw new UnsupportedOperationException("cannot create top-level actor from the outside on ActorSystem with custom user guardian") + else + throw new UnsupportedOperationException( + "cannot create top-level actor from the outside on ActorSystem with custom user guardian") def stop(actor: ActorRef): Unit = { val path = actor.path @@ -780,17 +801,17 @@ private[akka] class ActorSystemImpl( dynamicAccess.createInstanceFor[LoggingFilter](LoggingFilter, arguments).get } - private[this] val markerLogging = new MarkerLoggingAdapter(eventStream, getClass.getName + "(" + name + ")", this.getClass, logFilter) + private[this] val markerLogging = + new MarkerLoggingAdapter(eventStream, getClass.getName + "(" + name + ")", this.getClass, logFilter) val log: LoggingAdapter = markerLogging val scheduler: Scheduler = createScheduler() val provider: ActorRefProvider = try { - val arguments = Vector( - classOf[String] -> name, - classOf[Settings] -> settings, - classOf[EventStream] -> eventStream, - classOf[DynamicAccess] -> dynamicAccess) + val arguments = Vector(classOf[String] -> name, + classOf[Settings] -> settings, + classOf[EventStream] -> eventStream, + classOf[DynamicAccess] -> dynamicAccess) dynamicAccess.createInstanceFor[ActorRefProvider](ProviderClass, arguments).get } catch { @@ -803,17 +824,24 @@ private[akka] class ActorSystemImpl( val mailboxes: Mailboxes = new Mailboxes(settings, eventStream, dynamicAccess, deadLetters) - val dispatchers: Dispatchers = new Dispatchers(settings, DefaultDispatcherPrerequisites( - threadFactory, eventStream, scheduler, dynamicAccess, settings, mailboxes, defaultExecutionContext)) + val dispatchers: Dispatchers = new Dispatchers(settings, + DefaultDispatcherPrerequisites(threadFactory, + eventStream, + scheduler, + dynamicAccess, + settings, + mailboxes, + defaultExecutionContext)) val 
dispatcher: ExecutionContextExecutor = dispatchers.defaultGlobalDispatcher val internalCallingThreadExecutionContext: ExecutionContext = - dynamicAccess.getObjectFor[ExecutionContext]("scala.concurrent.Future$InternalCallbackExecutor$").getOrElse( - new ExecutionContext with BatchingExecutor { + dynamicAccess + .getObjectFor[ExecutionContext]("scala.concurrent.Future$InternalCallbackExecutor$") + .getOrElse(new ExecutionContext with BatchingExecutor { override protected def unbatchedExecute(r: Runnable): Unit = r.run() override protected def resubmitOnBlock: Boolean = false // Since we execute inline, no gain in resubmitting - override def reportFailure(t: Throwable): Unit = dispatcher reportFailure t + override def reportFailure(t: Throwable): Unit = dispatcher.reportFailure(t) }) private[this] final val terminationCallbacks = new TerminationCallbacks(provider.terminationFuture)(dispatcher) @@ -828,34 +856,35 @@ private[akka] class ActorSystemImpl( def /(path: Iterable[String]): ActorPath = guardian.path / path // Used for ManifestInfo.checkSameVersion - private def allModules: List[String] = List( - "akka-actor", - "akka-actor-testkit-typed", - "akka-actor-typed", - "akka-agent", - "akka-camel", - "akka-cluster", - "akka-cluster-metrics", - "akka-cluster-sharding", - "akka-cluster-sharding-typed", - "akka-cluster-tools", - "akka-cluster-typed", - "akka-discovery", - "akka-distributed-data", - "akka-multi-node-testkit", - "akka-osgi", - "akka-persistence", - "akka-persistence-query", - "akka-persistence-shared", - "akka-persistence-typed", - "akka-protobuf", - "akka-remote", - "akka-slf4j", - "akka-stream", - "akka-stream-testkit", - "akka-stream-typed") + private def allModules: List[String] = + List("akka-actor", + "akka-actor-testkit-typed", + "akka-actor-typed", + "akka-agent", + "akka-camel", + "akka-cluster", + "akka-cluster-metrics", + "akka-cluster-sharding", + "akka-cluster-sharding-typed", + "akka-cluster-tools", + "akka-cluster-typed", + 
"akka-discovery", + "akka-distributed-data", + "akka-multi-node-testkit", + "akka-osgi", + "akka-persistence", + "akka-persistence-query", + "akka-persistence-shared", + "akka-persistence-typed", + "akka-protobuf", + "akka-remote", + "akka-slf4j", + "akka-stream", + "akka-stream-testkit", + "akka-stream-typed") @volatile private var _initialized = false + /** * Asserts that the ActorSystem has been fully initialized. Can be used to guard code blocks that might accidentally * be run during initialization but require a fully initialized ActorSystem before proceeding. @@ -864,9 +893,8 @@ private[akka] class ActorSystemImpl( if (!_initialized) throw new IllegalStateException( "The calling code expected that the ActorSystem was initialized but it wasn't yet. " + - "This is probably a bug in the ActorSystem initialization sequence often related to initialization of extensions. " + - "Please report at https://github.com/akka/akka/issues." - ) + "This is probably a bug in the ActorSystem initialization sequence often related to initialization of extensions. " + + "Please report at https://github.com/akka/akka/issues.") private lazy val _start: this.type = try { registerOnTermination(stopScheduler()) @@ -884,7 +912,8 @@ private[akka] class ActorSystemImpl( this } catch { case NonFatal(e) => - try terminate() catch { case NonFatal(_) => Try(stopScheduler()) } + try terminate() + catch { case NonFatal(_) => Try(stopScheduler()) } throw e } @@ -893,7 +922,7 @@ private[akka] class ActorSystemImpl( def registerOnTermination(code: Runnable): Unit = { terminationCallbacks.add(code) } override def terminate(): Future[Terminated] = { - if (!settings.LogDeadLettersDuringShutdown) logDeadLetterListener foreach stop + if (!settings.LogDeadLettersDuringShutdown) logDeadLetterListener.foreach(stop) guardian.stop() whenTerminated } @@ -922,10 +951,13 @@ private[akka] class ActorSystemImpl( * executed upon close(), the task may execute before its timeout. 
*/ protected def createScheduler(): Scheduler = - dynamicAccess.createInstanceFor[Scheduler](settings.SchedulerClass, immutable.Seq( - classOf[Config] -> settings.config, - classOf[LoggingAdapter] -> log, - classOf[ThreadFactory] -> threadFactory.withName(threadFactory.name + "-scheduler"))).get + dynamicAccess + .createInstanceFor[Scheduler](settings.SchedulerClass, + immutable.Seq(classOf[Config] -> settings.config, + classOf[LoggingAdapter] -> log, + classOf[ThreadFactory] -> threadFactory.withName( + threadFactory.name + "-scheduler"))) + .get //#create-scheduler /* @@ -958,21 +990,24 @@ private[akka] class ActorSystemImpl( case null => //Doesn't already exist, commence registration val inProcessOfRegistration = new CountDownLatch(1) extensions.putIfAbsent(ext, inProcessOfRegistration) match { // Signal that registration is in process - case null => try { // Signal was successfully sent - ext.createExtension(this) match { // Create and initialize the extension - case null => throw new IllegalStateException(s"Extension instance created as 'null' for extension [$ext]") - case instance => - extensions.replace(ext, inProcessOfRegistration, instance) //Replace our in process signal with the initialized extension - instance //Profit! + case null => + try { // Signal was successfully sent + ext.createExtension(this) match { // Create and initialize the extension + case null => + throw new IllegalStateException(s"Extension instance created as 'null' for extension [$ext]") + case instance => + extensions.replace(ext, inProcessOfRegistration, instance) //Replace our in process signal with the initialized extension + instance //Profit! 
+ } + } catch { + case t: Throwable => + extensions.replace(ext, inProcessOfRegistration, t) //In case shit hits the fan, remove the inProcess signal + throw t //Escalate to caller + } finally { + inProcessOfRegistration.countDown //Always notify listeners of the inProcess signal } - } catch { - case t: Throwable => - extensions.replace(ext, inProcessOfRegistration, t) //In case shit hits the fan, remove the inProcess signal - throw t //Escalate to caller - } finally { - inProcessOfRegistration.countDown //Always notify listeners of the inProcess signal - } - case _ => registerExtension(ext) //Someone else is in process of registering an extension for this Extension, retry + case _ => + registerExtension(ext) //Someone else is in process of registering an extension for this Extension, retry } case existing => existing.asInstanceOf[T] } @@ -986,12 +1021,15 @@ private[akka] class ActorSystemImpl( def hasExtension(ext: ExtensionId[_ <: Extension]): Boolean = findExtension(ext) != null private def loadExtensions(): Unit = { + /** * @param throwOnLoadFail Throw exception when an extension fails to load (needed for backwards compatibility) */ def loadExtensions(key: String, throwOnLoadFail: Boolean): Unit = { - immutableSeq(settings.config.getStringList(key)) foreach { fqcn => - dynamicAccess.getObjectFor[AnyRef](fqcn) recoverWith { case _ => dynamicAccess.createInstanceFor[AnyRef](fqcn, Nil) } match { + immutableSeq(settings.config.getStringList(key)).foreach { fqcn => + dynamicAccess.getObjectFor[AnyRef](fqcn).recoverWith { + case _ => dynamicAccess.createInstanceFor[AnyRef](fqcn, Nil) + } match { case Success(p: ExtensionIdProvider) => registerExtension(p.lookup()) case Success(p: ExtensionId[_]) => registerExtension(p) case Success(_) => @@ -1016,29 +1054,30 @@ private[akka] class ActorSystemImpl( case wc: ActorRefWithCell => val cell = wc.underlying (if (indent.isEmpty) "-> " else indent.dropRight(1) + "⌊-> ") + - node.path.name + " " + Logging.simpleName(node) + " 
" + - (cell match { - case real: ActorCell => if (real.actor ne null) real.actor.getClass else "null" - case _ => Logging.simpleName(cell) - }) + - (cell match { - case real: ActorCell => " status=" + real.mailbox.currentStatus - case _ => "" - }) + - " " + (cell.childrenRefs match { - case ChildrenContainer.TerminatingChildrenContainer(_, toDie, reason) => - "Terminating(" + reason + ")" + - (toDie.toSeq.sorted mkString ("\n" + indent + " | toDie: ", "\n" + indent + " | ", "")) - case x @ (ChildrenContainer.TerminatedChildrenContainer | ChildrenContainer.EmptyChildrenContainer) => x.toString - case n: ChildrenContainer.NormalChildrenContainer => n.c.size + " children" - case x => Logging.simpleName(x) - }) + - (if (cell.childrenRefs.children.isEmpty) "" else "\n") + - ({ - val children = cell.childrenRefs.children.toSeq.sorted - val bulk = children.dropRight(1) map (printNode(_, indent + " |")) - bulk ++ (children.lastOption map (printNode(_, indent + " "))) - } mkString ("\n")) + node.path.name + " " + Logging.simpleName(node) + " " + + (cell match { + case real: ActorCell => if (real.actor ne null) real.actor.getClass else "null" + case _ => Logging.simpleName(cell) + }) + + (cell match { + case real: ActorCell => " status=" + real.mailbox.currentStatus + case _ => "" + }) + + " " + (cell.childrenRefs match { + case ChildrenContainer.TerminatingChildrenContainer(_, toDie, reason) => + "Terminating(" + reason + ")" + + (toDie.toSeq.sorted.mkString("\n" + indent + " | toDie: ", "\n" + indent + " | ", "")) + case x @ (ChildrenContainer.TerminatedChildrenContainer | ChildrenContainer.EmptyChildrenContainer) => + x.toString + case n: ChildrenContainer.NormalChildrenContainer => n.c.size + " children" + case x => Logging.simpleName(x) + }) + + (if (cell.childrenRefs.children.isEmpty) "" else "\n") + + ({ + val children = cell.childrenRefs.children.toSeq.sorted + val bulk = children.dropRight(1).map(printNode(_, indent + " |")) + bulk ++ 
(children.lastOption.map(printNode(_, indent + " "))) + }.mkString("\n")) case _ => indent + node.path.name + " " + Logging.simpleName(node) } @@ -1051,8 +1090,8 @@ private[akka] class ActorSystemImpl( private[this] final val ref = new AtomicReference(done) // onComplete never fires twice so safe to avoid null check - upStreamTerminated onComplete { - t => ref.getAndSet(null).complete(t) + upStreamTerminated.onComplete { t => + ref.getAndSet(null).complete(t) } /** diff --git a/akka-actor/src/main/scala/akka/actor/Address.scala b/akka-actor/src/main/scala/akka/actor/Address.scala index 08f7e62692..cf5fc56743 100644 --- a/akka-actor/src/main/scala/akka/actor/Address.scala +++ b/akka-actor/src/main/scala/akka/actor/Address.scala @@ -68,6 +68,7 @@ final case class Address private (protocol: String, system: String, host: Option } object Address { + /** * Constructs a new Address with the specified protocol and system name */ @@ -76,7 +77,8 @@ object Address { /** * Constructs a new Address with the specified protocol, system name, host and port */ - def apply(protocol: String, system: String, host: String, port: Int) = new Address(protocol, system, Some(host), Some(port)) + def apply(protocol: String, system: String, host: String, port: Int) = + new Address(protocol, system, Some(host), Some(port)) /** * `Address` ordering type class, sorts addresses by protocol, name, host and port. @@ -129,7 +131,9 @@ object RelativeActorPath extends PathUtils { * This object serves as extractor for Scala and as address parser for Java. 
*/ object AddressFromURIString { - def unapply(addr: String): Option[Address] = try unapply(new URI(addr)) catch { case _: URISyntaxException => None } + def unapply(addr: String): Option[Address] = + try unapply(new URI(addr)) + catch { case _: URISyntaxException => None } def unapply(uri: URI): Option[Address] = if (uri eq null) None @@ -139,9 +143,10 @@ object AddressFromURIString { else Some(Address(uri.getScheme, uri.getHost)) } else { // case 2: “akka://system@host:port” if (uri.getHost == null || uri.getPort == -1) None - else Some( - if (uri.getUserInfo == null) Address(uri.getScheme, uri.getHost) - else Address(uri.getScheme, uri.getUserInfo, uri.getHost, uri.getPort)) + else + Some( + if (uri.getUserInfo == null) Address(uri.getScheme, uri.getHost) + else Address(uri.getScheme, uri.getUserInfo, uri.getHost, uri.getPort)) } /** diff --git a/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala b/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala index 72d91e5320..405a3a4369 100644 --- a/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala +++ b/akka-actor/src/main/scala/akka/actor/CoordinatedShutdown.scala @@ -34,6 +34,7 @@ import akka.annotation.InternalApi import akka.util.OptionVal object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with ExtensionIdProvider { + /** * The first pre-defined phase that applications can add tasks to. * Note that more phases can be added in the application's @@ -56,6 +57,7 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi * Final shutdown of service endpoints. */ val PhaseServiceStop = "service-stop" + /** * Phase for custom application tasks that are to be run * after service shutdown and before cluster shutdown. @@ -181,12 +183,12 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi // locate reason-specific overrides and merge with defaults. 
@InternalApi private[akka] def confWithOverrides(conf: Config, reason: Option[Reason]): Config = { - reason.flatMap { r => - val basePath = s"""reason-overrides."${r.getClass.getName}"""" - if (conf.hasPath(basePath)) Some(conf.getConfig(basePath).withFallback(conf)) else None - }.getOrElse( - conf - ) + reason + .flatMap { r => + val basePath = s"""reason-overrides."${r.getClass.getName}"""" + if (conf.hasPath(basePath)) Some(conf.getConfig(basePath).withFallback(conf)) else None + } + .getOrElse(conf) } private def initPhaseActorSystemTerminate(system: ActorSystem, conf: Config, coord: CoordinatedShutdown): Unit = { @@ -213,10 +215,12 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi } if (terminateActorSystem) { - system.terminate().map { _ => - if (exitJvm && !runningJvmHook) System.exit(exitCode) - Done - }(ExecutionContexts.sameThreadExecutionContext) + system + .terminate() + .map { _ => + if (exitJvm && !runningJvmHook) System.exit(exitCode) + Done + }(ExecutionContexts.sameThreadExecutionContext) } else if (exitJvm) { System.exit(exitCode) Future.successful(Done) @@ -238,9 +242,7 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi Await.ready(coord.run(JvmExitReason), totalTimeout) } catch { case NonFatal(e) => - coord.log.warning( - "CoordinatedShutdown from JVM shutdown failed: {}", - e.getMessage) + coord.log.warning("CoordinatedShutdown from JVM shutdown failed: {}", e.getMessage) } } }) @@ -250,7 +252,10 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi /** * INTERNAL API */ - private[akka] final case class Phase(dependsOn: Set[String], timeout: FiniteDuration, recover: Boolean, enabled: Boolean) + private[akka] final case class Phase(dependsOn: Set[String], + timeout: FiniteDuration, + recover: Boolean, + enabled: Boolean) /** * INTERNAL API @@ -292,7 +297,8 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi def 
depthFirstSearch(u: String): Unit = { if (tempMark(u)) - throw new IllegalArgumentException("Cycle detected in graph of phases. It must be a DAG. " + + throw new IllegalArgumentException( + "Cycle detected in graph of phases. It must be a DAG. " + s"phase [$u] depends transitively on itself. All dependencies: $phases") if (unmarked(u)) { tempMark += u @@ -311,15 +317,16 @@ object CoordinatedShutdown extends ExtensionId[CoordinatedShutdown] with Extensi } -final class CoordinatedShutdown private[akka] ( - system: ExtendedActorSystem, - phases: Map[String, CoordinatedShutdown.Phase]) extends Extension { +final class CoordinatedShutdown private[akka] (system: ExtendedActorSystem, + phases: Map[String, CoordinatedShutdown.Phase]) + extends Extension { import CoordinatedShutdown.Reason import CoordinatedShutdown.UnknownReason /** INTERNAL API */ private[akka] val log = Logging(system, getClass) private val knownPhases = phases.keySet ++ phases.values.flatMap(_.dependsOn) + /** INTERNAL API */ private[akka] val orderedPhases = CoordinatedShutdown.topologicalSort(phases) private val tasks = new ConcurrentHashMap[String, Vector[(String, () => Future[Done])]] @@ -347,12 +354,12 @@ final class CoordinatedShutdown private[akka] ( * and it will be performed. */ @tailrec def addTask(phase: String, taskName: String)(task: () => Future[Done]): Unit = { - require( - knownPhases(phase), - s"Unknown phase [$phase], known phases [$knownPhases]. " + - "All phases (along with their optional dependencies) must be defined in configuration") - require(taskName.nonEmpty, "Set a task name when adding tasks to the Coordinated Shutdown. " + - "Try to use unique, self-explanatory names.") + require(knownPhases(phase), + s"Unknown phase [$phase], known phases [$knownPhases]. " + + "All phases (along with their optional dependencies) must be defined in configuration") + require(taskName.nonEmpty, + "Set a task name when adding tasks to the Coordinated Shutdown. 
" + + "Try to use unique, self-explanatory names.") val current = tasks.get(phase) if (current == null) { if (tasks.putIfAbsent(phase, Vector(taskName -> task)) != null) @@ -430,8 +437,9 @@ final class CoordinatedShutdown private[akka] ( case Nil => Future.successful(Done) case phase :: remaining if !phases(phase).enabled => tasks.get(phase) match { - case null => // This pretty much is ok as there are no tasks - case tasks => log.info("Phase [{}] disabled through configuration, skipping [{}] tasks", phase, tasks.size) + case null => // This pretty much is ok as there are no tasks + case tasks => + log.info("Phase [{}] disabled through configuration, skipping [{}] tasks", phase, tasks.size) } loop(remaining) case phase :: remaining => @@ -440,31 +448,34 @@ final class CoordinatedShutdown private[akka] ( if (debugEnabled) log.debug("Performing phase [{}] with [0] tasks", phase) Future.successful(Done) case tasks => - if (debugEnabled) log.debug( - "Performing phase [{}] with [{}] tasks: [{}]", - phase, tasks.size, tasks.map { case (taskName, _) => taskName }.mkString(", ")) + if (debugEnabled) + log.debug("Performing phase [{}] with [{}] tasks: [{}]", + phase, + tasks.size, + tasks.map { case (taskName, _) => taskName }.mkString(", ")) // note that tasks within same phase are performed in parallel val recoverEnabled = phases(phase).recover - val result = Future.sequence(tasks.map { - case (taskName, task) => - try { - val r = task.apply() - if (recoverEnabled) r.recover { + val result = Future + .sequence(tasks.map { + case (taskName, task) => + try { + val r = task.apply() + if (recoverEnabled) r.recover { + case NonFatal(e) => + log.warning("Task [{}] failed in phase [{}]: {}", taskName, phase, e.getMessage) + Done + } else r + } catch { case NonFatal(e) => - log.warning("Task [{}] failed in phase [{}]: {}", taskName, phase, e.getMessage) - Done + // in case task.apply throws + if (recoverEnabled) { + log.warning("Task [{}] failed in phase [{}]: {}", taskName, 
phase, e.getMessage) + Future.successful(Done) + } else + Future.failed(e) } - else r - } catch { - case NonFatal(e) => - // in case task.apply throws - if (recoverEnabled) { - log.warning("Task [{}] failed in phase [{}]: {}", taskName, phase, e.getMessage) - Future.successful(Done) - } else - Future.failed(e) - } - }).map(_ => Done)(ExecutionContexts.sameThreadExecutionContext) + }) + .map(_ => Done)(ExecutionContexts.sameThreadExecutionContext) val timeout = phases(phase).timeout val deadline = Deadline.now + timeout val timeoutFut = try { @@ -572,7 +583,8 @@ final class CoordinatedShutdown private[akka] ( if (_jvmHooksLatch.compareAndSet(currentLatch, newLatch)) { val thread = new Thread { override def run(): Unit = { - try hook finally _jvmHooksLatch.get.countDown() + try hook + finally _jvmHooksLatch.get.countDown() } } thread.setName(s"${system.name}-shutdown-hook-${newLatch.getCount}") diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 1196a26799..ded4b94087 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -34,13 +34,12 @@ object Deploy { * }}} */ @SerialVersionUID(2L) -final case class Deploy( - path: String = "", - config: Config = ConfigFactory.empty, - routerConfig: RouterConfig = NoRouter, - scope: Scope = NoScopeGiven, - dispatcher: String = Deploy.NoDispatcherGiven, - mailbox: String = Deploy.NoMailboxGiven) { +final case class Deploy(path: String = "", + config: Config = ConfigFactory.empty, + routerConfig: RouterConfig = NoRouter, + scope: Scope = NoScopeGiven, + dispatcher: String = Deploy.NoDispatcherGiven, + mailbox: String = Deploy.NoMailboxGiven) { /** * Java API to create a Deploy with the given RouterConfig @@ -63,13 +62,12 @@ final case class Deploy( * other members are merged using `X.withFallback(other.X)`. 
*/ def withFallback(other: Deploy): Deploy = { - Deploy( - path, - config.withFallback(other.config), - routerConfig.withFallback(other.routerConfig), - scope.withFallback(other.scope), - if (dispatcher == Deploy.NoDispatcherGiven) other.dispatcher else dispatcher, - if (mailbox == Deploy.NoMailboxGiven) other.mailbox else mailbox) + Deploy(path, + config.withFallback(other.config), + routerConfig.withFallback(other.routerConfig), + scope.withFallback(other.scope), + if (dispatcher == Deploy.NoDispatcherGiven) other.dispatcher else dispatcher, + if (mailbox == Deploy.NoMailboxGiven) other.mailbox else mailbox) } } @@ -81,6 +79,7 @@ final case class Deploy( * Akka actors fully extensible. */ trait Scope { + /** * When merging [[akka.actor.Deploy]] instances using ``withFallback()`` on * the left one, this is propagated to “merging” scopes in the same way. @@ -101,6 +100,7 @@ abstract class LocalScope extends Scope */ @SerialVersionUID(1L) case object LocalScope extends LocalScope { + /** * Java API: get the singleton instance */ @@ -136,15 +136,24 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce private val config = settings.config.getConfig("akka.actor.deployment") protected val default = config.getConfig("default") val routerTypeMapping: Map[String, String] = - settings.config.getConfig("akka.actor.router.type-mapping").root.unwrapped.asScala.collect { - case (key, value: String) => (key -> value) - }.toMap + settings.config + .getConfig("akka.actor.router.type-mapping") + .root + .unwrapped + .asScala + .collect { + case (key, value: String) => (key -> value) + } + .toMap - config.root.asScala.map { - case ("default", _) => None - case (key, value: ConfigObject) => parseConfig(key, value.toConfig) - case _ => None - }.flatten foreach deploy + config.root.asScala + .map { + case ("default", _) => None + case (key, value: ConfigObject) => parseConfig(key, value.toConfig) + case _ => None + } + .flatten + .foreach(deploy) def 
lookup(path: ActorPath): Option[Deploy] = lookup(path.elements.drop(1)) @@ -192,21 +201,28 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce def throwCannotInstantiateRouter(args: Seq[(Class[_], AnyRef)], cause: Throwable) = throw new IllegalArgumentException( s"Cannot instantiate router [$fqn], defined in [$key], " + - s"make sure it extends [${classOf[RouterConfig]}] and has constructor with " + - s"[${args(0)._1.getName}] and optional [${args(1)._1.getName}] parameter", cause) + s"make sure it extends [${classOf[RouterConfig]}] and has constructor with " + + s"[${args(0)._1.getName}] and optional [${args(1)._1.getName}] parameter", + cause) // first try with Config param, and then with Config and DynamicAccess parameters val args1 = List(classOf[Config] -> deployment2) val args2 = List(classOf[Config] -> deployment2, classOf[DynamicAccess] -> dynamicAccess) - dynamicAccess.createInstanceFor[RouterConfig](fqn, args1).recover({ - case e @ (_: IllegalArgumentException | _: ConfigException) => throw e - case e: NoSuchMethodException => - dynamicAccess.createInstanceFor[RouterConfig](fqn, args2).recover({ - case e @ (_: IllegalArgumentException | _: ConfigException) => throw e - case _ => throwCannotInstantiateRouter(args2, e) - }).get - case e => throwCannotInstantiateRouter(args2, e) - }).get + dynamicAccess + .createInstanceFor[RouterConfig](fqn, args1) + .recover({ + case e @ (_: IllegalArgumentException | _: ConfigException) => throw e + case e: NoSuchMethodException => + dynamicAccess + .createInstanceFor[RouterConfig](fqn, args2) + .recover({ + case e @ (_: IllegalArgumentException | _: ConfigException) => throw e + case _ => throwCannotInstantiateRouter(args2, e) + }) + .get + case e => throwCannotInstantiateRouter(args2, e) + }) + .get } } diff --git a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala index af3ce5cae5..07eada5449 100644 --- 
a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala +++ b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala @@ -17,6 +17,7 @@ import scala.util.Try * unless they are extending Akka in ways which go beyond simple Extensions. */ abstract class DynamicAccess { + /** * Convenience method which given a `Class[_]` object and a constructor description * will create a new instance of that class. diff --git a/akka-actor/src/main/scala/akka/actor/Extension.scala b/akka-actor/src/main/scala/akka/actor/Extension.scala index 3ad666796c..3284f9c299 100644 --- a/akka-actor/src/main/scala/akka/actor/Extension.scala +++ b/akka-actor/src/main/scala/akka/actor/Extension.scala @@ -112,6 +112,7 @@ abstract class AbstractExtensionId[T <: Extension] extends ExtensionId[T] * The lookup method should return the canonical reference to the extension. */ trait ExtensionIdProvider { + /** * Returns the canonical ExtensionId for this Extension */ @@ -152,5 +153,6 @@ abstract class ExtensionKey[T <: Extension](implicit m: ClassTag[T]) extends Ext def this(clazz: Class[T]) = this()(ClassTag(clazz)) override def lookup(): ExtensionId[T] = this - def createExtension(system: ExtendedActorSystem): T = system.dynamicAccess.createInstanceFor[T](m.runtimeClass, List(classOf[ExtendedActorSystem] -> system)).get + def createExtension(system: ExtendedActorSystem): T = + system.dynamicAccess.createInstanceFor[T](m.runtimeClass, List(classOf[ExtendedActorSystem] -> system)).get } diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 9442f8e071..8e53ddf300 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -33,8 +33,10 @@ private[akka] case object ChildNameReserved extends ChildStats * ChildRestartStats is the statistics kept by every parent Actor for every child Actor * and is used for SupervisorStrategies to know how to deal with 
problems that occur for the children. */ -final case class ChildRestartStats(child: ActorRef, var maxNrOfRetriesCount: Int = 0, var restartTimeWindowStartNanos: Long = 0L) - extends ChildStats { +final case class ChildRestartStats(child: ActorRef, + var maxNrOfRetriesCount: Int = 0, + var restartTimeWindowStartNanos: Long = 0L) + extends ChildStats { def uid: Int = child.path.uid @@ -197,7 +199,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { * the given Throwables matches the cause and restarts, otherwise escalates. */ def makeDecider(trapExit: immutable.Seq[Class[_ <: Throwable]]): Decider = { - case x => if (trapExit exists (_ isInstance x)) Restart else Escalate + case x => if (trapExit.exists(_.isInstance(x))) Restart else Escalate } /** @@ -215,7 +217,7 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { def makeDecider(flat: Iterable[CauseDirective]): Decider = { val directives = sort(flat) - { case x => directives collectFirst { case (c, d) if c isInstance x => d } getOrElse Escalate } + { case x => directives.collectFirst { case (c, d) if c.isInstance(x) => d }.getOrElse(Escalate) } } /** @@ -231,12 +233,13 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { */ private[akka] def sort(in: Iterable[CauseDirective]): immutable.Seq[CauseDirective] = in.foldLeft(new ArrayBuffer[CauseDirective](in.size)) { (buf, ca) => - buf.indexWhere(_._1 isAssignableFrom ca._1) match { - case -1 => buf append ca - case x => buf insert (x, ca) + buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { + case -1 => buf.append(ca) + case x => buf.insert(x, ca) + } + buf } - buf - }.to(immutable.IndexedSeq) + .to(immutable.IndexedSeq) private[akka] def withinTimeRangeOption(withinTimeRange: Duration): Option[Duration] = if (withinTimeRange.isFinite && withinTimeRange >= Duration.Zero) Some(withinTimeRange) else None @@ -279,7 +282,12 @@ abstract class SupervisorStrategy { /** * This method is called to 
act on the failure of a child: restart if the flag is true, stop otherwise. */ - def processFailure(context: ActorContext, restart: Boolean, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Unit + def processFailure(context: ActorContext, + restart: Boolean, + child: ActorRef, + cause: Throwable, + stats: ChildRestartStats, + children: Iterable[ChildRestartStats]): Unit /** * This is the main entry point: in case of a child’s failure, this method @@ -295,7 +303,11 @@ abstract class SupervisorStrategy { * * @param children is a lazy collection (a view) */ - def handleFailure(context: ActorContext, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Boolean = { + def handleFailure(context: ActorContext, + child: ActorRef, + cause: Throwable, + stats: ChildRestartStats, + children: Iterable[ChildRestartStats]): Boolean = { val directive = decider.applyOrElse(cause, escalateDefault) directive match { case Resume => @@ -332,10 +344,11 @@ abstract class SupervisorStrategy { def logFailure(context: ActorContext, child: ActorRef, cause: Throwable, decision: Directive): Unit = if (loggingEnabled) { val logMessage = cause match { - case e: ActorInitializationException if e.getCause ne null => e.getCause match { - case ex: InvocationTargetException if ex.getCause ne null => ex.getCause.getMessage - case ex => ex.getMessage - } + case e: ActorInitializationException if e.getCause ne null => + e.getCause match { + case ex: InvocationTargetException if ex.getCause ne null => ex.getCause.getMessage + case ex => ex.getMessage + } case e => e.getMessage } decision match { @@ -347,14 +360,16 @@ abstract class SupervisorStrategy { // logging is not the main purpose, and if it fails there’s nothing we can do private def publish(context: ActorContext, logEvent: LogEvent): Unit = - try context.system.eventStream.publish(logEvent) catch { case NonFatal(_) => } + try 
context.system.eventStream.publish(logEvent) + catch { case NonFatal(_) => } /** * Resume the previously failed child: do never apply this to a child which * is not the currently failing child. Suspend/resume needs to be done in * matching pairs, otherwise actors will wake up too soon or never at all. */ - final def resumeChild(child: ActorRef, cause: Throwable): Unit = child.asInstanceOf[InternalActorRef].resume(causedByFailure = cause) + final def resumeChild(child: ActorRef, cause: Throwable): Unit = + child.asInstanceOf[InternalActorRef].resume(causedByFailure = cause) /** * Restart the given child, possibly suspending it first. @@ -386,24 +401,29 @@ abstract class SupervisorStrategy { * [[scala.collection.immutable.Seq]] of Throwables which maps the given Throwables to restarts, otherwise escalates. * @param loggingEnabled the strategy logs the failure if this is enabled (true), by default it is enabled */ -case class AllForOneStrategy( - maxNrOfRetries: Int = -1, - withinTimeRange: Duration = Duration.Inf, - override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider) - extends SupervisorStrategy { +case class AllForOneStrategy(maxNrOfRetries: Int = -1, + withinTimeRange: Duration = Duration.Inf, + override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider) + extends SupervisorStrategy { import SupervisorStrategy._ /** * Java API */ - def this(maxNrOfRetries: Int, withinTimeRange: Duration, decider: SupervisorStrategy.JDecider, loggingEnabled: Boolean) = + def this(maxNrOfRetries: Int, + withinTimeRange: Duration, + decider: SupervisorStrategy.JDecider, + loggingEnabled: Boolean) = this(maxNrOfRetries, withinTimeRange, loggingEnabled)(SupervisorStrategy.makeDecider(decider)) /** * Java API */ - def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, decider: SupervisorStrategy.JDecider, loggingEnabled: Boolean) = + def this(maxNrOfRetries: Int, + withinTimeRange: java.time.Duration, + decider: 
SupervisorStrategy.JDecider, + loggingEnabled: Boolean) = this(maxNrOfRetries, withinTimeRange.asScala, loggingEnabled)(SupervisorStrategy.makeDecider(decider)) /** @@ -459,14 +479,20 @@ case class AllForOneStrategy( * every call to requestRestartPermission, assuming that strategies are shared * across actors and thus this field does not take up much space */ - private val retriesWindow = (maxNrOfRetriesOption(maxNrOfRetries), withinTimeRangeOption(withinTimeRange).map(_.toMillis.toInt)) + private val retriesWindow = + (maxNrOfRetriesOption(maxNrOfRetries), withinTimeRangeOption(withinTimeRange).map(_.toMillis.toInt)) def handleChildTerminated(context: ActorContext, child: ActorRef, children: Iterable[ActorRef]): Unit = () - def processFailure(context: ActorContext, restart: Boolean, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Unit = { + def processFailure(context: ActorContext, + restart: Boolean, + child: ActorRef, + cause: Throwable, + stats: ChildRestartStats, + children: Iterable[ChildRestartStats]): Unit = { if (children.nonEmpty) { if (restart && children.forall(_.requestRestartPermission(retriesWindow))) - children foreach (crs => restartChild(crs.child, cause, suspendFirst = (crs.child != child))) + children.foreach(crs => restartChild(crs.child, cause, suspendFirst = (crs.child != child))) else for (c <- children) context.stop(c.child) } @@ -485,22 +511,27 @@ case class AllForOneStrategy( * [[scala.collection.immutable.Seq]] of Throwables which maps the given Throwables to restarts, otherwise escalates. 
* @param loggingEnabled the strategy logs the failure if this is enabled (true), by default it is enabled */ -case class OneForOneStrategy( - maxNrOfRetries: Int = -1, - withinTimeRange: Duration = Duration.Inf, - override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider) - extends SupervisorStrategy { +case class OneForOneStrategy(maxNrOfRetries: Int = -1, + withinTimeRange: Duration = Duration.Inf, + override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider) + extends SupervisorStrategy { /** * Java API */ - def this(maxNrOfRetries: Int, withinTimeRange: Duration, decider: SupervisorStrategy.JDecider, loggingEnabled: Boolean) = + def this(maxNrOfRetries: Int, + withinTimeRange: Duration, + decider: SupervisorStrategy.JDecider, + loggingEnabled: Boolean) = this(maxNrOfRetries, withinTimeRange, loggingEnabled)(SupervisorStrategy.makeDecider(decider)) /** * Java API */ - def this(maxNrOfRetries: Int, withinTimeRange: java.time.Duration, decider: SupervisorStrategy.JDecider, loggingEnabled: Boolean) = + def this(maxNrOfRetries: Int, + withinTimeRange: java.time.Duration, + decider: SupervisorStrategy.JDecider, + loggingEnabled: Boolean) = this(maxNrOfRetries, withinTimeRange.asScala, loggingEnabled)(SupervisorStrategy.makeDecider(decider)) /** @@ -555,13 +586,17 @@ case class OneForOneStrategy( * every call to requestRestartPermission, assuming that strategies are shared * across actors and thus this field does not take up much space */ - private val retriesWindow = ( - SupervisorStrategy.maxNrOfRetriesOption(maxNrOfRetries), - SupervisorStrategy.withinTimeRangeOption(withinTimeRange).map(_.toMillis.toInt)) + private val retriesWindow = (SupervisorStrategy.maxNrOfRetriesOption(maxNrOfRetries), + SupervisorStrategy.withinTimeRangeOption(withinTimeRange).map(_.toMillis.toInt)) def handleChildTerminated(context: ActorContext, child: ActorRef, children: Iterable[ActorRef]): Unit = () - def processFailure(context: 
ActorContext, restart: Boolean, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Unit = { + def processFailure(context: ActorContext, + restart: Boolean, + child: ActorRef, + cause: Throwable, + stats: ChildRestartStats, + children: Iterable[ChildRestartStats]): Unit = { if (restart && stats.requestRestartPermission(retriesWindow)) restartChild(child, cause, suspendFirst = false) else diff --git a/akka-actor/src/main/scala/akka/actor/IndirectActorProducer.scala b/akka-actor/src/main/scala/akka/actor/IndirectActorProducer.scala index 2fcc780dea..0e82b15e64 100644 --- a/akka-actor/src/main/scala/akka/actor/IndirectActorProducer.scala +++ b/akka-actor/src/main/scala/akka/actor/IndirectActorProducer.scala @@ -82,7 +82,8 @@ private[akka] class CreatorConsumer(clazz: Class[_ <: Actor], creator: Creator[A /** * INTERNAL API */ -private[akka] class TypedCreatorFunctionConsumer(clz: Class[_ <: Actor], creator: () => Actor) extends IndirectActorProducer { +private[akka] class TypedCreatorFunctionConsumer(clz: Class[_ <: Actor], creator: () => Actor) + extends IndirectActorProducer { override def actorClass = clz override def produce() = creator() } @@ -90,7 +91,8 @@ private[akka] class TypedCreatorFunctionConsumer(clz: Class[_ <: Actor], creator /** * INTERNAL API */ -private[akka] class ArgsReflectConstructor(clz: Class[_ <: Actor], args: immutable.Seq[Any]) extends IndirectActorProducer { +private[akka] class ArgsReflectConstructor(clz: Class[_ <: Actor], args: immutable.Seq[Any]) + extends IndirectActorProducer { private[this] val constructor = Reflect.findConstructor(clz, args) override def actorClass = clz override def produce() = Reflect.instantiate(constructor, args).asInstanceOf[Actor] diff --git a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala index 86d7a00391..955a0b355a 100644 --- 
a/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/LightArrayRevolverScheduler.scala @@ -34,21 +34,22 @@ import akka.dispatch.AbstractNodeQueue * scheduled possibly one tick later than they could be (if checking that * “now() + delay <= nextTick” were done). */ -class LightArrayRevolverScheduler( - config: Config, - log: LoggingAdapter, - threadFactory: ThreadFactory) - extends Scheduler with Closeable { +class LightArrayRevolverScheduler(config: Config, log: LoggingAdapter, threadFactory: ThreadFactory) + extends Scheduler + with Closeable { import Helpers.Requiring import Helpers.ConfigOps val WheelSize = - config.getInt("akka.scheduler.ticks-per-wheel") + config + .getInt("akka.scheduler.ticks-per-wheel") .requiring(ticks => (ticks & (ticks - 1)) == 0, "ticks-per-wheel must be a power of 2") val TickDuration = - config.getMillisDuration("akka.scheduler.tick-duration") - .requiring(_ >= 10.millis || !Helpers.isWindows, "minimum supported akka.scheduler.tick-duration on Windows is 10ms") + config + .getMillisDuration("akka.scheduler.tick-duration") + .requiring(_ >= 10.millis || !Helpers.isWindows, + "minimum supported akka.scheduler.tick-duration on Windows is 10ms") .requiring(_ >= 1.millis, "minimum supported akka.scheduler.tick-duration is 1ms") val ShutdownTimeout = config.getMillisDuration("akka.scheduler.shutdown-timeout") @@ -82,31 +83,34 @@ class LightArrayRevolverScheduler( protected def waitNanos(nanos: Long): Unit = { // see http://www.javamex.com/tutorials/threads/sleep_issues.shtml val sleepMs = if (Helpers.isWindows) (nanos + 4999999) / 10000000 * 10 else (nanos + 999999) / 1000000 - try Thread.sleep(sleepMs) catch { + try Thread.sleep(sleepMs) + catch { case _: InterruptedException => Thread.currentThread.interrupt() // we got woken up } } - override def schedule( - initialDelay: FiniteDuration, - delay: FiniteDuration, - runnable: Runnable)(implicit executor: ExecutionContext): 
Cancellable = { + override def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, runnable: Runnable)( + implicit executor: ExecutionContext): Cancellable = { checkMaxDelay(roundUp(delay).toNanos) try new AtomicReference[Cancellable](InitialRepeatMarker) with Cancellable { self => - compareAndSet(InitialRepeatMarker, schedule( - executor, - new AtomicLong(clock() + initialDelay.toNanos) with Runnable { - override def run(): Unit = { - try { - runnable.run() - val driftNanos = clock() - getAndAdd(delay.toNanos) - if (self.get != null) - swap(schedule(executor, this, Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1)))) - } catch { - case _: SchedulerException => // ignore failure to enqueue or terminated target actor - } - } - }, roundUp(initialDelay))) + compareAndSet(InitialRepeatMarker, + schedule(executor, + new AtomicLong(clock() + initialDelay.toNanos) with Runnable { + override def run(): Unit = { + try { + runnable.run() + val driftNanos = clock() - getAndAdd(delay.toNanos) + if (self.get != null) + swap( + schedule(executor, + this, + Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1)))) + } catch { + case _: SchedulerException => // ignore failure to enqueue or terminated target actor + } + } + }, + roundUp(initialDelay))) @tailrec private def swap(c: Cancellable): Unit = { get match { @@ -130,19 +134,20 @@ class LightArrayRevolverScheduler( } } - override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = + override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)( + implicit executor: ExecutionContext): Cancellable = try schedule(executor, runnable, roundUp(delay)) catch { case SchedulerException(msg) => throw new IllegalStateException(msg) } - override def close(): Unit = Await.result(stop(), getShutdownTimeout) foreach { - task => - try task.run() catch { - case e: InterruptedException => throw e - case _: SchedulerException => // ignore terminated actors 
- case NonFatal(e) => log.error(e, "exception while executing timer task") - } + override def close(): Unit = Await.result(stop(), getShutdownTimeout).foreach { task => + try task.run() + catch { + case e: InterruptedException => throw e + case _: SchedulerException => // ignore terminated actors + case NonFatal(e) => log.error(e, "exception while executing timer task") + } } override val maxFrequency: Double = 1.second / TickDuration @@ -178,7 +183,8 @@ class LightArrayRevolverScheduler( private def checkMaxDelay(delayNanos: Long): Unit = if (delayNanos / tickNanos > Int.MaxValue) // 1 second margin in the error message due to rounding - throw new IllegalArgumentException(s"Task scheduled with [${delayNanos.nanos.toSeconds}] seconds delay, " + + throw new IllegalArgumentException( + s"Task scheduled with [${delayNanos.nanos.toSeconds}] seconds delay, " + s"which is too far in future, maximum delay is [${(tickNanos * Int.MaxValue).nanos.toSeconds - 1}] seconds") private val stopped = new AtomicReference[Promise[immutable.Seq[TimerTask]]] @@ -206,7 +212,7 @@ class LightArrayRevolverScheduler( case x => collect(q, acc :+ x) } } - ((0 until WheelSize) flatMap (i => collect(wheel(i), Vector.empty))) ++ collect(queue, Vector.empty) + ((0 until WheelSize).flatMap(i => collect(wheel(i), Vector.empty))) ++ collect(queue, Vector.empty) } @tailrec @@ -244,13 +250,13 @@ class LightArrayRevolverScheduler( catch { case e: Throwable => log.error(e, "LARS cannot start new thread, ship’s going down!") - stopped.set(Promise successful Nil) + stopped.set(Promise.successful(Nil)) clearAll() } timerThread = thread case p => - assert(stopped.compareAndSet(p, Promise successful Nil), "Stop signal violated in LARS") - p success clearAll() + assert(stopped.compareAndSet(p, Promise.successful(Nil)), "Stop signal violated in LARS") + p.success(clearAll()) } throw t } @@ -289,8 +295,8 @@ class LightArrayRevolverScheduler( stopped.get match { case null => nextTick() case p => - 
assert(stopped.compareAndSet(p, Promise successful Nil), "Stop signal violated in LARS") - p success clearAll() + assert(stopped.compareAndSet(p, Promise.successful(Nil)), "Stop signal violated in LARS") + p.success(clearAll()) } } }) @@ -312,7 +318,7 @@ object LightArrayRevolverScheduler { * INTERNAL API */ protected[actor] class TaskHolder(@volatile var task: Runnable, var ticks: Int, executionContext: ExecutionContext) - extends TimerTask { + extends TimerTask { @tailrec private final def extractTask(replaceWith: Runnable): Runnable = @@ -325,7 +331,7 @@ object LightArrayRevolverScheduler { case ExecutedTask | CancelledTask => false case other => try { - executionContext execute other + executionContext.execute(other) true } catch { case _: InterruptedException => { Thread.currentThread.interrupt(); false } diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index 373b7a0e6d..ba19214c2a 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -190,7 +190,7 @@ final case class Props(deploy: Deploy, clazz: Class[_], args: immutable.Seq[Any] /** * Returns a new Props with the specified deployment configuration. 
*/ - def withDeploy(d: Deploy): Props = copy(deploy = d withFallback deploy) + def withDeploy(d: Deploy): Props = copy(deploy = d.withFallback(deploy)) /** * Obtain an upper-bound approximation of the actor class which is going to diff --git a/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala b/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala index c7838f4dfe..f15086894e 100644 --- a/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala +++ b/akka-actor/src/main/scala/akka/actor/ReflectiveDynamicAccess.scala @@ -32,17 +32,20 @@ class ReflectiveDynamicAccess(val classLoader: ClassLoader) extends DynamicAcces constructor.setAccessible(true) val obj = constructor.newInstance(values: _*) val t = implicitly[ClassTag[T]].runtimeClass - if (t.isInstance(obj)) obj.asInstanceOf[T] else throw new ClassCastException(clazz.getName + " is not a subtype of " + t) - } recover { case i: InvocationTargetException if i.getTargetException ne null => throw i.getTargetException } + if (t.isInstance(obj)) obj.asInstanceOf[T] + else throw new ClassCastException(clazz.getName + " is not a subtype of " + t) + }.recover { case i: InvocationTargetException if i.getTargetException ne null => throw i.getTargetException } override def createInstanceFor[T: ClassTag](fqcn: String, args: immutable.Seq[(Class[_], AnyRef)]): Try[T] = - getClassFor(fqcn) flatMap { c => createInstanceFor(c, args) } + getClassFor(fqcn).flatMap { c => + createInstanceFor(c, args) + } override def getObjectFor[T: ClassTag](fqcn: String): Try[T] = { val classTry = if (fqcn.endsWith("$")) getClassFor(fqcn) - else getClassFor(fqcn + "$") recoverWith { case _ => getClassFor(fqcn) } - classTry flatMap { c => + else getClassFor(fqcn + "$").recoverWith { case _ => getClassFor(fqcn) } + classTry.flatMap { c => Try { val module = c.getDeclaredField("MODULE$") module.setAccessible(true) @@ -52,7 +55,7 @@ class ReflectiveDynamicAccess(val classLoader: ClassLoader) extends DynamicAcces 
case x if !t.isInstance(x) => throw new ClassCastException(fqcn + " is not a subtype of " + t) case x: T => x } - } recover { case i: InvocationTargetException if i.getTargetException ne null => throw i.getTargetException } + }.recover { case i: InvocationTargetException if i.getTargetException ne null => throw i.getTargetException } } } } diff --git a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala index 47704f6ce8..7621cf1b8b 100644 --- a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala @@ -11,7 +11,7 @@ import scala.annotation.tailrec import scala.collection.immutable import akka.actor.dungeon.ChildrenContainer import akka.event.Logging.Warning -import akka.util.{ Unsafe, unused } +import akka.util.{ unused, Unsafe } import akka.dispatch._ import akka.dispatch.sysmsg._ @@ -25,14 +25,14 @@ import scala.util.control.NonFatal * with a fully functional one, transfer all messages from dummy to real queue * and swap out the cell ref. 
*/ -private[akka] class RepointableActorRef( - val system: ActorSystemImpl, - val props: Props, - val dispatcher: MessageDispatcher, - val mailboxType: MailboxType, - val supervisor: InternalActorRef, - val path: ActorPath) - extends ActorRefWithCell with RepointableRef { +private[akka] class RepointableActorRef(val system: ActorSystemImpl, + val props: Props, + val dispatcher: MessageDispatcher, + val mailboxType: MailboxType, + val supervisor: InternalActorRef, + val path: ActorPath) + extends ActorRefWithCell + with RepointableRef { import AbstractActorRef.{ cellOffset, lookupOffset } @@ -136,7 +136,8 @@ private[akka] class RepointableActorRef( case _ => true } - @deprecated("Use context.watch(actor) and receive Terminated(actor)", "2.2") def isTerminated: Boolean = underlying.isTerminated + @deprecated("Use context.watch(actor) and receive Terminated(actor)", "2.2") def isTerminated: Boolean = + underlying.isTerminated def provider: ActorRefProvider = system.provider @@ -154,10 +155,11 @@ private[akka] class RepointableActorRef( lookup.getChildByName(childName) match { case Some(crs: ChildRestartStats) if uid == ActorCell.undefinedUid || uid == crs.uid => crs.child.asInstanceOf[InternalActorRef].getChild(name) - case _ => lookup match { - case ac: ActorCell => ac.getFunctionRefOrNobody(childName, uid) - case _ => Nobody - } + case _ => + lookup match { + case ac: ActorCell => ac.getFunctionRefOrNobody(childName, uid) + case _ => Nobody + } } } } else this @@ -178,11 +180,11 @@ private[akka] class RepointableActorRef( protected def writeReplace(): AnyRef = SerializedActorRef(this) } -private[akka] class UnstartedCell( - val systemImpl: ActorSystemImpl, - val self: RepointableActorRef, - val props: Props, - val supervisor: InternalActorRef) extends Cell { +private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl, + val self: RepointableActorRef, + val props: Props, + val supervisor: InternalActorRef) + extends Cell { /* * This lock protects all accesses 
to this cell’s queues. It also ensures @@ -248,12 +250,18 @@ private[akka] class UnstartedCell( if (cellIsReady(cell)) { cell.sendMessage(msg) } else if (!queue.offer(msg)) { - system.eventStream.publish(Warning(self.path.toString, getClass, "dropping message of type " + msg.message.getClass + " due to enqueue failure")) + system.eventStream.publish( + Warning(self.path.toString, + getClass, + "dropping message of type " + msg.message.getClass + " due to enqueue failure")) system.deadLetters.tell(DeadLetter(msg.message, msg.sender, self), msg.sender) } else if (Mailbox.debug) println(s"$self temp queueing ${msg.message} from ${msg.sender}") } finally lock.unlock() } else { - system.eventStream.publish(Warning(self.path.toString, getClass, "dropping message of type" + msg.message.getClass + " due to lock timeout")) + system.eventStream.publish( + Warning(self.path.toString, + getClass, + "dropping message of type" + msg.message.getClass + " due to lock timeout")) system.deadLetters.tell(DeadLetter(msg.message, msg.sender, self), msg.sender) } } @@ -287,7 +295,8 @@ private[akka] class UnstartedCell( private[this] final def locked[T](body: => T): T = { lock.lock() - try body finally lock.unlock() + try body + finally lock.unlock() } } diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 1c8eed8a89..d8071b338a 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -38,6 +38,7 @@ private final case class SchedulerException(msg: String) extends akka.AkkaExcept * into the future (which by default is around 8 months (`Int.MaxValue` seconds). */ trait Scheduler { + /** * Schedules a message to be sent repeatedly with an initial delay and * frequency. E.g. 
if you would like a message to be sent immediately and @@ -46,20 +47,19 @@ trait Scheduler { * * Java & Scala API */ - final def schedule( - initialDelay: FiniteDuration, - interval: FiniteDuration, - receiver: ActorRef, - message: Any)(implicit - executor: ExecutionContext, - sender: ActorRef = Actor.noSender): Cancellable = - schedule(initialDelay, interval, new Runnable { - def run(): Unit = { - receiver ! message - if (receiver.isTerminated) - throw SchedulerException("timer active for terminated actor") - } - }) + final def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, receiver: ActorRef, message: Any)( + implicit + executor: ExecutionContext, + sender: ActorRef = Actor.noSender): Cancellable = + schedule(initialDelay, + interval, + new Runnable { + def run(): Unit = { + receiver ! message + if (receiver.isTerminated) + throw SchedulerException("timer active for terminated actor") + } + }) /** * Schedules a message to be sent repeatedly with an initial delay and @@ -69,16 +69,16 @@ trait Scheduler { * * Java API */ - final def schedule( - initialDelay: java.time.Duration, - interval: java.time.Duration, - receiver: ActorRef, - message: Any, - executor: ExecutionContext, - sender: ActorRef): Cancellable = { + final def schedule(initialDelay: java.time.Duration, + interval: java.time.Duration, + receiver: ActorRef, + message: Any, + executor: ExecutionContext, + sender: ActorRef): Cancellable = { import JavaDurationConverters._ schedule(initialDelay.asScala, interval.asScala, receiver, message)(executor, sender) } + /** * Schedules a function to be run repeatedly with an initial delay and a * frequency. E.g. 
if you would like the function to be run after 2 seconds @@ -94,11 +94,9 @@ trait Scheduler { * * Scala API */ - final def schedule( - initialDelay: FiniteDuration, - interval: FiniteDuration)(f: => Unit)( - implicit - executor: ExecutionContext): Cancellable = + final def schedule(initialDelay: FiniteDuration, interval: FiniteDuration)(f: => Unit)( + implicit + executor: ExecutionContext): Cancellable = schedule(initialDelay, interval, new Runnable { override def run(): Unit = f }) /** @@ -120,10 +118,8 @@ trait Scheduler { * * Java API */ - def schedule( - initialDelay: FiniteDuration, - interval: FiniteDuration, - runnable: Runnable)(implicit executor: ExecutionContext): Cancellable + def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)( + implicit executor: ExecutionContext): Cancellable /** * Schedules a `Runnable` to be run repeatedly with an initial delay and @@ -144,10 +140,8 @@ trait Scheduler { * * Java API */ - def schedule( - initialDelay: java.time.Duration, - interval: java.time.Duration, - runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = { + def schedule(initialDelay: java.time.Duration, interval: java.time.Duration, runnable: Runnable)( + implicit executor: ExecutionContext): Cancellable = { import JavaDurationConverters._ schedule(initialDelay.asScala, interval.asScala, runnable) } @@ -161,12 +155,10 @@ trait Scheduler { * * Java & Scala API */ - final def scheduleOnce( - delay: FiniteDuration, - receiver: ActorRef, - message: Any)(implicit - executor: ExecutionContext, - sender: ActorRef = Actor.noSender): Cancellable = + final def scheduleOnce(delay: FiniteDuration, receiver: ActorRef, message: Any)( + implicit + executor: ExecutionContext, + sender: ActorRef = Actor.noSender): Cancellable = scheduleOnce(delay, new Runnable { override def run(): Unit = receiver ! 
message }) @@ -180,12 +172,11 @@ trait Scheduler { * * Java API */ - final def scheduleOnce( - delay: java.time.Duration, - receiver: ActorRef, - message: Any, - executor: ExecutionContext, - sender: ActorRef): Cancellable = { + final def scheduleOnce(delay: java.time.Duration, + receiver: ActorRef, + message: Any, + executor: ExecutionContext, + sender: ActorRef): Cancellable = { import JavaDurationConverters._ scheduleOnce(delay.asScala, receiver, message)(executor, sender) } @@ -199,9 +190,8 @@ trait Scheduler { * * Scala API */ - final def scheduleOnce(delay: FiniteDuration)(f: => Unit)( - implicit - executor: ExecutionContext): Cancellable = + final def scheduleOnce(delay: FiniteDuration)(f: => Unit)(implicit + executor: ExecutionContext): Cancellable = scheduleOnce(delay, new Runnable { override def run(): Unit = f }) /** @@ -213,9 +203,7 @@ trait Scheduler { * * Java & Scala API */ - def scheduleOnce( - delay: FiniteDuration, - runnable: Runnable)(implicit executor: ExecutionContext): Cancellable + def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable /** * Schedules a Runnable to be run once with a delay, i.e. a time period that @@ -226,9 +214,7 @@ trait Scheduler { * * Java & Scala API */ - def scheduleOnce( - delay: java.time.Duration, - runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = { + def scheduleOnce(delay: java.time.Duration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = { import JavaDurationConverters._ scheduleOnce(delay.asScala, runnable)(executor) } @@ -252,6 +238,7 @@ abstract class AbstractSchedulerBase extends Scheduler * but it should be good practice to make it so. */ trait Cancellable { + /** * Cancels this Cancellable and returns true if that was successful. 
* If this cancellable was (concurrently) cancelled already, then this method diff --git a/akka-actor/src/main/scala/akka/actor/Stash.scala b/akka-actor/src/main/scala/akka/actor/Stash.scala index 4360ddb987..66a13400cd 100644 --- a/akka-actor/src/main/scala/akka/actor/Stash.scala +++ b/akka-actor/src/main/scala/akka/actor/Stash.scala @@ -7,7 +7,12 @@ package akka.actor import scala.collection.immutable import akka.AkkaException -import akka.dispatch.{ UnboundedDequeBasedMessageQueueSemantics, RequiresMessageQueue, Envelope, DequeBasedMessageQueueSemantics } +import akka.dispatch.{ + DequeBasedMessageQueueSemantics, + Envelope, + RequiresMessageQueue, + UnboundedDequeBasedMessageQueueSemantics +} import scala.util.control.NoStackTrace @@ -65,12 +70,14 @@ trait UnboundedStash extends UnrestrictedStash with RequiresMessageQueue[Unbound * manually, and the mailbox should extend the [[akka.dispatch.DequeBasedMessageQueueSemantics]] marker trait. */ trait UnrestrictedStash extends Actor with StashSupport { + /** * Overridden callback. Prepends all messages in the stash to the mailbox, * clears the stash, stops all children and invokes the postStop() callback. */ override def preRestart(reason: Throwable, message: Option[Any]): Unit = { - try unstashAll() finally super.preRestart(reason, message) + try unstashAll() + finally super.preRestart(reason, message) } /** @@ -78,7 +85,9 @@ trait UnrestrictedStash extends Actor with StashSupport { * Must be called when overriding this method, otherwise stashed messages won't be propagated to DeadLetters * when actor stops. */ - override def postStop(): Unit = try unstashAll() finally super.postStop() + override def postStop(): Unit = + try unstashAll() + finally super.postStop() } /** @@ -103,6 +112,7 @@ private[akka] trait StashFactory { this: Actor => * (optionally in addition to and isolated from the user stash) can create new stashes via [[StashFactory]]. */ private[akka] trait StashSupport { + /** * INTERNAL API. 
* @@ -138,8 +148,10 @@ private[akka] trait StashSupport { private[akka] val mailbox: DequeBasedMessageQueueSemantics = { actorCell.mailbox.messageQueue match { case queue: DequeBasedMessageQueueSemantics => queue - case other => throw ActorInitializationException(self, s"DequeBasedMailbox required, got: ${other.getClass.getName}\n" + - """An (unbounded) deque-based mailbox can be configured as follows: + case other => + throw ActorInitializationException(self, + s"DequeBasedMailbox required, got: ${other.getClass.getName}\n" + + """An (unbounded) deque-based mailbox can be configured as follows: | my-custom-mailbox { | mailbox-type = "akka.dispatch.UnboundedDequeBasedMailbox" | } @@ -159,8 +171,9 @@ private[akka] trait StashSupport { if (theStash.nonEmpty && (currMsg eq theStash.last)) throw new IllegalStateException(s"Can't stash the same message $currMsg more than once") if (capacity <= 0 || theStash.size < capacity) theStash :+= currMsg - else throw new StashOverflowException( - s"Couldn't enqueue message ${currMsg.message.getClass.getName} from ${currMsg.sender} to stash of $self") + else + throw new StashOverflowException( + s"Couldn't enqueue message ${currMsg.message.getClass.getName} from ${currMsg.sender} to stash of $self") } /** @@ -181,11 +194,12 @@ private[akka] trait StashSupport { * The unstashed message is guaranteed to be removed from the stash regardless * if the `unstash()` call successfully returns or throws an exception. */ - private[akka] def unstash(): Unit = if (theStash.nonEmpty) try { - enqueueFirst(theStash.head) - } finally { - theStash = theStash.tail - } + private[akka] def unstash(): Unit = + if (theStash.nonEmpty) try { + enqueueFirst(theStash.head) + } finally { + theStash = theStash.tail + } /** * Prepends all messages in the stash to the mailbox, and then clears the stash. 
@@ -250,4 +264,6 @@ private[akka] trait StashSupport { /** * Is thrown when the size of the Stash exceeds the capacity of the Stash */ -class StashOverflowException(message: String, cause: Throwable = null) extends AkkaException(message, cause) with NoStackTrace +class StashOverflowException(message: String, cause: Throwable = null) + extends AkkaException(message, cause) + with NoStackTrace diff --git a/akka-actor/src/main/scala/akka/actor/Timers.scala b/akka-actor/src/main/scala/akka/actor/Timers.scala index df003301b6..669f465fd3 100644 --- a/akka-actor/src/main/scala/akka/actor/Timers.scala +++ b/akka-actor/src/main/scala/akka/actor/Timers.scala @@ -65,6 +65,7 @@ trait Timers extends Actor { * and thus are cancelled automatically when it is restarted or stopped. */ abstract class AbstractActorWithTimers extends AbstractActor with Timers { + /** * Start and cancel timers via the enclosed `TimerScheduler`. */ diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index 51cc6febf2..d2910b5cad 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -129,26 +129,28 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi final case class MethodCall(method: Method, parameters: Array[AnyRef]) { def isOneWay = method.getReturnType == java.lang.Void.TYPE - def returnsFuture = classOf[Future[_]] isAssignableFrom method.getReturnType - def returnsJOption = classOf[akka.japi.Option[_]] isAssignableFrom method.getReturnType - def returnsOption = classOf[scala.Option[_]] isAssignableFrom method.getReturnType + def returnsFuture = classOf[Future[_]].isAssignableFrom(method.getReturnType) + def returnsJOption = classOf[akka.japi.Option[_]].isAssignableFrom(method.getReturnType) + def returnsOption = classOf[scala.Option[_]].isAssignableFrom(method.getReturnType) /** * Invokes the Method on the supplied instance 
* * Throws the underlying exception if there's an InvocationTargetException thrown on the invocation. */ - def apply(instance: AnyRef): AnyRef = try { - parameters match { - case null => method.invoke(instance) - case args if args.length == 0 => method.invoke(instance) - case args => method.invoke(instance, args: _*) - } - } catch { case i: InvocationTargetException => throw i.getTargetException } + def apply(instance: AnyRef): AnyRef = + try { + parameters match { + case null => method.invoke(instance) + case args if args.length == 0 => method.invoke(instance) + case args => method.invoke(instance, args: _*) + } + } catch { case i: InvocationTargetException => throw i.getTargetException } @throws(classOf[ObjectStreamException]) private def writeReplace(): AnyRef = parameters match { - case null => SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, null) - case ps if ps.length == 0 => SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, Array()) + case null => SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, null) + case ps if ps.length == 0 => + SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, Array()) case ps => val serialization = SerializationExtension(akka.serialization.JavaSerializer.currentSystem.value) val serializedParameters = new Array[(Int, String, Array[Byte])](ps.length) @@ -156,7 +158,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi val p = ps(i) val s = serialization.findSerializerFor(p) val m = Serializers.manifestFor(s, p) - serializedParameters(i) = (s.identifier, m, s toBinary parameters(i)) //Mutable for the sake of sanity + serializedParameters(i) = (s.identifier, m, s.toBinary(parameters(i))) //Mutable for the sake of sanity } SerializedMethodCall(method.getDeclaringClass, method.getName, method.getParameterTypes, serializedParameters) @@ -168,28 +170,34 
@@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi * * Represents the serialized form of a MethodCall, uses readResolve and writeReplace to marshall the call */ - private[akka] final case class SerializedMethodCall(ownerType: Class[_], methodName: String, parameterTypes: Array[Class[_]], serializedParameters: Array[(Int, String, Array[Byte])]) { + private[akka] final case class SerializedMethodCall(ownerType: Class[_], + methodName: String, + parameterTypes: Array[Class[_]], + serializedParameters: Array[(Int, String, Array[Byte])]) { //TODO implement writeObject and readObject to serialize //TODO Possible optimization is to special encode the parameter-types to conserve space @throws(classOf[ObjectStreamException]) private def readResolve(): AnyRef = { val system = akka.serialization.JavaSerializer.currentSystem.value - if (system eq null) throw new IllegalStateException( - "Trying to deserialize a SerializedMethodCall without an ActorSystem in scope." + + if (system eq null) + throw new IllegalStateException( + "Trying to deserialize a SerializedMethodCall without an ActorSystem in scope." + " Use akka.serialization.JavaSerializer.currentSystem.withValue(system) { ... 
}") val serialization = SerializationExtension(system) - MethodCall(ownerType.getDeclaredMethod(methodName, parameterTypes: _*), serializedParameters match { - case null => null - case a if a.length == 0 => Array[AnyRef]() - case a => - val deserializedParameters: Array[AnyRef] = new Array[AnyRef](a.length) //Mutable for the sake of sanity - for (i <- 0 until a.length) { - val (sId, manifest, bytes) = a(i) - deserializedParameters(i) = serialization.deserialize(bytes, sId, manifest).get - } + MethodCall(ownerType.getDeclaredMethod(methodName, parameterTypes: _*), + serializedParameters match { + case null => null + case a if a.length == 0 => Array[AnyRef]() + case a => + val deserializedParameters + : Array[AnyRef] = new Array[AnyRef](a.length) //Mutable for the sake of sanity + for (i <- 0 until a.length) { + val (sId, manifest, bytes) = a(i) + deserializedParameters(i) = serialization.deserialize(bytes, sId, manifest).get + } - deserializedParameters - }) + deserializedParameters + }) } } @@ -219,7 +227,8 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi * Throws ClassCastException if the supplied type T isn't the type of the proxy associated with this TypedActor. */ def self[T <: AnyRef] = selfReference.get.asInstanceOf[T] match { - case null => throw new IllegalStateException("Calling TypedActor.self outside of a TypedActor implementation method!") + case null => + throw new IllegalStateException("Calling TypedActor.self outside of a TypedActor implementation method!") case some => some } @@ -227,7 +236,8 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi * Returns the ActorContext (for a TypedActor) when inside a method call in a TypedActor. 
*/ def context: ActorContext = currentContext.get match { - case null => throw new IllegalStateException("Calling TypedActor.context outside of a TypedActor implementation method!") + case null => + throw new IllegalStateException("Calling TypedActor.context outside of a TypedActor implementation method!") case some => some } @@ -241,11 +251,13 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi * * Implementation of TypedActor as an Actor */ - private[akka] class TypedActor[R <: AnyRef, T <: R](val proxyVar: AtomVar[R], createInstance: => T, interfaces: immutable.Seq[Class[_]]) extends Actor { + private[akka] class TypedActor[R <: AnyRef, T <: R](val proxyVar: AtomVar[R], + createInstance: => T, + interfaces: immutable.Seq[Class[_]]) + extends Actor { // if we were remote deployed we need to create a local proxy if (!context.parent.asInstanceOf[InternalActorRef].isLocal) - TypedActor.get(context.system).createActorRefProxy( - TypedProps(interfaces, createInstance), proxyVar, context.self) + TypedActor.get(context.system).createActorRefProxy(TypedProps(interfaces, createInstance), proxyVar, context.self) private val me = withContext[T](createInstance) @@ -261,26 +273,29 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi } } - override def postStop(): Unit = try { - withContext { - me match { - case l: PostStop => l.postStop() - case _ => super.postStop() + override def postStop(): Unit = + try { + withContext { + me match { + case l: PostStop => l.postStop() + case _ => super.postStop() + } + } + } finally { + TypedActor(context.system).invocationHandlerFor(proxyVar.get) match { + case null => + case some => + some.actorVar.set(context.system.deadLetters) //Point it to the DLQ + proxyVar.set(null.asInstanceOf[R]) } } - } finally { - TypedActor(context.system).invocationHandlerFor(proxyVar.get) match { - case null => - case some => - some.actorVar.set(context.system.deadLetters) //Point it to the DLQ - 
proxyVar.set(null.asInstanceOf[R]) - } - } override def preRestart(reason: Throwable, message: Option[Any]): Unit = withContext { me match { case l: PreRestart => l.preRestart(reason, message) - case _ => context.children foreach context.stop //Can't be super.preRestart(reason, message) since that would invoke postStop which would set the actorVar to DL and proxyVar to null + case _ => + context.children + .foreach(context.stop) //Can't be super.preRestart(reason, message) since that would invoke postStop which would set the actorVar to DL and proxyVar to null } } @@ -292,42 +307,45 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi } protected def withContext[U](unitOfWork: => U): U = { - TypedActor.selfReference set proxyVar.get - TypedActor.currentContext set context - try unitOfWork finally { - TypedActor.selfReference set null - TypedActor.currentContext set null + TypedActor.selfReference.set(proxyVar.get) + TypedActor.currentContext.set(context) + try unitOfWork + finally { + TypedActor.selfReference.set(null) + TypedActor.currentContext.set(null) } } def receive = { - case m: MethodCall => withContext { - if (m.isOneWay) m(me) - else { - try { - val s = sender() - m(me) match { - case f: Future[_] if m.returnsFuture => - implicit val dispatcher = context.dispatcher - f onComplete { - case Success(null) => s ! NullResponse - case Success(result) => s ! result - case Failure(f) => s ! Status.Failure(f) - } - case null => s ! NullResponse - case result => s ! result + case m: MethodCall => + withContext { + if (m.isOneWay) m(me) + else { + try { + val s = sender() + m(me) match { + case f: Future[_] if m.returnsFuture => + implicit val dispatcher = context.dispatcher + f.onComplete { + case Success(null) => s ! NullResponse + case Success(result) => s ! result + case Failure(f) => s ! Status.Failure(f) + } + case null => s ! NullResponse + case result => s ! result + } + } catch { + case NonFatal(e) => + sender() ! 
Status.Failure(e) + throw e } - } catch { - case NonFatal(e) => - sender() ! Status.Failure(e) - throw e } } - } - case msg if me.isInstanceOf[Receiver] => withContext { - me.asInstanceOf[Receiver].onReceive(msg, sender()) - } + case msg if me.isInstanceOf[Receiver] => + withContext { + me.asInstanceOf[Receiver].onReceive(msg, sender()) + } } } @@ -335,6 +353,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi * Mix this into your TypedActor to be able to define supervisor strategy */ trait Supervisor { + /** * User overridable definition the strategy to use for supervising * child actors. @@ -353,6 +372,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi * Mix this into your TypedActor to be able to hook into its lifecycle */ trait PreStart { + /** * User overridable callback. *

@@ -365,6 +385,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi * Mix this into your TypedActor to be able to hook into its lifecycle */ trait PostStop { + /** * User overridable callback. *

@@ -377,6 +398,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi * Mix this into your TypedActor to be able to hook into its lifecycle */ trait PreRestart { + /** * User overridable callback: '''By default it disposes of all children and then calls `postStop()`.''' * @param reason the Throwable that caused the restart to happen @@ -390,6 +412,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi } trait PostRestart { + /** * User overridable callback: By default it calls `preStart()`. * @param reason the Throwable that caused the restart to happen @@ -402,13 +425,19 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi /** * INTERNAL API */ - private[akka] class TypedActorInvocationHandler(@transient val extension: TypedActorExtension, @transient val actorVar: AtomVar[ActorRef], @transient val timeout: Timeout) extends InvocationHandler with Serializable { + private[akka] class TypedActorInvocationHandler(@transient val extension: TypedActorExtension, + @transient val actorVar: AtomVar[ActorRef], + @transient val timeout: Timeout) + extends InvocationHandler + with Serializable { def actor = actorVar.get @throws(classOf[Throwable]) def invoke(proxy: AnyRef, method: Method, args: Array[AnyRef]): AnyRef = method.getName match { case "toString" => actor.toString - case "equals" => (args.length == 1 && (proxy eq args(0)) || actor == extension.getActorRefFor(args(0))).asInstanceOf[AnyRef] //Force boxing of the boolean + case "equals" => + (args.length == 1 && (proxy eq args(0)) || actor == extension.getActorRefFor(args(0))) + .asInstanceOf[AnyRef] //Force boxing of the boolean case "hashCode" => actor.hashCode.asInstanceOf[AnyRef] case _ => implicit val dispatcher = extension.system.dispatcher @@ -416,36 +445,45 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi MethodCall(method, args) match { case m if m.isOneWay => actor ! 
m; null //Null return value - case m if m.returnsFuture => ask(actor, m)(timeout) map { - case NullResponse => null - case other => other - } + case m if m.returnsFuture => + ask(actor, m)(timeout).map { + case NullResponse => null + case other => other + } case m if m.returnsJOption || m.returnsOption => val f = ask(actor, m)(timeout) - (try { Await.ready(f, timeout.duration).value } catch { case _: TimeoutException => None }) match { + (try { + Await.ready(f, timeout.duration).value + } catch { case _: TimeoutException => None }) match { case None | Some(Success(NullResponse)) | Some(Failure(_: AskTimeoutException)) => if (m.returnsJOption) JOption.none[Any] else None case Some(t: Try[_]) => t.get.asInstanceOf[AnyRef] } - case m => Await.result(ask(actor, m)(timeout), timeout.duration) match { - case NullResponse => null - case other => other.asInstanceOf[AnyRef] - } + case m => + Await.result(ask(actor, m)(timeout), timeout.duration) match { + case NullResponse => null + case other => other.asInstanceOf[AnyRef] + } } } - @throws(classOf[ObjectStreamException]) private def writeReplace(): AnyRef = SerializedTypedActorInvocationHandler(actor, timeout.duration) + @throws(classOf[ObjectStreamException]) private def writeReplace(): AnyRef = + SerializedTypedActorInvocationHandler(actor, timeout.duration) } /** * INTERNAL API */ - private[akka] final case class SerializedTypedActorInvocationHandler(val actor: ActorRef, val timeout: FiniteDuration) { - @throws(classOf[ObjectStreamException]) private def readResolve(): AnyRef = JavaSerializer.currentSystem.value match { - case null => throw new IllegalStateException("SerializedTypedActorInvocationHandler.readResolve requires that " + - "JavaSerializer.currentSystem.value is set to a non-null value") - case some => toTypedActorInvocationHandler(some) - } + private[akka] final case class SerializedTypedActorInvocationHandler(val actor: ActorRef, + val timeout: FiniteDuration) { + @throws(classOf[ObjectStreamException]) 
private def readResolve(): AnyRef = + JavaSerializer.currentSystem.value match { + case null => + throw new IllegalStateException( + "SerializedTypedActorInvocationHandler.readResolve requires that " + + "JavaSerializer.currentSystem.value is set to a non-null value") + case some => toTypedActorInvocationHandler(some) + } def toTypedActorInvocationHandler(system: ActorSystem): TypedActorInvocationHandler = new TypedActorInvocationHandler(TypedActor(system), new AtomVar[ActorRef](actor), new Timeout(timeout)) @@ -522,12 +560,12 @@ object TypedProps { */ @SerialVersionUID(1L) final case class TypedProps[T <: AnyRef] protected[TypedProps] ( - interfaces: immutable.Seq[Class[_]], - creator: () => T, - dispatcher: String = TypedProps.defaultDispatcherId, - deploy: Deploy = Props.defaultDeploy, - timeout: Option[Timeout] = TypedProps.defaultTimeout, - loader: Option[ClassLoader] = TypedProps.defaultLoader) { + interfaces: immutable.Seq[Class[_]], + creator: () => T, + dispatcher: String = TypedProps.defaultDispatcherId, + deploy: Deploy = Props.defaultDeploy, + timeout: Option[Timeout] = TypedProps.defaultTimeout, + loader: Option[ClassLoader] = TypedProps.defaultLoader) { /** * Uses the supplied class as the factory for the TypedActor implementation, @@ -536,9 +574,7 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] ( * appended in the sequence of interfaces. */ def this(implementation: Class[T]) = - this( - interfaces = TypedProps.extractInterfaces(implementation), - creator = instantiator(implementation)) + this(interfaces = TypedProps.extractInterfaces(implementation), creator = instantiator(implementation)) /** * Java API: Uses the supplied Creator as the factory for the TypedActor implementation, @@ -547,9 +583,7 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] ( * appended in the sequence of interfaces. 
*/ def this(interface: Class[_ >: T], implementation: Creator[T]) = - this( - interfaces = TypedProps.extractInterfaces(interface), - creator = implementation.create _) + this(interfaces = TypedProps.extractInterfaces(interface), creator = implementation.create _) /** * Java API: Uses the supplied class as the factory for the TypedActor implementation, @@ -558,9 +592,7 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] ( * appended in the sequence of interfaces. */ def this(interface: Class[_ >: T], implementation: Class[T]) = - this( - interfaces = TypedProps.extractInterfaces(interface), - creator = instantiator(implementation)) + this(interfaces = TypedProps.extractInterfaces(interface), creator = instantiator(implementation)) /** * Returns a new TypedProps with the specified dispatcher set. @@ -613,7 +645,7 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] ( * or if the interface class is not an interface, all the interfaces it implements. */ def withoutInterface(interface: Class[_ >: T]): TypedProps[T] = - this.copy(interfaces = interfaces diff TypedProps.extractInterfaces(interface)) + this.copy(interfaces = interfaces.diff(TypedProps.extractInterfaces(interface))) /** * Returns the akka.actor.Props representation of this TypedProps @@ -628,7 +660,8 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] ( * ContextualTypedActorFactory allows TypedActors to create children, effectively forming the same Actor Supervision Hierarchies * as normal Actors can. 
*/ -final case class ContextualTypedActorFactory(typedActor: TypedActorExtension, actorFactory: ActorContext) extends TypedActorFactory { +final case class ContextualTypedActorFactory(typedActor: TypedActorExtension, actorFactory: ActorContext) + extends TypedActorFactory { override def getActorRefFor(proxy: AnyRef): ActorRef = typedActor.getActorRefFor(proxy) override def isTypedActor(proxyOrNot: AnyRef): Boolean = typedActor.isTypedActor(proxyOrNot) } @@ -663,20 +696,26 @@ class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFac /** * INTERNAL API */ - private[akka] def createActorRefProxy[R <: AnyRef, T <: R](props: TypedProps[T], proxyVar: AtomVar[R], actorRef: => ActorRef): R = { + private[akka] def createActorRefProxy[R <: AnyRef, T <: R](props: TypedProps[T], + proxyVar: AtomVar[R], + actorRef: => ActorRef): R = { //Warning, do not change order of the following statements, it's some elaborate chicken-n-egg handling val actorVar = new AtomVar[ActorRef](null) - val proxy = Proxy.newProxyInstance( - (props.loader orElse props.interfaces.collectFirst { case any => any.getClassLoader }).orNull, //If we have no loader, we arbitrarily take the loader of the first interface - props.interfaces.toArray, - new TypedActorInvocationHandler(this, actorVar, props.timeout getOrElse DefaultReturnTimeout)).asInstanceOf[R] + val proxy = Proxy + .newProxyInstance( + props.loader + .orElse(props.interfaces.collectFirst { case any => any.getClassLoader }) + .orNull, //If we have no loader, we arbitrarily take the loader of the first interface + props.interfaces.toArray, + new TypedActorInvocationHandler(this, actorVar, props.timeout.getOrElse(DefaultReturnTimeout))) + .asInstanceOf[R] if (proxyVar eq null) { - actorVar set actorRef + actorVar.set(actorRef) proxy } else { - proxyVar set proxy // Chicken and egg situation we needed to solve, set the proxy so that we can set the self-reference inside each receive - actorVar set actorRef //Make sure the 
InvocationHandler gets a hold of the actor reference, this is not a problem since the proxy hasn't escaped this method yet + proxyVar.set(proxy) // Chicken and egg situation we needed to solve, set the proxy so that we can set the self-reference inside each receive + actorVar.set(actorRef) //Make sure the InvocationHandler gets a hold of the actor reference, this is not a problem since the proxy hasn't escaped this method yet proxyVar.get } } @@ -685,13 +724,14 @@ class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFac * INTERNAL API */ private[akka] def invocationHandlerFor(typedActor: AnyRef): TypedActorInvocationHandler = - if ((typedActor ne null) && classOf[Proxy].isAssignableFrom(typedActor.getClass) && Proxy.isProxyClass(typedActor.getClass)) typedActor match { + if ((typedActor ne null) && classOf[Proxy].isAssignableFrom(typedActor.getClass) && Proxy.isProxyClass( + typedActor.getClass)) typedActor match { case null => null - case other => Proxy.getInvocationHandler(other) match { - case null => null - case handler: TypedActorInvocationHandler => handler - case _ => null - } - } - else null + case other => + Proxy.getInvocationHandler(other) match { + case null => null + case handler: TypedActorInvocationHandler => handler + case _ => null + } + } else null } diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala index a86679049e..2c07d5b89a 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala @@ -176,4 +176,3 @@ abstract class UntypedActor extends Actor { override def unhandled(message: Any): Unit = super.unhandled(message) } - diff --git a/akka-actor/src/main/scala/akka/actor/dsl/Creators.scala b/akka-actor/src/main/scala/akka/actor/dsl/Creators.scala index 6fe9fe2360..2f4d95d308 100644 --- a/akka-actor/src/main/scala/akka/actor/dsl/Creators.scala +++ 
b/akka-actor/src/main/scala/akka/actor/dsl/Creators.scala @@ -120,8 +120,10 @@ trait Creators { this: ActorDSL.type => def whenStopping(body: => Unit): Unit = postStopFun = () => body override def preStart(): Unit = if (preStartFun != null) preStartFun() else super.preStart() - override def preRestart(cause: Throwable, msg: Option[Any]): Unit = if (preRestartFun != null) preRestartFun(cause, msg) else super.preRestart(cause, msg) - override def postRestart(cause: Throwable): Unit = if (postRestartFun != null) postRestartFun(cause) else super.postRestart(cause) + override def preRestart(cause: Throwable, msg: Option[Any]): Unit = + if (preRestartFun != null) preRestartFun(cause, msg) else super.preRestart(cause, msg) + override def postRestart(cause: Throwable): Unit = + if (postRestartFun != null) postRestartFun(cause) else super.postRestart(cause) override def postStop(): Unit = if (postStopFun != null) postStopFun() else super.postStop() override def supervisorStrategy: SupervisorStrategy = if (strategy != null) strategy else super.supervisorStrategy diff --git a/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala b/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala index 1517d1214a..c13daed70a 100644 --- a/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala +++ b/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala @@ -34,7 +34,8 @@ private[akka] object Inbox { private final case class Get(deadline: Deadline, client: ActorRef = null) extends Query { def withClient(c: ActorRef) = copy(client = c) } - private final case class Select(deadline: Deadline, predicate: PartialFunction[Any, Any], client: ActorRef = null) extends Query { + private final case class Select(deadline: Deadline, predicate: PartialFunction[Any, Any], client: ActorRef = null) + extends Query { def withClient(c: ActorRef) = copy(client = c) } private final case class StartWatch(target: ActorRef) @@ -56,7 +57,7 @@ trait Inbox { this: ActorDSL.type => } private implicit val deadlineOrder: 
Ordering[Query] = new Ordering[Query] { - def compare(left: Query, right: Query): Int = left.deadline.time compare right.deadline.time + def compare(left: Query, right: Query): Int = left.deadline.time.compare(right.deadline.time) } private class InboxActor(size: Int) extends Actor with ActorLogging { @@ -66,16 +67,17 @@ trait Inbox { this: ActorDSL.type => var printedWarning = false def enqueueQuery(q: Query): Unit = { - val query = q withClient sender() - clients enqueue query + val query = q.withClient(sender()) + clients.enqueue(query) clientsByTimeout += query } def enqueueMessage(msg: Any): Unit = { - if (messages.size < size) messages enqueue msg + if (messages.size < size) messages.enqueue(msg) else { if (!printedWarning) { - log.warning("dropping message: either your program is buggy or you might want to increase akka.actor.dsl.inbox-size, current value is " + size) + log.warning( + "dropping message: either your program is buggy or you might want to increase akka.actor.dsl.inbox-size, current value is " + size) printedWarning = true } } @@ -84,7 +86,7 @@ trait Inbox { this: ActorDSL.type => var currentMsg: Any = _ val clientPredicate: (Query) => Boolean = { case _: Get => true - case Select(_, p, _) => p isDefinedAt currentMsg + case Select(_, p, _) => p.isDefinedAt(currentMsg) case _ => false } @@ -93,59 +95,60 @@ trait Inbox { this: ActorDSL.type => var currentDeadline: Option[(Deadline, Cancellable)] = None - def receive = ({ - case g: Get => - if (messages.isEmpty) enqueueQuery(g) - else sender() ! messages.dequeue() - case s: Select => - if (messages.isEmpty) enqueueQuery(s) - else { - currentSelect = s - messages.dequeueFirst(messagePredicate) match { - case Some(msg) => sender() ! msg - case None => enqueueQuery(s) + def receive = + ({ + case g: Get => + if (messages.isEmpty) enqueueQuery(g) + else sender() ! 
messages.dequeue() + case s: Select => + if (messages.isEmpty) enqueueQuery(s) + else { + currentSelect = s + messages.dequeueFirst(messagePredicate) match { + case Some(msg) => sender() ! msg + case None => enqueueQuery(s) + } + currentSelect = null } - currentSelect = null - } - case StartWatch(target) => context watch target - case Kick => - val now = Deadline.now - val pred = (q: Query) => q.deadline.time < now.time - val overdue = clientsByTimeout.iterator.takeWhile(pred) - while (overdue.hasNext) { - val toKick = overdue.next() - toKick.client ! Status.Failure(new TimeoutException("deadline passed")) - } - clients = clients.filterNot(pred) - clientsByTimeout = clientsByTimeout.from(Get(now)) - case msg => - if (clients.isEmpty) enqueueMessage(msg) - else { - currentMsg = msg - clients.dequeueFirst(clientPredicate) match { - case Some(q) => { clientsByTimeout -= q; q.client ! msg } - case None => enqueueMessage(msg) + case StartWatch(target) => context.watch(target) + case Kick => + val now = Deadline.now + val pred = (q: Query) => q.deadline.time < now.time + val overdue = clientsByTimeout.iterator.takeWhile(pred) + while (overdue.hasNext) { + val toKick = overdue.next() + toKick.client ! Status.Failure(new TimeoutException("deadline passed")) + } + clients = clients.filterNot(pred) + clientsByTimeout = clientsByTimeout.from(Get(now)) + case msg => + if (clients.isEmpty) enqueueMessage(msg) + else { + currentMsg = msg + clients.dequeueFirst(clientPredicate) match { + case Some(q) => { clientsByTimeout -= q; q.client ! 
msg } + case None => enqueueMessage(msg) + } + currentMsg = null + } + }: Receive).andThen { _ => + if (clients.isEmpty) { + if (currentDeadline.isDefined) { + currentDeadline.get._2.cancel() + currentDeadline = None } - currentMsg = null - } - }: Receive) andThen { _ => - if (clients.isEmpty) { - if (currentDeadline.isDefined) { - currentDeadline.get._2.cancel() - currentDeadline = None - } - } else { - val next = clientsByTimeout.head.deadline - import context.dispatcher - if (currentDeadline.isEmpty) { - currentDeadline = Some((next, context.system.scheduler.scheduleOnce(next.timeLeft, self, Kick))) } else { - // must not rely on the Scheduler to not fire early (for robustness) - currentDeadline.get._2.cancel() - currentDeadline = Some((next, context.system.scheduler.scheduleOnce(next.timeLeft, self, Kick))) + val next = clientsByTimeout.head.deadline + import context.dispatcher + if (currentDeadline.isEmpty) { + currentDeadline = Some((next, context.system.scheduler.scheduleOnce(next.timeLeft, self, Kick))) + } else { + // must not rely on the Scheduler to not fire early (for robustness) + currentDeadline.get._2.cancel() + currentDeadline = Some((next, context.system.scheduler.scheduleOnce(next.timeLeft, self, Kick))) + } } } - } } /* diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala index c4941f5990..c45a64b895 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala @@ -95,7 +95,9 @@ private[akka] trait Children { this: ActorCell => } protected def stopFunctionRefs(): Unit = { - val refs = Unsafe.instance.getAndSetObject(this, AbstractActorCell.functionRefsOffset, Map.empty).asInstanceOf[Map[String, FunctionRef]] + val refs = Unsafe.instance + .getAndSetObject(this, AbstractActorCell.functionRefsOffset, Map.empty) + .asInstanceOf[Map[String, FunctionRef]] refs.valuesIterator.foreach(_.stop()) } 
@@ -117,9 +119,9 @@ private[akka] trait Children { this: ActorCell => } if (actor match { - case r: RepointableRef => r.isStarted - case _ => true - }) shallDie(actor) + case r: RepointableRef => r.isStarted + case _ => true + }) shallDie(actor) } actor.asInstanceOf[InternalActorRef].stop() } @@ -160,7 +162,8 @@ private[akka] trait Children { this: ActorCell => } } - final protected def setTerminated(): Unit = Unsafe.instance.putObjectVolatile(this, AbstractActorCell.childrenOffset, TerminatedChildrenContainer) + final protected def setTerminated(): Unit = + Unsafe.instance.putObjectVolatile(this, AbstractActorCell.childrenOffset, TerminatedChildrenContainer) /* * ActorCell-internal API @@ -172,17 +175,18 @@ private[akka] trait Children { this: ActorCell => protected def waitingForChildrenOrNull = childrenRefs match { case TerminatingChildrenContainer(_, _, w: WaitingForChildren) => w - case _ => null + case _ => null } protected def suspendChildren(exceptFor: Set[ActorRef] = Set.empty): Unit = - childrenRefs.stats foreach { - case ChildRestartStats(child, _, _) if !(exceptFor contains child) => child.asInstanceOf[InternalActorRef].suspend() + childrenRefs.stats.foreach { + case ChildRestartStats(child, _, _) if !(exceptFor contains child) => + child.asInstanceOf[InternalActorRef].suspend() case _ => } protected def resumeChildren(causedByFailure: Throwable, perp: ActorRef): Unit = - childrenRefs.stats foreach { + childrenRefs.stats.foreach { case ChildRestartStats(child: InternalActorRef, _, _) => child.resume(if (perp == child) causedByFailure else null) case stats => @@ -244,7 +248,11 @@ private[akka] trait Children { this: ActorCell => } } - private def makeChild(cell: ActorCell, props: Props, name: String, async: Boolean, systemService: Boolean): ActorRef = { + private def makeChild(cell: ActorCell, + props: Props, + name: String, + async: Boolean, + systemService: Boolean): ActorRef = { if (cell.system.settings.SerializeAllCreators && !systemService && 
props.deploy.scope != LocalScope) { val oldInfo = Serialization.currentTransportInformation.value try { @@ -252,8 +260,9 @@ private[akka] trait Children { this: ActorCell => if (oldInfo eq null) Serialization.currentTransportInformation.value = system.provider.serializationInformation - props.args forall (arg => - arg == null || + props.args.forall( + arg => + arg == null || arg.isInstanceOf[NoSerializationVerificationNeeded] || { val o = arg.asInstanceOf[AnyRef] val serializer = ser.findSerializerFor(o) @@ -262,7 +271,8 @@ private[akka] trait Children { this: ActorCell => ser.deserialize(bytes, serializer.identifier, ms).get != null }) } catch { - case NonFatal(e) => throw new IllegalArgumentException(s"pre-creation serialization check failed at [${cell.self.path}/$name]", e) + case NonFatal(e) => + throw new IllegalArgumentException(s"pre-creation serialization check failed at [${cell.self.path}/$name]", e) } finally Serialization.currentTransportInformation.value = oldInfo } @@ -270,15 +280,22 @@ private[akka] trait Children { this: ActorCell => * in case we are currently terminating, fail external attachChild requests * (internal calls cannot happen anyway because we are suspended) */ - if (cell.childrenRefs.isTerminating) throw new IllegalStateException("cannot create children while terminating or terminated") + if (cell.childrenRefs.isTerminating) + throw new IllegalStateException("cannot create children while terminating or terminated") else { reserveChild(name) // this name will either be unreserved or overwritten with a real child below val actor = try { val childPath = new ChildActorPath(cell.self.path, name, ActorCell.newUid()) - cell.provider.actorOf(cell.systemImpl, props, cell.self, childPath, - systemService = systemService, deploy = None, lookupDeploy = true, async = async) + cell.provider.actorOf(cell.systemImpl, + props, + cell.self, + childPath, + systemService = systemService, + deploy = None, + lookupDeploy = true, + async = async) } catch { 
case e: InterruptedException => unreserveChild(name) diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala b/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala index ea41170086..c04652af58 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala @@ -6,7 +6,7 @@ package akka.actor.dungeon import scala.collection.immutable -import akka.actor.{ InvalidActorNameException, ChildStats, ChildRestartStats, ChildNameReserved, ActorRef } +import akka.actor.{ ActorRef, ChildNameReserved, ChildRestartStats, ChildStats, InvalidActorNameException } import akka.util.Collections.{ EmptyImmutableSeq, PartialImmutableValuesIterable } /** @@ -49,13 +49,15 @@ private[akka] object ChildrenContainer { final case class Creation() extends SuspendReason with WaitingForChildren case object Termination extends SuspendReason - class ChildRestartsIterable(stats: immutable.Map[_, ChildStats]) extends PartialImmutableValuesIterable[ChildStats, ChildRestartStats] { + class ChildRestartsIterable(stats: immutable.Map[_, ChildStats]) + extends PartialImmutableValuesIterable[ChildStats, ChildRestartStats] { override final def apply(c: ChildStats) = c.asInstanceOf[ChildRestartStats] override final def isDefinedAt(c: ChildStats) = c.isInstanceOf[ChildRestartStats] override final def valuesIterator = stats.valuesIterator } - class ChildrenIterable(stats: immutable.Map[_, ChildStats]) extends PartialImmutableValuesIterable[ChildStats, ActorRef] { + class ChildrenIterable(stats: immutable.Map[_, ChildStats]) + extends PartialImmutableValuesIterable[ChildStats, ActorRef] { override final def apply(c: ChildStats) = c.asInstanceOf[ChildRestartStats].child override final def isDefinedAt(c: ChildStats) = c.isInstanceOf[ChildRestartStats] override final def valuesIterator = stats.valuesIterator @@ -65,14 +67,16 @@ private[akka] object ChildrenContainer { trait 
EmptyChildrenContainer extends ChildrenContainer { val emptyStats = immutable.TreeMap.empty[String, ChildStats] - override def add(name: String, stats: ChildRestartStats): ChildrenContainer = new NormalChildrenContainer(emptyStats.updated(name, stats)) + override def add(name: String, stats: ChildRestartStats): ChildrenContainer = + new NormalChildrenContainer(emptyStats.updated(name, stats)) override def remove(child: ActorRef): ChildrenContainer = this override def getByName(name: String): Option[ChildRestartStats] = None override def getByRef(actor: ActorRef): Option[ChildRestartStats] = None override def children: immutable.Iterable[ActorRef] = EmptyImmutableSeq override def stats: immutable.Iterable[ChildRestartStats] = EmptyImmutableSeq override def shallDie(actor: ActorRef): ChildrenContainer = this - override def reserve(name: String): ChildrenContainer = new NormalChildrenContainer(emptyStats.updated(name, ChildNameReserved)) + override def reserve(name: String): ChildrenContainer = + new NormalChildrenContainer(emptyStats.updated(name, ChildNameReserved)) override def unreserve(name: String): ChildrenContainer = this } @@ -105,7 +109,8 @@ private[akka] object ChildrenContainer { */ class NormalChildrenContainer(val c: immutable.TreeMap[String, ChildStats]) extends ChildrenContainer { - override def add(name: String, stats: ChildRestartStats): ChildrenContainer = new NormalChildrenContainer(c.updated(name, stats)) + override def add(name: String, stats: ChildRestartStats): ChildrenContainer = + new NormalChildrenContainer(c.updated(name, stats)) override def remove(child: ActorRef): ChildrenContainer = NormalChildrenContainer(c - child.path.name) @@ -113,7 +118,7 @@ private[akka] object ChildrenContainer { override def getByRef(actor: ActorRef): Option[ChildRestartStats] = c.get(actor.path.name) match { case c @ Some(crs: ChildRestartStats) if (crs.child == actor) => c.asInstanceOf[Option[ChildRestartStats]] - case _ => None + case _ => None } override def 
children: immutable.Iterable[ActorRef] = @@ -155,8 +160,10 @@ private[akka] object ChildrenContainer { * type of container, depending on whether or not children are left and whether or not * the reason was “Terminating”. */ - final case class TerminatingChildrenContainer(c: immutable.TreeMap[String, ChildStats], toDie: Set[ActorRef], reason: SuspendReason) - extends ChildrenContainer { + final case class TerminatingChildrenContainer(c: immutable.TreeMap[String, ChildStats], + toDie: Set[ActorRef], + reason: SuspendReason) + extends ChildrenContainer { override def add(name: String, stats: ChildRestartStats): ChildrenContainer = copy(c.updated(name, stats)) @@ -165,15 +172,14 @@ private[akka] object ChildrenContainer { if (t.isEmpty) reason match { case Termination => TerminatedChildrenContainer case _ => NormalChildrenContainer(c - child.path.name) - } - else copy(c - child.path.name, t) + } else copy(c - child.path.name, t) } override def getByName(name: String): Option[ChildStats] = c.get(name) override def getByRef(actor: ActorRef): Option[ChildRestartStats] = c.get(actor.path.name) match { case c @ Some(crs: ChildRestartStats) if (crs.child == actor) => c.asInstanceOf[Option[ChildRestartStats]] - case _ => None + case _ => None } override def children: immutable.Iterable[ActorRef] = diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala index 0f6a542c0d..b9c5c22353 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala @@ -29,8 +29,7 @@ private[akka] trait DeathWatch { this: ActorCell => maintainAddressTerminatedSubscription(a) { a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ updateWatching(a, None) - } - else + } else checkWatchingSame(a, None) } a @@ -43,8 +42,7 @@ private[akka] trait DeathWatch { this: ActorCell => 
maintainAddressTerminatedSubscription(a) { a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ updateWatching(a, Some(msg)) - } - else + } else checkWatchingSame(a, Some(msg)) } a @@ -72,7 +70,9 @@ private[akka] trait DeathWatch { this: ActorCell => * When this actor is watching the subject of [[akka.actor.Terminated]] message * it will be propagated to user's receive. */ - protected def watchedActorTerminated(actor: ActorRef, existenceConfirmed: Boolean, addressTerminated: Boolean): Unit = { + protected def watchedActorTerminated(actor: ActorRef, + existenceConfirmed: Boolean, + addressTerminated: Boolean): Unit = { watchingGet(actor) match { case None => // We're apparently no longer watching this actor. case Some(optionalMessage) => @@ -94,7 +94,7 @@ private[akka] trait DeathWatch { this: ActorCell => // when all actor references have uid, i.e. actorFor is removed private def watchingContains(subject: ActorRef): Boolean = watching.contains(subject) || (subject.path.uid != ActorCell.undefinedUid && - watching.contains(new UndefinedUidActorRef(subject))) + watching.contains(new UndefinedUidActorRef(subject))) // TODO this should be removed and be replaced with `watching.get(subject)` // when all actor references have uid, i.e. actorFor is removed @@ -102,21 +102,23 @@ private[akka] trait DeathWatch { this: ActorCell => // If the subject is being matched, the inner option is the optional custom termination // message that should be sent instead of the default Terminated. 
private def watchingGet(subject: ActorRef): Option[Option[Any]] = - watching.get(subject).orElse( - if (subject.path.uid == ActorCell.undefinedUid) None - else watching.get(new UndefinedUidActorRef(subject))) + watching + .get(subject) + .orElse( + if (subject.path.uid == ActorCell.undefinedUid) None + else watching.get(new UndefinedUidActorRef(subject))) // TODO this should be removed and be replaced with `set - subject` // when all actor references have uid, i.e. actorFor is removed private def removeFromSet(subject: ActorRef, set: Set[ActorRef]): Set[ActorRef] = if (subject.path.uid != ActorCell.undefinedUid) (set - subject) - new UndefinedUidActorRef(subject) - else set filterNot (_.path == subject.path) + else set.filterNot(_.path == subject.path) // TODO this should be removed and be replaced with `set - subject` // when all actor references have uid, i.e. actorFor is removed private def removeFromMap[T](subject: ActorRef, map: Map[ActorRef, T]): Map[ActorRef, T] = if (subject.path.uid != ActorCell.undefinedUid) (map - subject) - new UndefinedUidActorRef(subject) - else (map filterKeys (_.path != subject.path)).toMap + else map.filterKeys(_.path != subject.path).toMap private def updateWatching(ref: InternalActorRef, newMessage: Option[Any]): Unit = watching = watching.updated(ref, newMessage) @@ -127,7 +129,7 @@ private[akka] trait DeathWatch { this: ActorCell => if (previous != newMessage) throw new IllegalStateException( s"Watch($self, $ref) termination message was not overwritten from [$previous] to [$newMessage]. 
" + - s"If this was intended, unwatch first before using `watch` / `watchWith` with another message.") + s"If this was intended, unwatch first before using `watch` / `watchWith` with another message.") } protected def tellWatchersWeDied(): Unit = @@ -136,7 +138,9 @@ private[akka] trait DeathWatch { this: ActorCell => // Don't need to send to parent parent since it receives a DWN by default def sendTerminated(ifLocal: Boolean)(watcher: ActorRef): Unit = if (watcher.asInstanceOf[ActorRefScope].isLocal == ifLocal && watcher != parent) - watcher.asInstanceOf[InternalActorRef].sendSystemMessage(DeathWatchNotification(self, existenceConfirmed = true, addressTerminated = false)) + watcher + .asInstanceOf[InternalActorRef] + .sendSystemMessage(DeathWatchNotification(self, existenceConfirmed = true, addressTerminated = false)) /* * It is important to notify the remote watchers first, otherwise RemoteDaemon might shut down, causing @@ -151,8 +155,8 @@ private[akka] trait DeathWatch { this: ActorCell => * * If the remote watchers are notified first, then the mailbox of the Remoting will guarantee the correct order. 
*/ - watchedBy foreach sendTerminated(ifLocal = false) - watchedBy foreach sendTerminated(ifLocal = true) + watchedBy.foreach(sendTerminated(ifLocal = false)) + watchedBy.foreach(sendTerminated(ifLocal = true)) } finally { maintainAddressTerminatedSubscription() { watchedBy = ActorCell.emptyActorRefSet @@ -164,9 +168,9 @@ private[akka] trait DeathWatch { this: ActorCell => if (!watching.isEmpty) { maintainAddressTerminatedSubscription() { try { - watching foreach { // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + watching.foreach { // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ case (watchee: InternalActorRef, _) => watchee.sendSystemMessage(Unwatch(watchee, self)) - case (watchee, _) => + case (watchee, _) => // should never happen, suppress "match may not be exhaustive" compiler warning throw new IllegalStateException(s"Expected InternalActorRef, but got [${watchee.getClass.getName}]") } @@ -189,7 +193,8 @@ private[akka] trait DeathWatch { this: ActorCell => } else if (!watcheeSelf && watcherSelf) { watch(watchee) } else { - publish(Warning(self.path.toString, clazz(actor), "BUG: illegal Watch(%s,%s) for %s".format(watchee, watcher, self))) + publish( + Warning(self.path.toString, clazz(actor), "BUG: illegal Watch(%s,%s) for %s".format(watchee, watcher, self))) } } @@ -200,12 +205,14 @@ private[akka] trait DeathWatch { this: ActorCell => if (watcheeSelf && !watcherSelf) { if (watchedBy.contains(watcher)) maintainAddressTerminatedSubscription(watcher) { watchedBy -= watcher - if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(actor), s"no longer watched by $watcher")) + if (system.settings.DebugLifecycle) + publish(Debug(self.path.toString, clazz(actor), s"no longer watched by $watcher")) } } else if (!watcheeSelf && watcherSelf) { unwatch(watchee) } else { - publish(Warning(self.path.toString, clazz(actor), "BUG: illegal Unwatch(%s,%s) for %s".format(watchee, watcher, self))) + publish( + 
Warning(self.path.toString, clazz(actor), "BUG: illegal Unwatch(%s,%s) for %s".format(watchee, watcher, self))) } } @@ -222,7 +229,8 @@ private[akka] trait DeathWatch { this: ActorCell => // it is removed by sending DeathWatchNotification with existenceConfirmed = true to support // immediate creation of child with same name. for ((a, _) <- watching; if a.path.address == address) { - self.sendSystemMessage(DeathWatchNotification(a, existenceConfirmed = childrenRefs.getByRef(a).isDefined, addressTerminated = true)) + self.sendSystemMessage( + DeathWatchNotification(a, existenceConfirmed = childrenRefs.getByRef(a).isDefined, addressTerminated = true)) } } @@ -240,7 +248,7 @@ private[akka] trait DeathWatch { this: ActorCell => } if (isNonLocal(change)) { - def hasNonLocalAddress: Boolean = ((watching.keysIterator exists isNonLocal) || (watchedBy exists isNonLocal)) + def hasNonLocalAddress: Boolean = ((watching.keysIterator.exists(isNonLocal)) || (watchedBy.exists(isNonLocal))) val had = hasNonLocalAddress val result = block val has = hasNonLocalAddress diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala index 44e04c9c3f..2520ccbfde 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala @@ -23,18 +23,23 @@ import akka.serialization.Serialization @SerialVersionUID(1L) final case class SerializationCheckFailedException private (msg: Object, cause: Throwable) - extends AkkaException(s"Failed to serialize and deserialize message of type ${msg.getClass.getName} for testing. " + - "To avoid this error, either disable 'akka.actor.serialize-messages', mark the message with 'akka.actor.NoSerializationVerificationNeeded', or configure serialization to support this message", cause) + extends AkkaException( + s"Failed to serialize and deserialize message of type ${msg.getClass.getName} for testing. 
" + + "To avoid this error, either disable 'akka.actor.serialize-messages', mark the message with 'akka.actor.NoSerializationVerificationNeeded', or configure serialization to support this message", + cause) private[akka] trait Dispatch { this: ActorCell => - @volatile private var _mailboxDoNotCallMeDirectly: Mailbox = _ //This must be volatile since it isn't protected by the mailbox status + @volatile private var _mailboxDoNotCallMeDirectly + : Mailbox = _ //This must be volatile since it isn't protected by the mailbox status - @inline final def mailbox: Mailbox = Unsafe.instance.getObjectVolatile(this, AbstractActorCell.mailboxOffset).asInstanceOf[Mailbox] + @inline final def mailbox: Mailbox = + Unsafe.instance.getObjectVolatile(this, AbstractActorCell.mailboxOffset).asInstanceOf[Mailbox] @tailrec final def swapMailbox(newMailbox: Mailbox): Mailbox = { val oldMailbox = mailbox - if (!Unsafe.instance.compareAndSwapObject(this, AbstractActorCell.mailboxOffset, oldMailbox, newMailbox)) swapMailbox(newMailbox) + if (!Unsafe.instance.compareAndSwapObject(this, AbstractActorCell.mailboxOffset, oldMailbox, newMailbox)) + swapMailbox(newMailbox) else oldMailbox } @@ -67,12 +72,10 @@ private[akka] trait Dispatch { this: ActorCell => val createMessage = mailboxType match { case _: ProducesMessageQueue[_] if system.mailboxes.hasRequiredType(actorClass) => val req = system.mailboxes.getRequiredType(actorClass) - if (req isInstance mbox.messageQueue) Create(None) + if (req.isInstance(mbox.messageQueue)) Create(None) else { val gotType = if (mbox.messageQueue == null) "null" else mbox.messageQueue.getClass.getName - Create(Some(ActorInitializationException( - self, - s"Actor [$self] requires mailbox type [$req] got [$gotType]"))) + Create(Some(ActorInitializationException(self, s"Actor [$self] requires mailbox type [$req] got [$gotType]"))) } case _ => Create(None) } @@ -122,16 +125,24 @@ private[akka] trait Dispatch { this: ActorCell => } // ➡➡➡ NEVER SEND THE SAME SYSTEM 
MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def suspend(): Unit = try dispatcher.systemDispatch(this, Suspend()) catch handleException + final def suspend(): Unit = + try dispatcher.systemDispatch(this, Suspend()) + catch handleException // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def resume(causedByFailure: Throwable): Unit = try dispatcher.systemDispatch(this, Resume(causedByFailure)) catch handleException + final def resume(causedByFailure: Throwable): Unit = + try dispatcher.systemDispatch(this, Resume(causedByFailure)) + catch handleException // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def restart(cause: Throwable): Unit = try dispatcher.systemDispatch(this, Recreate(cause)) catch handleException + final def restart(cause: Throwable): Unit = + try dispatcher.systemDispatch(this, Recreate(cause)) + catch handleException // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def stop(): Unit = try dispatcher.systemDispatch(this, Terminate()) catch handleException + final def stop(): Unit = + try dispatcher.systemDispatch(this, Terminate()) + catch handleException def sendMessage(msg: Envelope): Unit = try { @@ -182,6 +193,8 @@ private[akka] trait Dispatch { this: ActorCell => } } - override def sendSystemMessage(message: SystemMessage): Unit = try dispatcher.systemDispatch(this, message) catch handleException + override def sendSystemMessage(message: SystemMessage): Unit = + try dispatcher.systemDispatch(this, message) + catch handleException } diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala index 3961bda251..db28ae646c 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala @@ -6,7 +6,7 @@ package akka.actor.dungeon import akka.actor.PostRestartException import akka.actor.PreRestartException 
-import akka.actor.{ InternalActorRef, ActorRef, ActorInterruptedException, ActorCell, Actor } +import akka.actor.{ Actor, ActorCell, ActorInterruptedException, ActorRef, InternalActorRef } import akka.dispatch._ import akka.dispatch.sysmsg._ import akka.event.Logging @@ -33,9 +33,9 @@ private[akka] trait FaultHandling { this: ActorCell => * of a restart, failures in constructor/preStart count as new failures. */ - private def suspendNonRecursive(): Unit = dispatcher suspend this + private def suspendNonRecursive(): Unit = dispatcher.suspend(this) - private def resumeNonRecursive(): Unit = dispatcher resume this + private def resumeNonRecursive(): Unit = dispatcher.resume(this) /* * have we told our supervisor that we Failed() and have not yet heard back? @@ -54,8 +54,8 @@ private[akka] trait FaultHandling { this: ActorCell => */ protected def faultRecreate(cause: Throwable): Unit = if (actor == null) { - system.eventStream.publish(Error(self.path.toString, clazz(actor), - "changing Recreate into Create after " + cause)) + system.eventStream.publish( + Error(self.path.toString, clazz(actor), "changing Recreate into Create after " + cause)) faultCreate() } else if (isNormal) { val failedActor = actor @@ -97,12 +97,12 @@ private[akka] trait FaultHandling { this: ActorCell => */ protected def faultResume(causedByFailure: Throwable): Unit = { if (actor == null) { - system.eventStream.publish(Error(self.path.toString, clazz(actor), - "changing Resume into Create after " + causedByFailure)) + system.eventStream.publish( + Error(self.path.toString, clazz(actor), "changing Resume into Create after " + causedByFailure)) faultCreate() } else if (actor.context == null && causedByFailure != null) { - system.eventStream.publish(Error(self.path.toString, clazz(actor), - "changing Resume into Restart after " + causedByFailure)) + system.eventStream.publish( + Error(self.path.toString, clazz(actor), "changing Resume into Restart after " + causedByFailure)) 
faultRecreate(causedByFailure) } else { val perp = perpetrator @@ -125,7 +125,7 @@ private[akka] trait FaultHandling { this: ActorCell => cancelReceiveTimeout // stop all children, which will turn childrenRefs into TerminatingChildrenContainer (if there are children) - children foreach stop + children.foreach(stop) if (!setChildrenTerminationReason(ChildrenContainer.Creation())) finishCreate() } @@ -147,11 +147,11 @@ private[akka] trait FaultHandling { this: ActorCell => unwatchWatchedActors(actor) // stop all children, which will turn childrenRefs into TerminatingChildrenContainer (if there are children) - children foreach stop + children.foreach(stop) if (systemImpl.aborting) { // separate iteration because this is a very rare case that should not penalize normal operation - children foreach { + children.foreach { case ref: ActorRefScope if !ref.isLocal => self.sendSystemMessage(DeathWatchNotification(ref, true, false)) case _ => } @@ -193,9 +193,12 @@ private[akka] trait FaultHandling { this: ActorCell => parent.sendSystemMessage(Failed(self, t, uid)) } } catch handleNonFatalOrInterruptedException { e => - publish(Error(e, self.path.toString, clazz(actor), - "emergency stop: exception in failure handling for " + t.getClass + Logging.stackTraceFor(t))) - try children foreach stop + publish( + Error(e, + self.path.toString, + clazz(actor), + "emergency stop: exception in failure handling for " + t.getClass + Logging.stackTraceFor(t))) + try children.foreach(stop) finally finishTerminate() } } @@ -208,9 +211,11 @@ private[akka] trait FaultHandling { this: ActorCell => * specific order. 
*/ try if (a ne null) a.aroundPostStop() - catch handleNonFatalOrInterruptedException { e => publish(Error(e, self.path.toString, clazz(a), e.getMessage)) } - finally try dispatcher.detach(this) - finally try parent.sendSystemMessage(DeathWatchNotification(self, existenceConfirmed = true, addressTerminated = false)) + catch handleNonFatalOrInterruptedException { e => + publish(Error(e, self.path.toString, clazz(a), e.getMessage)) + } finally try dispatcher.detach(this) + finally try parent.sendSystemMessage( + DeathWatchNotification(self, existenceConfirmed = true, addressTerminated = false)) finally try stopFunctionRefs() finally try tellWatchersWeDied() finally try unwatchWatchedActors(a) // stay here as we expect an emergency stop from handleInvokeFailure @@ -240,11 +245,12 @@ private[akka] trait FaultHandling { this: ActorCell => if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(freshActor), "restarted")) // only after parent is up and running again do restart the children which were not stopped - survivors foreach (child => - try child.asInstanceOf[InternalActorRef].restart(cause) - catch handleNonFatalOrInterruptedException { e => - publish(Error(e, self.path.toString, clazz(freshActor), "restarting " + child)) - }) + survivors.foreach( + child => + try child.asInstanceOf[InternalActorRef].restart(cause) + catch handleNonFatalOrInterruptedException { e => + publish(Error(e, self.path.toString, clazz(freshActor), "restarting " + child)) + }) } catch handleNonFatalOrInterruptedException { e => clearActorFields(actor, recreate = false) // in order to prevent preRestart() from happening again handleInvokeFailure(survivors, PostRestartException(self, e, cause)) @@ -262,10 +268,14 @@ private[akka] trait FaultHandling { this: ActorCell => case Some(stats) if stats.uid == f.uid => if (!actor.supervisorStrategy.handleFailure(this, f.child, f.cause, stats, getAllChildStats)) throw f.cause case Some(stats) => - publish(Debug(self.path.toString, 
clazz(actor), - "dropping Failed(" + f.cause + ") from old child " + f.child + " (uid=" + stats.uid + " != " + f.uid + ")")) + publish( + Debug( + self.path.toString, + clazz(actor), + "dropping Failed(" + f.cause + ") from old child " + f.child + " (uid=" + stats.uid + " != " + f.uid + ")")) case None => - publish(Debug(self.path.toString, clazz(actor), "dropping Failed(" + f.cause + ") from unknown child " + f.child)) + publish( + Debug(self.path.toString, clazz(actor), "dropping Failed(" + f.cause + ") from unknown child " + f.child)) } } diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala b/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala index e6ac315eaa..5f3d965171 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/TimerSchedulerImpl.scala @@ -22,9 +22,12 @@ import akka.util.OptionVal final case class Timer(key: Any, msg: Any, repeat: Boolean, generation: Int, task: Cancellable) final case class InfluenceReceiveTimeoutTimerMsg(key: Any, generation: Int, owner: TimerSchedulerImpl) - extends TimerMsg with NoSerializationVerificationNeeded + extends TimerMsg + with NoSerializationVerificationNeeded final case class NotInfluenceReceiveTimeoutTimerMsg(key: Any, generation: Int, owner: TimerSchedulerImpl) - extends TimerMsg with NoSerializationVerificationNeeded with NotInfluenceReceiveTimeout + extends TimerMsg + with NoSerializationVerificationNeeded + with NotInfluenceReceiveTimeout } /** @@ -113,9 +116,10 @@ import akka.util.OptionVal OptionVal.Some(t.msg.asInstanceOf[AnyRef]) } else { // it was from an old timer that was enqueued in mailbox before canceled - log.debug( - "Received timer [{}] from from old generation [{}], expected generation [{}], discarding", - timerMsg.key, timerMsg.generation, t.generation) + log.debug("Received timer [{}] from from old generation [{}], expected generation [{}], discarding", + timerMsg.key, 
+ timerMsg.generation, + t.generation) OptionVal.None // message should be ignored } } diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 8e7309cc9d..44bdea874b 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -12,7 +12,7 @@ import akka.dispatch.affinity.AffinityPoolConfigurator import akka.dispatch.sysmsg._ import akka.event.EventStream import akka.event.Logging.{ Debug, Error, LogEventException } -import akka.util.{ Index, Unsafe, unused } +import akka.util.{ unused, Index, Unsafe } import com.typesafe.config.Config import scala.annotation.tailrec @@ -37,7 +37,8 @@ final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cl } def run(): Unit = - try runnable.run() catch { + try runnable.run() + catch { case NonFatal(e) => eventStream.publish(Error(e, "TaskInvocation", this.getClass, e.getMessage)) } finally cleanup() } @@ -83,7 +84,10 @@ private[akka] object MessageDispatcher { } } -abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator) extends AbstractMessageDispatcher with BatchingExecutor with ExecutionContextExecutor { +abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator) + extends AbstractMessageDispatcher + with BatchingExecutor + with ExecutionContextExecutor { import AbstractMessageDispatcher.{ inhabitantsOffset, shutdownScheduleOffset } import MessageDispatcher._ @@ -111,7 +115,8 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator final def inhabitants: Long = Unsafe.instance.getLongVolatile(this, inhabitantsOffset) private final def shutdownSchedule: Int = Unsafe.instance.getIntVolatile(this, shutdownScheduleOffset) - private final def updateShutdownSchedule(expect: Int, update: Int): Boolean = Unsafe.instance.compareAndSwapInt(this, 
shutdownScheduleOffset, expect, update) + private final def updateShutdownSchedule(expect: Int, update: Int): Boolean = + Unsafe.instance.compareAndSwapInt(this, shutdownScheduleOffset, expect, update) /** * Creates and returns a mailbox for the given actor. @@ -137,7 +142,9 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator /** * Detaches the specified actor instance from this dispatcher */ - final def detach(actor: ActorCell): Unit = try unregister(actor) finally ifSensibleToDoSoThenScheduleShutdown() + final def detach(actor: ActorCell): Unit = + try unregister(actor) + finally ifSensibleToDoSoThenScheduleShutdown() final protected def resubmitOnBlock: Boolean = true // We want to avoid starvation final override protected def unbatchedExecute(r: Runnable): Unit = { val invocation = TaskInvocation(eventStream, r, taskCleanup) @@ -174,7 +181,8 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator try prerequisites.scheduler.scheduleOnce(shutdownTimeout, shutdownAction)(new ExecutionContext { override def execute(runnable: Runnable): Unit = runnable.run() override def reportFailure(t: Throwable): Unit = MessageDispatcher.this.reportFailure(t) - }) catch { + }) + catch { case _: IllegalStateException => shutdown() // Since there is no scheduler anymore, restore the state to UNSCHEDULED. 
@@ -274,7 +282,9 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator * * INTERNAL API */ - protected[akka] def registerForExecution(mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean + protected[akka] def registerForExecution(mbox: Mailbox, + hasMessageHint: Boolean, + hasSystemMessageHint: Boolean): Boolean // TODO check whether this should not actually be a property of the mailbox /** @@ -309,7 +319,8 @@ abstract class MessageDispatcher(val configurator: MessageDispatcherConfigurator /** * An ExecutorServiceConfigurator is a class that given some prerequisites and a configuration can create instances of ExecutorService */ -abstract class ExecutorServiceConfigurator(@unused config: Config, @unused prerequisites: DispatcherPrerequisites) extends ExecutorServiceFactoryProvider +abstract class ExecutorServiceConfigurator(@unused config: Config, @unused prerequisites: DispatcherPrerequisites) + extends ExecutorServiceFactoryProvider /** * Base class to be used for hooking in new dispatchers into Dispatchers. 
@@ -327,54 +338,74 @@ abstract class MessageDispatcherConfigurator(_config: Config, val prerequisites: def configureExecutor(): ExecutorServiceConfigurator = { def configurator(executor: String): ExecutorServiceConfigurator = executor match { - case null | "" | "fork-join-executor" => new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites) - case "thread-pool-executor" => new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites) - case "affinity-pool-executor" => new AffinityPoolConfigurator(config.getConfig("affinity-pool-executor"), prerequisites) + case null | "" | "fork-join-executor" => + new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites) + case "thread-pool-executor" => + new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites) + case "affinity-pool-executor" => + new AffinityPoolConfigurator(config.getConfig("affinity-pool-executor"), prerequisites) case fqcn => - val args = List( - classOf[Config] -> config, - classOf[DispatcherPrerequisites] -> prerequisites) - prerequisites.dynamicAccess.createInstanceFor[ExecutorServiceConfigurator](fqcn, args).recover({ - case exception => throw new IllegalArgumentException( - ("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s], + val args = List(classOf[Config] -> config, classOf[DispatcherPrerequisites] -> prerequisites) + prerequisites.dynamicAccess + .createInstanceFor[ExecutorServiceConfigurator](fqcn, args) + .recover({ + case exception => + throw new IllegalArgumentException( + ("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s], make sure it has an accessible constructor with a [%s,%s] signature""") - .format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), exception) - }).get + .format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), + 
exception) + }) + .get } config.getString("executor") match { - case "default-executor" => new DefaultExecutorServiceConfigurator(config.getConfig("default-executor"), prerequisites, configurator(config.getString("default-executor.fallback"))) - case other => configurator(other) + case "default-executor" => + new DefaultExecutorServiceConfigurator(config.getConfig("default-executor"), + prerequisites, + configurator(config.getString("default-executor.fallback"))) + case other => configurator(other) } } } -class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) { +class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) + extends ExecutorServiceConfigurator(config, prerequisites) { val threadPoolConfig: ThreadPoolConfig = createThreadPoolConfigBuilder(config, prerequisites).config - protected def createThreadPoolConfigBuilder(config: Config, @unused prerequisites: DispatcherPrerequisites): ThreadPoolConfigBuilder = { + protected def createThreadPoolConfigBuilder( + config: Config, + @unused prerequisites: DispatcherPrerequisites): ThreadPoolConfigBuilder = { import akka.util.Helpers.ConfigOps val builder = ThreadPoolConfigBuilder(ThreadPoolConfig()) .setKeepAliveTime(config.getMillisDuration("keep-alive-time")) - .setAllowCoreThreadTimeout(config getBoolean "allow-core-timeout") - .configure( - Some(config getInt "task-queue-size") flatMap { - case size if size > 0 => - Some(config getString "task-queue-type") map { + .setAllowCoreThreadTimeout(config.getBoolean("allow-core-timeout")) + .configure(Some(config.getInt("task-queue-size")).flatMap { + case size if size > 0 => + Some(config.getString("task-queue-type")) + .map { case "array" => ThreadPoolConfig.arrayBlockingQueue(size, false) //TODO config fairness? 
case "" | "linked" => ThreadPoolConfig.linkedBlockingQueue(size) - case x => throw new IllegalArgumentException("[%s] is not a valid task-queue-type [array|linked]!" format x) - } map { qf => (q: ThreadPoolConfigBuilder) => q.setQueueFactory(qf) } - case _ => None - }) + case x => + throw new IllegalArgumentException("[%s] is not a valid task-queue-type [array|linked]!".format(x)) + } + .map { qf => (q: ThreadPoolConfigBuilder) => + q.setQueueFactory(qf) + } + case _ => None + }) if (config.getString("fixed-pool-size") == "off") builder - .setCorePoolSizeFromFactor(config getInt "core-pool-size-min", config getDouble "core-pool-size-factor", config getInt "core-pool-size-max") - .setMaxPoolSizeFromFactor(config getInt "max-pool-size-min", config getDouble "max-pool-size-factor", config getInt "max-pool-size-max") + .setCorePoolSizeFromFactor(config.getInt("core-pool-size-min"), + config.getDouble("core-pool-size-factor"), + config.getInt("core-pool-size-max")) + .setMaxPoolSizeFromFactor(config.getInt("max-pool-size-min"), + config.getDouble("max-pool-size-factor"), + config.getInt("max-pool-size-max")) else builder.setFixedPoolSize(config.getInt("fixed-pool-size")) } @@ -383,11 +414,17 @@ class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPr threadPoolConfig.createExecutorServiceFactory(id, threadFactory) } -class DefaultExecutorServiceConfigurator(config: Config, prerequisites: DispatcherPrerequisites, fallback: ExecutorServiceConfigurator) extends ExecutorServiceConfigurator(config, prerequisites) { +class DefaultExecutorServiceConfigurator(config: Config, + prerequisites: DispatcherPrerequisites, + fallback: ExecutorServiceConfigurator) + extends ExecutorServiceConfigurator(config, prerequisites) { val provider: ExecutorServiceFactoryProvider = prerequisites.defaultExecutionContext match { case Some(ec) => - prerequisites.eventStream.publish(Debug("DefaultExecutorServiceConfigurator", this.getClass, s"Using passed in ExecutionContext 
as default executor for this ActorSystem. If you want to use a different executor, please specify one in akka.actor.default-dispatcher.default-executor.")) + prerequisites.eventStream.publish( + Debug("DefaultExecutorServiceConfigurator", + this.getClass, + s"Using passed in ExecutionContext as default executor for this ActorSystem. If you want to use a different executor, please specify one in akka.actor.default-dispatcher.default-executor.")) new AbstractExecutorService with ExecutorServiceFactory with ExecutorServiceFactoryProvider { def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory = this diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index e239b5b8d6..730b6e228c 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -31,23 +31,27 @@ import scala.concurrent.duration.FiniteDuration * @see akka.dispatch.Dispatchers */ @deprecated("Use BalancingPool instead of BalancingDispatcher", "2.3") -private[akka] class BalancingDispatcher( - _configurator: MessageDispatcherConfigurator, - _id: String, - throughput: Int, - throughputDeadlineTime: Duration, - _mailboxType: MailboxType, - _executorServiceFactoryProvider: ExecutorServiceFactoryProvider, - _shutdownTimeout: FiniteDuration, - attemptTeamWork: Boolean) - extends Dispatcher(_configurator, _id, throughput, throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) { +private[akka] class BalancingDispatcher(_configurator: MessageDispatcherConfigurator, + _id: String, + throughput: Int, + throughputDeadlineTime: Duration, + _mailboxType: MailboxType, + _executorServiceFactoryProvider: ExecutorServiceFactoryProvider, + _shutdownTimeout: FiniteDuration, + attemptTeamWork: Boolean) + extends Dispatcher(_configurator, + _id, + throughput, + 
throughputDeadlineTime, + _executorServiceFactoryProvider, + _shutdownTimeout) { /** * INTERNAL API */ - private[akka] val team = new ConcurrentSkipListSet[ActorCell]( - Helpers.identityHashComparator(new Comparator[ActorCell] { - def compare(l: ActorCell, r: ActorCell) = l.self.path compareTo r.self.path + private[akka] val team = + new ConcurrentSkipListSet[ActorCell](Helpers.identityHashComparator(new Comparator[ActorCell] { + def compare(l: ActorCell, r: ActorCell) = l.self.path.compareTo(r.self.path) })) /** @@ -56,7 +60,8 @@ private[akka] class BalancingDispatcher( private[akka] val messageQueue: MessageQueue = _mailboxType.create(None, None) private class SharingMailbox(val system: ActorSystemImpl, _messageQueue: MessageQueue) - extends Mailbox(_messageQueue) with DefaultSystemMessageQueue { + extends Mailbox(_messageQueue) + with DefaultSystemMessageQueue { override def cleanUp(): Unit = { val dlq = mailboxes.deadLetterMailbox //Don't call the original implementation of this since it scraps all messages, and we don't want to do that @@ -94,12 +99,12 @@ private[akka] class BalancingDispatcher( if (attemptTeamWork) { @tailrec def scheduleOne(i: Iterator[ActorCell] = team.iterator): Unit = if (messageQueue.hasMessages - && i.hasNext - && (executorService.executor match { - case lm: LoadMetrics => lm.atFullThrottle == false - case _ => true - }) - && !registerForExecution(i.next.mailbox, false, false)) + && i.hasNext + && (executorService.executor match { + case lm: LoadMetrics => lm.atFullThrottle == false + case _ => true + }) + && !registerForExecution(i.next.mailbox, false, false)) scheduleOne(i) scheduleOne() diff --git a/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala b/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala index dd21963c64..af09a105b8 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala @@ -69,8 +69,9 @@ private[akka] trait 
BatchingExecutor extends Executor { private[this] final class Batch extends AbstractBatch { override final def run: Unit = { require(_tasksLocal.get eq null) - _tasksLocal set this // Install ourselves as the current batch - try processBatch(this) catch { + _tasksLocal.set(this) // Install ourselves as the current batch + try processBatch(this) + catch { case t: Throwable => resubmitUnbatched() throw t @@ -84,11 +85,12 @@ private[akka] trait BatchingExecutor extends Executor { // this method runs in the delegate ExecutionContext's thread override final def run(): Unit = { require(_tasksLocal.get eq null) - _tasksLocal set this // Install ourselves as the current batch + _tasksLocal.set(this) // Install ourselves as the current batch val firstInvocation = _blockContext.get eq null if (firstInvocation) _blockContext.set(BlockContext.current) BlockContext.withBlockContext(this) { - try processBatch(this) catch { + try processBatch(this) + catch { case t: Throwable => resubmitUnbatched() throw t diff --git a/akka-actor/src/main/scala/akka/dispatch/CachingConfig.scala b/akka-actor/src/main/scala/akka/dispatch/CachingConfig.scala index 467849fc49..454a24b8d8 100644 --- a/akka-actor/src/main/scala/akka/dispatch/CachingConfig.scala +++ b/akka-actor/src/main/scala/akka/dispatch/CachingConfig.scala @@ -197,9 +197,9 @@ private[akka] class CachingConfig(_config: Config) extends Config { def resolveWith(source: Config) = config.resolveWith(source) - override def getEnumList[T <: Enum[T]](enumClass: Class[T], path: String): util.List[T] = config.getEnumList(enumClass, path) + override def getEnumList[T <: Enum[T]](enumClass: Class[T], path: String): util.List[T] = + config.getEnumList(enumClass, path) override def getEnum[T <: Enum[T]](enumClass: Class[T], path: String): T = config.getEnum(enumClass, path) } - diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index 1942640ba9..9e3ded6a17 100644 --- 
a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -25,14 +25,13 @@ import java.util.concurrent.atomic.AtomicReferenceFieldUpdater * always continues until the mailbox is empty. * Larger values (or zero or negative) increase throughput, smaller values increase fairness */ -class Dispatcher( - _configurator: MessageDispatcherConfigurator, - val id: String, - val throughput: Int, - val throughputDeadlineTime: Duration, - executorServiceFactoryProvider: ExecutorServiceFactoryProvider, - val shutdownTimeout: FiniteDuration) - extends MessageDispatcher(_configurator) { +class Dispatcher(_configurator: MessageDispatcherConfigurator, + val id: String, + val throughput: Int, + val throughputDeadlineTime: Duration, + executorServiceFactoryProvider: ExecutorServiceFactoryProvider, + val shutdownTimeout: FiniteDuration) + extends MessageDispatcher(_configurator) { import configurator.prerequisites._ @@ -69,11 +68,11 @@ class Dispatcher( */ protected[akka] def executeTask(invocation: TaskInvocation): Unit = { try { - executorService execute invocation + executorService.execute(invocation) } catch { case e: RejectedExecutionException => try { - executorService execute invocation + executorService.execute(invocation) } catch { case e2: RejectedExecutionException => eventStream.publish(Error(e, getClass.getName, getClass, "executeTask was rejected twice!")) @@ -89,10 +88,9 @@ class Dispatcher( new Mailbox(mailboxType.create(Some(actor.self), Some(actor.system))) with DefaultSystemMessageQueue } - private val esUpdater = AtomicReferenceFieldUpdater.newUpdater( - classOf[Dispatcher], - classOf[LazyExecutorServiceDelegate], - "executorServiceDelegate") + private val esUpdater = AtomicReferenceFieldUpdater.newUpdater(classOf[Dispatcher], + classOf[LazyExecutorServiceDelegate], + "executorServiceDelegate") /** * INTERNAL API @@ -108,16 +106,18 @@ class Dispatcher( * * INTERNAL API */ - protected[akka] override 
def registerForExecution(mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean = { + protected[akka] override def registerForExecution(mbox: Mailbox, + hasMessageHint: Boolean, + hasSystemMessageHint: Boolean): Boolean = { if (mbox.canBeScheduledForExecution(hasMessageHint, hasSystemMessageHint)) { //This needs to be here to ensure thread safety and no races if (mbox.setAsScheduled()) { try { - executorService execute mbox + executorService.execute(mbox) true } catch { case _: RejectedExecutionException => try { - executorService execute mbox + executorService.execute(mbox) true } catch { //Retry once case e: RejectedExecutionException => @@ -134,6 +134,7 @@ class Dispatcher( } object PriorityGenerator { + /** * Creates a PriorityGenerator that uses the supplied function as priority generator */ diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 09faa89b92..35a0c17b00 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -5,8 +5,8 @@ package akka.dispatch import java.util.concurrent.{ ConcurrentHashMap, ThreadFactory } -import com.typesafe.config.{ ConfigFactory, Config } -import akka.actor.{ Scheduler, DynamicAccess, ActorSystem } +import com.typesafe.config.{ Config, ConfigFactory } +import akka.actor.{ ActorSystem, DynamicAccess, Scheduler } import akka.event.Logging.Warning import akka.event.EventStream import akka.ConfigurationException @@ -29,16 +29,17 @@ trait DispatcherPrerequisites { /** * INTERNAL API */ -private[akka] final case class DefaultDispatcherPrerequisites( - val threadFactory: ThreadFactory, - val eventStream: EventStream, - val scheduler: Scheduler, - val dynamicAccess: DynamicAccess, - val settings: ActorSystem.Settings, - val mailboxes: Mailboxes, - val defaultExecutionContext: Option[ExecutionContext]) extends DispatcherPrerequisites +private[akka] 
final case class DefaultDispatcherPrerequisites(val threadFactory: ThreadFactory, + val eventStream: EventStream, + val scheduler: Scheduler, + val dynamicAccess: DynamicAccess, + val settings: ActorSystem.Settings, + val mailboxes: Mailboxes, + val defaultExecutionContext: Option[ExecutionContext]) + extends DispatcherPrerequisites object Dispatchers { + /** * The id of the default dispatcher, also the full key of the * configuration of the default dispatcher. @@ -169,26 +170,31 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc * IllegalArgumentException if it cannot create the MessageDispatcherConfigurator */ private def configuratorFrom(cfg: Config): MessageDispatcherConfigurator = { - if (!cfg.hasPath("id")) throw new ConfigurationException("Missing dispatcher 'id' property in config: " + cfg.root.render) + if (!cfg.hasPath("id")) + throw new ConfigurationException("Missing dispatcher 'id' property in config: " + cfg.root.render) cfg.getString("type") match { - case "Dispatcher" => new DispatcherConfigurator(cfg, prerequisites) + case "Dispatcher" => new DispatcherConfigurator(cfg, prerequisites) case "BalancingDispatcher" => // FIXME remove this case in 2.4 - throw new IllegalArgumentException("BalancingDispatcher is deprecated, use a BalancingPool instead. " + + throw new IllegalArgumentException( + "BalancingDispatcher is deprecated, use a BalancingPool instead. 
" + "During a migration period you can still use BalancingDispatcher by specifying the full class name: " + classOf[BalancingDispatcherConfigurator].getName) case "PinnedDispatcher" => new PinnedDispatcherConfigurator(cfg, prerequisites) case fqn => val args = List(classOf[Config] -> cfg, classOf[DispatcherPrerequisites] -> prerequisites) - prerequisites.dynamicAccess.createInstanceFor[MessageDispatcherConfigurator](fqn, args).recover({ - case exception => - throw new ConfigurationException( - ("Cannot instantiate MessageDispatcherConfigurator type [%s], defined in [%s], " + + prerequisites.dynamicAccess + .createInstanceFor[MessageDispatcherConfigurator](fqn, args) + .recover({ + case exception => + throw new ConfigurationException( + ("Cannot instantiate MessageDispatcherConfigurator type [%s], defined in [%s], " + "make sure it has constructor with [com.typesafe.config.Config] and " + - "[akka.dispatch.DispatcherPrerequisites] parameters") - .format(fqn, cfg.getString("id")), exception) - }).get + "[akka.dispatch.DispatcherPrerequisites] parameters").format(fqn, cfg.getString("id")), + exception) + }) + .get } } } @@ -199,15 +205,14 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc * of the `dispatcher()` method. 
*/ class DispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) - extends MessageDispatcherConfigurator(config, prerequisites) { + extends MessageDispatcherConfigurator(config, prerequisites) { - private val instance = new Dispatcher( - this, - config.getString("id"), - config.getInt("throughput"), - config.getNanosDuration("throughput-deadline-time"), - configureExecutor(), - config.getMillisDuration("shutdown-timeout")) + private val instance = new Dispatcher(this, + config.getString("id"), + config.getInt("throughput"), + config.getNanosDuration("throughput-deadline-time"), + configureExecutor(), + config.getMillisDuration("shutdown-timeout")) /** * Returns the same dispatcher instance for each invocation @@ -232,7 +237,7 @@ private[akka] object BalancingDispatcherConfigurator { * of the `dispatcher()` method. */ class BalancingDispatcherConfigurator(_config: Config, _prerequisites: DispatcherPrerequisites) - extends MessageDispatcherConfigurator(BalancingDispatcherConfigurator.amendConfig(_config), _prerequisites) { + extends MessageDispatcherConfigurator(BalancingDispatcherConfigurator.amendConfig(_config), _prerequisites) { private val instance = { val mailboxes = prerequisites.mailboxes @@ -241,7 +246,7 @@ class BalancingDispatcherConfigurator(_config: Config, _prerequisites: Dispatche if (!classOf[MultipleConsumerSemantics].isAssignableFrom(requirement)) throw new IllegalArgumentException( "BalancingDispatcher must have 'mailbox-requirement' which implements akka.dispatch.MultipleConsumerSemantics; " + - s"dispatcher [$id] has [$requirement]") + s"dispatcher [$id] has [$requirement]") val mailboxType = if (config.hasPath("mailbox")) { val mt = mailboxes.lookup(config.getString("mailbox")) @@ -260,15 +265,14 @@ class BalancingDispatcherConfigurator(_config: Config, _prerequisites: Dispatche } protected def create(mailboxType: MailboxType): BalancingDispatcher = - new BalancingDispatcher( - this, - config.getString("id"), - 
config.getInt("throughput"), - config.getNanosDuration("throughput-deadline-time"), - mailboxType, - configureExecutor(), - config.getMillisDuration("shutdown-timeout"), - config.getBoolean("attempt-teamwork")) + new BalancingDispatcher(this, + config.getString("id"), + config.getInt("throughput"), + config.getNanosDuration("throughput-deadline-time"), + mailboxType, + configureExecutor(), + config.getMillisDuration("shutdown-timeout"), + config.getBoolean("attempt-teamwork")) /** * Returns the same dispatcher instance for each invocation @@ -282,25 +286,27 @@ class BalancingDispatcherConfigurator(_config: Config, _prerequisites: Dispatche * of the `dispatcher()` method. */ class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) - extends MessageDispatcherConfigurator(config, prerequisites) { + extends MessageDispatcherConfigurator(config, prerequisites) { private val threadPoolConfig: ThreadPoolConfig = configureExecutor() match { case e: ThreadPoolExecutorConfigurator => e.threadPoolConfig case _ => prerequisites.eventStream.publish( - Warning( - "PinnedDispatcherConfigurator", - this.getClass, - "PinnedDispatcher [%s] not configured to use ThreadPoolExecutor, falling back to default config.".format( - config.getString("id")))) + Warning("PinnedDispatcherConfigurator", + this.getClass, + "PinnedDispatcher [%s] not configured to use ThreadPoolExecutor, falling back to default config." + .format(config.getString("id")))) ThreadPoolConfig() } + /** * Creates new dispatcher for each invocation. 
*/ override def dispatcher(): MessageDispatcher = - new PinnedDispatcher( - this, null, config.getString("id"), - config.getMillisDuration("shutdown-timeout"), threadPoolConfig) + new PinnedDispatcher(this, + null, + config.getString("id"), + config.getMillisDuration("shutdown-timeout"), + threadPoolConfig) } diff --git a/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala b/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala index 25c37bc6eb..94a4fa4d23 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ForkJoinExecutorConfigurator.scala @@ -14,20 +14,21 @@ object ForkJoinExecutorConfigurator { /** * INTERNAL AKKA USAGE ONLY */ - final class AkkaForkJoinPool( - parallelism: Int, - threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, - unhandledExceptionHandler: Thread.UncaughtExceptionHandler, - asyncMode: Boolean) - extends ForkJoinPool(parallelism, threadFactory, unhandledExceptionHandler, asyncMode) with LoadMetrics { - def this( - parallelism: Int, - threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, - unhandledExceptionHandler: Thread.UncaughtExceptionHandler) = this(parallelism, threadFactory, unhandledExceptionHandler, asyncMode = true) + final class AkkaForkJoinPool(parallelism: Int, + threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, + unhandledExceptionHandler: Thread.UncaughtExceptionHandler, + asyncMode: Boolean) + extends ForkJoinPool(parallelism, threadFactory, unhandledExceptionHandler, asyncMode) + with LoadMetrics { + def this(parallelism: Int, + threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, + unhandledExceptionHandler: Thread.UncaughtExceptionHandler) = + this(parallelism, threadFactory, unhandledExceptionHandler, asyncMode = true) override def execute(r: Runnable): Unit = if (r ne null) - super.execute((if (r.isInstanceOf[ForkJoinTask[_]]) r else new 
AkkaForkJoinTask(r)).asInstanceOf[ForkJoinTask[Any]]) + super.execute( + (if (r.isInstanceOf[ForkJoinTask[_]]) r else new AkkaForkJoinTask(r)).asInstanceOf[ForkJoinTask[Any]]) else throw new NullPointerException("Runnable was null") @@ -41,35 +42,43 @@ object ForkJoinExecutorConfigurator { final class AkkaForkJoinTask(runnable: Runnable) extends ForkJoinTask[Unit] { override def getRawResult(): Unit = () override def setRawResult(unit: Unit): Unit = () - final override def exec(): Boolean = try { runnable.run(); true } catch { - case _: InterruptedException => - Thread.currentThread.interrupt() - false - case anything: Throwable => - val t = Thread.currentThread - t.getUncaughtExceptionHandler match { - case null => - case some => some.uncaughtException(t, anything) - } - throw anything - } + final override def exec(): Boolean = + try { + runnable.run(); true + } catch { + case _: InterruptedException => + Thread.currentThread.interrupt() + false + case anything: Throwable => + val t = Thread.currentThread + t.getUncaughtExceptionHandler match { + case null => + case some => some.uncaughtException(t, anything) + } + throw anything + } } } -class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) { +class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) + extends ExecutorServiceConfigurator(config, prerequisites) { import ForkJoinExecutorConfigurator._ def validate(t: ThreadFactory): ForkJoinPool.ForkJoinWorkerThreadFactory = t match { case correct: ForkJoinPool.ForkJoinWorkerThreadFactory => correct - case _ => throw new IllegalStateException("The prerequisites for the ForkJoinExecutorConfigurator is a ForkJoinPool.ForkJoinWorkerThreadFactory!") + case _ => + throw new IllegalStateException( + "The prerequisites for the ForkJoinExecutorConfigurator is a ForkJoinPool.ForkJoinWorkerThreadFactory!") } - class ForkJoinExecutorServiceFactory( 
- val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, - val parallelism: Int, - val asyncMode: Boolean) extends ExecutorServiceFactory { - def this(threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, parallelism: Int) = this(threadFactory, parallelism, asyncMode = true) - def createExecutorService: ExecutorService = new AkkaForkJoinPool(parallelism, threadFactory, MonitorableThreadFactory.doNothing, asyncMode) + class ForkJoinExecutorServiceFactory(val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, + val parallelism: Int, + val asyncMode: Boolean) + extends ExecutorServiceFactory { + def this(threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, parallelism: Int) = + this(threadFactory, parallelism, asyncMode = true) + def createExecutorService: ExecutorService = + new AkkaForkJoinPool(parallelism, threadFactory, MonitorableThreadFactory.doNothing, asyncMode) } final def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory = { @@ -83,16 +92,16 @@ class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrer val asyncMode = config.getString("task-peeking-mode") match { case "FIFO" => true case "LIFO" => false - case _ => throw new IllegalArgumentException("Cannot instantiate ForkJoinExecutorServiceFactory. " + - """"task-peeking-mode" in "fork-join-executor" section could only set to "FIFO" or "LIFO".""") + case _ => + throw new IllegalArgumentException( + "Cannot instantiate ForkJoinExecutorServiceFactory. 
" + + """"task-peeking-mode" in "fork-join-executor" section could only set to "FIFO" or "LIFO".""") } - new ForkJoinExecutorServiceFactory( - validate(tf), - ThreadPoolConfig.scaledPoolSize( - config.getInt("parallelism-min"), - config.getDouble("parallelism-factor"), - config.getInt("parallelism-max")), - asyncMode) + new ForkJoinExecutorServiceFactory(validate(tf), + ThreadPoolConfig.scaledPoolSize(config.getInt("parallelism-min"), + config.getDouble("parallelism-factor"), + config.getInt("parallelism-max")), + asyncMode) } } diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 1464894219..32c7b694ae 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -23,6 +23,7 @@ import akka.util.unused * ExecutionContexts is the Java API for ExecutionContexts */ object ExecutionContexts { + /** * Returns a new ExecutionContextExecutor which will delegate execution to the underlying Executor, * and which will use the default error reporter. @@ -62,7 +63,8 @@ object ExecutionContexts { * @param errorReporter a Procedure that will log any exceptions passed to it * @return a new ExecutionContext */ - def fromExecutorService(executorService: ExecutorService, errorReporter: Procedure[Throwable]): ExecutionContextExecutorService = + def fromExecutorService(executorService: ExecutorService, + errorReporter: Procedure[Throwable]): ExecutionContextExecutorService = ExecutionContext.fromExecutorService(executorService, errorReporter.apply) /** @@ -90,6 +92,7 @@ object ExecutionContexts { */ object Futures { import scala.collection.JavaConverters.iterableAsScalaIterableConverter + /** * Starts an asynchronous computation and returns a `Future` object with the result of that computation. 
* @@ -130,9 +133,11 @@ object Futures { /** * Returns a Future that will hold the optional result of the first Future with a result that matches the predicate */ - def find[T <: AnyRef](futures: JIterable[Future[T]], predicate: JFunc[T, java.lang.Boolean], executor: ExecutionContext): Future[JOption[T]] = { + def find[T <: AnyRef](futures: JIterable[Future[T]], + predicate: JFunc[T, java.lang.Boolean], + executor: ExecutionContext): Future[JOption[T]] = { implicit val ec = executor - compat.Future.find[T](futures.asScala)(predicate.apply(_))(executor) map JOption.fromScalaOption + compat.Future.find[T](futures.asScala)(predicate.apply(_))(executor).map(JOption.fromScalaOption) } /** @@ -147,13 +152,18 @@ object Futures { * the result will be the first failure of any of the futures, or any failure in the actual fold, * or the result of the fold. */ - def fold[T <: AnyRef, R <: AnyRef](zero: R, futures: JIterable[Future[T]], fun: akka.japi.Function2[R, T, R], executor: ExecutionContext): Future[R] = + def fold[T <: AnyRef, R <: AnyRef](zero: R, + futures: JIterable[Future[T]], + fun: akka.japi.Function2[R, T, R], + executor: ExecutionContext): Future[R] = compat.Future.fold(futures.asScala)(zero)(fun.apply)(executor) /** * Reduces the results of the supplied futures and binary function. 
*/ - def reduce[T <: AnyRef, R >: T](futures: JIterable[Future[T]], fun: akka.japi.Function2[R, T, R], executor: ExecutionContext): Future[R] = + def reduce[T <: AnyRef, R >: T](futures: JIterable[Future[T]], + fun: akka.japi.Function2[R, T, R], + executor: ExecutionContext): Future[R] = compat.Future.reduce[T, R](futures.asScala)(fun.apply)(executor) /** @@ -162,7 +172,9 @@ object Futures { */ def sequence[A](in: JIterable[Future[A]], executor: ExecutionContext): Future[JIterable[A]] = { implicit val d = executor - in.asScala.foldLeft(Future(new JLinkedList[A]())) { (fr, fa) => for (r <- fr; a <- fa) yield { r add a; r } } + in.asScala.foldLeft(Future(new JLinkedList[A]())) { (fr, fa) => + for (r <- fr; a <- fa) yield { r.add(a); r } + } } /** @@ -174,7 +186,7 @@ object Futures { implicit val d = executor in.asScala.foldLeft(Future(new JLinkedList[B]())) { (fr, a) => val fb = fn(a) - for (r <- fr; b <- fb) yield { r add b; r } + for (r <- fr; b <- fb) yield { r.add(b); r } } } } @@ -369,5 +381,6 @@ abstract class Mapper[-T, +R] extends scala.runtime.AbstractFunction1[T, R] { * Throws UnsupportedOperation by default. 
*/ @throws(classOf[Throwable]) - def checkedApply(@unused parameter: T): R = throw new UnsupportedOperationException("Mapper.checkedApply has not been implemented") + def checkedApply(@unused parameter: T): R = + throw new UnsupportedOperationException("Mapper.checkedApply has not been implemented") } diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 7387fcaa57..8c837d2e84 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -55,7 +55,9 @@ private[akka] object Mailbox { * INTERNAL API */ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) - extends ForkJoinTask[Unit] with SystemMessageQueue with Runnable { + extends ForkJoinTask[Unit] + with SystemMessageQueue + with Runnable { import Mailbox._ @@ -204,7 +206,8 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) protected final def systemQueueGet: LatestFirstSystemMessageList = // Note: contrary how it looks, there is no allocation here, as SystemMessageList is a value class and as such // it just exists as a typed view during compile-time. The actual return type is still SystemMessage. - new LatestFirstSystemMessageList(Unsafe.instance.getObjectVolatile(this, AbstractMailbox.systemMessageOffset).asInstanceOf[SystemMessage]) + new LatestFirstSystemMessageList( + Unsafe.instance.getObjectVolatile(this, AbstractMailbox.systemMessageOffset).asInstanceOf[SystemMessage]) protected final def systemQueuePut(_old: LatestFirstSystemMessageList, _new: LatestFirstSystemMessageList): Boolean = // Note: calling .head is not actually existing on the bytecode level as the parameters _old and _new @@ -212,11 +215,12 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) // Without calling .head the parameters would be boxed in SystemMessageList wrapper. 
Unsafe.instance.compareAndSwapObject(this, AbstractMailbox.systemMessageOffset, _old.head, _new.head) - final def canBeScheduledForExecution(hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean = currentStatus match { - case Open | Scheduled => hasMessageHint || hasSystemMessageHint || hasSystemMessages || hasMessages - case Closed => false - case _ => hasSystemMessageHint || hasSystemMessages - } + final def canBeScheduledForExecution(hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean = + currentStatus match { + case Open | Scheduled => hasMessageHint || hasSystemMessageHint || hasSystemMessages || hasMessages + case Closed => false + case _ => hasSystemMessageHint || hasSystemMessages + } override final def run(): Unit = { try { @@ -232,30 +236,35 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) override final def getRawResult(): Unit = () override final def setRawResult(unit: Unit): Unit = () - final override def exec(): Boolean = try { run(); false } catch { - case _: InterruptedException => - Thread.currentThread.interrupt() - false - case anything: Throwable => - val t = Thread.currentThread - t.getUncaughtExceptionHandler match { - case null => - case some => some.uncaughtException(t, anything) - } - throw anything - } + final override def exec(): Boolean = + try { + run(); false + } catch { + case _: InterruptedException => + Thread.currentThread.interrupt() + false + case anything: Throwable => + val t = Thread.currentThread + t.getUncaughtExceptionHandler match { + case null => + case some => some.uncaughtException(t, anything) + } + throw anything + } /** * Process the messages in the mailbox */ - @tailrec private final def processMailbox( - left: Int = java.lang.Math.max(dispatcher.throughput, 1), - deadlineNs: Long = if (dispatcher.isThroughputDeadlineTimeDefined == true) System.nanoTime + dispatcher.throughputDeadlineTime.toNanos else 0L): Unit = + @tailrec private final def processMailbox(left: Int = 
java.lang.Math.max(dispatcher.throughput, 1), + deadlineNs: Long = + if (dispatcher.isThroughputDeadlineTimeDefined == true) + System.nanoTime + dispatcher.throughputDeadlineTime.toNanos + else 0L): Unit = if (shouldProcessMessage) { val next = dequeue() if (next ne null) { if (Mailbox.debug) println(actor.self + " processing message " + next) - actor invoke next + actor.invoke(next) if (Thread.interrupted()) throw new InterruptedException("Interrupted while processing actor messages") processAllSystemMessages() @@ -280,7 +289,7 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) msg.unlink() if (debug) println(actor.self + " processing system message " + msg + " with " + actor.childrenRefs) // we know here that systemInvoke ensures that only "fatal" exceptions get rethrown - actor systemInvoke msg + actor.systemInvoke(msg) if (Thread.interrupted()) interruption = new InterruptedException("Interrupted while processing system messages") // don’t ever execute normal message when system message present! @@ -301,8 +310,12 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) try dlm.systemEnqueue(actor.self, msg) catch { case e: InterruptedException => interruption = e - case NonFatal(e) => actor.system.eventStream.publish( - Error(e, actor.self.path.toString, this.getClass, "error while enqueuing " + msg + " to deadLetters: " + e.getMessage)) + case NonFatal(e) => + actor.system.eventStream.publish( + Error(e, + actor.self.path.toString, + this.getClass, + "error while enqueuing " + msg + " to deadLetters: " + e.getMessage)) } } // if we got an interrupted exception while handling system messages, then rethrow it @@ -340,6 +353,7 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) * It needs to at least support N producers and 1 consumer thread-safely. */ trait MessageQueue { + /** * Try to enqueue the message to this queue, or throw an exception. 
*/ @@ -394,14 +408,20 @@ class NodeMessageQueue extends AbstractNodeQueue[Envelope] with MessageQueue wit * Lock-free bounded non-blocking multiple-producer single-consumer queue. * Discards overflowing messages into DeadLetters. */ -class BoundedNodeMessageQueue(capacity: Int) extends AbstractBoundedNodeQueue[Envelope](capacity) - with MessageQueue with BoundedMessageQueueSemantics with MultipleConsumerSemantics { +class BoundedNodeMessageQueue(capacity: Int) + extends AbstractBoundedNodeQueue[Envelope](capacity) + with MessageQueue + with BoundedMessageQueueSemantics + with MultipleConsumerSemantics { final def pushTimeOut: Duration = Duration.Undefined final def enqueue(receiver: ActorRef, handle: Envelope): Unit = if (!add(handle)) - receiver.asInstanceOf[InternalActorRef].provider.deadLetters.tell( - DeadLetter(handle.message, handle.sender, receiver), handle.sender) + receiver + .asInstanceOf[InternalActorRef] + .provider + .deadLetters + .tell(DeadLetter(handle.message, handle.sender, receiver), handle.sender) final def dequeue(): Envelope = poll() @@ -422,6 +442,7 @@ class BoundedNodeMessageQueue(capacity: Int) extends AbstractBoundedNodeQueue[En * INTERNAL API */ private[akka] trait SystemMessageQueue { + /** * Enqueue a new system message, e.g. by prepending atomically as new head of a single-linked list. 
*/ @@ -501,7 +522,7 @@ trait QueueBasedMessageQueue extends MessageQueue with MultipleConsumerSemantics trait UnboundedMessageQueueSemantics trait UnboundedQueueBasedMessageQueue extends QueueBasedMessageQueue with UnboundedMessageQueueSemantics { - def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue add handle + def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue.add(handle) def dequeue(): Envelope = queue.poll() } @@ -528,9 +549,12 @@ trait BoundedQueueBasedMessageQueue extends QueueBasedMessageQueue with BoundedM def enqueue(receiver: ActorRef, handle: Envelope): Unit = if (pushTimeOut.length >= 0) { if (!queue.offer(handle, pushTimeOut.length, pushTimeOut.unit)) - receiver.asInstanceOf[InternalActorRef].provider.deadLetters.tell( - DeadLetter(handle.message, handle.sender, receiver), handle.sender) - } else queue put handle + receiver + .asInstanceOf[InternalActorRef] + .provider + .deadLetters + .tell(DeadLetter(handle.message, handle.sender, receiver), handle.sender) + } else queue.put(handle) def dequeue(): Envelope = queue.poll() } @@ -542,7 +566,9 @@ trait DequeBasedMessageQueueSemantics { def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit } -trait UnboundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueueSemantics with UnboundedMessageQueueSemantics +trait UnboundedDequeBasedMessageQueueSemantics + extends DequeBasedMessageQueueSemantics + with UnboundedMessageQueueSemantics trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueueSemantics with BoundedMessageQueueSemantics @@ -555,8 +581,8 @@ trait DequeBasedMessageQueue extends QueueBasedMessageQueue with DequeBasedMessa * i.e. a non-blocking enqueue and dequeue. 
*/ trait UnboundedDequeBasedMessageQueue extends DequeBasedMessageQueue with UnboundedDequeBasedMessageQueueSemantics { - def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue add handle - def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit = queue addFirst handle + def enqueue(receiver: ActorRef, handle: Envelope): Unit = queue.add(handle) + def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit = queue.addFirst(handle) def dequeue(): Envelope = queue.poll() } @@ -571,16 +597,22 @@ trait BoundedDequeBasedMessageQueue extends DequeBasedMessageQueue with BoundedD def enqueue(receiver: ActorRef, handle: Envelope): Unit = if (pushTimeOut.length >= 0) { if (!queue.offer(handle, pushTimeOut.length, pushTimeOut.unit)) - receiver.asInstanceOf[InternalActorRef].provider.deadLetters.tell( - DeadLetter(handle.message, handle.sender, receiver), handle.sender) - } else queue put handle + receiver + .asInstanceOf[InternalActorRef] + .provider + .deadLetters + .tell(DeadLetter(handle.message, handle.sender, receiver), handle.sender) + } else queue.put(handle) def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit = if (pushTimeOut.length >= 0) { if (!queue.offerFirst(handle, pushTimeOut.length, pushTimeOut.unit)) - receiver.asInstanceOf[InternalActorRef].provider.deadLetters.tell( - DeadLetter(handle.message, handle.sender, receiver), handle.sender) - } else queue putFirst handle + receiver + .asInstanceOf[InternalActorRef] + .provider + .deadLetters + .tell(DeadLetter(handle.message, handle.sender, receiver), handle.sender) + } else queue.putFirst(handle) def dequeue(): Envelope = queue.poll() } @@ -644,7 +676,9 @@ final case class SingleConsumerOnlyUnboundedMailbox() extends MailboxType with P * * NOTE: NonBlockingBoundedMailbox does not use `mailbox-push-timeout-time` as it is non-blocking. 
*/ -case class NonBlockingBoundedMailbox(val capacity: Int) extends MailboxType with ProducesMessageQueue[BoundedNodeMessageQueue] { +case class NonBlockingBoundedMailbox(val capacity: Int) + extends MailboxType + with ProducesMessageQueue[BoundedNodeMessageQueue] { def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity")) @@ -658,12 +692,12 @@ case class NonBlockingBoundedMailbox(val capacity: Int) extends MailboxType with * BoundedMailbox is the default bounded MailboxType used by Akka Actors. */ final case class BoundedMailbox(val capacity: Int, override val pushTimeOut: FiniteDuration) - extends MailboxType with ProducesMessageQueue[BoundedMailbox.MessageQueue] - with ProducesPushTimeoutSemanticsMailbox { + extends MailboxType + with ProducesMessageQueue[BoundedMailbox.MessageQueue] + with ProducesPushTimeoutSemanticsMailbox { - def this(settings: ActorSystem.Settings, config: Config) = this( - config.getInt("mailbox-capacity"), - config.getNanosDuration("mailbox-push-timeout-time")) + def this(settings: ActorSystem.Settings, config: Config) = + this(config.getInt("mailbox-capacity"), config.getNanosDuration("mailbox-push-timeout-time")) if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative") if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedMailbox can not be null") @@ -674,7 +708,8 @@ final case class BoundedMailbox(val capacity: Int, override val pushTimeOut: Fin object BoundedMailbox { class MessageQueue(capacity: Int, final val pushTimeOut: FiniteDuration) - extends LinkedBlockingQueue[Envelope](capacity) with BoundedQueueBasedMessageQueue { + extends LinkedBlockingQueue[Envelope](capacity) + with BoundedQueueBasedMessageQueue { final def queue: BlockingQueue[Envelope] = this } } @@ -684,7 +719,8 @@ object BoundedMailbox { * Extend this class and provide the Comparator in the constructor. 
*/ class UnboundedPriorityMailbox(val cmp: Comparator[Envelope], val initialCapacity: Int) - extends MailboxType with ProducesMessageQueue[UnboundedPriorityMailbox.MessageQueue] { + extends MailboxType + with ProducesMessageQueue[UnboundedPriorityMailbox.MessageQueue] { def this(cmp: Comparator[Envelope]) = this(cmp, 11) final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new UnboundedPriorityMailbox.MessageQueue(initialCapacity, cmp) @@ -692,7 +728,8 @@ class UnboundedPriorityMailbox(val cmp: Comparator[Envelope], val initialCapacit object UnboundedPriorityMailbox { class MessageQueue(initialCapacity: Int, cmp: Comparator[Envelope]) - extends PriorityBlockingQueue[Envelope](initialCapacity, cmp) with UnboundedQueueBasedMessageQueue { + extends PriorityBlockingQueue[Envelope](initialCapacity, cmp) + with UnboundedQueueBasedMessageQueue { final def queue: Queue[Envelope] = this } } @@ -701,9 +738,12 @@ object UnboundedPriorityMailbox { * BoundedPriorityMailbox is a bounded mailbox that allows for prioritization of its contents. * Extend this class and provide the Comparator in the constructor. 
*/ -class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val capacity: Int, override final val pushTimeOut: Duration) - extends MailboxType with ProducesMessageQueue[BoundedPriorityMailbox.MessageQueue] - with ProducesPushTimeoutSemanticsMailbox { +class BoundedPriorityMailbox(final val cmp: Comparator[Envelope], + final val capacity: Int, + override final val pushTimeOut: Duration) + extends MailboxType + with ProducesMessageQueue[BoundedPriorityMailbox.MessageQueue] + with ProducesPushTimeoutSemanticsMailbox { if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative") if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedMailbox can not be null") @@ -714,8 +754,8 @@ class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val cap object BoundedPriorityMailbox { class MessageQueue(capacity: Int, cmp: Comparator[Envelope], val pushTimeOut: Duration) - extends BoundedBlockingQueue[Envelope](capacity, new PriorityQueue[Envelope](11, cmp)) - with BoundedQueueBasedMessageQueue { + extends BoundedBlockingQueue[Envelope](capacity, new PriorityQueue[Envelope](11, cmp)) + with BoundedQueueBasedMessageQueue { final def queue: BlockingQueue[Envelope] = this } } @@ -726,7 +766,8 @@ object BoundedPriorityMailbox { * Extend this class and provide the Comparator in the constructor. 
*/ class UnboundedStablePriorityMailbox(val cmp: Comparator[Envelope], val initialCapacity: Int) - extends MailboxType with ProducesMessageQueue[UnboundedStablePriorityMailbox.MessageQueue] { + extends MailboxType + with ProducesMessageQueue[UnboundedStablePriorityMailbox.MessageQueue] { def this(cmp: Comparator[Envelope]) = this(cmp, 11) final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new UnboundedStablePriorityMailbox.MessageQueue(initialCapacity, cmp) @@ -734,7 +775,8 @@ class UnboundedStablePriorityMailbox(val cmp: Comparator[Envelope], val initialC object UnboundedStablePriorityMailbox { class MessageQueue(initialCapacity: Int, cmp: Comparator[Envelope]) - extends StablePriorityBlockingQueue[Envelope](initialCapacity, cmp) with UnboundedQueueBasedMessageQueue { + extends StablePriorityBlockingQueue[Envelope](initialCapacity, cmp) + with UnboundedQueueBasedMessageQueue { final def queue: Queue[Envelope] = this } } @@ -744,9 +786,12 @@ object UnboundedStablePriorityMailbox { * [[BoundedPriorityMailbox]] it preserves ordering for messages of equal priority. * Extend this class and provide the Comparator in the constructor. 
*/ -class BoundedStablePriorityMailbox( final val cmp: Comparator[Envelope], final val capacity: Int, override final val pushTimeOut: Duration) - extends MailboxType with ProducesMessageQueue[BoundedStablePriorityMailbox.MessageQueue] - with ProducesPushTimeoutSemanticsMailbox { +class BoundedStablePriorityMailbox(final val cmp: Comparator[Envelope], + final val capacity: Int, + override final val pushTimeOut: Duration) + extends MailboxType + with ProducesMessageQueue[BoundedStablePriorityMailbox.MessageQueue] + with ProducesPushTimeoutSemanticsMailbox { if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative") if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedMailbox can not be null") @@ -757,8 +802,8 @@ class BoundedStablePriorityMailbox( final val cmp: Comparator[Envelope], final v object BoundedStablePriorityMailbox { class MessageQueue(capacity: Int, cmp: Comparator[Envelope], val pushTimeOut: Duration) - extends BoundedBlockingQueue[Envelope](capacity, new StablePriorityQueue[Envelope](11, cmp)) - with BoundedQueueBasedMessageQueue { + extends BoundedBlockingQueue[Envelope](capacity, new StablePriorityQueue[Envelope](11, cmp)) + with BoundedQueueBasedMessageQueue { final def queue: BlockingQueue[Envelope] = this } } @@ -766,7 +811,9 @@ object BoundedStablePriorityMailbox { /** * UnboundedDequeBasedMailbox is an unbounded MailboxType, backed by a Deque. */ -final case class UnboundedDequeBasedMailbox() extends MailboxType with ProducesMessageQueue[UnboundedDequeBasedMailbox.MessageQueue] { +final case class UnboundedDequeBasedMailbox() + extends MailboxType + with ProducesMessageQueue[UnboundedDequeBasedMailbox.MessageQueue] { def this(settings: ActorSystem.Settings, config: Config) = this() @@ -783,16 +830,17 @@ object UnboundedDequeBasedMailbox { /** * BoundedDequeBasedMailbox is an bounded MailboxType, backed by a Deque. 
*/ -case class BoundedDequeBasedMailbox( final val capacity: Int, override final val pushTimeOut: FiniteDuration) - extends MailboxType with ProducesMessageQueue[BoundedDequeBasedMailbox.MessageQueue] - with ProducesPushTimeoutSemanticsMailbox { +case class BoundedDequeBasedMailbox(final val capacity: Int, override final val pushTimeOut: FiniteDuration) + extends MailboxType + with ProducesMessageQueue[BoundedDequeBasedMailbox.MessageQueue] + with ProducesPushTimeoutSemanticsMailbox { - def this(settings: ActorSystem.Settings, config: Config) = this( - config.getInt("mailbox-capacity"), - config.getNanosDuration("mailbox-push-timeout-time")) + def this(settings: ActorSystem.Settings, config: Config) = + this(config.getInt("mailbox-capacity"), config.getNanosDuration("mailbox-push-timeout-time")) if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedDequeBasedMailbox can not be negative") - if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedDequeBasedMailbox can not be null") + if (pushTimeOut eq null) + throw new IllegalArgumentException("The push time-out for BoundedDequeBasedMailbox can not be null") final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new BoundedDequeBasedMailbox.MessageQueue(capacity, pushTimeOut) @@ -800,7 +848,8 @@ case class BoundedDequeBasedMailbox( final val capacity: Int, override final val object BoundedDequeBasedMailbox { class MessageQueue(capacity: Int, val pushTimeOut: FiniteDuration) - extends LinkedBlockingDeque[Envelope](capacity) with BoundedDequeBasedMessageQueue { + extends LinkedBlockingDeque[Envelope](capacity) + with BoundedDequeBasedMessageQueue { final val queue = this } } @@ -813,8 +862,8 @@ trait ControlAwareMessageQueueSemantics extends QueueBasedMessageQueue { def queue: Queue[Envelope] def enqueue(receiver: ActorRef, handle: Envelope): Unit = handle match { - case envelope @ Envelope(_: ControlMessage, _) => 
controlQueue add envelope - case envelope => queue add envelope + case envelope @ Envelope(_: ControlMessage, _) => controlQueue.add(envelope) + case envelope => queue.add(envelope) } def dequeue(): Envelope = { @@ -829,8 +878,12 @@ trait ControlAwareMessageQueueSemantics extends QueueBasedMessageQueue { override def hasMessages: Boolean = !(queue.isEmpty && controlQueue.isEmpty) } -trait UnboundedControlAwareMessageQueueSemantics extends UnboundedMessageQueueSemantics with ControlAwareMessageQueueSemantics -trait BoundedControlAwareMessageQueueSemantics extends BoundedMessageQueueSemantics with ControlAwareMessageQueueSemantics +trait UnboundedControlAwareMessageQueueSemantics + extends UnboundedMessageQueueSemantics + with ControlAwareMessageQueueSemantics +trait BoundedControlAwareMessageQueueSemantics + extends BoundedMessageQueueSemantics + with ControlAwareMessageQueueSemantics /** * Messages that extend this trait will be handled with priority by control aware mailboxes. @@ -841,13 +894,16 @@ trait ControlMessage * UnboundedControlAwareMailbox is an unbounded MailboxType, that maintains two queues * to allow messages that extend [[akka.dispatch.ControlMessage]] to be delivered with priority. 
*/ -final case class UnboundedControlAwareMailbox() extends MailboxType with ProducesMessageQueue[UnboundedControlAwareMailbox.MessageQueue] { +final case class UnboundedControlAwareMailbox() + extends MailboxType + with ProducesMessageQueue[UnboundedControlAwareMailbox.MessageQueue] { // this constructor will be called via reflection when this mailbox type // is used in the application config def this(settings: ActorSystem.Settings, config: Config) = this() - def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new UnboundedControlAwareMailbox.MessageQueue + def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = + new UnboundedControlAwareMailbox.MessageQueue } object UnboundedControlAwareMailbox { @@ -861,18 +917,21 @@ object UnboundedControlAwareMailbox { * BoundedControlAwareMailbox is a bounded MailboxType, that maintains two queues * to allow messages that extend [[akka.dispatch.ControlMessage]] to be delivered with priority. */ -final case class BoundedControlAwareMailbox(capacity: Int, override final val pushTimeOut: FiniteDuration) extends MailboxType - with ProducesMessageQueue[BoundedControlAwareMailbox.MessageQueue] - with ProducesPushTimeoutSemanticsMailbox { - def this(settings: ActorSystem.Settings, config: Config) = this( - config.getInt("mailbox-capacity"), - config.getNanosDuration("mailbox-push-timeout-time")) +final case class BoundedControlAwareMailbox(capacity: Int, override final val pushTimeOut: FiniteDuration) + extends MailboxType + with ProducesMessageQueue[BoundedControlAwareMailbox.MessageQueue] + with ProducesPushTimeoutSemanticsMailbox { + def this(settings: ActorSystem.Settings, config: Config) = + this(config.getInt("mailbox-capacity"), config.getNanosDuration("mailbox-push-timeout-time")) - def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new BoundedControlAwareMailbox.MessageQueue(capacity, pushTimeOut) + def create(owner: Option[ActorRef], 
system: Option[ActorSystem]): MessageQueue = + new BoundedControlAwareMailbox.MessageQueue(capacity, pushTimeOut) } object BoundedControlAwareMailbox { - class MessageQueue(val capacity: Int, val pushTimeOut: FiniteDuration) extends BoundedControlAwareMessageQueueSemantics with java.io.Serializable { + class MessageQueue(val capacity: Int, val pushTimeOut: FiniteDuration) + extends BoundedControlAwareMessageQueueSemantics + with java.io.Serializable { private final val size = new AtomicInteger(0) private final val putLock = new ReentrantLock() @@ -948,8 +1007,11 @@ object BoundedControlAwareMailbox { } if (!inserted) { - receiver.asInstanceOf[InternalActorRef].provider.deadLetters.tell( - DeadLetter(envelope.message, envelope.sender, receiver), envelope.sender) + receiver + .asInstanceOf[InternalActorRef] + .provider + .deadLetters + .tell(DeadLetter(envelope.message, envelope.sender, receiver), envelope.sender) } } } diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala index c42399dbca..aba568e8df 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailboxes.scala @@ -8,7 +8,12 @@ import java.lang.reflect.ParameterizedType import java.util.concurrent.ConcurrentHashMap import akka.ConfigurationException import akka.actor.{ Actor, ActorRef, ActorSystem, DeadLetter, Deploy, DynamicAccess, Props } -import akka.dispatch.sysmsg.{ EarliestFirstSystemMessageList, LatestFirstSystemMessageList, SystemMessage, SystemMessageList } +import akka.dispatch.sysmsg.{ + EarliestFirstSystemMessageList, + LatestFirstSystemMessageList, + SystemMessage, + SystemMessageList +} import akka.event.EventStream import akka.event.Logging.Warning import akka.util.Reflect @@ -22,11 +27,10 @@ object Mailboxes { final val NoMailboxRequirement = "" } -private[akka] class Mailboxes( - val settings: ActorSystem.Settings, - val eventStream: EventStream, - 
dynamicAccess: DynamicAccess, - deadLetters: ActorRef) { +private[akka] class Mailboxes(val settings: ActorSystem.Settings, + val eventStream: EventStream, + dynamicAccess: DynamicAccess, + deadLetters: ActorRef) { import Mailboxes._ @@ -51,16 +55,26 @@ private[akka] class Mailboxes( private val mailboxBindings: Map[Class[_ <: Any], String] = { import scala.collection.JavaConverters._ - settings.config.getConfig("akka.actor.mailbox.requirements").root.unwrapped.asScala - .toMap.foldLeft(Map.empty[Class[_ <: Any], String]) { + settings.config + .getConfig("akka.actor.mailbox.requirements") + .root + .unwrapped + .asScala + .toMap + .foldLeft(Map.empty[Class[_ <: Any], String]) { case (m, (k, v)) => - dynamicAccess.getClassFor[Any](k).map { - case x => m.updated(x, v.toString) - }.recover { - case e => - throw new ConfigurationException(s"Type [${k}] specified as akka.actor.mailbox.requirement " + - s"[${v}] in config can't be loaded due to [${e.getMessage}]", e) - }.get + dynamicAccess + .getClassFor[Any](k) + .map { + case x => m.updated(x, v.toString) + } + .recover { + case e => + throw new ConfigurationException(s"Type [${k}] specified as akka.actor.mailbox.requirement " + + s"[${v}] in config can't be loaded due to [${e.getMessage}]", + e) + } + .get } } @@ -75,15 +89,18 @@ private[akka] class Mailboxes( def lookupByQueueType(queueType: Class[_ <: Any]): MailboxType = lookup(lookupId(queueType)) private final val rmqClass = classOf[RequiresMessageQueue[_]] + /** * Return the required message queue type for this class if any. 
*/ def getRequiredType(actorClass: Class[_ <: Actor]): Class[_] = Reflect.findMarker(actorClass, rmqClass) match { - case t: ParameterizedType => t.getActualTypeArguments.head match { - case c: Class[_] => c - case x => throw new IllegalArgumentException(s"no wildcard type allowed in RequireMessageQueue argument (was [$x])") - } + case t: ParameterizedType => + t.getActualTypeArguments.head match { + case c: Class[_] => c + case x => + throw new IllegalArgumentException(s"no wildcard type allowed in RequireMessageQueue argument (was [$x])") + } } // don’t care if this happens twice @@ -98,14 +115,16 @@ private[akka] class Mailboxes( def getProducedMessageQueueType(mailboxType: MailboxType): Class[_] = { val pmqClass = classOf[ProducesMessageQueue[_]] if (!pmqClass.isAssignableFrom(mailboxType.getClass)) classOf[MessageQueue] - else Reflect.findMarker(mailboxType.getClass, pmqClass) match { - case t: ParameterizedType => - t.getActualTypeArguments.head match { - case c: Class[_] => c - case x => throw new IllegalArgumentException( - s"no wildcard type allowed in ProducesMessageQueue argument (was [$x])") - } - } + else + Reflect.findMarker(mailboxType.getClass, pmqClass) match { + case t: ParameterizedType => + t.getActualTypeArguments.head match { + case c: Class[_] => c + case x => + throw new IllegalArgumentException( + s"no wildcard type allowed in ProducesMessageQueue argument (was [$x])") + } + } } /** @@ -123,12 +142,14 @@ private[akka] class Mailboxes( val hasMailboxType = dispatcherConfig.hasPath("mailbox-type") && - dispatcherConfig.getString("mailbox-type") != Deploy.NoMailboxGiven + dispatcherConfig.getString("mailbox-type") != Deploy.NoMailboxGiven // TODO remove in 2.3 if (!hasMailboxType && !mailboxSizeWarningIssued && dispatcherConfig.hasPath("mailbox-size")) { - eventStream.publish(Warning("mailboxes", getClass, - s"ignoring setting 'mailbox-size' for dispatcher [$id], you need to specify 'mailbox-type=bounded'")) + eventStream.publish( + 
Warning("mailboxes", + getClass, + s"ignoring setting 'mailbox-size' for dispatcher [$id], you need to specify 'mailbox-type=bounded'")) mailboxSizeWarningIssued = true } @@ -137,11 +158,11 @@ private[akka] class Mailboxes( if (hasMailboxRequirement && !mailboxRequirement.isAssignableFrom(mqType)) throw new IllegalArgumentException( s"produced message queue type [$mqType] does not fulfill requirement for dispatcher [$id]. " + - s"Must be a subclass of [$mailboxRequirement].") + s"Must be a subclass of [$mailboxRequirement].") if (hasRequiredType(actorClass) && !actorRequirement.isAssignableFrom(mqType)) throw new IllegalArgumentException( s"produced message queue type [$mqType] does not fulfill requirement for actor class [$actorClass]. " + - s"Must be a subclass of [$actorRequirement].") + s"Must be a subclass of [$actorRequirement].") mailboxType } @@ -188,19 +209,23 @@ private[akka] class Mailboxes( case "" => throw new ConfigurationException(s"The setting mailbox-type, defined in [$id] is empty") case fqcn => val args = List(classOf[ActorSystem.Settings] -> settings, classOf[Config] -> conf) - dynamicAccess.createInstanceFor[MailboxType](fqcn, args).recover({ - case exception => - throw new IllegalArgumentException( - s"Cannot instantiate MailboxType [$fqcn], defined in [$id], make sure it has a public" + + dynamicAccess + .createInstanceFor[MailboxType](fqcn, args) + .recover({ + case exception => + throw new IllegalArgumentException( + s"Cannot instantiate MailboxType [$fqcn], defined in [$id], make sure it has a public" + " constructor with [akka.actor.ActorSystem.Settings, com.typesafe.config.Config] parameters", - exception) - }).get + exception) + }) + .get } if (!mailboxNonZeroPushTimeoutWarningIssued) { mailboxType match { case m: ProducesPushTimeoutSemanticsMailbox if m.pushTimeOut.toNanos > 0L => - warn(s"Configured potentially-blocking mailbox [$id] configured with non-zero pushTimeOut (${m.pushTimeOut}), " + + warn( + s"Configured 
potentially-blocking mailbox [$id] configured with non-zero pushTimeOut (${m.pushTimeOut}), " + s"which can lead to blocking behavior when sending messages to this mailbox. " + s"Avoid this by setting `$id.mailbox-push-timeout-time` to `0`.") mailboxNonZeroPushTimeoutWarningIssued = true @@ -228,7 +253,8 @@ private[akka] class Mailboxes( //INTERNAL API private def config(id: String): Config = { import scala.collection.JavaConverters._ - ConfigFactory.parseMap(Map("id" -> id).asJava) + ConfigFactory + .parseMap(Map("id" -> id).asJava) .withFallback(settings.config.getConfig(id)) .withFallback(defaultMailboxConfig) } @@ -244,7 +270,7 @@ private[akka] class Mailboxes( @tailrec def updateCache(cache: Map[String, Int], key: String, value: Int): Boolean = { stashCapacityCache.compareAndSet(cache, cache.updated(key, value)) || - updateCache(stashCapacityCache.get, key, value) // recursive, try again + updateCache(stashCapacityCache.get, key, value) // recursive, try again } if (dispatcher == Dispatchers.DefaultDispatcherId && mailbox == Mailboxes.DefaultMailboxId) diff --git a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala index 39f35a7452..4f5c29df72 100644 --- a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala @@ -14,19 +14,17 @@ import scala.concurrent.duration.FiniteDuration * The preferred way of creating dispatchers is to define configuration of it and use the * the `lookup` method in [[akka.dispatch.Dispatchers]]. 
*/ -class PinnedDispatcher( - _configurator: MessageDispatcherConfigurator, - _actor: ActorCell, - _id: String, - _shutdownTimeout: FiniteDuration, - _threadPoolConfig: ThreadPoolConfig) - extends Dispatcher( - _configurator, - _id, - Int.MaxValue, - Duration.Zero, - _threadPoolConfig.copy(corePoolSize = 1, maxPoolSize = 1), - _shutdownTimeout) { +class PinnedDispatcher(_configurator: MessageDispatcherConfigurator, + _actor: ActorCell, + _id: String, + _shutdownTimeout: FiniteDuration, + _threadPoolConfig: ThreadPoolConfig) + extends Dispatcher(_configurator, + _id, + Int.MaxValue, + Duration.Zero, + _threadPoolConfig.copy(corePoolSize = 1, maxPoolSize = 1), + _shutdownTimeout) { @volatile private var owner: ActorCell = _actor @@ -34,7 +32,8 @@ class PinnedDispatcher( //Relies on an external lock provided by MessageDispatcher.attach protected[akka] override def register(actorCell: ActorCell) = { val actor = owner - if ((actor ne null) && actorCell != actor) throw new IllegalArgumentException("Cannot register to anyone but " + actor) + if ((actor ne null) && actorCell != actor) + throw new IllegalArgumentException("Cannot register to anyone but " + actor) owner = actorCell super.register(actorCell) } @@ -44,4 +43,3 @@ class PinnedDispatcher( owner = null } } - diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 073197e19e..752f89b70d 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -14,14 +14,14 @@ import java.util.concurrent.{ Callable, ExecutorService, LinkedBlockingQueue, - RejectedExecutionHandler, RejectedExecutionException, + RejectedExecutionHandler, SynchronousQueue, - TimeUnit, ThreadFactory, - ThreadPoolExecutor + ThreadPoolExecutor, + TimeUnit } -import java.util.concurrent.atomic.{ AtomicReference, AtomicLong } +import java.util.concurrent.atomic.{ AtomicLong, 
AtomicReference } object ThreadPoolConfig { type QueueFactory = () => BlockingQueue[Runnable] @@ -35,7 +35,8 @@ object ThreadPoolConfig { def scaledPoolSize(floor: Int, multiplier: Double, ceiling: Int): Int = math.min(math.max((Runtime.getRuntime.availableProcessors * multiplier).ceil.toInt, floor), ceiling) - def arrayBlockingQueue(capacity: Int, fair: Boolean): QueueFactory = () => new ArrayBlockingQueue[Runnable](capacity, fair) + def arrayBlockingQueue(capacity: Int, fair: Boolean): QueueFactory = + () => new ArrayBlockingQueue[Runnable](capacity, fair) def synchronousQueue(fair: Boolean): QueueFactory = () => new SynchronousQueue[Runnable](fair) @@ -65,24 +66,22 @@ trait ExecutorServiceFactoryProvider { /** * A small configuration DSL to create ThreadPoolExecutors that can be provided as an ExecutorServiceFactoryProvider to Dispatcher */ -final case class ThreadPoolConfig( - allowCorePoolTimeout: Boolean = ThreadPoolConfig.defaultAllowCoreThreadTimeout, - corePoolSize: Int = ThreadPoolConfig.defaultCorePoolSize, - maxPoolSize: Int = ThreadPoolConfig.defaultMaxPoolSize, - threadTimeout: Duration = ThreadPoolConfig.defaultTimeout, - queueFactory: ThreadPoolConfig.QueueFactory = ThreadPoolConfig.linkedBlockingQueue(), - rejectionPolicy: RejectedExecutionHandler = ThreadPoolConfig.defaultRejectionPolicy) - extends ExecutorServiceFactoryProvider { +final case class ThreadPoolConfig(allowCorePoolTimeout: Boolean = ThreadPoolConfig.defaultAllowCoreThreadTimeout, + corePoolSize: Int = ThreadPoolConfig.defaultCorePoolSize, + maxPoolSize: Int = ThreadPoolConfig.defaultMaxPoolSize, + threadTimeout: Duration = ThreadPoolConfig.defaultTimeout, + queueFactory: ThreadPoolConfig.QueueFactory = ThreadPoolConfig.linkedBlockingQueue(), + rejectionPolicy: RejectedExecutionHandler = ThreadPoolConfig.defaultRejectionPolicy) + extends ExecutorServiceFactoryProvider { class ThreadPoolExecutorServiceFactory(val threadFactory: ThreadFactory) extends ExecutorServiceFactory { def 
createExecutorService: ExecutorService = { - val service: ThreadPoolExecutor = new ThreadPoolExecutor( - corePoolSize, - maxPoolSize, - threadTimeout.length, - threadTimeout.unit, - queueFactory(), - threadFactory, - rejectionPolicy) with LoadMetrics { + val service: ThreadPoolExecutor = new ThreadPoolExecutor(corePoolSize, + maxPoolSize, + threadTimeout.length, + threadTimeout.unit, + queueFactory(), + threadFactory, + rejectionPolicy) with LoadMetrics { def atFullThrottle(): Boolean = this.getActiveCount >= this.getPoolSize } service.allowCoreThreadTimeOut(allowCorePoolTimeout) @@ -121,7 +120,8 @@ final case class ThreadPoolConfigBuilder(config: ThreadPoolConfig) { def withNewThreadPoolWithSynchronousQueueWithFairness(fair: Boolean): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = synchronousQueue(fair))) - def withNewThreadPoolWithArrayBlockingQueueWithCapacityAndFairness(capacity: Int, fair: Boolean): ThreadPoolConfigBuilder = + def withNewThreadPoolWithArrayBlockingQueueWithCapacityAndFairness(capacity: Int, + fair: Boolean): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = arrayBlockingQueue(capacity, fair))) def setFixedPoolSize(size: Int): ThreadPoolConfigBuilder = @@ -159,7 +159,9 @@ object MonitorableThreadFactory { val doNothing: Thread.UncaughtExceptionHandler = new Thread.UncaughtExceptionHandler() { def uncaughtException(thread: Thread, cause: Throwable) = () } - private[akka] class AkkaForkJoinWorkerThread(_pool: ForkJoinPool) extends ForkJoinWorkerThread(_pool) with BlockContext { + private[akka] class AkkaForkJoinWorkerThread(_pool: ForkJoinPool) + extends ForkJoinWorkerThread(_pool) + with BlockContext { override def blockOn[T](thunk: => T)(implicit permission: CanAwait): T = { val result = new AtomicReference[Option[T]](None) ForkJoinPool.managedBlock(new ForkJoinPool.ManagedBlocker { @@ -174,13 +176,14 @@ object MonitorableThreadFactory { } } -final case class MonitorableThreadFactory( - name: 
String, - daemonic: Boolean, - contextClassLoader: Option[ClassLoader], - exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing, - protected val counter: AtomicLong = new AtomicLong) - extends ThreadFactory with ForkJoinPool.ForkJoinWorkerThreadFactory { +final case class MonitorableThreadFactory(name: String, + daemonic: Boolean, + contextClassLoader: Option[ClassLoader], + exceptionHandler: Thread.UncaughtExceptionHandler = + MonitorableThreadFactory.doNothing, + protected val counter: AtomicLong = new AtomicLong) + extends ThreadFactory + with ForkJoinPool.ForkJoinWorkerThreadFactory { def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = { val t = wire(new MonitorableThreadFactory.AkkaForkJoinWorkerThread(pool)) @@ -196,7 +199,7 @@ final case class MonitorableThreadFactory( protected def wire[T <: Thread](t: T): T = { t.setUncaughtExceptionHandler(exceptionHandler) t.setDaemon(daemonic) - contextClassLoader foreach t.setContextClassLoader + contextClassLoader.foreach(t.setContextClassLoader) t } } @@ -228,11 +231,13 @@ trait ExecutorServiceDelegate extends ExecutorService { def invokeAll[T](callables: Collection[_ <: Callable[T]]) = executor.invokeAll(callables) - def invokeAll[T](callables: Collection[_ <: Callable[T]], l: Long, timeUnit: TimeUnit) = executor.invokeAll(callables, l, timeUnit) + def invokeAll[T](callables: Collection[_ <: Callable[T]], l: Long, timeUnit: TimeUnit) = + executor.invokeAll(callables, l, timeUnit) def invokeAny[T](callables: Collection[_ <: Callable[T]]) = executor.invokeAny(callables) - def invokeAny[T](callables: Collection[_ <: Callable[T]], l: Long, timeUnit: TimeUnit) = executor.invokeAny(callables, l, timeUnit) + def invokeAny[T](callables: Collection[_ <: Callable[T]], l: Long, timeUnit: TimeUnit) = + executor.invokeAny(callables, l, timeUnit) } /** diff --git a/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala 
b/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala index 49c1aca1c0..434a8c592b 100644 --- a/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala +++ b/akka-actor/src/main/scala/akka/dispatch/affinity/AffinityPool.scala @@ -17,12 +17,12 @@ import akka.dispatch._ import akka.util.Helpers.Requiring import com.typesafe.config.Config -import akka.annotation.{ InternalApi, ApiMayChange } +import akka.annotation.{ ApiMayChange, InternalApi } import akka.event.Logging import akka.util.{ ImmutableIntMap, OptionVal, ReentrantGuard } -import scala.annotation.{ tailrec, switch } -import scala.collection.{ mutable, immutable } +import scala.annotation.{ switch, tailrec } +import scala.collection.{ immutable, mutable } import scala.util.control.NonFatal @InternalApi @@ -44,8 +44,7 @@ private[affinity] object AffinityPool { // Method handle to JDK9+ onSpinWait method private val onSpinWaitMethodHandle = - try - OptionVal.Some(MethodHandles.lookup.findStatic(classOf[Thread], "onSpinWait", methodType(classOf[Void]))) + try OptionVal.Some(MethodHandles.lookup.findStatic(classOf[Thread], "onSpinWait", methodType(classOf[Void]))) catch { case NonFatal(_) => OptionVal.None } @@ -124,15 +123,14 @@ private[affinity] object AffinityPool { */ @InternalApi @ApiMayChange -private[akka] class AffinityPool( - id: String, - parallelism: Int, - affinityGroupSize: Int, - threadFactory: ThreadFactory, - idleCpuLevel: Int, - final val queueSelector: QueueSelector, - rejectionHandler: RejectionHandler) - extends AbstractExecutorService { +private[akka] class AffinityPool(id: String, + parallelism: Int, + affinityGroupSize: Int, + threadFactory: ThreadFactory, + idleCpuLevel: Int, + final val queueSelector: QueueSelector, + rejectionHandler: RejectionHandler) + extends AbstractExecutorService { if (parallelism <= 0) throw new IllegalArgumentException("Size of pool cannot be less or equal to 0") @@ -249,11 +247,14 @@ private[akka] class AffinityPool( override def 
toString: String = s"${Logging.simpleName(this)}(id = $id, parallelism = $parallelism, affinityGroupSize = $affinityGroupSize, threadFactory = $threadFactory, idleCpuLevel = $idleCpuLevel, queueSelector = $queueSelector, rejectionHandler = $rejectionHandler)" - private[this] final class AffinityPoolWorker( final val q: BoundedAffinityTaskQueue, final val idleStrategy: IdleStrategy) extends Runnable { + private[this] final class AffinityPoolWorker(final val q: BoundedAffinityTaskQueue, + final val idleStrategy: IdleStrategy) + extends Runnable { final val thread: Thread = threadFactory.newThread(this) final def start(): Unit = - if (thread eq null) throw new IllegalStateException(s"Was not able to allocate worker thread for ${AffinityPool.this}") + if (thread eq null) + throw new IllegalStateException(s"Was not able to allocate worker thread for ${AffinityPool.this}") else thread.start() override final def run(): Unit = { @@ -310,32 +311,38 @@ private[akka] class AffinityPool( @InternalApi @ApiMayChange private[akka] final class AffinityPoolConfigurator(config: Config, prerequisites: DispatcherPrerequisites) - extends ExecutorServiceConfigurator(config, prerequisites) { + extends ExecutorServiceConfigurator(config, prerequisites) { - private val poolSize = ThreadPoolConfig.scaledPoolSize( - config.getInt("parallelism-min"), - config.getDouble("parallelism-factor"), - config.getInt("parallelism-max")) + private val poolSize = ThreadPoolConfig.scaledPoolSize(config.getInt("parallelism-min"), + config.getDouble("parallelism-factor"), + config.getInt("parallelism-max")) private val taskQueueSize = config.getInt("task-queue-size") - private val idleCpuLevel = config.getInt("idle-cpu-level").requiring(level => - 1 <= level && level <= 10, "idle-cpu-level must be between 1 and 10") + private val idleCpuLevel = config + .getInt("idle-cpu-level") + .requiring(level => 1 <= level && level <= 10, "idle-cpu-level must be between 1 and 10") private val queueSelectorFactoryFQCN = 
config.getString("queue-selector") private val queueSelectorFactory: QueueSelectorFactory = - prerequisites.dynamicAccess.createInstanceFor[QueueSelectorFactory](queueSelectorFactoryFQCN, immutable.Seq(classOf[Config] -> config)) + prerequisites.dynamicAccess + .createInstanceFor[QueueSelectorFactory](queueSelectorFactoryFQCN, immutable.Seq(classOf[Config] -> config)) .recover({ - case _ => throw new IllegalArgumentException( - s"Cannot instantiate QueueSelectorFactory(queueSelector = $queueSelectorFactoryFQCN), make sure it has an accessible constructor which accepts a Config parameter") - }).get + case _ => + throw new IllegalArgumentException( + s"Cannot instantiate QueueSelectorFactory(queueSelector = $queueSelectorFactoryFQCN), make sure it has an accessible constructor which accepts a Config parameter") + }) + .get private val rejectionHandlerFactoryFCQN = config.getString("rejection-handler") private val rejectionHandlerFactory = prerequisites.dynamicAccess - .createInstanceFor[RejectionHandlerFactory](rejectionHandlerFactoryFCQN, Nil).recover({ - case exception => throw new IllegalArgumentException( - s"Cannot instantiate RejectionHandlerFactory(rejection-handler = $rejectionHandlerFactoryFCQN), make sure it has an accessible empty constructor", - exception) - }).get + .createInstanceFor[RejectionHandlerFactory](rejectionHandlerFactoryFCQN, Nil) + .recover({ + case exception => + throw new IllegalArgumentException( + s"Cannot instantiate RejectionHandlerFactory(rejection-handler = $rejectionHandlerFactoryFCQN), make sure it has an accessible empty constructor", + exception) + }) + .get override def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory = { val tf = threadFactory match { @@ -347,7 +354,13 @@ private[akka] final class AffinityPoolConfigurator(config: Config, prerequisites new ExecutorServiceFactory { override def createExecutorService: ExecutorService = - new AffinityPool(id, poolSize, taskQueueSize, 
tf, idleCpuLevel, queueSelectorFactory.create(), rejectionHandlerFactory.create()).start() + new AffinityPool(id, + poolSize, + taskQueueSize, + tf, + idleCpuLevel, + queueSelectorFactory.create(), + rejectionHandlerFactory.create()).start() } } } @@ -369,6 +382,7 @@ trait QueueSelectorFactory { * queues, return which of the queues that `Runnable` should be placed in. */ trait QueueSelector { + /** * Must be deterministic—return the same value for the same input. * @returns given a `Runnable` a number between 0 .. `queues` (exclusive) @@ -393,34 +407,38 @@ private[akka] final class ThrowOnOverflowRejectionHandler extends RejectionHandl */ @InternalApi @ApiMayChange -private[akka] final class FairDistributionHashCache( final val config: Config) extends QueueSelectorFactory { +private[akka] final class FairDistributionHashCache(final val config: Config) extends QueueSelectorFactory { private final val MaxFairDistributionThreshold = 2048 - private[this] final val fairDistributionThreshold = config.getInt("fair-work-distribution.threshold").requiring(thr => - 0 <= thr && thr <= MaxFairDistributionThreshold, s"fair-work-distribution.threshold must be between 0 and $MaxFairDistributionThreshold") + private[this] final val fairDistributionThreshold = config + .getInt("fair-work-distribution.threshold") + .requiring(thr => 0 <= thr && thr <= MaxFairDistributionThreshold, + s"fair-work-distribution.threshold must be between 0 and $MaxFairDistributionThreshold") - override final def create(): QueueSelector = new AtomicReference[ImmutableIntMap](ImmutableIntMap.empty) with QueueSelector { - override def toString: String = s"FairDistributionHashCache(fairDistributionThreshold = $fairDistributionThreshold)" - private[this] final def improve(h: Int): Int = 0x7FFFFFFF & (reverseBytes(h * 0x9e3775cd) * 0x9e3775cd) // `sbhash`: In memory of Phil Bagwell. 
- override final def getQueue(command: Runnable, queues: Int): Int = { - val runnableHash = command.hashCode() - if (fairDistributionThreshold == 0) - improve(runnableHash) % queues - else { - @tailrec - def cacheLookup(prev: ImmutableIntMap, hash: Int): Int = { - val existingIndex = prev.get(runnableHash) - if (existingIndex >= 0) existingIndex - else if (prev.size > fairDistributionThreshold) improve(hash) % queues - else { - val index = prev.size % queues - if (compareAndSet(prev, prev.updated(runnableHash, index))) index - else cacheLookup(get(), hash) + override final def create(): QueueSelector = + new AtomicReference[ImmutableIntMap](ImmutableIntMap.empty) with QueueSelector { + override def toString: String = + s"FairDistributionHashCache(fairDistributionThreshold = $fairDistributionThreshold)" + private[this] final def improve(h: Int): Int = + 0x7FFFFFFF & (reverseBytes(h * 0x9e3775cd) * 0x9e3775cd) // `sbhash`: In memory of Phil Bagwell. + override final def getQueue(command: Runnable, queues: Int): Int = { + val runnableHash = command.hashCode() + if (fairDistributionThreshold == 0) + improve(runnableHash) % queues + else { + @tailrec + def cacheLookup(prev: ImmutableIntMap, hash: Int): Int = { + val existingIndex = prev.get(runnableHash) + if (existingIndex >= 0) existingIndex + else if (prev.size > fairDistributionThreshold) improve(hash) % queues + else { + val index = prev.size % queues + if (compareAndSet(prev, prev.updated(runnableHash, index))) index + else cacheLookup(get(), hash) + } } + cacheLookup(get(), runnableHash) } - cacheLookup(get(), runnableHash) } } - } } - diff --git a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala index c3b5da3d58..8265721e04 100644 --- a/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala +++ b/akka-actor/src/main/scala/akka/dispatch/sysmsg/SystemMessage.scala @@ -5,7 +5,7 @@ package akka.dispatch.sysmsg import 
scala.annotation.tailrec -import akka.actor.{ ActorInitializationException, InternalActorRef, ActorRef, PossiblyHarmful } +import akka.actor.{ ActorInitializationException, ActorRef, InternalActorRef, PossiblyHarmful } import akka.actor.DeadLetterSuppression /** @@ -19,11 +19,13 @@ private[akka] object SystemMessageList { final val ENil: EarliestFirstSystemMessageList = new EarliestFirstSystemMessageList(null) @tailrec - private[sysmsg] def sizeInner(head: SystemMessage, acc: Int): Int = if (head eq null) acc else sizeInner(head.next, acc + 1) + private[sysmsg] def sizeInner(head: SystemMessage, acc: Int): Int = + if (head eq null) acc else sizeInner(head.next, acc + 1) @tailrec private[sysmsg] def reverseInner(head: SystemMessage, acc: SystemMessage): SystemMessage = { - if (head eq null) acc else { + if (head eq null) acc + else { val next = head.next head.next = acc reverseInner(next, head) @@ -259,12 +261,14 @@ private[akka] case object NoMessage extends SystemMessage // switched into the m * INTERNAL API */ @SerialVersionUID(1L) -private[akka] final case class Failed(child: ActorRef, cause: Throwable, uid: Int) extends SystemMessage - with StashWhenFailed - with StashWhenWaitingForChildren +private[akka] final case class Failed(child: ActorRef, cause: Throwable, uid: Int) + extends SystemMessage + with StashWhenFailed + with StashWhenWaitingForChildren @SerialVersionUID(1L) -private[akka] final case class DeathWatchNotification( - actor: ActorRef, - existenceConfirmed: Boolean, - addressTerminated: Boolean) extends SystemMessage with DeadLetterSuppression +private[akka] final case class DeathWatchNotification(actor: ActorRef, + existenceConfirmed: Boolean, + addressTerminated: Boolean) + extends SystemMessage + with DeadLetterSuppression diff --git a/akka-actor/src/main/scala/akka/event/ActorClassificationUnsubscriber.scala b/akka-actor/src/main/scala/akka/event/ActorClassificationUnsubscriber.scala index 1eb86455cd..9b2089582b 100644 --- 
a/akka-actor/src/main/scala/akka/event/ActorClassificationUnsubscriber.scala +++ b/akka-actor/src/main/scala/akka/event/ActorClassificationUnsubscriber.scala @@ -15,7 +15,9 @@ import akka.util.unused * * Watches all actors which subscribe on the given event stream, and unsubscribes them from it when they are Terminated. */ -protected[akka] class ActorClassificationUnsubscriber(bus: ManagedActorClassification, debug: Boolean) extends Actor with Stash { +protected[akka] class ActorClassificationUnsubscriber(bus: ManagedActorClassification, debug: Boolean) + extends Actor + with Stash { import ActorClassificationUnsubscriber._ @@ -29,8 +31,10 @@ protected[akka] class ActorClassificationUnsubscriber(bus: ManagedActorClassific def receive = { case Register(actor, seq) if seq == nextSeq => - if (debug) context.system.eventStream.publish(Logging.Debug(simpleName(getClass), getClass, s"registered watch for $actor in $bus")) - context watch actor + if (debug) + context.system.eventStream + .publish(Logging.Debug(simpleName(getClass), getClass, s"registered watch for $actor in $bus")) + context.watch(actor) atSeq = nextSeq unstashAll() @@ -38,8 +42,10 @@ protected[akka] class ActorClassificationUnsubscriber(bus: ManagedActorClassific stash() case Unregister(actor, seq) if seq == nextSeq => - if (debug) context.system.eventStream.publish(Logging.Debug(simpleName(getClass), getClass, s"unregistered watch of $actor in $bus")) - context unwatch actor + if (debug) + context.system.eventStream + .publish(Logging.Debug(simpleName(getClass), getClass, s"unregistered watch of $actor in $bus")) + context.unwatch(actor) atSeq = nextSeq unstashAll() @@ -47,10 +53,12 @@ protected[akka] class ActorClassificationUnsubscriber(bus: ManagedActorClassific stash() case Terminated(actor) => - if (debug) context.system.eventStream.publish(Logging.Debug(simpleName(getClass), getClass, s"actor $actor has terminated, unsubscribing it from $bus")) + if (debug) + context.system.eventStream.publish( + 
Logging.Debug(simpleName(getClass), getClass, s"actor $actor has terminated, unsubscribing it from $bus")) // the `unsubscribe` will trigger another `Unregister(actor, _)` message to this unsubscriber; // but since that actor is terminated, there cannot be any harm in processing an Unregister for it. - bus unsubscribe actor + bus.unsubscribe(actor) } } @@ -69,10 +77,12 @@ private[akka] object ActorClassificationUnsubscriber { def start(system: ActorSystem, bus: ManagedActorClassification, @unused debug: Boolean = false) = { val debug = system.settings.config.getBoolean("akka.actor.debug.event-stream") - system.asInstanceOf[ExtendedActorSystem] + system + .asInstanceOf[ExtendedActorSystem] .systemActorOf(props(bus, debug), "actorClassificationUnsubscriber-" + unsubscribersCount.incrementAndGet()) } - private def props(eventBus: ManagedActorClassification, debug: Boolean) = Props(classOf[ActorClassificationUnsubscriber], eventBus, debug) + private def props(eventBus: ManagedActorClassification, debug: Boolean) = + Props(classOf[ActorClassificationUnsubscriber], eventBus, debug) } diff --git a/akka-actor/src/main/scala/akka/event/AddressTerminatedTopic.scala b/akka-actor/src/main/scala/akka/event/AddressTerminatedTopic.scala index f29522d55b..b83f7ad4fb 100644 --- a/akka-actor/src/main/scala/akka/event/AddressTerminatedTopic.scala +++ b/akka-actor/src/main/scala/akka/event/AddressTerminatedTopic.scala @@ -51,7 +51,7 @@ private[akka] final class AddressTerminatedTopic extends Extension { } def publish(msg: AddressTerminated): Unit = { - subscribers.get foreach { _.tell(msg, ActorRef.noSender) } + subscribers.get.foreach { _.tell(msg, ActorRef.noSender) } } } diff --git a/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala b/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala index bdf8d67e79..c6965ca77c 100644 --- a/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala +++ b/akka-actor/src/main/scala/akka/event/DeadLetterListener.scala @@ 
-32,11 +32,13 @@ class DeadLetterListener extends Actor { val origin = if (snd eq context.system.deadLetters) "without sender" else s"from $snd" val done = maxCount != Int.MaxValue && count >= maxCount val doneMsg = if (done) ", no more dead letters will be logged" else "" - eventStream.publish(Info(rcp.path.toString, rcp.getClass, - s"Message [${message.getClass.getName}] $origin to $rcp was not delivered. [$count] dead letters encountered$doneMsg. " + - s"If this is not an expected behavior, then [$rcp] may have terminated unexpectedly, " + - "This logging can be turned off or adjusted with configuration settings 'akka.log-dead-letters' " + - "and 'akka.log-dead-letters-during-shutdown'.")) + eventStream.publish( + Info(rcp.path.toString, + rcp.getClass, + s"Message [${message.getClass.getName}] $origin to $rcp was not delivered. [$count] dead letters encountered$doneMsg. " + + s"If this is not an expected behavior, then [$rcp] may have terminated unexpectedly, " + + "This logging can be turned off or adjusted with configuration settings 'akka.log-dead-letters' " + + "and 'akka.log-dead-letters-during-shutdown'.")) if (done) context.stop(self) } diff --git a/akka-actor/src/main/scala/akka/event/EventBus.scala b/akka-actor/src/main/scala/akka/event/EventBus.scala index a5210fc1ac..690458cfcb 100644 --- a/akka-actor/src/main/scala/akka/event/EventBus.scala +++ b/akka-actor/src/main/scala/akka/event/EventBus.scala @@ -4,7 +4,7 @@ package akka.event -import akka.actor.{ ActorSystem, ActorRef } +import akka.actor.{ ActorRef, ActorSystem } import akka.util.Index import java.util.concurrent.ConcurrentSkipListSet import java.util.Comparator @@ -55,7 +55,7 @@ trait EventBus { */ trait ActorEventBus extends EventBus { type Subscriber = ActorRef - protected def compareSubscribers(a: ActorRef, b: ActorRef) = a compareTo b + protected def compareSubscribers(a: ActorRef, b: ActorRef) = a.compareTo(b) } /** @@ -165,14 +165,15 @@ trait SubchannelClassification { this: EventBus 
=> val c = classify(event) val recv = if (cache contains c) cache(c) // c will never be removed from cache - else subscriptions.synchronized { - if (cache contains c) cache(c) - else { - addToCache(subscriptions.addKey(c)) - cache(c) + else + subscriptions.synchronized { + if (cache contains c) cache(c) + else { + addToCache(subscriptions.addKey(c)) + cache(c) + } } - } - recv foreach (publish(event, _)) + recv.foreach(publish(event, _)) } /** @@ -182,16 +183,16 @@ trait SubchannelClassification { this: EventBus => private[akka] def hasSubscriptions(subscriber: Subscriber): Boolean = // FIXME binary incompatible, but I think it is safe to filter out this problem, // since it is only called from new functionality in EventStreamUnsubscriber - cache.values exists { _ contains subscriber } + cache.values.exists { _ contains subscriber } private def removeFromCache(changes: immutable.Seq[(Classifier, Set[Subscriber])]): Unit = cache = changes.foldLeft(cache) { - case (m, (c, cs)) => m.updated(c, m.getOrElse(c, Set.empty[Subscriber]) diff cs) + case (m, (c, cs)) => m.updated(c, m.getOrElse(c, Set.empty[Subscriber]).diff(cs)) } private def addToCache(changes: immutable.Seq[(Classifier, Set[Subscriber])]): Unit = cache = changes.foldLeft(cache) { - case (m, (c, cs)) => m.updated(c, m.getOrElse(c, Set.empty[Subscriber]) union cs) + case (m, (c, cs)) => m.updated(c, m.getOrElse(c, Set.empty[Subscriber]).union(cs)) } } @@ -203,12 +204,14 @@ trait SubchannelClassification { this: EventBus => * Note: the compareClassifiers and compareSubscribers must together form an absolute ordering (think java.util.Comparator.compare) */ trait ScanningClassification { self: EventBus => - protected final val subscribers = new ConcurrentSkipListSet[(Classifier, Subscriber)](new Comparator[(Classifier, Subscriber)] { - def compare(a: (Classifier, Subscriber), b: (Classifier, Subscriber)): Int = compareClassifiers(a._1, b._1) match { - case 0 => compareSubscribers(a._2, b._2) - case other => 
other - } - }) + protected final val subscribers = + new ConcurrentSkipListSet[(Classifier, Subscriber)](new Comparator[(Classifier, Subscriber)] { + def compare(a: (Classifier, Subscriber), b: (Classifier, Subscriber)): Int = + compareClassifiers(a._1, b._1) match { + case 0 => compareSubscribers(a._2, b._2) + case other => other + } + }) /** * Provides a total ordering of Classifiers (think java.util.Comparator.compare) @@ -263,7 +266,8 @@ trait ManagedActorClassification { this: ActorEventBus with ActorClassifier => protected def system: ActorSystem - private class ManagedActorClassificationMappings(val seqNr: Int, val backing: Map[ActorRef, immutable.TreeSet[ActorRef]]) { + private class ManagedActorClassificationMappings(val seqNr: Int, + val backing: Map[ActorRef, immutable.TreeSet[ActorRef]]) { def get(monitored: ActorRef): immutable.TreeSet[ActorRef] = backing.getOrElse(monitored, empty) @@ -342,7 +346,11 @@ trait ManagedActorClassification { this: ActorEventBus with ActorClassifier => } } - try { dissociateAsMonitored(actor) } finally { dissociateAsMonitor(actor) } + try { + dissociateAsMonitored(actor) + } finally { + dissociateAsMonitor(actor) + } } @tailrec @@ -411,4 +419,3 @@ trait ManagedActorClassification { this: ActorEventBus with ActorClassifier => true } } - diff --git a/akka-actor/src/main/scala/akka/event/EventStream.scala b/akka-actor/src/main/scala/akka/event/EventStream.scala index 9b04a44002..0b397f040a 100644 --- a/akka-actor/src/main/scala/akka/event/EventStream.scala +++ b/akka-actor/src/main/scala/akka/event/EventStream.scala @@ -31,7 +31,7 @@ class EventStream(sys: ActorSystem, private val debug: Boolean) extends LoggingB protected implicit val subclassification = new Subclassification[Class[_]] { def isEqual(x: Class[_], y: Class[_]) = x == y - def isSubclass(x: Class[_], y: Class[_]) = y isAssignableFrom x + def isSubclass(x: Class[_], y: Class[_]) = y.isAssignableFrom(x) } protected def classify(event: Any): Class[_] = 
event.getClass @@ -43,7 +43,8 @@ class EventStream(sys: ActorSystem, private val debug: Boolean) extends LoggingB override def subscribe(subscriber: ActorRef, channel: Class[_]): Boolean = { if (subscriber eq null) throw new IllegalArgumentException("subscriber is null") - if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "subscribing " + subscriber + " to channel " + channel)) + if (debug) + publish(Logging.Debug(simpleName(this), this.getClass, "subscribing " + subscriber + " to channel " + channel)) registerWithUnsubscriber(subscriber) super.subscribe(subscriber, channel) } @@ -51,7 +52,9 @@ class EventStream(sys: ActorSystem, private val debug: Boolean) extends LoggingB override def unsubscribe(subscriber: ActorRef, channel: Class[_]): Boolean = { if (subscriber eq null) throw new IllegalArgumentException("subscriber is null") val ret = super.unsubscribe(subscriber, channel) - if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "unsubscribing " + subscriber + " from channel " + channel)) + if (debug) + publish( + Logging.Debug(simpleName(this), this.getClass, "unsubscribing " + subscriber + " from channel " + channel)) unregisterIfNoMoreSubscribedChannels(subscriber) ret } @@ -59,7 +62,8 @@ class EventStream(sys: ActorSystem, private val debug: Boolean) extends LoggingB override def unsubscribe(subscriber: ActorRef): Unit = { if (subscriber eq null) throw new IllegalArgumentException("subscriber is null") super.unsubscribe(subscriber) - if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "unsubscribing " + subscriber + " from all channels")) + if (debug) + publish(Logging.Debug(simpleName(this), this.getClass, "unsubscribing " + subscriber + " from all channels")) unregisterIfNoMoreSubscribedChannels(subscriber) } @@ -78,22 +82,33 @@ class EventStream(sys: ActorSystem, private val debug: Boolean) extends LoggingB final private[akka] def initUnsubscriber(unsubscriber: ActorRef): Boolean = { // sys may be null for 
backwards compatibility reasons if (sys eq null) false - else initiallySubscribedOrUnsubscriber.get match { - case value @ Left(subscribers) => - if (initiallySubscribedOrUnsubscriber.compareAndSet(value, Right(unsubscriber))) { - if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "initialized unsubscriber to: " + unsubscriber + ", registering " + subscribers.size + " initial subscribers with it")) - subscribers foreach registerWithUnsubscriber - true - } else { - // recurse, because either new subscribers have been registered since `get` (retry Left case), - // or another thread has succeeded in setting it's unsubscriber (end on Right case) - initUnsubscriber(unsubscriber) - } + else + initiallySubscribedOrUnsubscriber.get match { + case value @ Left(subscribers) => + if (initiallySubscribedOrUnsubscriber.compareAndSet(value, Right(unsubscriber))) { + if (debug) + publish( + Logging.Debug( + simpleName(this), + this.getClass, + "initialized unsubscriber to: " + unsubscriber + ", registering " + subscribers.size + " initial subscribers with it")) + subscribers.foreach(registerWithUnsubscriber) + true + } else { + // recurse, because either new subscribers have been registered since `get` (retry Left case), + // or another thread has succeeded in setting it's unsubscriber (end on Right case) + initUnsubscriber(unsubscriber) + } - case Right(presentUnsubscriber) => - if (debug) publish(Logging.Debug(simpleName(this), this.getClass, s"not using unsubscriber $unsubscriber, because already initialized with $presentUnsubscriber")) - false - } + case Right(presentUnsubscriber) => + if (debug) + publish( + Logging.Debug( + simpleName(this), + this.getClass, + s"not using unsubscriber $unsubscriber, because already initialized with $presentUnsubscriber")) + false + } } /** diff --git a/akka-actor/src/main/scala/akka/event/EventStreamUnsubscriber.scala b/akka-actor/src/main/scala/akka/event/EventStreamUnsubscriber.scala index 90f9483ccb..aed20235d9 100644 --- 
a/akka-actor/src/main/scala/akka/event/EventStreamUnsubscriber.scala +++ b/akka-actor/src/main/scala/akka/event/EventStreamUnsubscriber.scala @@ -25,26 +25,36 @@ protected[akka] class EventStreamUnsubscriber(eventStream: EventStream, debug: B import EventStreamUnsubscriber._ override def preStart(): Unit = { - if (debug) eventStream.publish(Logging.Debug(simpleName(getClass), getClass, s"registering unsubscriber with $eventStream")) - eventStream initUnsubscriber self + if (debug) + eventStream.publish(Logging.Debug(simpleName(getClass), getClass, s"registering unsubscriber with $eventStream")) + eventStream.initUnsubscriber(self) } def receive = { case Register(actor) => - if (debug) eventStream.publish(Logging.Debug(simpleName(getClass), getClass, s"watching $actor in order to unsubscribe from EventStream when it terminates")) - context watch actor + if (debug) + eventStream.publish( + Logging.Debug(simpleName(getClass), + getClass, + s"watching $actor in order to unsubscribe from EventStream when it terminates")) + context.watch(actor) case UnregisterIfNoMoreSubscribedChannels(actor) if eventStream.hasSubscriptions(actor) => // do nothing // hasSubscriptions can be slow, but it's better for this actor to take the hit than the EventStream case UnregisterIfNoMoreSubscribedChannels(actor) => - if (debug) eventStream.publish(Logging.Debug(simpleName(getClass), getClass, s"unwatching $actor, since has no subscriptions")) - context unwatch actor + if (debug) + eventStream.publish( + Logging.Debug(simpleName(getClass), getClass, s"unwatching $actor, since has no subscriptions")) + context.unwatch(actor) case Terminated(actor) => - if (debug) eventStream.publish(Logging.Debug(simpleName(getClass), getClass, s"unsubscribe $actor from $eventStream, because it was terminated")) - eventStream unsubscribe actor + if (debug) + eventStream.publish( + Logging + .Debug(simpleName(getClass), getClass, s"unsubscribe $actor from $eventStream, because it was terminated")) + 
eventStream.unsubscribe(actor) } } @@ -68,7 +78,8 @@ private[akka] object EventStreamUnsubscriber { def start(system: ActorSystem, stream: EventStream) = { val debug = system.settings.config.getBoolean("akka.actor.debug.event-stream") - system.asInstanceOf[ExtendedActorSystem] + system + .asInstanceOf[ExtendedActorSystem] .systemActorOf(props(stream, debug), "eventStreamUnsubscriber-" + unsubscribersCount.incrementAndGet()) } diff --git a/akka-actor/src/main/scala/akka/event/LoggerMailbox.scala b/akka-actor/src/main/scala/akka/event/LoggerMailbox.scala index bcd0efa1e1..ea7af5a2eb 100644 --- a/akka-actor/src/main/scala/akka/event/LoggerMailbox.scala +++ b/akka-actor/src/main/scala/akka/event/LoggerMailbox.scala @@ -19,8 +19,9 @@ trait LoggerMessageQueueSemantics /** * INTERNAL API */ -private[akka] class LoggerMailboxType(@unused settings: ActorSystem.Settings, @unused config: Config) extends MailboxType - with ProducesMessageQueue[LoggerMailbox] { +private[akka] class LoggerMailboxType(@unused settings: ActorSystem.Settings, @unused config: Config) + extends MailboxType + with ProducesMessageQueue[LoggerMailbox] { override def create(owner: Option[ActorRef], system: Option[ActorSystem]) = (owner, system) match { case (Some(o), Some(s)) => new LoggerMailbox(o, s) @@ -32,7 +33,8 @@ private[akka] class LoggerMailboxType(@unused settings: ActorSystem.Settings, @u * INTERNAL API */ private[akka] class LoggerMailbox(@unused owner: ActorRef, system: ActorSystem) - extends UnboundedMailbox.MessageQueue with LoggerMessageQueueSemantics { + extends UnboundedMailbox.MessageQueue + with LoggerMessageQueueSemantics { override def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = { if (hasMessages) { diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 7449dbe403..b492e38f8d 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ 
-12,7 +12,7 @@ import akka.actor._ import akka.annotation.{ DoNotInherit, InternalApi } import akka.dispatch.RequiresMessageQueue import akka.event.Logging._ -import akka.util.{ Helpers, ReentrantGuard, unused } +import akka.util.{ unused, Helpers, ReentrantGuard } import akka.{ AkkaException, ConfigurationException } import scala.annotation.implicitNotFound @@ -73,12 +73,16 @@ trait LoggingBus extends ActorEventBus { } private def setUpStdoutLogger(config: Settings): Unit = { - val level = levelFor(config.StdoutLogLevel) getOrElse { + val level = levelFor(config.StdoutLogLevel).getOrElse { // only log initialization errors directly with StandardOutLogger.print - StandardOutLogger.print(Error(new LoggerException, simpleName(this), this.getClass, "unknown akka.stdout-loglevel " + config.StdoutLogLevel)) + StandardOutLogger.print( + Error(new LoggerException, + simpleName(this), + this.getClass, + "unknown akka.stdout-loglevel " + config.StdoutLogLevel)) ErrorLevel } - AllLogLevels filter (level >= _) foreach (l => subscribe(StandardOutLogger, classFor(l))) + AllLogLevels.filter(level >= _).foreach(l => subscribe(StandardOutLogger, classFor(l))) guard.withGuard { loggers :+= StandardOutLogger _logLevel = level @@ -98,9 +102,10 @@ trait LoggingBus extends ActorEventBus { */ private[akka] def startDefaultLoggers(system: ActorSystemImpl): Unit = { val logName = simpleName(this) + "(" + system + ")" - val level = levelFor(system.settings.LogLevel) getOrElse { + val level = levelFor(system.settings.LogLevel).getOrElse { // only log initialization errors directly with StandardOutLogger.print - StandardOutLogger.print(Error(new LoggerException, logName, this.getClass, "unknown akka.loglevel " + system.settings.LogLevel)) + StandardOutLogger.print( + Error(new LoggerException, logName, this.getClass, "unknown akka.loglevel " + system.settings.LogLevel)) ErrorLevel } try { @@ -113,13 +118,18 @@ trait LoggingBus extends ActorEventBus { loggerName <- defaultLoggers if 
loggerName != StandardOutLogger.getClass.getName } yield { - system.dynamicAccess.getClassFor[Actor](loggerName).map({ - case actorClass => addLogger(system, actorClass, level, logName) - }).recover({ - case e => throw new ConfigurationException( - "Logger specified in config can't be loaded [" + loggerName + - "] due to [" + e.toString + "]", e) - }).get + system.dynamicAccess + .getClassFor[Actor](loggerName) + .map({ + case actorClass => addLogger(system, actorClass, level, logName) + }) + .recover({ + case e => + throw new ConfigurationException("Logger specified in config can't be loaded [" + loggerName + + "] due to [" + e.toString + "]", + e) + }) + .get } guard.withGuard { loggers = myloggers @@ -127,12 +137,14 @@ trait LoggingBus extends ActorEventBus { } try { if (system.settings.DebugUnhandledMessage) - subscribe(system.systemActorOf(Props(new Actor { - def receive = { - case UnhandledMessage(msg, sender, rcp) => - publish(Debug(rcp.path.toString, rcp.getClass, "unhandled message from " + sender + ": " + msg)) - } - }), "UnhandledMessageForwarder"), classOf[UnhandledMessage]) + subscribe( + system.systemActorOf(Props(new Actor { + def receive = { + case UnhandledMessage(msg, sender, rcp) => + publish(Debug(rcp.path.toString, rcp.getClass, "unhandled message from " + sender + ": " + msg)) + } + }), "UnhandledMessageForwarder"), + classOf[UnhandledMessage]) } catch { case _: InvalidActorNameException => // ignore if it is already running } @@ -174,19 +186,27 @@ trait LoggingBus extends ActorEventBus { /** * INTERNAL API */ - private def addLogger(system: ActorSystemImpl, clazz: Class[_ <: Actor], level: LogLevel, logName: String): ActorRef = { + private def addLogger(system: ActorSystemImpl, + clazz: Class[_ <: Actor], + level: LogLevel, + logName: String): ActorRef = { val name = "log" + LogExt(system).id() + "-" + simpleName(clazz) val actor = system.systemActorOf(Props(clazz).withDispatcher(system.settings.LoggersDispatcher), name) implicit def timeout 
= system.settings.LoggerStartTimeout import akka.pattern.ask - val response = try Await.result(actor ? InitializeLogger(this), timeout.duration) catch { + val response = try Await.result(actor ? InitializeLogger(this), timeout.duration) + catch { case _: TimeoutException => - publish(Warning(logName, this.getClass, "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)")) + publish( + Warning(logName, + this.getClass, + "Logger " + name + " did not respond within " + timeout + " to InitializeLogger(bus)")) "[TIMEOUT]" } if (response != LoggerInitialized) - throw new LoggerInitializationException("Logger " + name + " did not respond with LoggerInitialized, sent instead " + response) - AllLogLevels filter (level >= _) foreach (l => subscribe(actor, classFor(l))) + throw new LoggerInitializationException( + "Logger " + name + " did not respond with LoggerInitialized, sent instead " + response) + AllLogLevels.filter(level >= _).foreach(l => subscribe(actor, classFor(l))) publish(Debug(logName, this.getClass, "logger " + name + " started")) actor } @@ -235,7 +255,9 @@ trait LoggingBus extends ActorEventBus { * * The default implementation of the second variant will just call the first. 
*/ -@implicitNotFound("Cannot find LogSource for ${T} please see ScalaDoc for LogSource for how to obtain or construct one.") trait LogSource[-T] { +@implicitNotFound( + "Cannot find LogSource for ${T} please see ScalaDoc for LogSource for how to obtain or construct one.") trait LogSource[ + -T] { def genString(t: T): String def genString(t: T, @unused system: ActorSystem): String = genString(t) def getClazz(t: T): Class[_] = t.getClass @@ -281,12 +303,13 @@ object LogSource { implicit val fromActorRef: LogSource[ActorRef] = new LogSource[ActorRef] { def genString(a: ActorRef) = a.path.toString - override def genString(a: ActorRef, system: ActorSystem) = try { - a.path.toStringWithAddress(system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress) - } catch { - // it can fail if the ActorSystem (remoting) is not completely started yet - case NonFatal(_) => a.path.toString - } + override def genString(a: ActorRef, system: ActorSystem) = + try { + a.path.toStringWithAddress(system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress) + } catch { + // it can fail if the ActorSystem (remoting) is not completely started yet + case NonFatal(_) => a.path.toString + } } // this one unfortunately does not work as implicit, because existential types have some weird behavior @@ -516,6 +539,7 @@ object Logging { val (str, clazz) = LogSource(logSource, system) new BusLogging(system.eventStream, str, clazz, system.asInstanceOf[ExtendedActorSystem].logFilter) } + /** * Obtain LoggingAdapter with additional "marker" support (which some logging frameworks are able to utilise) * for the given actor system and source object. This will use the system’s event stream and include the system’s @@ -559,6 +583,7 @@ object Logging { val (str, clazz) = LogSource(logSource) new BusLogging(bus, str, clazz) } + /** * Obtain LoggingAdapter for the given logging bus and source object. 
* @@ -587,6 +612,7 @@ object Logging { val system = logSource.context.system.asInstanceOf[ExtendedActorSystem] new BusLogging(system.eventStream, str, clazz, system.logFilter) with DiagnosticLoggingAdapter } + /** * Obtain LoggingAdapter with marker and MDC support for the given actor. * Don't use it outside its specific Actor as it isn't thread safe @@ -672,6 +698,7 @@ object Logging { * Base type of LogEvents */ sealed trait LogEvent extends NoSerializationVerificationNeeded { + /** * The thread that created this log event */ @@ -734,7 +761,12 @@ object Logging { case level => throw new IllegalArgumentException(s"Unsupported log level [$level]") } - def apply(level: LogLevel, logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker): LogEvent = level match { + def apply(level: LogLevel, + logSource: String, + logClass: Class[_], + message: Any, + mdc: MDC, + marker: LogMarker): LogEvent = level match { case ErrorLevel => Error(logSource, logClass, message, mdc, marker) case WarningLevel => Warning(logSource, logClass, message, mdc, marker) case InfoLevel => Info(logSource, logClass, message, mdc, marker) @@ -751,16 +783,31 @@ object Logging { /** * For ERROR Logging */ - case class Error(override val cause: Throwable, logSource: String, logClass: Class[_], message: Any = "") extends LogEvent with LogEventWithCause { + case class Error(override val cause: Throwable, logSource: String, logClass: Class[_], message: Any = "") + extends LogEvent + with LogEventWithCause { def this(logSource: String, logClass: Class[_], message: Any) = this(Error.NoCause, logSource, logClass, message) override def level = ErrorLevel } - class Error2(override val cause: Throwable, logSource: String, logClass: Class[_], message: Any = "", override val mdc: MDC) extends Error(cause, logSource, logClass, message) { - def this(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = this(Error.NoCause, logSource, logClass, message, mdc) + class 
Error2(override val cause: Throwable, + logSource: String, + logClass: Class[_], + message: Any = "", + override val mdc: MDC) + extends Error(cause, logSource, logClass, message) { + def this(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = + this(Error.NoCause, logSource, logClass, message, mdc) } - class Error3(override val cause: Throwable, logSource: String, logClass: Class[_], message: Any, override val mdc: MDC, override val marker: LogMarker) - extends Error2(cause, logSource, logClass, message, mdc) with LogEventWithMarker { - def this(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = this(Error.NoCause, logSource, logClass, message, mdc, marker) + class Error3(override val cause: Throwable, + logSource: String, + logClass: Class[_], + message: Any, + override val mdc: MDC, + override val marker: LogMarker) + extends Error2(cause, logSource, logClass, message, mdc) + with LogEventWithMarker { + def this(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = + this(Error.NoCause, logSource, logClass, message, mdc, marker) } object Error { @@ -790,17 +837,34 @@ object Logging { case class Warning(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent { override def level = WarningLevel } - class Warning2(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC) extends Warning(logSource, logClass, message) - class Warning3(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC, override val marker: LogMarker) - extends Warning2(logSource, logClass, message, mdc) with LogEventWithMarker - class Warning4(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC, override val marker: LogMarker, override val cause: Throwable) - extends Warning2(logSource, logClass, message, mdc) with LogEventWithMarker with LogEventWithCause + class Warning2(logSource: String, logClass: Class[_], message: Any, override val mdc: 
MDC) + extends Warning(logSource, logClass, message) + class Warning3(logSource: String, + logClass: Class[_], + message: Any, + override val mdc: MDC, + override val marker: LogMarker) + extends Warning2(logSource, logClass, message, mdc) + with LogEventWithMarker + class Warning4(logSource: String, + logClass: Class[_], + message: Any, + override val mdc: MDC, + override val marker: LogMarker, + override val cause: Throwable) + extends Warning2(logSource, logClass, message, mdc) + with LogEventWithMarker + with LogEventWithCause object Warning { - def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = new Warning2(logSource, logClass, message, mdc) - def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = new Warning3(logSource, logClass, message, mdc, marker) + def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = + new Warning2(logSource, logClass, message, mdc) + def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = + new Warning3(logSource, logClass, message, mdc, marker) - def apply(cause: Throwable, logSource: String, logClass: Class[_], message: Any, mdc: MDC) = new Warning4(logSource, logClass, message, mdc, null, cause) - def apply(cause: Throwable, logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = new Warning4(logSource, logClass, message, mdc, marker, cause) + def apply(cause: Throwable, logSource: String, logClass: Class[_], message: Any, mdc: MDC) = + new Warning4(logSource, logClass, message, mdc, null, cause) + def apply(cause: Throwable, logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = + new Warning4(logSource, logClass, message, mdc, marker, cause) } /** @@ -809,12 +873,20 @@ object Logging { case class Info(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent { override def level = InfoLevel } - class Info2(logSource: String, logClass: 
Class[_], message: Any, override val mdc: MDC) extends Info(logSource, logClass, message) - class Info3(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC, override val marker: LogMarker) - extends Info2(logSource, logClass, message, mdc) with LogEventWithMarker + class Info2(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC) + extends Info(logSource, logClass, message) + class Info3(logSource: String, + logClass: Class[_], + message: Any, + override val mdc: MDC, + override val marker: LogMarker) + extends Info2(logSource, logClass, message, mdc) + with LogEventWithMarker object Info { - def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = new Info2(logSource, logClass, message, mdc) - def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = new Info3(logSource, logClass, message, mdc, marker) + def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = + new Info2(logSource, logClass, message, mdc) + def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = + new Info3(logSource, logClass, message, mdc, marker) } /** @@ -823,18 +895,28 @@ object Logging { case class Debug(logSource: String, logClass: Class[_], message: Any = "") extends LogEvent { override def level = DebugLevel } - class Debug2(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC) extends Debug(logSource, logClass, message) - class Debug3(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC, override val marker: LogMarker) - extends Debug2(logSource, logClass, message, mdc) with LogEventWithMarker + class Debug2(logSource: String, logClass: Class[_], message: Any, override val mdc: MDC) + extends Debug(logSource, logClass, message) + class Debug3(logSource: String, + logClass: Class[_], + message: Any, + override val mdc: MDC, + override val marker: LogMarker) + extends Debug2(logSource, 
logClass, message, mdc) + with LogEventWithMarker object Debug { - def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = new Debug2(logSource, logClass, message, mdc) - def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = new Debug3(logSource, logClass, message, mdc, marker) + def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC) = + new Debug2(logSource, logClass, message, mdc) + def apply(logSource: String, logClass: Class[_], message: Any, mdc: MDC, marker: LogMarker) = + new Debug3(logSource, logClass, message, mdc, marker) } /** INTERNAL API, Marker interface for LogEvents containing Markers, which can be set for example on an slf4j logger */ sealed trait LogEventWithMarker extends LogEvent { + /** Marker attribute is nullable due to backward binary compatibility in the class `Warning4` */ def marker: LogMarker + /** Appends the marker to the Debug/Info/Warning/Error toString representations */ override def toString = { val s = super.toString @@ -859,6 +941,7 @@ object Logging { */ abstract class LoggerInitialized case object LoggerInitialized extends LoggerInitialized { + /** * Java API: get the singleton instance */ @@ -888,83 +971,75 @@ object Logging { case e: Warning => warning(e) case e: Info => info(e) case e: Debug => debug(e) - case e => warning(Warning(simpleName(this), this.getClass, "received unexpected event of class " + e.getClass + ": " + e)) + case e => + warning(Warning(simpleName(this), this.getClass, "received unexpected event of class " + e.getClass + ": " + e)) } def error(event: Error): Unit = event match { case e: Error3 => // has marker val f = if (event.cause == Error.NoCause) ErrorWithoutCauseWithMarkerFormat else ErrorFormatWithMarker - println(f.format( - e.marker.name, - timestamp(event), - event.thread.getName, - event.logSource, - formatMDC(event.mdc), - event.message, - stackTraceFor(event.cause))) + println( + f.format(e.marker.name, + 
timestamp(event), + event.thread.getName, + event.logSource, + formatMDC(event.mdc), + event.message, + stackTraceFor(event.cause))) case _ => val f = if (event.cause == Error.NoCause) ErrorFormatWithoutCause else ErrorFormat - println(f.format( - timestamp(event), - event.thread.getName, - event.logSource, - formatMDC(event.mdc), - event.message, - stackTraceFor(event.cause))) + println( + f.format(timestamp(event), + event.thread.getName, + event.logSource, + formatMDC(event.mdc), + event.message, + stackTraceFor(event.cause))) } def warning(event: Warning): Unit = event match { case e: Warning3 => // has marker - println(WarningWithMarkerFormat.format( - e.marker.name, - timestamp(event), - event.thread.getName, - event.logSource, - formatMDC(event.mdc), - event.message)) + println( + WarningWithMarkerFormat.format(e.marker.name, + timestamp(event), + event.thread.getName, + event.logSource, + formatMDC(event.mdc), + event.message)) case _ => - println(WarningFormat.format( - timestamp(event), - event.thread.getName, - event.logSource, - formatMDC(event.mdc), - event.message)) + println( + WarningFormat + .format(timestamp(event), event.thread.getName, event.logSource, formatMDC(event.mdc), event.message)) } def info(event: Info): Unit = event match { case e: Info3 => // has marker - println(InfoWithMarkerFormat.format( - e.marker.name, - timestamp(event), - event.thread.getName, - event.logSource, - formatMDC(event.mdc), - event.message)) + println( + InfoWithMarkerFormat.format(e.marker.name, + timestamp(event), + event.thread.getName, + event.logSource, + formatMDC(event.mdc), + event.message)) case _ => - println(InfoFormat.format( - timestamp(event), - event.thread.getName, - event.logSource, - formatMDC(event.mdc), - event.message)) + println( + InfoFormat + .format(timestamp(event), event.thread.getName, event.logSource, formatMDC(event.mdc), event.message)) } def debug(event: Debug): Unit = event match { case e: Debug3 => // has marker - 
println(DebugWithMarkerFormat.format( - e.marker.name, - timestamp(event), - event.thread.getName, - event.logSource, - formatMDC(event.mdc), - event.message)) + println( + DebugWithMarkerFormat.format(e.marker.name, + timestamp(event), + event.thread.getName, + event.logSource, + formatMDC(event.mdc), + event.message)) case _ => - println(DebugFormat.format( - timestamp(event), - event.thread.getName, - event.logSource, - formatMDC(event.mdc), - event.message)) + println( + DebugFormat + .format(timestamp(event), event.thread.getName, event.logSource, formatMDC(event.mdc), event.message)) } private def formatMDC(mdc: Map[String, Any]): String = { @@ -1112,6 +1187,7 @@ trait LoggingAdapter { * @see [[LoggingAdapter]] */ def error(cause: Throwable, message: String): Unit = { if (isErrorEnabled) notifyError(cause, message) } + /** * Message template with 1 replacement argument. * @@ -1119,28 +1195,40 @@ trait LoggingAdapter { * there are more than four arguments. * @see [[LoggingAdapter]] */ - def error(cause: Throwable, template: String, arg1: Any): Unit = { if (isErrorEnabled) notifyError(cause, format1(template, arg1)) } + def error(cause: Throwable, template: String, arg1: Any): Unit = { + if (isErrorEnabled) notifyError(cause, format1(template, arg1)) + } + /** * Message template with 2 replacement arguments. * @see [[LoggingAdapter]] */ - def error(cause: Throwable, template: String, arg1: Any, arg2: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2)) } + def error(cause: Throwable, template: String, arg1: Any, arg2: Any): Unit = { + if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2)) + } + /** * Message template with 3 replacement arguments. 
* @see [[LoggingAdapter]] */ - def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3)) } + def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { + if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3)) + } + /** * Message template with 4 replacement arguments. * @see [[LoggingAdapter]] */ - def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3, arg4)) } + def error(cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { + if (isErrorEnabled) notifyError(cause, format(template, arg1, arg2, arg3, arg4)) + } /** * Log message at error level, without providing the exception that caused the error. * @see [[LoggingAdapter]] */ def error(message: String): Unit = { if (isErrorEnabled) notifyError(message) } + /** * Message template with 1 replacement argument. * @@ -1149,27 +1237,37 @@ trait LoggingAdapter { * @see [[LoggingAdapter]] */ def error(template: String, arg1: Any): Unit = { if (isErrorEnabled) notifyError(format1(template, arg1)) } + /** * Message template with 2 replacement arguments. * @see [[LoggingAdapter]] */ - def error(template: String, arg1: Any, arg2: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2)) } + def error(template: String, arg1: Any, arg2: Any): Unit = { + if (isErrorEnabled) notifyError(format(template, arg1, arg2)) + } + /** * Message template with 3 replacement arguments. 
* @see [[LoggingAdapter]] */ - def error(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3)) } + def error(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { + if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3)) + } + /** * Message template with 4 replacement arguments. * @see [[LoggingAdapter]] */ - def error(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3, arg4)) } + def error(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { + if (isErrorEnabled) notifyError(format(template, arg1, arg2, arg3, arg4)) + } /** * Log message at warning level. * @see [[LoggingAdapter]] */ def warning(message: String): Unit = { if (isWarningEnabled) notifyWarning(message) } + /** * Message template with 1 replacement argument. * @@ -1178,27 +1276,37 @@ trait LoggingAdapter { * @see [[LoggingAdapter]] */ def warning(template: String, arg1: Any): Unit = { if (isWarningEnabled) notifyWarning(format1(template, arg1)) } + /** * Message template with 2 replacement arguments. * @see [[LoggingAdapter]] */ - def warning(template: String, arg1: Any, arg2: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2)) } + def warning(template: String, arg1: Any, arg2: Any): Unit = { + if (isWarningEnabled) notifyWarning(format(template, arg1, arg2)) + } + /** * Message template with 3 replacement arguments. * @see [[LoggingAdapter]] */ - def warning(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3)) } + def warning(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { + if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3)) + } + /** * Message template with 4 replacement arguments. 
* @see [[LoggingAdapter]] */ - def warning(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3, arg4)) } + def warning(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { + if (isWarningEnabled) notifyWarning(format(template, arg1, arg2, arg3, arg4)) + } /** * Log message at info level. * @see [[LoggingAdapter]] */ def info(message: String): Unit = { if (isInfoEnabled) notifyInfo(message) } + /** * Message template with 1 replacement argument. * @@ -1207,27 +1315,37 @@ trait LoggingAdapter { * @see [[LoggingAdapter]] */ def info(template: String, arg1: Any): Unit = { if (isInfoEnabled) notifyInfo(format1(template, arg1)) } + /** * Message template with 2 replacement arguments. * @see [[LoggingAdapter]] */ - def info(template: String, arg1: Any, arg2: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2)) } + def info(template: String, arg1: Any, arg2: Any): Unit = { + if (isInfoEnabled) notifyInfo(format(template, arg1, arg2)) + } + /** * Message template with 3 replacement arguments. * @see [[LoggingAdapter]] */ - def info(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3)) } + def info(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { + if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3)) + } + /** * Message template with 4 replacement arguments. * @see [[LoggingAdapter]] */ - def info(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3, arg4)) } + def info(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { + if (isInfoEnabled) notifyInfo(format(template, arg1, arg2, arg3, arg4)) + } /** * Log message at debug level. 
* @see [[LoggingAdapter]] */ def debug(message: String): Unit = { if (isDebugEnabled) notifyDebug(message) } + /** * Message template with 1 replacement argument. * @@ -1236,45 +1354,66 @@ trait LoggingAdapter { * @see [[LoggingAdapter]] */ def debug(template: String, arg1: Any): Unit = { if (isDebugEnabled) notifyDebug(format1(template, arg1)) } + /** * Message template with 2 replacement arguments. * @see [[LoggingAdapter]] */ - def debug(template: String, arg1: Any, arg2: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2)) } + def debug(template: String, arg1: Any, arg2: Any): Unit = { + if (isDebugEnabled) notifyDebug(format(template, arg1, arg2)) + } + /** * Message template with 3 replacement arguments. * @see [[LoggingAdapter]] */ - def debug(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3)) } + def debug(template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { + if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3)) + } + /** * Message template with 4 replacement arguments. * @see [[LoggingAdapter]] */ - def debug(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3, arg4)) } + def debug(template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { + if (isDebugEnabled) notifyDebug(format(template, arg1, arg2, arg3, arg4)) + } /** * Log message at the specified log level. */ def log(level: Logging.LogLevel, message: String): Unit = { if (isEnabled(level)) notifyLog(level, message) } + /** * Message template with 1 replacement argument. * * If `arg1` is an `Array` it will be expanded into replacement arguments, which is useful when * there are more than four arguments. 
*/ - def log(level: Logging.LogLevel, template: String, arg1: Any): Unit = { if (isEnabled(level)) notifyLog(level, format1(template, arg1)) } + def log(level: Logging.LogLevel, template: String, arg1: Any): Unit = { + if (isEnabled(level)) notifyLog(level, format1(template, arg1)) + } + /** * Message template with 2 replacement arguments. */ - def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2)) } + def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any): Unit = { + if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2)) + } + /** * Message template with 3 replacement arguments. */ - def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3)) } + def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = { + if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3)) + } + /** * Message template with 4 replacement arguments. 
*/ - def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3, arg4)) } + def log(level: Logging.LogLevel, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = { + if (isEnabled(level)) notifyLog(level, format(template, arg1, arg2, arg3, arg4)) + } /** * @return true if the specified log level is enabled @@ -1301,8 +1440,8 @@ trait LoggingAdapter { */ private def format1(t: String, arg: Any): String = arg match { case a: Array[_] if !a.getClass.getComponentType.isPrimitive => format(t, a: _*) - case a: Array[_] => format(t, a.map(_.asInstanceOf[AnyRef]): _*) - case x => format(t, x) + case a: Array[_] => format(t, a.map(_.asInstanceOf[AnyRef]): _*) + case x => format(t, x) } def format(t: String, arg: Any*): String = { @@ -1312,14 +1451,11 @@ trait LoggingAdapter { while (p < arg.length) { val index = t.indexOf("{}", startIndex) if (index == -1) { - sb.append(t.substring(startIndex, t.length)) - .append(" WARNING arguments left: ") - .append(arg.length - p) + sb.append(t.substring(startIndex, t.length)).append(" WARNING arguments left: ").append(arg.length - p) p = arg.length startIndex = t.length } else { - sb.append(t.substring(startIndex, index)) - .append(arg(p)) + sb.append(t.substring(startIndex, index)).append(arg(p)) startIndex = index + 2 p += 1 } @@ -1347,10 +1483,14 @@ trait LoggingFilter { } trait LoggingFilterWithMarker extends LoggingFilter { - def isErrorEnabled(logClass: Class[_], logSource: String, marker: LogMarker): Boolean = isErrorEnabled(logClass, logSource) - def isWarningEnabled(logClass: Class[_], logSource: String, marker: LogMarker): Boolean = isWarningEnabled(logClass, logSource) - def isInfoEnabled(logClass: Class[_], logSource: String, marker: LogMarker): Boolean = isInfoEnabled(logClass, logSource) - def isDebugEnabled(logClass: Class[_], logSource: String, marker: LogMarker): Boolean = 
isDebugEnabled(logClass, logSource) + def isErrorEnabled(logClass: Class[_], logSource: String, marker: LogMarker): Boolean = + isErrorEnabled(logClass, logSource) + def isWarningEnabled(logClass: Class[_], logSource: String, marker: LogMarker): Boolean = + isWarningEnabled(logClass, logSource) + def isInfoEnabled(logClass: Class[_], logSource: String, marker: LogMarker): Boolean = + isInfoEnabled(logClass, logSource) + def isDebugEnabled(logClass: Class[_], logSource: String, marker: LogMarker): Boolean = + isDebugEnabled(logClass, logSource) } object LoggingFilterWithMarker { @@ -1362,10 +1502,14 @@ object LoggingFilterWithMarker { } class LoggingFilterWithMarkerWrapper(loggingFilter: LoggingFilter) extends LoggingFilterWithMarker { - override def isErrorEnabled(logClass: Class[_], logSource: String): Boolean = loggingFilter.isErrorEnabled(logClass, logSource) - override def isWarningEnabled(logClass: Class[_], logSource: String): Boolean = loggingFilter.isWarningEnabled(logClass, logSource) - override def isInfoEnabled(logClass: Class[_], logSource: String): Boolean = loggingFilter.isInfoEnabled(logClass, logSource) - override def isDebugEnabled(logClass: Class[_], logSource: String): Boolean = loggingFilter.isDebugEnabled(logClass, logSource) + override def isErrorEnabled(logClass: Class[_], logSource: String): Boolean = + loggingFilter.isErrorEnabled(logClass, logSource) + override def isWarningEnabled(logClass: Class[_], logSource: String): Boolean = + loggingFilter.isWarningEnabled(logClass, logSource) + override def isInfoEnabled(logClass: Class[_], logSource: String): Boolean = + loggingFilter.isInfoEnabled(logClass, logSource) + override def isDebugEnabled(logClass: Class[_], logSource: String): Boolean = + loggingFilter.isDebugEnabled(logClass, logSource) } /** @@ -1454,10 +1598,12 @@ trait DiagnosticLoggingAdapter extends LoggingAdapter { @DoNotInherit class LogMarker(val name: String) object LogMarker { + /** The Marker is internally transferred via 
MDC using using this key */ private[akka] final val MDCKey = "marker" def apply(name: String): LogMarker = new LogMarker(name) + /** Java API */ def create(name: String): LogMarker = apply(name) @@ -1476,12 +1622,11 @@ object LogMarker { /** * [[LoggingAdapter]] extension which adds Marker support. */ -class MarkerLoggingAdapter( - override val bus: LoggingBus, - override val logSource: String, - override val logClass: Class[_], - loggingFilter: LoggingFilter) - extends BusLogging(bus, logSource, logClass, loggingFilter) { +class MarkerLoggingAdapter(override val bus: LoggingBus, + override val logSource: String, + override val logClass: Class[_], + loggingFilter: LoggingFilter) + extends BusLogging(bus, logSource, logClass, loggingFilter) { // TODO when breaking binary compatibility, these marker methods should become baked into LoggingAdapter itself // For backwards compatibility, and when LoggingAdapter is created without direct @@ -1521,7 +1666,8 @@ class MarkerLoggingAdapter( * @see [[LoggingAdapter]] */ def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any): Unit = - if (isErrorEnabled(marker)) bus.publish(Error(cause, logSource, logClass, format(template, arg1, arg2), mdc, marker)) + if (isErrorEnabled(marker)) + bus.publish(Error(cause, logSource, logClass, format(template, arg1, arg2), mdc, marker)) /** * Message template with 3 replacement arguments. @@ -1529,7 +1675,8 @@ class MarkerLoggingAdapter( * @see [[LoggingAdapter]] */ def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = - if (isErrorEnabled(marker)) bus.publish(Error(cause, logSource, logClass, format(template, arg1, arg2, arg3), mdc, marker)) + if (isErrorEnabled(marker)) + bus.publish(Error(cause, logSource, logClass, format(template, arg1, arg2, arg3), mdc, marker)) /** * Message template with 4 replacement arguments. 
@@ -1537,7 +1684,8 @@ class MarkerLoggingAdapter( * @see [[LoggingAdapter]] */ def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = - if (isErrorEnabled(marker)) bus.publish(Error(cause, logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker)) + if (isErrorEnabled(marker)) + bus.publish(Error(cause, logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker)) /** * Log message at error level, without providing the exception that caused the error. @@ -1580,7 +1728,8 @@ class MarkerLoggingAdapter( * @see [[LoggingAdapter]] */ def error(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = - if (isErrorEnabled(marker)) bus.publish(Error(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker)) + if (isErrorEnabled(marker)) + bus.publish(Error(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker)) /** * Log message at warning level. @@ -1615,7 +1764,8 @@ class MarkerLoggingAdapter( * @see [[LoggingAdapter]] */ def warning(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = - if (isWarningEnabled(marker)) bus.publish(Warning(logSource, logClass, format(template, arg1, arg2, arg3), mdc, marker)) + if (isWarningEnabled(marker)) + bus.publish(Warning(logSource, logClass, format(template, arg1, arg2, arg3), mdc, marker)) /** * Message template with 4 replacement arguments. @@ -1623,7 +1773,8 @@ class MarkerLoggingAdapter( * @see [[LoggingAdapter]] */ def warning(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = - if (isWarningEnabled(marker)) bus.publish(Warning(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker)) + if (isWarningEnabled(marker)) + bus.publish(Warning(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker)) /** * Log message at info level. 
@@ -1666,7 +1817,8 @@ class MarkerLoggingAdapter( * @see [[LoggingAdapter]] */ def info(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = - if (isInfoEnabled(marker)) bus.publish(Info(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker)) + if (isInfoEnabled(marker)) + bus.publish(Info(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker)) /** * Log message at debug level. @@ -1709,28 +1861,29 @@ class MarkerLoggingAdapter( * @see [[LoggingAdapter]] */ def debug(marker: LogMarker, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = - if (isDebugEnabled(marker)) bus.publish(Debug(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker)) + if (isDebugEnabled(marker)) + bus.publish(Debug(logSource, logClass, format(template, arg1, arg2, arg3, arg4), mdc, marker)) // Copy of LoggingAdapter.format1 due to binary compatibility restrictions private def format1(t: String, arg: Any): String = arg match { case a: Array[_] if !a.getClass.getComponentType.isPrimitive => format(t, a: _*) - case a: Array[_] => format(t, a.map(_.asInstanceOf[AnyRef]): _*) - case x => format(t, x) + case a: Array[_] => format(t, a.map(_.asInstanceOf[AnyRef]): _*) + case x => format(t, x) } } -final class DiagnosticMarkerBusLoggingAdapter( - override val bus: LoggingBus, - override val logSource: String, - override val logClass: Class[_], - loggingFilter: LoggingFilter) - extends MarkerLoggingAdapter(bus, logSource, logClass, loggingFilter) with DiagnosticLoggingAdapter +final class DiagnosticMarkerBusLoggingAdapter(override val bus: LoggingBus, + override val logSource: String, + override val logClass: Class[_], + loggingFilter: LoggingFilter) + extends MarkerLoggingAdapter(bus, logSource, logClass, loggingFilter) + with DiagnosticLoggingAdapter /** * [[akka.event.LoggingAdapter]] that publishes [[akka.event.Logging.LogEvent]] to event stream. 
*/ class BusLogging(val bus: LoggingBus, val logSource: String, val logClass: Class[_], loggingFilter: LoggingFilter) - extends LoggingAdapter { + extends LoggingAdapter { // For backwards compatibility, and when LoggingAdapter is created without direct // association to an ActorSystem @@ -1778,6 +1931,7 @@ object NoLogging extends LoggingAdapter { final protected override def notifyInfo(message: String): Unit = () final protected override def notifyDebug(message: String): Unit = () } + /** * NoLogging is a MarkerLoggingAdapter that does absolutely nothing – no logging at all. */ @@ -1802,8 +1956,19 @@ object NoMarkerLogging extends MarkerLoggingAdapter(null, "source", classOf[Stri final override def error(marker: LogMarker, cause: Throwable, message: String): Unit = () final override def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any): Unit = () final override def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any): Unit = () - final override def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any): Unit = () - final override def error(marker: LogMarker, cause: Throwable, template: String, arg1: Any, arg2: Any, arg3: Any, arg4: Any): Unit = () + final override def error(marker: LogMarker, + cause: Throwable, + template: String, + arg1: Any, + arg2: Any, + arg3: Any): Unit = () + final override def error(marker: LogMarker, + cause: Throwable, + template: String, + arg1: Any, + arg2: Any, + arg3: Any, + arg4: Any): Unit = () final override def error(marker: LogMarker, message: String): Unit = () final override def error(marker: LogMarker, template: String, arg1: Any): Unit = () final override def error(marker: LogMarker, template: String, arg1: Any, arg2: Any): Unit = () diff --git a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala index 5096353b86..96b32de8dd 100644 --- 
a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala +++ b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala @@ -48,8 +48,9 @@ object LoggingReceive { * Java API: compatible with lambda expressions */ def create(r: AbstractActor.Receive, context: AbstractActor.ActorContext): AbstractActor.Receive = - new AbstractActor.Receive(apply(r.onMessage.asInstanceOf[PartialFunction[Any, Unit]])(context) - .asInstanceOf[PartialFunction[Any, BoxedUnit]]) + new AbstractActor.Receive( + apply(r.onMessage.asInstanceOf[PartialFunction[Any, Unit]])(context) + .asInstanceOf[PartialFunction[Any, BoxedUnit]]) /** * Create a decorated logger which will append `" in state " + label` to each message it logs. @@ -62,22 +63,28 @@ object LoggingReceive { /** * Create a decorated logger which will append `" in state " + label` to each message it logs. */ - def withLabel(label: String)(r: Receive)(implicit context: ActorContext): Receive = withLabel(label, Logging.DebugLevel)(r) + def withLabel(label: String)(r: Receive)(implicit context: ActorContext): Receive = + withLabel(label, Logging.DebugLevel)(r) } /** * This decorator adds invocation logging to a Receive function. 
* @param source the log source, if not defined the actor of the context will be used */ -class LoggingReceive(source: Option[AnyRef], r: Receive, label: Option[String], logLevel: LogLevel)(implicit context: ActorContext) extends Receive { - def this(source: Option[AnyRef], r: Receive, label: Option[String])(implicit context: ActorContext) = this(source, r, label, Logging.DebugLevel) - def this(source: Option[AnyRef], r: Receive)(implicit context: ActorContext) = this(source, r, None, Logging.DebugLevel) +class LoggingReceive(source: Option[AnyRef], r: Receive, label: Option[String], logLevel: LogLevel)( + implicit context: ActorContext) + extends Receive { + def this(source: Option[AnyRef], r: Receive, label: Option[String])(implicit context: ActorContext) = + this(source, r, label, Logging.DebugLevel) + def this(source: Option[AnyRef], r: Receive)(implicit context: ActorContext) = + this(source, r, None, Logging.DebugLevel) def isDefinedAt(o: Any): Boolean = { val handled = r.isDefinedAt(o) if (context.system.eventStream.logLevel >= logLevel) { - val src = source getOrElse context.asInstanceOf[ActorCell].actor + val src = source.getOrElse(context.asInstanceOf[ActorCell].actor) val (str, clazz) = LogSource.fromAnyRef(src) - val message = "received " + (if (handled) "handled" else "unhandled") + " message " + o + " from " + context.sender() + + val message = "received " + (if (handled) "handled" else "unhandled") + " message " + o + " from " + context + .sender() + (label match { case Some(l) => " in state " + l case _ => "" diff --git a/akka-actor/src/main/scala/akka/event/japi/EventBusJavaAPI.scala b/akka-actor/src/main/scala/akka/event/japi/EventBusJavaAPI.scala index 9c7ad0414a..b395b6be1e 100644 --- a/akka-actor/src/main/scala/akka/event/japi/EventBusJavaAPI.scala +++ b/akka-actor/src/main/scala/akka/event/japi/EventBusJavaAPI.scala @@ -5,7 +5,7 @@ package akka.event.japi import akka.util.Subclassification -import akka.actor.{ ActorSystem, ActorRef } +import 
akka.actor.{ ActorRef, ActorSystem } /** * Java API: See documentation for [[akka.event.EventBus]] @@ -191,7 +191,8 @@ abstract class ScanningEventBus[E, S, C] extends EventBus[E, S, C] { * E is the Event type */ abstract class ManagedActorEventBus[E](system: ActorSystem) extends EventBus[E, ActorRef, ActorRef] { - private val bus = new akka.event.ActorEventBus with akka.event.ManagedActorClassification with akka.event.ActorClassifier { + private val bus = new akka.event.ActorEventBus with akka.event.ManagedActorClassification + with akka.event.ActorClassifier { type Event = E override val system = ManagedActorEventBus.this.system @@ -217,4 +218,3 @@ abstract class ManagedActorEventBus[E](system: ActorSystem) extends EventBus[E, override def unsubscribe(subscriber: ActorRef): Unit = bus.unsubscribe(subscriber) override def publish(event: E): Unit = bus.publish(event) } - diff --git a/akka-actor/src/main/scala/akka/event/jul/JavaLogger.scala b/akka-actor/src/main/scala/akka/event/jul/JavaLogger.scala index bd421810d2..4fec3e322f 100644 --- a/akka-actor/src/main/scala/akka/event/jul/JavaLogger.scala +++ b/akka-actor/src/main/scala/akka/event/jul/JavaLogger.scala @@ -54,6 +54,7 @@ trait JavaLogging { * Logger is a factory for obtaining JUL Loggers */ object Logger { + /** * @param logger - which logger * @return a Logger that corresponds for the given logger name @@ -67,7 +68,7 @@ object Logger { */ def apply(logClass: Class[_], logSource: String): logging.Logger = logClass match { case c if c == classOf[DummyClassForStringSources] => apply(logSource) - case _ => logging.Logger.getLogger(logClass.getName) + case _ => logging.Logger.getLogger(logClass.getName) } /** diff --git a/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala b/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala index 24db9c684d..95af6305ab 100644 --- a/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala +++ b/akka-actor/src/main/scala/akka/io/DirectByteBufferPool.scala 
@@ -71,7 +71,8 @@ private[akka] class DirectByteBufferPool(defaultBufferSize: Int, maxPoolEntries: tryCleanDirectByteBuffer(buf) } - private final def tryCleanDirectByteBuffer(toBeDestroyed: ByteBuffer): Unit = DirectByteBufferPool.tryCleanDirectByteBuffer(toBeDestroyed) + private final def tryCleanDirectByteBuffer(toBeDestroyed: ByteBuffer): Unit = + DirectByteBufferPool.tryCleanDirectByteBuffer(toBeDestroyed) } /** INTERNAL API */ @@ -85,12 +86,10 @@ private[akka] object DirectByteBufferPool { cleanMethod.setAccessible(true) { (bb: ByteBuffer) => - try - if (bb.isDirect) { - val cleaner = cleanerMethod.invoke(bb) - cleanMethod.invoke(cleaner) - } - catch { case NonFatal(_) => /* ok, best effort attempt to cleanup failed */ } + try if (bb.isDirect) { + val cleaner = cleanerMethod.invoke(bb) + cleanMethod.invoke(cleaner) + } catch { case NonFatal(_) => /* ok, best effort attempt to cleanup failed */ } } } catch { case NonFatal(_) => _ => () /* reflection failed, use no-op fallback */ } diff --git a/akka-actor/src/main/scala/akka/io/Dns.scala b/akka-actor/src/main/scala/akka/io/Dns.scala index 14667a474c..4abf5b7be0 100644 --- a/akka-actor/src/main/scala/akka/io/Dns.scala +++ b/akka-actor/src/main/scala/akka/io/Dns.scala @@ -45,7 +45,8 @@ object Dns extends ExtensionId[DnsExt] with ExtensionIdProvider { override def consistentHashKey = name } - case class Resolved(name: String, ipv4: immutable.Seq[Inet4Address], ipv6: immutable.Seq[Inet6Address]) extends Command { + case class Resolved(name: String, ipv4: immutable.Seq[Inet4Address], ipv6: immutable.Seq[Inet6Address]) + extends Command { val addrOption: Option[InetAddress] = IpVersionSelector.getInetAddress(ipv4.headOption, ipv6.headOption) @throws[UnknownHostException] @@ -57,12 +58,16 @@ object Dns extends ExtensionId[DnsExt] with ExtensionIdProvider { object Resolved { def apply(name: String, addresses: Iterable[InetAddress]): Resolved = { - val ipv4: immutable.Seq[Inet4Address] = addresses.iterator.collect({ - 
case a: Inet4Address => a - }).to(immutable.IndexedSeq) - val ipv6: immutable.Seq[Inet6Address] = addresses.iterator.collect({ - case a: Inet6Address => a - }).to(immutable.IndexedSeq) + val ipv4: immutable.Seq[Inet4Address] = addresses.iterator + .collect({ + case a: Inet4Address => a + }) + .to(immutable.IndexedSeq) + val ipv6: immutable.Seq[Inet6Address] = addresses.iterator + .collect({ + case a: Inet6Address => a + }) + .to(immutable.IndexedSeq) Resolved(name, ipv4, ipv6) } } @@ -93,7 +98,8 @@ object Dns extends ExtensionId[DnsExt] with ExtensionIdProvider { override def get(system: ActorSystem): DnsExt = super.get(system) } -class DnsExt private[akka] (val system: ExtendedActorSystem, resolverName: String, managerName: String) extends IO.Extension { +class DnsExt private[akka] (val system: ExtendedActorSystem, resolverName: String, managerName: String) + extends IO.Extension { private val asyncDns = new ConcurrentHashMap[String, ActorRef] @@ -109,16 +115,29 @@ class DnsExt private[akka] (val system: ExtendedActorSystem, resolverName: Strin @InternalApi private[akka] def loadAsyncDns(managerName: String): ActorRef = { // This can't pass in `this` as then AsyncDns would pick up the system settings - asyncDns.computeIfAbsent(managerName, new JFunction[String, ActorRef] { - override def apply(r: String): ActorRef = { - val settings = new Settings(system.settings.config.getConfig("akka.io.dns"), "async-dns") - val provider = system.dynamicAccess.getClassFor[DnsProvider](settings.ProviderObjectName).get.newInstance() - system.log.info("Creating async dns resolver {} with manager name {}", settings.Resolver, managerName) - system.systemActorOf( - props = Props(provider.managerClass, settings.Resolver, system, settings.ResolverConfig, provider.cache, settings.Dispatcher, provider).withDeploy(Deploy.local).withDispatcher(settings.Dispatcher), - name = managerName) - } - }) + asyncDns.computeIfAbsent(managerName, + new JFunction[String, ActorRef] { + override def 
apply(r: String): ActorRef = { + val settings = + new Settings(system.settings.config.getConfig("akka.io.dns"), "async-dns") + val provider = system.dynamicAccess + .getClassFor[DnsProvider](settings.ProviderObjectName) + .get + .newInstance() + system.log.info("Creating async dns resolver {} with manager name {}", + settings.Resolver, + managerName) + system.systemActorOf( + props = Props(provider.managerClass, + settings.Resolver, + system, + settings.ResolverConfig, + provider.cache, + settings.Dispatcher, + provider).withDeploy(Deploy.local).withDispatcher(settings.Dispatcher), + name = managerName) + } + }) } /** @@ -129,9 +148,11 @@ class DnsExt private[akka] (val system: ExtendedActorSystem, resolverName: Strin * For binary compat as DnsExt constructor didn't used to have internal API on */ @InternalApi - def this(system: ExtendedActorSystem) = this(system, system.settings.config.getString("akka.io.dns.resolver"), "IO-DNS") + def this(system: ExtendedActorSystem) = + this(system, system.settings.config.getString("akka.io.dns.resolver"), "IO-DNS") class Settings private[DnsExt] (config: Config, resolverName: String) { + /** * Load the default resolver */ @@ -149,7 +170,8 @@ class DnsExt private[akka] (val system: ExtendedActorSystem, resolverName: Strin val Settings: Settings = new Settings(system.settings.config.getConfig("akka.io.dns"), resolverName) // System DNS resolver - val provider: DnsProvider = system.dynamicAccess.getClassFor[DnsProvider](Settings.ProviderObjectName).get.newInstance() + val provider: DnsProvider = + system.dynamicAccess.getClassFor[DnsProvider](Settings.ProviderObjectName).get.newInstance() // System DNS cache val cache: Dns = provider.cache @@ -169,7 +191,7 @@ class DnsExt private[akka] (val system: ExtendedActorSystem, resolverName: Strin object IpVersionSelector { def getInetAddress(ipv4: Option[Inet4Address], ipv6: Option[Inet6Address]): Option[InetAddress] = System.getProperty("java.net.preferIPv6Addresses") match { - case 
"true" => ipv6 orElse ipv4 - case _ => ipv4 orElse ipv6 + case "true" => ipv6.orElse(ipv4) + case _ => ipv4.orElse(ipv6) } } diff --git a/akka-actor/src/main/scala/akka/io/Inet.scala b/akka-actor/src/main/scala/akka/io/Inet.scala index 6a53d81c15..c2df6f3640 100644 --- a/akka-actor/src/main/scala/akka/io/Inet.scala +++ b/akka-actor/src/main/scala/akka/io/Inet.scala @@ -48,6 +48,7 @@ object Inet { abstract class AbstractSocketOption extends SocketOption trait SocketOptionV2 extends SocketOption { + /** * Action to be taken for this option after connect returned (i.e. on * the slave socket for servers). @@ -145,6 +146,7 @@ object Inet { } trait SoForwarders { + /** * [[akka.io.Inet.SocketOption]] to set the SO_RCVBUF option * @@ -178,6 +180,7 @@ object Inet { trait SoJavaFactories { import SO._ + /** * [[akka.io.Inet.SocketOption]] to set the SO_RCVBUF option * diff --git a/akka-actor/src/main/scala/akka/io/InetAddressDnsResolver.scala b/akka-actor/src/main/scala/akka/io/InetAddressDnsResolver.scala index 124ca825c5..6c9f3ef7b6 100644 --- a/akka-actor/src/main/scala/akka/io/InetAddressDnsResolver.scala +++ b/akka-actor/src/main/scala/akka/io/InetAddressDnsResolver.scala @@ -33,26 +33,30 @@ class InetAddressDnsResolver(cache: SimpleDnsCache, config: Config) extends Acto private final val DefaultPositive = Ttl.fromPositive(30.seconds) private lazy val defaultCachePolicy: CachePolicy = - Option(Security.getProperty(CachePolicyProp)).filter(_ != "") - .orElse(Option(System.getProperty(CachePolicyPropFallback))).filter(_ != "") + Option(Security.getProperty(CachePolicyProp)) + .filter(_ != "") + .orElse(Option(System.getProperty(CachePolicyPropFallback))) + .filter(_ != "") .map(x => Try(x.toInt)) match { - case None => DefaultPositive - case Some(Success(n)) => parsePolicy(n) - case Some(Failure(_)) => - log.warning("Caching TTL misconfigured. 
Using default value {}.", DefaultPositive) - DefaultPositive - } + case None => DefaultPositive + case Some(Success(n)) => parsePolicy(n) + case Some(Failure(_)) => + log.warning("Caching TTL misconfigured. Using default value {}.", DefaultPositive) + DefaultPositive + } private lazy val defaultNegativeCachePolicy: CachePolicy = - Option(Security.getProperty(NegativeCachePolicyProp)).filter(_ != "") - .orElse(Option(System.getProperty(NegativeCachePolicyPropFallback))).filter(_ != "") + Option(Security.getProperty(NegativeCachePolicyProp)) + .filter(_ != "") + .orElse(Option(System.getProperty(NegativeCachePolicyPropFallback))) + .filter(_ != "") .map(x => Try(x.toInt)) match { - case None => Never - case Some(Success(n)) => parsePolicy(n) - case Some(Failure(_)) => - log.warning("Negative caching TTL misconfigured. Using default value {}.", Never) - Never - } + case None => Never + case Some(Success(n)) => parsePolicy(n) + case Some(Failure(_)) => + log.warning("Negative caching TTL misconfigured. 
Using default value {}.", Never) + Never + } private def parsePolicy(n: Int): CachePolicy = { n match { diff --git a/akka-actor/src/main/scala/akka/io/SelectionHandler.scala b/akka-actor/src/main/scala/akka/io/SelectionHandler.scala index 8605d7540c..3def2c3c3f 100644 --- a/akka-actor/src/main/scala/akka/io/SelectionHandler.scala +++ b/akka-actor/src/main/scala/akka/io/SelectionHandler.scala @@ -31,10 +31,10 @@ abstract class SelectionHandlerSettings(config: Config) { val MaxChannels: Int = getString("max-channels") match { case "unlimited" => -1 - case _ => getInt("max-channels") requiring (_ > 0, "max-channels must be > 0 or 'unlimited'") + case _ => getInt("max-channels").requiring(_ > 0, "max-channels must be > 0 or 'unlimited'") } - val SelectorAssociationRetries: Int = getInt("selector-association-retries") requiring ( - _ >= 0, "selector-association-retries must be >= 0") + val SelectorAssociationRetries: Int = + getInt("selector-association-retries").requiring(_ >= 0, "selector-association-retries must be >= 0") val SelectorDispatcher: String = getString("selector-dispatcher") val WorkerDispatcher: String = getString("worker-dispatcher") @@ -47,6 +47,7 @@ abstract class SelectionHandlerSettings(config: Config) { * Interface behind which we hide our selector management logic from the connection actors */ private[io] trait ChannelRegistry { + /** * Registers the given channel with the selector, creates a ChannelRegistration instance for it * and dispatches it back to the channelActor calling this `register` @@ -82,22 +83,28 @@ private[io] object SelectionHandler { def failureMessage: Any } - final case class WorkerForCommand(apiCommand: HasFailureMessage, commander: ActorRef, childProps: ChannelRegistry => Props) - extends NoSerializationVerificationNeeded + final case class WorkerForCommand(apiCommand: HasFailureMessage, + commander: ActorRef, + childProps: ChannelRegistry => Props) + extends NoSerializationVerificationNeeded - final case class 
Retry(command: WorkerForCommand, retriesLeft: Int) extends NoSerializationVerificationNeeded { require(retriesLeft >= 0) } + final case class Retry(command: WorkerForCommand, retriesLeft: Int) extends NoSerializationVerificationNeeded { + require(retriesLeft >= 0) + } case object ChannelConnectable case object ChannelAcceptable case object ChannelReadable extends DeadLetterSuppression case object ChannelWritable extends DeadLetterSuppression - private[io] abstract class SelectorBasedManager(selectorSettings: SelectionHandlerSettings, nrOfSelectors: Int) extends Actor { + private[io] abstract class SelectorBasedManager(selectorSettings: SelectionHandlerSettings, nrOfSelectors: Int) + extends Actor { override def supervisorStrategy = connectionSupervisorStrategy val selectorPool = context.actorOf( - props = RandomPool(nrOfSelectors).props(Props(classOf[SelectionHandler], selectorSettings)).withDeploy(Deploy.local), + props = + RandomPool(nrOfSelectors).props(Props(classOf[SelectionHandler], selectorSettings)).withDeploy(Deploy.local), name = "selectors") final def workerForCommandHandler(pf: PartialFunction[HasFailureMessage, ChannelRegistry => Props]): Receive = { @@ -111,7 +118,9 @@ private[io] object SelectionHandler { */ private[io] final val connectionSupervisorStrategy: SupervisorStrategy = new OneForOneStrategy()(SupervisorStrategy.stoppingStrategy.decider) { - override def logFailure(context: ActorContext, child: ActorRef, cause: Throwable, + override def logFailure(context: ActorContext, + child: ActorRef, + cause: Throwable, decision: SupervisorStrategy.Directive): Unit = if (cause.isInstanceOf[DeathPactException]) { try context.system.eventStream.publish { @@ -120,7 +129,10 @@ private[io] object SelectionHandler { } else super.logFailure(context, child, cause, decision) } - private class ChannelRegistryImpl(executionContext: ExecutionContext, settings: SelectionHandlerSettings, log: LoggingAdapter) extends ChannelRegistry { + private class 
ChannelRegistryImpl(executionContext: ExecutionContext, + settings: SelectionHandlerSettings, + log: LoggingAdapter) + extends ChannelRegistry { private[this] val selector = SelectorProvider.provider.openSelector private[this] val wakeUp = new AtomicBoolean(false) @@ -171,20 +183,21 @@ private[io] object SelectionHandler { if (settings.TraceLogging) log.debug(s"Scheduling Registering channel $channel with initialOps $initialOps") execute { new Task { - def tryRun(): Unit = try { - if (settings.TraceLogging) log.debug(s"Registering channel $channel with initialOps $initialOps") - val key = channel.register(selector, initialOps, channelActor) - channelActor ! new ChannelRegistration { - def enableInterest(ops: Int): Unit = enableInterestOps(key, ops) + def tryRun(): Unit = + try { + if (settings.TraceLogging) log.debug(s"Registering channel $channel with initialOps $initialOps") + val key = channel.register(selector, initialOps, channelActor) + channelActor ! new ChannelRegistration { + def enableInterest(ops: Int): Unit = enableInterestOps(key, ops) - def disableInterest(ops: Int): Unit = disableInterestOps(key, ops) + def disableInterest(ops: Int): Unit = disableInterestOps(key, ops) - def cancelAndClose(andThen: () => Unit): Unit = cancelKeyAndClose(key, andThen) + def cancelAndClose(andThen: () => Unit): Unit = cancelKeyAndClose(key, andThen) + } + } catch { + case _: ClosedChannelException => + // ignore, might happen if a connection is closed in the same moment as an interest is registered } - } catch { - case _: ClosedChannelException => - // ignore, might happen if a connection is closed in the same moment as an interest is registered - } } } } @@ -195,7 +208,8 @@ private[io] object SelectionHandler { def tryRun(): Unit = { // thorough 'close' of the Selector @tailrec def closeNextChannel(it: JIterator[SelectionKey]): Unit = if (it.hasNext) { - try it.next().channel.close() catch { case NonFatal(e) => log.debug("Error closing channel: {}", e) } + try 
it.next().channel.close() + catch { case NonFatal(e) => log.debug("Error closing channel: {}", e) } closeNextChannel(it) } try closeNextChannel(selector.keys.iterator) @@ -276,8 +290,10 @@ private[io] object SelectionHandler { } } -private[io] class SelectionHandler(settings: SelectionHandlerSettings) extends Actor with ActorLogging - with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[io] class SelectionHandler(settings: SelectionHandlerSettings) + extends Actor + with ActorLogging + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import SelectionHandler._ import settings._ @@ -289,13 +305,13 @@ private[io] class SelectionHandler(settings: SelectionHandlerSettings) extends A } def receive: Receive = { - case cmd: WorkerForCommand => spawnChildWithCapacityProtection(cmd, SelectorAssociationRetries) + case cmd: WorkerForCommand => spawnChildWithCapacityProtection(cmd, SelectorAssociationRetries) case Retry(cmd, retriesLeft) => spawnChildWithCapacityProtection(cmd, retriesLeft) // since our ActorRef is never exposed to the user and we are only assigning watches to our // children all incoming `Terminated` events must be for a child of ours - case _: Terminated => childCount -= 1 + case _: Terminated => childCount -= 1 } override def postStop(): Unit = registry.shutdown() @@ -307,11 +323,14 @@ private[io] class SelectionHandler(settings: SelectionHandlerSettings) extends A case _: Exception => SupervisorStrategy.Stop } new OneForOneStrategy()(stoppingDecider) { - override def logFailure(context: ActorContext, child: ActorRef, cause: Throwable, + override def logFailure(context: ActorContext, + child: ActorRef, + cause: Throwable, decision: SupervisorStrategy.Directive): Unit = try { val logMessage = cause match { - case e: ActorInitializationException if (e.getCause ne null) && (e.getCause.getMessage ne null) => e.getCause.getMessage + case e: ActorInitializationException if (e.getCause ne null) && (e.getCause.getMessage ne null) => + 
e.getCause.getMessage case e: ActorInitializationException if e.getCause ne null => e.getCause match { case ie: java.lang.reflect.InvocationTargetException => ie.getTargetException.toString @@ -319,8 +338,7 @@ private[io] class SelectionHandler(settings: SelectionHandlerSettings) extends A } case e => e.getMessage } - context.system.eventStream.publish( - Logging.Debug(child.path.toString, classOf[SelectionHandler], logMessage)) + context.system.eventStream.publish(Logging.Debug(child.path.toString, classOf[SelectionHandler], logMessage)) } catch { case NonFatal(_) => } } } @@ -330,13 +348,15 @@ private[io] class SelectionHandler(settings: SelectionHandlerSettings) extends A if (MaxChannelsPerSelector == -1 || childCount < MaxChannelsPerSelector) { val newName = sequenceNumber.toString sequenceNumber += 1 - val child = context.actorOf(props = cmd.childProps(registry).withDispatcher(WorkerDispatcher).withDeploy(Deploy.local), name = newName) + val child = context.actorOf(props = + cmd.childProps(registry).withDispatcher(WorkerDispatcher).withDeploy(Deploy.local), + name = newName) childCount += 1 if (MaxChannelsPerSelector > 0) context.watch(child) // we don't need to watch if we aren't limited } else { if (retriesLeft >= 1) { log.debug("Rejecting [{}] with [{}] retries left, retrying...", cmd, retriesLeft) - context.parent forward Retry(cmd, retriesLeft - 1) + context.parent.forward(Retry(cmd, retriesLeft - 1)) } else { log.warning("Rejecting [{}] with no retries left, aborting...", cmd) cmd.commander ! cmd.apiCommand.failureMessage // I can't do it, Captain! 
diff --git a/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala b/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala index 108235641a..2a5e6d4d31 100644 --- a/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala +++ b/akka-actor/src/main/scala/akka/io/SimpleDnsCache.scala @@ -20,9 +20,8 @@ private[io] trait PeriodicCacheCleanup { class SimpleDnsCache extends Dns with PeriodicCacheCleanup { import SimpleDnsCache._ - private val cache = new AtomicReference(new Cache[String, Dns.Resolved]( - immutable.SortedSet()(expiryEntryOrdering[String]()), - Map(), () => clock)) + private val cache = new AtomicReference( + new Cache[String, Dns.Resolved](immutable.SortedSet()(expiryEntryOrdering[String]()), Map(), () => clock)) private val nanoBase = System.nanoTime() @@ -57,7 +56,9 @@ object SimpleDnsCache { * INTERNAL API */ @InternalApi - private[io] class Cache[K, V](queue: immutable.SortedSet[ExpiryEntry[K]], cache: immutable.Map[K, CacheEntry[V]], clock: () => Long) { + private[io] class Cache[K, V](queue: immutable.SortedSet[ExpiryEntry[K]], + cache: immutable.Map[K, CacheEntry[V]], + clock: () => Long) { def get(name: K): Option[V] = { for { e <- cache.get(name) @@ -72,10 +73,7 @@ object SimpleDnsCache { case Ttl(ttl) => clock() + ttl.toMillis } - new Cache[K, V]( - queue + new ExpiryEntry[K](name, until), - cache + (name -> CacheEntry(answer, until)), - clock) + new Cache[K, V](queue + new ExpiryEntry[K](name, until), cache + (name -> CacheEntry(answer, until)), clock) } def cleanup(): Cache[K, V] = { diff --git a/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala b/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala index cd692b9061..60dbd1e40c 100644 --- a/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala +++ b/akka-actor/src/main/scala/akka/io/SimpleDnsManager.scala @@ -13,12 +13,19 @@ import akka.routing.FromConfig import scala.concurrent.duration.Duration -class SimpleDnsManager(val ext: DnsExt) extends Actor with 
RequiresMessageQueue[UnboundedMessageQueueSemantics] with ActorLogging { +class SimpleDnsManager(val ext: DnsExt) + extends Actor + with RequiresMessageQueue[UnboundedMessageQueueSemantics] + with ActorLogging { import context._ - private val resolver = actorOf(FromConfig.props(Props(ext.provider.actorClass, ext.cache, ext.Settings.ResolverConfig) - .withDeploy(Deploy.local).withDispatcher(ext.Settings.Dispatcher)), ext.Settings.Resolver) + private val resolver = actorOf( + FromConfig.props( + Props(ext.provider.actorClass, ext.cache, ext.Settings.ResolverConfig) + .withDeploy(Deploy.local) + .withDispatcher(ext.Settings.Dispatcher)), + ext.Settings.Resolver) private val inetDnsEnabled = ext.provider.actorClass == classOf[InetAddressDnsResolver] @@ -27,8 +34,9 @@ class SimpleDnsManager(val ext: DnsExt) extends Actor with RequiresMessageQueue[ case _ => None } - private val cleanupTimer = cacheCleanup map { _ => - val interval = Duration(ext.Settings.ResolverConfig.getDuration("cache-cleanup-interval", TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS) + private val cleanupTimer = cacheCleanup.map { _ => + val interval = Duration(ext.Settings.ResolverConfig.getDuration("cache-cleanup-interval", TimeUnit.MILLISECONDS), + TimeUnit.MILLISECONDS) system.scheduler.schedule(interval, interval, self, SimpleDnsManager.CacheCleanup) } @@ -44,8 +52,8 @@ class SimpleDnsManager(val ext: DnsExt) extends Actor with RequiresMessageQueue[ if (inetDnsEnabled) { log.error( "Message of [akka.io.dns.DnsProtocol.Protocol] received ({}) while inet-address dns was configured. Dropping DNS resolve request. " + - "Only use [akka.io.dns.DnsProtocol] to create resolution requests for the Async DNS resolver (enabled by `akka.io.dns = async-dns`). " + - "For the classic (now used) DNS resolver use [akka.io.Dns] messages.", + "Only use [akka.io.dns.DnsProtocol] to create resolution requests for the Async DNS resolver (enabled by `akka.io.dns = async-dns`). 
" + + "For the classic (now used) DNS resolver use [akka.io.Dns] messages.", Logging.simpleName(m)) } else resolver.forward(m) } diff --git a/akka-actor/src/main/scala/akka/io/Tcp.scala b/akka-actor/src/main/scala/akka/io/Tcp.scala index 38030347d5..87f546ec8b 100644 --- a/akka-actor/src/main/scala/akka/io/Tcp.scala +++ b/akka-actor/src/main/scala/akka/io/Tcp.scala @@ -116,12 +116,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * @param localAddress optionally specifies a specific address to bind to * @param options Please refer to the `Tcp.SO` object for a list of all supported options. */ - final case class Connect( - remoteAddress: InetSocketAddress, - localAddress: Option[InetSocketAddress] = None, - options: immutable.Traversable[SocketOption] = Nil, - timeout: Option[FiniteDuration] = None, - pullMode: Boolean = false) extends Command + final case class Connect(remoteAddress: InetSocketAddress, + localAddress: Option[InetSocketAddress] = None, + options: immutable.Traversable[SocketOption] = Nil, + timeout: Option[FiniteDuration] = None, + pullMode: Boolean = false) + extends Command /** * The Bind message is send to the TCP manager actor, which is obtained via @@ -142,12 +142,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * * @param options Please refer to the `Tcp.SO` object for a list of all supported options. 
*/ - final case class Bind( - handler: ActorRef, - localAddress: InetSocketAddress, - backlog: Int = 100, - options: immutable.Traversable[SocketOption] = Nil, - pullMode: Boolean = false) extends Command + final case class Bind(handler: ActorRef, + localAddress: InetSocketAddress, + backlog: Int = 100, + options: immutable.Traversable[SocketOption] = Nil, + pullMode: Boolean = false) + extends Command /** * This message must be sent to a TCP connection actor after receiving the @@ -167,7 +167,8 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * notification until `ResumeWriting` is received. This can * be used to implement NACK-based write backpressure. */ - final case class Register(handler: ActorRef, keepOpenOnPeerClosed: Boolean = false, useResumeWriting: Boolean = true) extends Command + final case class Register(handler: ActorRef, keepOpenOnPeerClosed: Boolean = false, useResumeWriting: Boolean = true) + extends Command /** * In order to close down a listening socket, send this message to that socket’s @@ -180,6 +181,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * Common interface for all commands which aim to close down an open connection. */ sealed trait CloseCommand extends Command with DeadLetterSuppression { + /** * The corresponding event which is sent as an acknowledgment once the * close operation is finished. @@ -194,6 +196,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * message. */ case object Close extends CloseCommand { + /** * The corresponding event which is sent as an acknowledgment once the * close operation is finished. @@ -208,6 +211,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * once the socket is closed using a `ConfirmedClosed` message. */ case object ConfirmedClose extends CloseCommand { + /** * The corresponding event which is sent as an acknowledgment once the * close operation is finished. 
@@ -223,6 +227,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * `Aborted` message. */ case object Abort extends CloseCommand { + /** * The corresponding event which is sent as an acknowledgment once the * close operation is finished. @@ -248,6 +253,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * Common interface for all write commands. */ sealed abstract class WriteCommand extends Command { + /** * Prepends this command with another `Write` or `WriteFile` to form * a `CompoundWrite`. @@ -280,6 +286,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { } object WriteCommand { + /** * Combines the given number of write commands into one atomic `WriteCommand`. */ @@ -326,6 +333,7 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { */ final case class Write(data: ByteString, ack: Event) extends SimpleWriteCommand object Write { + /** * The empty Write doesn't write anything and isn't acknowledged. * It will, however, be denied and sent back with `CommandFailed` if the @@ -373,8 +381,9 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * If the sub commands contain `ack` requests they will be honored as soon as the * respective write has been written completely. 
*/ - final case class CompoundWrite(override val head: SimpleWriteCommand, tailCommand: WriteCommand) extends WriteCommand - with immutable.Iterable[SimpleWriteCommand] { + final case class CompoundWrite(override val head: SimpleWriteCommand, tailCommand: WriteCommand) + extends WriteCommand + with immutable.Iterable[SimpleWriteCommand] { def iterator: Iterator[SimpleWriteCommand] = new Iterator[SimpleWriteCommand] { @@ -462,17 +471,20 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { } @InternalApi - private[akka] def causedByString = _cause.map(t => { - val msg = - if (t.getCause == null) - t.getMessage - else if (t.getCause.getCause == null) - s"${t.getMessage}, caused by: ${t.getCause}" - else - s"${t.getMessage}, caused by: ${t.getCause}, caused by: ${t.getCause.getCause}" + private[akka] def causedByString = + _cause + .map(t => { + val msg = + if (t.getCause == null) + t.getMessage + else if (t.getCause.getCause == null) + s"${t.getMessage}, caused by: ${t.getCause}" + else + s"${t.getMessage}, caused by: ${t.getCause}, caused by: ${t.getCause.getCause}" - s" because of ${t.getClass.getName}: $msg" - }).getOrElse("") + s" because of ${t.getClass.getName}: $msg" + }) + .getOrElse("") override def toString: String = s"CommandFailed($cmd)$causedByString" } @@ -506,41 +518,49 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { * has been closed or half-closed. */ sealed trait ConnectionClosed extends Event with DeadLetterSuppression { + /** * `true` iff the connection has been closed in response to an `Abort` command. */ def isAborted: Boolean = false + /** * `true` iff the connection has been fully closed in response to a * `ConfirmedClose` command. */ def isConfirmed: Boolean = false + /** * `true` iff the connection has been closed by the peer; in case * `keepOpenOnPeerClosed` is in effect as per the [[Register]] command, * this connection’s reading half is now closed. 
*/ def isPeerClosed: Boolean = false + /** * `true` iff the connection has been closed due to an IO error. */ def isErrorClosed: Boolean = false + /** * If `isErrorClosed` returns true, then the error condition can be * retrieved by this method. */ def getErrorCause: String = null } + /** * The connection has been closed normally in response to a `Close` command. */ case object Closed extends ConnectionClosed + /** * The connection has been aborted in response to an `Abort` command. */ case object Aborted extends ConnectionClosed { override def isAborted = true } + /** * The connection has been half-closed by us and then half-close by the peer * in response to a `ConfirmedClose` command. @@ -548,12 +568,14 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider { case object ConfirmedClosed extends ConnectionClosed { override def isConfirmed = true } + /** * The peer has closed its writing half of the connection. */ case object PeerClosed extends ConnectionClosed { override def isPeerClosed = true } + /** * The connection has been closed due to an IO error. 
*/ @@ -570,9 +592,9 @@ class TcpExt(system: ExtendedActorSystem) extends IO.Extension { import akka.util.Helpers.ConfigOps import _config._ - val NrOfSelectors: Int = getInt("nr-of-selectors") requiring (_ > 0, "nr-of-selectors must be > 0") + val NrOfSelectors: Int = getInt("nr-of-selectors").requiring(_ > 0, "nr-of-selectors must be > 0") - val BatchAcceptLimit: Int = getInt("batch-accept-limit") requiring (_ > 0, "batch-accept-limit must be > 0") + val BatchAcceptLimit: Int = getInt("batch-accept-limit").requiring(_ > 0, "batch-accept-limit must be > 0") val DirectBufferSize: Int = getIntBytes("direct-buffer-size") val MaxDirectBufferPoolSize: Int = getInt("direct-buffer-pool-limit") val RegisterTimeout: Duration = getString("register-timeout") match { @@ -591,10 +613,11 @@ class TcpExt(system: ExtendedActorSystem) extends IO.Extension { } val MaxChannelsPerSelector: Int = if (MaxChannels == -1) -1 else math.max(MaxChannels / NrOfSelectors, 1) - val FinishConnectRetries: Int = getInt("finish-connect-retries") requiring (_ > 0, - "finish-connect-retries must be > 0") + val FinishConnectRetries: Int = + getInt("finish-connect-retries").requiring(_ > 0, "finish-connect-retries must be > 0") - val WindowsConnectionAbortWorkaroundEnabled: Boolean = getString("windows-connection-abort-workaround-enabled") match { + val WindowsConnectionAbortWorkaroundEnabled + : Boolean = getString("windows-connection-abort-workaround-enabled") match { case "auto" => Helpers.isWindows case _ => getBoolean("windows-connection-abort-workaround-enabled") } @@ -674,12 +697,12 @@ object TcpMessage { * @param timeout is the desired connection timeout, `null` means "no timeout" * @param pullMode enables pull based reading from the connection */ - def connect( - remoteAddress: InetSocketAddress, - localAddress: InetSocketAddress, - options: JIterable[SocketOption], - timeout: FiniteDuration, - pullMode: Boolean): Command = Connect(remoteAddress, Option(localAddress), options, Option(timeout), 
pullMode) + def connect(remoteAddress: InetSocketAddress, + localAddress: InetSocketAddress, + options: JIterable[SocketOption], + timeout: FiniteDuration, + pullMode: Boolean): Command = + Connect(remoteAddress, Option(localAddress), options, Option(timeout), pullMode) /** * The Connect message is sent to the TCP manager actor, which is obtained via @@ -693,12 +716,11 @@ object TcpMessage { * @param timeout is the desired connection timeout, `null` means "no timeout" * @param pullMode enables pull based reading from the connection */ - def connect( - remoteAddress: InetSocketAddress, - localAddress: InetSocketAddress, - options: JIterable[SocketOption], - timeout: java.time.Duration, - pullMode: Boolean): Command = connect(remoteAddress, localAddress, options, timeout.asScala, pullMode) + def connect(remoteAddress: InetSocketAddress, + localAddress: InetSocketAddress, + options: JIterable[SocketOption], + timeout: java.time.Duration, + pullMode: Boolean): Command = connect(remoteAddress, localAddress, options, timeout.asScala, pullMode) /** * Connect to the given `remoteAddress` without binding to a local address and without @@ -728,19 +750,17 @@ object TcpMessage { * @param pullMode enables pull based accepting and of connections and pull * based reading from the accepted connections. */ - def bind( - handler: ActorRef, - endpoint: InetSocketAddress, - backlog: Int, - options: JIterable[SocketOption], - pullMode: Boolean): Command = Bind(handler, endpoint, backlog, options, pullMode) + def bind(handler: ActorRef, + endpoint: InetSocketAddress, + backlog: Int, + options: JIterable[SocketOption], + pullMode: Boolean): Command = Bind(handler, endpoint, backlog, options, pullMode) + /** * Open a listening socket without specifying options. 
*/ - def bind( - handler: ActorRef, - endpoint: InetSocketAddress, - backlog: Int): Command = Bind(handler, endpoint, backlog, Nil) + def bind(handler: ActorRef, endpoint: InetSocketAddress, backlog: Int): Command = + Bind(handler, endpoint, backlog, Nil) /** * This message must be sent to a TCP connection actor after receiving the @@ -762,6 +782,7 @@ object TcpMessage { */ def register(handler: ActorRef, keepOpenOnPeerClosed: Boolean, useResumeWriting: Boolean): Command = Register(handler, keepOpenOnPeerClosed, useResumeWriting) + /** * The same as `register(handler, false, false)`. */ @@ -806,6 +827,7 @@ object TcpMessage { * to recognize which write failed when receiving a [[Tcp.CommandFailed]] message. */ def noAck(token: AnyRef): NoAck = NoAck(token) + /** * Default [[Tcp.NoAck]] instance which is used when no acknowledgment information is * explicitly provided. Its “token” is `null`. @@ -823,6 +845,7 @@ object TcpMessage { * a particular write has been sent by the O/S. */ def write(data: ByteString, ack: Event): Command = Write(data, ack) + /** * The same as `write(data, noAck())`. 
*/ diff --git a/akka-actor/src/main/scala/akka/io/TcpConnection.scala b/akka-actor/src/main/scala/akka/io/TcpConnection.scala index 26b7a83491..ff5a9cdaba 100644 --- a/akka-actor/src/main/scala/akka/io/TcpConnection.scala +++ b/akka-actor/src/main/scala/akka/io/TcpConnection.scala @@ -29,7 +29,9 @@ import scala.util.control.{ NoStackTrace, NonFatal } * INTERNAL API */ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketChannel, val pullMode: Boolean) - extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] { + extends Actor + with ActorLogging + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import TcpConnection._ import tcp.Settings._ @@ -73,7 +75,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha // if we are in push mode or already have resumed reading in pullMode while waiting for Register // then register OP_READ interest - if (!pullMode || ( /*pullMode && */ !readingSuspended)) resumeReading(info) + if (!pullMode || (/*pullMode && */ !readingSuspended)) resumeReading(info) context.setReceiveTimeout(Duration.Undefined) context.become(connected(info)) @@ -96,7 +98,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha /** normal connected state */ def connected(info: ConnectionInfo): Receive = - handleWriteMessages(info) orElse { + handleWriteMessages(info).orElse { case SuspendReading => suspendReading(info) case ResumeReading => resumeReading(info) case ChannelReadable => doRead(info, None) @@ -105,13 +107,14 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha /** the peer sent EOF first, but we may still want to send */ def peerSentEOF(info: ConnectionInfo): Receive = - handleWriteMessages(info) orElse { + handleWriteMessages(info).orElse { case cmd: CloseCommand => handleClose(info, Some(sender()), cmd.event) case ResumeReading => // ignore, no more data to read } /** connection is closing 
but a write has to be finished first */ - def closingWithPendingWrite(info: ConnectionInfo, closeCommander: Option[ActorRef], + def closingWithPendingWrite(info: ConnectionInfo, + closeCommander: Option[ActorRef], closedEvent: ConnectionClosed): Receive = { case SuspendReading => suspendReading(info) case ResumeReading => resumeReading(info) @@ -130,7 +133,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha case WriteFileFailed(e) => handleError(info.handler, e) // rethrow exception from dispatcher task - case Abort => handleClose(info, Some(sender()), Aborted) + case Abort => handleClose(info, Some(sender()), Aborted) } /** connection is closed on our side and we're waiting from confirmation from the other side */ @@ -199,12 +202,14 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha // AUXILIARIES and IMPLEMENTATION /** used in subclasses to start the common machinery above once a channel is connected */ - def completeConnect(registration: ChannelRegistration, commander: ActorRef, + def completeConnect(registration: ChannelRegistration, + commander: ActorRef, options: immutable.Traversable[SocketOption]): Unit = { this.registration = Some(registration) // Turn off Nagle's algorithm by default - try channel.socket.setTcpNoDelay(true) catch { + try channel.socket.setTcpNoDelay(true) + catch { case e: SocketException => // as reported in #16653 some versions of netcat (`nc -z`) doesn't allow setTcpNoDelay // continue anyway @@ -212,9 +217,8 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha } options.foreach(_.afterConnect(channel.socket)) - commander ! Connected( - channel.socket.getRemoteSocketAddress.asInstanceOf[InetSocketAddress], - channel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress]) + commander ! 
Connected(channel.socket.getRemoteSocketAddress.asInstanceOf[InetSocketAddress], + channel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress]) context.setReceiveTimeout(RegisterTimeout) @@ -253,9 +257,10 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha if (readBytes > 0) info.handler ! Received(ByteString(buffer)) readBytes match { - case `maxBufferSpace` => if (pullMode) MoreDataWaiting else innerRead(buffer, remainingLimit - maxBufferSpace) - case x if x >= 0 => AllRead - case -1 => EndOfStream + case `maxBufferSpace` => + if (pullMode) MoreDataWaiting else innerRead(buffer, remainingLimit - maxBufferSpace) + case x if x >= 0 => AllRead + case -1 => EndOfStream case _ => throw new IllegalStateException("Unexpected value returned from read: " + readBytes) } @@ -284,36 +289,36 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha if (channel.socket.isOutputShutdown) ConfirmedClosed else PeerClosed - def handleClose(info: ConnectionInfo, closeCommander: Option[ActorRef], - closedEvent: ConnectionClosed): Unit = closedEvent match { - case Aborted => - if (TraceLogging) log.debug("Got Abort command. RESETing connection.") - doCloseConnection(info.handler, closeCommander, closedEvent) - case PeerClosed if info.keepOpenOnPeerClosed => - // report that peer closed the connection - info.handler ! 
PeerClosed - // used to check if peer already closed its side later - peerClosed = true - context.become(peerSentEOF(info)) - case _ if writePending => // finish writing first - // Our registered actor is now free to terminate cleanly - unsignDeathPact() - if (TraceLogging) log.debug("Got Close command but write is still pending.") - context.become(closingWithPendingWrite(info, closeCommander, closedEvent)) - case ConfirmedClosed => // shutdown output and wait for confirmation - if (TraceLogging) log.debug("Got ConfirmedClose command, sending FIN.") - - // If peer closed first, the socket is now fully closed. - // Also, if shutdownOutput threw an exception we expect this to be an indication - // that the peer closed first or concurrently with this code running. - // also see http://bugs.sun.com/view_bug.do?bug_id=4516760 - if (peerClosed || !safeShutdownOutput()) + def handleClose(info: ConnectionInfo, closeCommander: Option[ActorRef], closedEvent: ConnectionClosed): Unit = + closedEvent match { + case Aborted => + if (TraceLogging) log.debug("Got Abort command. RESETing connection.") doCloseConnection(info.handler, closeCommander, closedEvent) - else context.become(closing(info, closeCommander)) - case _ => // close now - if (TraceLogging) log.debug("Got Close command, closing connection.") - doCloseConnection(info.handler, closeCommander, closedEvent) - } + case PeerClosed if info.keepOpenOnPeerClosed => + // report that peer closed the connection + info.handler ! 
PeerClosed + // used to check if peer already closed its side later + peerClosed = true + context.become(peerSentEOF(info)) + case _ if writePending => // finish writing first + // Our registered actor is now free to terminate cleanly + unsignDeathPact() + if (TraceLogging) log.debug("Got Close command but write is still pending.") + context.become(closingWithPendingWrite(info, closeCommander, closedEvent)) + case ConfirmedClosed => // shutdown output and wait for confirmation + if (TraceLogging) log.debug("Got ConfirmedClose command, sending FIN.") + + // If peer closed first, the socket is now fully closed. + // Also, if shutdownOutput threw an exception we expect this to be an indication + // that the peer closed first or concurrently with this code running. + // also see http://bugs.sun.com/view_bug.do?bug_id=4516760 + if (peerClosed || !safeShutdownOutput()) + doCloseConnection(info.handler, closeCommander, closedEvent) + else context.become(closing(info, closeCommander)) + case _ => // close now + if (TraceLogging) log.debug("Got Close command, closing connection.") + doCloseConnection(info.handler, closeCommander, closedEvent) + } def doCloseConnection(handler: ActorRef, closeCommander: Option[ActorRef], closedEvent: ConnectionClosed): Unit = { stopWith(CloseInformation(Set(handler) ++ closeCommander, closedEvent)) @@ -377,7 +382,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha val interestedInClose: Set[ActorRef] = (if (writePending) Set(pendingWrite.commander) else Set.empty) ++ - closedMessage.toSet[CloseInformation].flatMap(_.notificationsTo) + closedMessage.toSet[CloseInformation].flatMap(_.notificationsTo) if (channel.isOpen) // if channel is still open here, we didn't go through stopWith => unexpected actor termination prepareAbort() @@ -403,9 +408,10 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha def PendingWrite(commander: ActorRef, write: WriteCommand): PendingWrite = { 
@tailrec def create(head: WriteCommand, tail: WriteCommand = Write.empty): PendingWrite = head match { - case Write.empty => if (tail eq Write.empty) EmptyPendingWrite else create(tail) - case Write(data, ack) if data.nonEmpty => PendingBufferWrite(commander, data, ack, tail) - case WriteFile(path, offset, count, ack) => PendingWriteFile(commander, Paths.get(path), offset, count, ack, tail) + case Write.empty => if (tail eq Write.empty) EmptyPendingWrite else create(tail) + case Write(data, ack) if data.nonEmpty => PendingBufferWrite(commander, data, ack, tail) + case WriteFile(path, offset, count, ack) => + PendingWriteFile(commander, Paths.get(path), offset, count, ack, tail) case WritePath(path, offset, count, ack) => PendingWriteFile(commander, path, offset, count, ack, tail) case CompoundWrite(h, t) => create(h, t) case x @ Write(_, ack) => // empty write with either an ACK or a non-standard NoACK @@ -428,12 +434,12 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha } } - class PendingBufferWrite( - val commander: ActorRef, - remainingData: ByteString, - ack: Any, - buffer: ByteBuffer, - tail: WriteCommand) extends PendingWrite { + class PendingBufferWrite(val commander: ActorRef, + remainingData: ByteString, + ack: Any, + buffer: ByteBuffer, + tail: WriteCommand) + extends PendingWrite { def doWrite(info: ConnectionInfo): PendingWrite = { @tailrec def writeToChannel(data: ByteString): PendingWrite = { @@ -448,7 +454,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha buffer.clear() val copied = data.copyToBuffer(buffer) buffer.flip() - writeToChannel(data drop copied) + writeToChannel(data.drop(copied)) } else { if (!ack.isInstanceOf[NoAck]) commander ! 
ack @@ -466,17 +472,22 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha def release(): Unit = bufferPool.release(buffer) } - def PendingWriteFile(commander: ActorRef, filePath: Path, offset: Long, count: Long, ack: Event, + def PendingWriteFile(commander: ActorRef, + filePath: Path, + offset: Long, + count: Long, + ack: Event, tail: WriteCommand): PendingWriteFile = new PendingWriteFile(commander, FileChannel.open(filePath), offset, count, ack, tail) - class PendingWriteFile( - val commander: ActorRef, - fileChannel: FileChannel, - offset: Long, - remaining: Long, - ack: Event, - tail: WriteCommand) extends PendingWrite with Runnable { + class PendingWriteFile(val commander: ActorRef, + fileChannel: FileChannel, + offset: Long, + remaining: Long, + ack: Event, + tail: WriteCommand) + extends PendingWrite + with Runnable { def doWrite(info: ConnectionInfo): PendingWrite = { tcp.fileIoDispatcher.execute(this) @@ -495,7 +506,7 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha self ! UpdatePendingWriteAndThen(updated, TcpConnection.doNothing) } else { release() - val andThen = if (!ack.isInstanceOf[NoAck]) () => commander ! ack else doNothing + val andThen = if (!ack.isInstanceOf[NoAck])() => commander ! ack else doNothing self ! UpdatePendingWriteAndThen(PendingWrite(commander, tail), andThen) } } catch { @@ -522,15 +533,15 @@ private[io] object TcpConnection { /** * Groups required connection-related data that are only available once the connection has been fully established. 
*/ - final case class ConnectionInfo( - registration: ChannelRegistration, - handler: ActorRef, - keepOpenOnPeerClosed: Boolean, - useResumeWriting: Boolean) + final case class ConnectionInfo(registration: ChannelRegistration, + handler: ActorRef, + keepOpenOnPeerClosed: Boolean, + useResumeWriting: Boolean) // INTERNAL MESSAGES - final case class UpdatePendingWriteAndThen(remainingWrite: PendingWrite, work: () => Unit) extends NoSerializationVerificationNeeded + final case class UpdatePendingWriteAndThen(remainingWrite: PendingWrite, work: () => Unit) + extends NoSerializationVerificationNeeded final case class WriteFileFailed(e: IOException) case object Unregistered diff --git a/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala b/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala index 716b031c26..da4023a5b7 100644 --- a/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala +++ b/akka-actor/src/main/scala/akka/io/TcpIncomingConnection.scala @@ -15,14 +15,13 @@ import akka.io.Inet.SocketOption * * INTERNAL API */ -private[io] class TcpIncomingConnection( - _tcp: TcpExt, - _channel: SocketChannel, - registry: ChannelRegistry, - bindHandler: ActorRef, - options: immutable.Traversable[SocketOption], - readThrottling: Boolean) - extends TcpConnection(_tcp, _channel, readThrottling) { +private[io] class TcpIncomingConnection(_tcp: TcpExt, + _channel: SocketChannel, + registry: ChannelRegistry, + bindHandler: ActorRef, + options: immutable.Traversable[SocketOption], + readThrottling: Boolean) + extends TcpConnection(_tcp, _channel, readThrottling) { signDeathPact(bindHandler) diff --git a/akka-actor/src/main/scala/akka/io/TcpListener.scala b/akka-actor/src/main/scala/akka/io/TcpListener.scala index 57bbe809b3..a074e2f143 100644 --- a/akka-actor/src/main/scala/akka/io/TcpListener.scala +++ b/akka-actor/src/main/scala/akka/io/TcpListener.scala @@ -19,7 +19,9 @@ import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } */ 
private[io] object TcpListener { - final case class RegisterIncoming(channel: SocketChannel) extends HasFailureMessage with NoSerializationVerificationNeeded { + final case class RegisterIncoming(channel: SocketChannel) + extends HasFailureMessage + with NoSerializationVerificationNeeded { def failureMessage = FailedRegisterIncoming(channel) } @@ -30,13 +32,14 @@ private[io] object TcpListener { /** * INTERNAL API */ -private[io] class TcpListener( - selectorRouter: ActorRef, - tcp: TcpExt, - channelRegistry: ChannelRegistry, - bindCommander: ActorRef, - bind: Bind) - extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[io] class TcpListener(selectorRouter: ActorRef, + tcp: TcpExt, + channelRegistry: ChannelRegistry, + bindCommander: ActorRef, + bind: Bind) + extends Actor + with ActorLogging + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import TcpListener._ import tcp.Settings._ @@ -97,7 +100,9 @@ private[io] class TcpListener( case Unbind => log.debug("Unbinding endpoint {}", localAddress) - registration.cancelAndClose { () => self ! Unbound } + registration.cancelAndClose { () => + self ! Unbound + } context.become(unregistering(sender())) } @@ -123,7 +128,8 @@ private[io] class TcpListener( Props(classOf[TcpIncomingConnection], tcp, socketChannel, registry, bind.handler, bind.options, bind.pullMode) selectorRouter ! 
WorkerForCommand(RegisterIncoming(socketChannel), self, props) acceptAllPending(registration, limit - 1) - } else if (bind.pullMode) limit else BatchAcceptLimit + } else if (bind.pullMode) limit + else BatchAcceptLimit } override def postStop(): Unit = { diff --git a/akka-actor/src/main/scala/akka/io/TcpManager.scala b/akka-actor/src/main/scala/akka/io/TcpManager.scala index 6b70581703..c2d0825efd 100644 --- a/akka-actor/src/main/scala/akka/io/TcpManager.scala +++ b/akka-actor/src/main/scala/akka/io/TcpManager.scala @@ -45,7 +45,8 @@ import akka.actor.{ ActorLogging, Props } * */ private[io] class TcpManager(tcp: TcpExt) - extends SelectionHandler.SelectorBasedManager(tcp.Settings, tcp.Settings.NrOfSelectors) with ActorLogging { + extends SelectionHandler.SelectorBasedManager(tcp.Settings, tcp.Settings.NrOfSelectors) + with ActorLogging { def receive = workerForCommandHandler { case c: Connect => diff --git a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala index 5ecd481764..2deadf7481 100644 --- a/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala +++ b/akka-actor/src/main/scala/akka/io/TcpOutgoingConnection.scala @@ -21,12 +21,13 @@ import akka.io.Tcp._ * * INTERNAL API */ -private[io] class TcpOutgoingConnection( - _tcp: TcpExt, - channelRegistry: ChannelRegistry, - commander: ActorRef, - connect: Connect) - extends TcpConnection(_tcp, SocketChannel.open().configureBlocking(false).asInstanceOf[SocketChannel], connect.pullMode) { +private[io] class TcpOutgoingConnection(_tcp: TcpExt, + channelRegistry: ChannelRegistry, + commander: ActorRef, + connect: Connect) + extends TcpConnection(_tcp, + SocketChannel.open().configureBlocking(false).asInstanceOf[SocketChannel], + connect.pullMode) { import TcpOutgoingConnection._ import context._ @@ -37,7 +38,7 @@ private[io] class TcpOutgoingConnection( options.foreach(_.beforeConnect(channel.socket)) 
localAddress.foreach(channel.socket.bind) channelRegistry.register(channel, 0) - timeout foreach context.setReceiveTimeout //Initiate connection timeout if supplied + timeout.foreach(context.setReceiveTimeout) //Initiate connection timeout if supplied private def stop(cause: Throwable): Unit = stopWith(CloseInformation(Set(commander), connect.failureMessage.withCause(cause)), shouldAbort = true) @@ -108,7 +109,8 @@ private[io] class TcpOutgoingConnection( }(context.dispatcher) context.become(connecting(registration, remainingFinishConnectRetries - 1)) } else { - log.debug("Could not establish connection because finishConnect " + + log.debug( + "Could not establish connection because finishConnect " + "never returned true (consider increasing akka.io.tcp.finish-connect-retries)") stop(FinishConnectNeverReturnedTrueException) } diff --git a/akka-actor/src/main/scala/akka/io/Udp.scala b/akka-actor/src/main/scala/akka/io/Udp.scala index 35abda6217..06aea64137 100644 --- a/akka-actor/src/main/scala/akka/io/Udp.scala +++ b/akka-actor/src/main/scala/akka/io/Udp.scala @@ -94,10 +94,10 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider { * The listener actor for the newly bound port will reply with a [[Bound]] * message, or the manager will reply with a [[CommandFailed]] message. 
*/ - final case class Bind( - handler: ActorRef, - localAddress: InetSocketAddress, - options: immutable.Traversable[SocketOption] = Nil) extends Command + final case class Bind(handler: ActorRef, + localAddress: InetSocketAddress, + options: immutable.Traversable[SocketOption] = Nil) + extends Command /** * Send this message to the listener actor that previously sent a [[Bound]] @@ -191,7 +191,7 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider { private[io] class UdpSettings(_config: Config) extends SelectionHandlerSettings(_config) { import _config._ - val NrOfSelectors: Int = getInt("nr-of-selectors") requiring (_ > 0, "nr-of-selectors must be > 0") + val NrOfSelectors: Int = getInt("nr-of-selectors").requiring(_ > 0, "nr-of-selectors must be > 0") val DirectBufferSize: Int = getIntBytes("direct-buffer-size") val MaxDirectBufferPoolSize: Int = getInt("direct-buffer-pool-limit") val BatchReceiveLimit: Int = getInt("receive-throughput") @@ -215,9 +215,7 @@ class UdpExt(system: ExtendedActorSystem) extends IO.Extension { val settings: UdpSettings = new UdpSettings(system.settings.config.getConfig("akka.io.udp")) val manager: ActorRef = { - system.systemActorOf( - props = Props(classOf[UdpManager], this).withDeploy(Deploy.local), - name = "IO-UDP-FF") + system.systemActorOf(props = Props(classOf[UdpManager], this).withDeploy(Deploy.local), name = "IO-UDP-FF") } /** @@ -228,7 +226,8 @@ class UdpExt(system: ExtendedActorSystem) extends IO.Extension { /** * INTERNAL API */ - private[io] val bufferPool: BufferPool = new DirectByteBufferPool(settings.DirectBufferSize, settings.MaxDirectBufferPoolSize) + private[io] val bufferPool: BufferPool = + new DirectByteBufferPool(settings.DirectBufferSize, settings.MaxDirectBufferPoolSize) } /** @@ -246,6 +245,7 @@ object UdpMessage { * to recognize which write failed when receiving a [[Udp.CommandFailed]] message. 
*/ def noAck(token: AnyRef): NoAck = NoAck(token) + /** * Default [[Udp.NoAck]] instance which is used when no acknowledgment information is * explicitly provided. Its “token” is `null`. @@ -269,6 +269,7 @@ object UdpMessage { * [[Udp.Bind]] in that case. */ def send(payload: ByteString, target: InetSocketAddress, ack: Event): Command = Send(payload, target, ack) + /** * The same as `send(payload, target, noAck())`. */ @@ -282,6 +283,7 @@ object UdpMessage { */ def bind(handler: ActorRef, endpoint: InetSocketAddress, options: JIterable[SocketOption]): Command = Bind(handler, endpoint, options.asScala.to(immutable.IndexedSeq)) + /** * Bind without specifying options. */ @@ -305,6 +307,7 @@ object UdpMessage { * when you want to close the socket. */ def simpleSender(options: JIterable[SocketOption]): Command = SimpleSender(options.asScala.to(immutable.IndexedSeq)) + /** * Retrieve a simple sender without specifying options. */ diff --git a/akka-actor/src/main/scala/akka/io/UdpConnected.scala b/akka-actor/src/main/scala/akka/io/UdpConnected.scala index 92bea743ff..ba1acd7cec 100644 --- a/akka-actor/src/main/scala/akka/io/UdpConnected.scala +++ b/akka-actor/src/main/scala/akka/io/UdpConnected.scala @@ -72,7 +72,8 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide */ final case class Send(payload: ByteString, ack: Any) extends Command { require(ack - != null, "ack must be non-null. Use NoAck if you don't want acks.") + != null, + "ack must be non-null. Use NoAck if you don't want acks.") def wantsAck: Boolean = !ack.isInstanceOf[NoAck] } @@ -86,11 +87,11 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide * which is restricted to sending to and receiving from the given `remoteAddress`. * All received datagrams will be sent to the designated `handler` actor. 
*/ - final case class Connect( - handler: ActorRef, - remoteAddress: InetSocketAddress, - localAddress: Option[InetSocketAddress] = None, - options: immutable.Traversable[SocketOption] = Nil) extends Command + final case class Connect(handler: ActorRef, + remoteAddress: InetSocketAddress, + localAddress: Option[InetSocketAddress] = None, + options: immutable.Traversable[SocketOption] = Nil) + extends Command /** * Send this message to a connection actor (which had previously sent the @@ -152,9 +153,8 @@ class UdpConnectedExt(system: ExtendedActorSystem) extends IO.Extension { val settings: UdpSettings = new UdpSettings(system.settings.config.getConfig("akka.io.udp-connected")) val manager: ActorRef = { - system.systemActorOf( - props = Props(classOf[UdpConnectedManager], this).withDeploy(Deploy.local), - name = "IO-UDP-CONN") + system.systemActorOf(props = Props(classOf[UdpConnectedManager], this).withDeploy(Deploy.local), + name = "IO-UDP-CONN") } /** @@ -179,24 +179,21 @@ object UdpConnectedMessage { * which is restricted to sending to and receiving from the given `remoteAddress`. * All received datagrams will be sent to the designated `handler` actor. */ - def connect( - handler: ActorRef, - remoteAddress: InetSocketAddress, - localAddress: InetSocketAddress, - options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, Some(localAddress), options) + def connect(handler: ActorRef, + remoteAddress: InetSocketAddress, + localAddress: InetSocketAddress, + options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, Some(localAddress), options) + /** * Connect without specifying the `localAddress`. 
*/ - def connect( - handler: ActorRef, - remoteAddress: InetSocketAddress, - options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, None, options) + def connect(handler: ActorRef, remoteAddress: InetSocketAddress, options: JIterable[SocketOption]): Command = + Connect(handler, remoteAddress, None, options) + /** * Connect without specifying the `localAddress` or `options`. */ - def connect( - handler: ActorRef, - remoteAddress: InetSocketAddress): Command = Connect(handler, remoteAddress, None, Nil) + def connect(handler: ActorRef, remoteAddress: InetSocketAddress): Command = Connect(handler, remoteAddress, None, Nil) /** * This message is understood by the connection actors to send data to their @@ -207,6 +204,7 @@ object UdpConnectedMessage { * has been successfully enqueued to the O/S kernel. */ def send(data: ByteString, ack: AnyRef): Command = Send(data, ack) + /** * Send without requesting acknowledgment. */ diff --git a/akka-actor/src/main/scala/akka/io/UdpConnectedManager.scala b/akka-actor/src/main/scala/akka/io/UdpConnectedManager.scala index fe072c5740..71206553b3 100644 --- a/akka-actor/src/main/scala/akka/io/UdpConnectedManager.scala +++ b/akka-actor/src/main/scala/akka/io/UdpConnectedManager.scala @@ -11,7 +11,7 @@ import akka.io.UdpConnected.Connect * INTERNAL API */ private[io] class UdpConnectedManager(udpConn: UdpConnectedExt) - extends SelectionHandler.SelectorBasedManager(udpConn.settings, udpConn.settings.NrOfSelectors) { + extends SelectionHandler.SelectorBasedManager(udpConn.settings, udpConn.settings.NrOfSelectors) { def receive = workerForCommandHandler { case c: Connect => diff --git a/akka-actor/src/main/scala/akka/io/UdpConnection.scala b/akka-actor/src/main/scala/akka/io/UdpConnection.scala index c6d8b78712..aeafb51268 100644 --- a/akka-actor/src/main/scala/akka/io/UdpConnection.scala +++ b/akka-actor/src/main/scala/akka/io/UdpConnection.scala @@ -13,19 +13,20 @@ import scala.annotation.tailrec import 
scala.util.control.NonFatal import akka.actor.{ Actor, ActorLogging, ActorRef } import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } -import akka.util.{ ByteString, unused } +import akka.util.{ unused, ByteString } import akka.io.SelectionHandler._ import akka.io.UdpConnected._ /** * INTERNAL API */ -private[io] class UdpConnection( - udpConn: UdpConnectedExt, - channelRegistry: ChannelRegistry, - commander: ActorRef, - connect: Connect) - extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[io] class UdpConnection(udpConn: UdpConnectedExt, + channelRegistry: ChannelRegistry, + commander: ActorRef, + connect: Connect) + extends Actor + with ActorLogging + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import connect._ import udpConn._ @@ -61,7 +62,7 @@ private[io] class UdpConnection( channel.configureBlocking(false) val socket = channel.socket options.foreach(_.beforeDatagramBind(socket)) - localAddress foreach socket.bind + localAddress.foreach(socket.bind) channel.connect(remoteAddress) channelRegistry.register(channel, OP_READ) } @@ -117,7 +118,8 @@ private[io] class UdpConnection( } } val buffer = bufferPool.acquire() - try innerRead(BatchReceiveLimit, buffer) finally { + try innerRead(BatchReceiveLimit, buffer) + finally { registration.enableInterest(OP_READ) bufferPool.release(buffer) } @@ -156,9 +158,10 @@ private[io] class UdpConnection( thunk } catch { case NonFatal(e) => - log.debug( - "Failure while connecting UDP channel to remote address [{}] local address [{}]: {}", - remoteAddress, localAddress.getOrElse("undefined"), e) + log.debug("Failure while connecting UDP channel to remote address [{}] local address [{}]: {}", + remoteAddress, + localAddress.getOrElse("undefined"), + e) commander ! 
CommandFailed(connect) context.stop(self) } diff --git a/akka-actor/src/main/scala/akka/io/UdpListener.scala b/akka-actor/src/main/scala/akka/io/UdpListener.scala index ba33ef0560..e184f87c7d 100644 --- a/akka-actor/src/main/scala/akka/io/UdpListener.scala +++ b/akka-actor/src/main/scala/akka/io/UdpListener.scala @@ -20,12 +20,11 @@ import akka.io.Udp._ /** * INTERNAL API */ -private[io] class UdpListener( - val udp: UdpExt, - channelRegistry: ChannelRegistry, - bindCommander: ActorRef, - bind: Bind) - extends Actor with ActorLogging with WithUdpSend with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[io] class UdpListener(val udp: UdpExt, channelRegistry: ChannelRegistry, bindCommander: ActorRef, bind: Bind) + extends Actor + with ActorLogging + with WithUdpSend + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import udp.bufferPool import udp.settings._ @@ -34,9 +33,12 @@ private[io] class UdpListener( context.watch(bind.handler) // sign death pact - val channel = bind.options.collectFirst { - case creator: DatagramChannelCreator => creator - }.getOrElse(DatagramChannelCreator()).create() + val channel = bind.options + .collectFirst { + case creator: DatagramChannelCreator => creator + } + .getOrElse(DatagramChannelCreator()) + .create() channel.configureBlocking(false) val localAddress = @@ -65,7 +67,7 @@ private[io] class UdpListener( def receive: Receive = { case registration: ChannelRegistration => bindCommander ! 
Bound(channel.socket.getLocalSocketAddress.asInstanceOf[InetSocketAddress]) - context.become(readHandlers(registration) orElse sendHandlers(registration), discardOld = true) + context.become(readHandlers(registration).orElse(sendHandlers(registration)), discardOld = true) } def readHandlers(registration: ChannelRegistration): Receive = { @@ -101,7 +103,8 @@ private[io] class UdpListener( } val buffer = bufferPool.acquire() - try innerReceive(BatchReceiveLimit, buffer) finally { + try innerReceive(BatchReceiveLimit, buffer) + finally { bufferPool.release(buffer) registration.enableInterest(OP_READ) } diff --git a/akka-actor/src/main/scala/akka/io/UdpManager.scala b/akka-actor/src/main/scala/akka/io/UdpManager.scala index 0927c135a1..cbeb2b92a2 100644 --- a/akka-actor/src/main/scala/akka/io/UdpManager.scala +++ b/akka-actor/src/main/scala/akka/io/UdpManager.scala @@ -44,7 +44,8 @@ import akka.io.Udp._ * discarded. * */ -private[io] class UdpManager(udp: UdpExt) extends SelectionHandler.SelectorBasedManager(udp.settings, udp.settings.NrOfSelectors) { +private[io] class UdpManager(udp: UdpExt) + extends SelectionHandler.SelectorBasedManager(udp.settings, udp.settings.NrOfSelectors) { def receive = workerForCommandHandler { case b: Bind => diff --git a/akka-actor/src/main/scala/akka/io/UdpSender.scala b/akka-actor/src/main/scala/akka/io/UdpSender.scala index 9531d845f8..cdef62689c 100644 --- a/akka-actor/src/main/scala/akka/io/UdpSender.scala +++ b/akka-actor/src/main/scala/akka/io/UdpSender.scala @@ -15,18 +15,20 @@ import akka.actor._ /** * INTERNAL API */ -private[io] class UdpSender( - val udp: UdpExt, - channelRegistry: ChannelRegistry, - commander: ActorRef, - options: immutable.Traversable[SocketOption]) - extends Actor with ActorLogging with WithUdpSend with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[io] class UdpSender(val udp: UdpExt, + channelRegistry: ChannelRegistry, + commander: ActorRef, + options: immutable.Traversable[SocketOption]) 
+ extends Actor + with ActorLogging + with WithUdpSend + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { val channel = { val datagramChannel = DatagramChannel.open datagramChannel.configureBlocking(false) val socket = datagramChannel.socket - options foreach { _.beforeDatagramBind(socket) } + options.foreach { _.beforeDatagramBind(socket) } datagramChannel } @@ -50,4 +52,3 @@ private[io] class UdpSender( } } } - diff --git a/akka-actor/src/main/scala/akka/io/WithUdpSend.scala b/akka-actor/src/main/scala/akka/io/WithUdpSend.scala index 275ca2da2c..63190d2c66 100644 --- a/akka-actor/src/main/scala/akka/io/WithUdpSend.scala +++ b/akka-actor/src/main/scala/akka/io/WithUdpSend.scala @@ -5,8 +5,8 @@ package akka.io import java.net.InetSocketAddress -import java.nio.channels.{ SelectionKey, DatagramChannel } -import akka.actor.{ ActorRef, ActorLogging, Actor } +import java.nio.channels.{ DatagramChannel, SelectionKey } +import akka.actor.{ Actor, ActorLogging, ActorRef } import akka.io.Udp.{ CommandFailed, Send } import akka.io.SelectionHandler._ @@ -52,18 +52,14 @@ private[io] trait WithUdpSend { } catch { case NonFatal(e) => sender() ! CommandFailed(send) - log.debug( - "Failure while sending UDP datagram to remote address [{}]: {}", - send.target, e) + log.debug("Failure while sending UDP datagram to remote address [{}]: {}", send.target, e) retriedSend = false pendingSend = null pendingCommander = null } case None => sender() ! 
CommandFailed(send) - log.debug( - "Name resolution failed for remote address [{}]", - send.target) + log.debug("Name resolution failed for remote address [{}]", send.target) retriedSend = false pendingSend = null pendingCommander = null diff --git a/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala b/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala index 2076573f26..67ee405475 100644 --- a/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala +++ b/akka-actor/src/main/scala/akka/io/dns/DnsProtocol.scala @@ -62,13 +62,16 @@ object DnsProtocol { * @param records resource records for the query * @param additionalRecords records that relate to the query but are not strictly answers */ - final case class Resolved(name: String, records: im.Seq[ResourceRecord], additionalRecords: im.Seq[ResourceRecord]) extends NoSerializationVerificationNeeded { + final case class Resolved(name: String, records: im.Seq[ResourceRecord], additionalRecords: im.Seq[ResourceRecord]) + extends NoSerializationVerificationNeeded { + /** * Java API * * Records for the query */ def getRecords(): util.List[ResourceRecord] = records.asJava + /** * Java API * @@ -85,4 +88,3 @@ object DnsProtocol { } } - diff --git a/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala b/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala index e20f1823f2..18f4943139 100644 --- a/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala +++ b/akka-actor/src/main/scala/akka/io/dns/DnsResourceRecords.scala @@ -10,18 +10,16 @@ import akka.actor.NoSerializationVerificationNeeded import akka.annotation.InternalApi import CachePolicy._ import akka.io.dns.internal.{ DomainName, _ } -import akka.util.{ ByteIterator, ByteString, unused } +import akka.util.{ unused, ByteIterator, ByteString } import scala.annotation.switch import scala.concurrent.duration._ sealed abstract class ResourceRecord(val name: String, val ttl: Ttl, val recType: Short, val recClass: Short) - extends 
NoSerializationVerificationNeeded { -} + extends NoSerializationVerificationNeeded {} -final case class ARecord(override val name: String, override val ttl: Ttl, - ip: InetAddress) extends ResourceRecord(name, ttl, RecordType.A.code, RecordClass.IN.code) { -} +final case class ARecord(override val name: String, override val ttl: Ttl, ip: InetAddress) + extends ResourceRecord(name, ttl, RecordType.A.code, RecordClass.IN.code) {} /** * INTERNAL API @@ -35,9 +33,8 @@ private[dns] object ARecord { } } -final case class AAAARecord(override val name: String, override val ttl: Ttl, - ip: Inet6Address) extends ResourceRecord(name, ttl, RecordType.AAAA.code, RecordClass.IN.code) { -} +final case class AAAARecord(override val name: String, override val ttl: Ttl, ip: Inet6Address) + extends ResourceRecord(name, ttl, RecordType.AAAA.code, RecordClass.IN.code) {} /** * INTERNAL API @@ -56,12 +53,12 @@ private[dns] object AAAARecord { } } -final case class CNameRecord(override val name: String, override val ttl: Ttl, - canonicalName: String) extends ResourceRecord(name, ttl, RecordType.CNAME.code, RecordClass.IN.code) { -} +final case class CNameRecord(override val name: String, override val ttl: Ttl, canonicalName: String) + extends ResourceRecord(name, ttl, RecordType.CNAME.code, RecordClass.IN.code) {} @InternalApi private[dns] object CNameRecord { + /** * INTERNAL API */ @@ -71,15 +68,20 @@ private[dns] object CNameRecord { } } -final case class SRVRecord(override val name: String, override val ttl: Ttl, - priority: Int, weight: Int, port: Int, target: String) extends ResourceRecord(name, ttl, RecordType.SRV.code, RecordClass.IN.code) { -} +final case class SRVRecord(override val name: String, + override val ttl: Ttl, + priority: Int, + weight: Int, + port: Int, + target: String) + extends ResourceRecord(name, ttl, RecordType.SRV.code, RecordClass.IN.code) {} /** * INTERNAL API */ @InternalApi private[dns] object SRVRecord { + /** * INTERNAL API */ @@ -92,21 +94,29 @@ 
private[dns] object SRVRecord { } } -final case class UnknownRecord(override val name: String, override val ttl: Ttl, - override val recType: Short, override val recClass: Short, - data: ByteString) extends ResourceRecord(name, ttl, recType, recClass) { -} +final case class UnknownRecord(override val name: String, + override val ttl: Ttl, + override val recType: Short, + override val recClass: Short, + data: ByteString) + extends ResourceRecord(name, ttl, recType, recClass) {} /** * INTERNAL API */ @InternalApi private[dns] object UnknownRecord { + /** * INTERNAL API */ @InternalApi - def parseBody(name: String, ttl: Ttl, recType: Short, recClass: Short, @unused length: Short, it: ByteIterator): UnknownRecord = + def parseBody(name: String, + ttl: Ttl, + recType: Short, + recClass: Short, + @unused length: Short, + it: ByteIterator): UnknownRecord = UnknownRecord(name, ttl, recType, recClass, it.toByteString) } @@ -115,6 +125,7 @@ private[dns] object UnknownRecord { */ @InternalApi private[dns] object ResourceRecord { + /** * INTERNAL API */ @@ -137,4 +148,3 @@ private[dns] object ResourceRecord { } } } - diff --git a/akka-actor/src/main/scala/akka/io/dns/DnsSettings.scala b/akka-actor/src/main/scala/akka/io/dns/DnsSettings.scala index 56d9a9bfa6..76485e80c3 100644 --- a/akka-actor/src/main/scala/akka/io/dns/DnsSettings.scala +++ b/akka-actor/src/main/scala/akka/io/dns/DnsSettings.scala @@ -39,7 +39,8 @@ private[dns] final class DnsSettings(system: ExtendedActorSystem, c: Config) { parseNameserverAddress(other) :: Nil } case ConfigValueType.LIST => - val userAddresses = c.getStringList("nameservers").asScala.iterator.map(parseNameserverAddress).to(immutable.IndexedSeq) + val userAddresses = + c.getStringList("nameservers").asScala.iterator.map(parseNameserverAddress).to(immutable.IndexedSeq) require(userAddresses.nonEmpty, "nameservers can not be empty") userAddresses.toList case _ => throw new IllegalArgumentException("Invalid type for nameservers. 
Must be a string or string list") @@ -84,7 +85,8 @@ private[dns] final class DnsSettings(system: ExtendedActorSystem, c: Config) { case ConfigValueType.STRING => c.getString("ndots") match { case "default" => resolvConf.map(_.ndots).getOrElse(1) - case _ => throw new IllegalArgumentException("Invalid value for ndots. Must be the string 'default' or an integer.") + case _ => + throw new IllegalArgumentException("Invalid value for ndots. Must be the string 'default' or an integer.") } case ConfigValueType.NUMBER => val ndots = c.getInt("ndots") @@ -92,16 +94,17 @@ private[dns] final class DnsSettings(system: ExtendedActorSystem, c: Config) { throw new IllegalArgumentException("Invalid value for ndots, ndots must not be negative.") } ndots - case _ => throw new IllegalArgumentException("Invalid value for ndots. Must be the string 'default' or an integer.") + case _ => + throw new IllegalArgumentException("Invalid value for ndots. Must be the string 'default' or an integer.") } } // ------------------------- def failUnableToDetermineDefaultNameservers = - throw new IllegalStateException("Unable to obtain default nameservers from JNDI or via reflection. " + - "Please set `akka.io.dns.async-dns.nameservers` explicitly in order to be able to resolve domain names. " - ) + throw new IllegalStateException( + "Unable to obtain default nameservers from JNDI or via reflection. " + + "Please set `akka.io.dns.async-dns.nameservers` explicitly in order to be able to resolve domain names. 
") } @@ -160,7 +163,9 @@ object DnsSettings { // Only try if not empty as otherwise we will produce an exception if (dnsUrls != null && !dnsUrls.isEmpty) { val servers = dnsUrls.split(" ") - servers.flatMap { server => asInetSocketAddress(server).toOption }.toList + servers.flatMap { server => + asInetSocketAddress(server).toOption + }.toList } else Nil } } @@ -168,20 +173,21 @@ object DnsSettings { // this method is used as a fallback in case JNDI results in an empty list // this method will not work when running modularised of course since it needs access to internal sun classes def getNameserversUsingReflection: Try[List[InetSocketAddress]] = { - system.dynamicAccess.getClassFor("sun.net.dns.ResolverConfiguration") - .flatMap { c => - Try { - val open = c.getMethod("open") - val nameservers = c.getMethod("nameservers") - val instance = open.invoke(null) - val ns = nameservers.invoke(instance).asInstanceOf[util.List[String]] - val res = if (ns.isEmpty) throw new IllegalStateException("Empty nameservers list discovered using reflection. Consider configuring default nameservers manually!") - else ns.asScala.toList - res.flatMap(s => asInetSocketAddress(s).toOption) - } + system.dynamicAccess.getClassFor("sun.net.dns.ResolverConfiguration").flatMap { c => + Try { + val open = c.getMethod("open") + val nameservers = c.getMethod("nameservers") + val instance = open.invoke(null) + val ns = nameservers.invoke(instance).asInstanceOf[util.List[String]] + val res = if (ns.isEmpty) + throw new IllegalStateException( + "Empty nameservers list discovered using reflection. 
Consider configuring default nameservers manually!") + else ns.asScala.toList + res.flatMap(s => asInetSocketAddress(s).toOption) } + } } - getNameserversUsingJNDI orElse getNameserversUsingReflection + getNameserversUsingJNDI.orElse(getNameserversUsingReflection) } } diff --git a/akka-actor/src/main/scala/akka/io/dns/RecordType.scala b/akka-actor/src/main/scala/akka/io/dns/RecordType.scala index 4c1e0a8cb8..c87e2cb4da 100644 --- a/akka-actor/src/main/scala/akka/io/dns/RecordType.scala +++ b/akka-actor/src/main/scala/akka/io/dns/RecordType.scala @@ -12,6 +12,7 @@ import akka.util.OptionVal final case class RecordType(code: Short, name: String) object RecordType { + /** * array for fast lookups by id * wasteful, but we get trivial indexing into it for lookup @@ -31,34 +32,49 @@ object RecordType { /** A host address */ final val A: RecordType = register(RecordType(1, "A")) + /** An authoritative name server */ final val NS: RecordType = register(RecordType(2, "NS")) + /** A mail destination (Obsolete - use MX) */ final val MD: RecordType = register(RecordType(3, "MD")) + /** A mail forwarder (Obsolete - use MX) */ final val MF: RecordType = register(RecordType(4, "MF")) + /** the canonical name for an alias */ final val CNAME: RecordType = register(RecordType(5, "CNAME")) + /** marks the start of a zone of authority */ final val SOA: RecordType = register(RecordType(6, "SOA")) + /** A mailbox domain name (EXPERIMENTAL) */ final val MB: RecordType = register(RecordType(7, "MB")) + /** A mail group member (EXPERIMENTAL) */ final val MG: RecordType = register(RecordType(8, "MG")) + /** A mail rename domain name (EXPERIMENTAL) */ final val MR: RecordType = register(RecordType(9, "MR")) + /** A null RR (EXPERIMENTAL) */ final val NULL: RecordType = register(RecordType(10, "NULL")) + /** A well known service description */ final val WKS: RecordType = register(RecordType(11, "WKS")) + /** A domain name pointer */ final val PTR: RecordType = register(RecordType(12, "PTR")) 
+ /** host information */ final val HINFO: RecordType = register(RecordType(13, "HINFO")) + /** mailbox or mail list information */ final val MINFO: RecordType = register(RecordType(14, "MINFO")) + /** mail exchange */ final val MX: RecordType = register(RecordType(15, "MX")) + /** text strings */ final val TXT: RecordType = register(RecordType(16, "TXT")) @@ -80,4 +96,3 @@ object RecordType { final val MAILA: RecordType = register(RecordType(254, "MAILA")) final val WILDCARD: RecordType = register(RecordType(255, "WILDCARD")) } - diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsCache.scala b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsCache.scala index 2b11874f35..92cae34cb5 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsCache.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsCache.scala @@ -20,9 +20,10 @@ import scala.collection.immutable * Internal API */ @InternalApi class AsyncDnsCache extends Dns with PeriodicCacheCleanup with NoSerializationVerificationNeeded { - private val cacheRef = new AtomicReference(new Cache[(String, RequestType), Resolved]( - immutable.SortedSet()(expiryEntryOrdering()), - immutable.Map(), () => clock)) + private val cacheRef = new AtomicReference( + new Cache[(String, RequestType), Resolved](immutable.SortedSet()(expiryEntryOrdering()), + immutable.Map(), + () => clock)) private val nanoBase = System.nanoTime() diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala index a4ae6be326..9c1d95f666 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsManager.scala @@ -33,15 +33,29 @@ private[akka] object AsyncDnsManager { * INTERNAL API */ @InternalApi -private[io] final class AsyncDnsManager(name: String, system: ExtendedActorSystem, resolverConfig: Config, cache: Dns, dispatcher: 
String, provider: DnsProvider) extends Actor - with RequiresMessageQueue[UnboundedMessageQueueSemantics] with ActorLogging with Timers { +private[io] final class AsyncDnsManager(name: String, + system: ExtendedActorSystem, + resolverConfig: Config, + cache: Dns, + dispatcher: String, + provider: DnsProvider) + extends Actor + with RequiresMessageQueue[UnboundedMessageQueueSemantics] + with ActorLogging + with Timers { import akka.pattern.ask import akka.pattern.pipe /** * Ctr expected by the DnsExt for all DnsMangers */ - def this(ext: DnsExt) = this(ext.Settings.Resolver, ext.system, ext.Settings.ResolverConfig, ext.cache, ext.Settings.Dispatcher, ext.provider) + def this(ext: DnsExt) = + this(ext.Settings.Resolver, + ext.system, + ext.Settings.ResolverConfig, + ext.cache, + ext.Settings.Dispatcher, + ext.provider) implicit val ec = context.dispatcher @@ -49,9 +63,10 @@ private[io] final class AsyncDnsManager(name: String, system: ExtendedActorSyste implicit val timeout = Timeout(settings.ResolveTimeout) private val resolver = { - val props: Props = FromConfig.props(Props(provider.actorClass, settings, cache, (factory: ActorRefFactory, dns: List[InetSocketAddress]) => { - dns.map(ns => factory.actorOf(Props(new DnsClient(ns)))) - }).withDeploy(Deploy.local).withDispatcher(dispatcher)) + val props: Props = FromConfig.props( + Props(provider.actorClass, settings, cache, (factory: ActorRefFactory, dns: List[InetSocketAddress]) => { + dns.map(ns => factory.actorOf(Props(new DnsClient(ns)))) + }).withDeploy(Deploy.local).withDispatcher(dispatcher)) context.actorOf(props, name) } @@ -62,7 +77,8 @@ private[io] final class AsyncDnsManager(name: String, system: ExtendedActorSyste override def preStart(): Unit = { cacheCleanup.foreach { _ => - val interval = Duration(resolverConfig.getDuration("cache-cleanup-interval", TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS) + val interval = + Duration(resolverConfig.getDuration("cache-cleanup-interval", TimeUnit.MILLISECONDS), 
TimeUnit.MILLISECONDS) timers.startPeriodicTimer(CacheCleanup, CacheCleanup, interval) } } @@ -76,15 +92,14 @@ private[io] final class AsyncDnsManager(name: String, system: ExtendedActorSyste // adapt legacy protocol to new protocol log.debug("Resolution request for {} from {}", name, sender()) val adapted = DnsProtocol.Resolve(name) - val reply = (resolver ? adapted).mapTo[DnsProtocol.Resolved] - .map { asyncResolved => - val ips = asyncResolved.records.collect { - case a: ARecord => a.ip - case a: AAAARecord => a.ip - } - Dns.Resolved(asyncResolved.name, ips) + val reply = (resolver ? adapted).mapTo[DnsProtocol.Resolved].map { asyncResolved => + val ips = asyncResolved.records.collect { + case a: ARecord => a.ip + case a: AAAARecord => a.ip } - reply pipeTo sender() + Dns.Resolved(asyncResolved.name, ips) + } + reply.pipeTo(sender()) case CacheCleanup => cacheCleanup.foreach(_.cleanup()) @@ -94,4 +109,3 @@ private[io] final class AsyncDnsManager(name: String, system: ExtendedActorSyste } } - diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala index 787b155874..d3b5acb310 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/AsyncDnsResolver.scala @@ -25,10 +25,11 @@ import scala.util.control.NonFatal * INTERNAL API */ @InternalApi -private[io] final class AsyncDnsResolver( - settings: DnsSettings, - cache: AsyncDnsCache, - clientFactory: (ActorRefFactory, List[InetSocketAddress]) => List[ActorRef]) extends Actor with ActorLogging { +private[io] final class AsyncDnsResolver(settings: DnsSettings, + cache: AsyncDnsCache, + clientFactory: (ActorRefFactory, List[InetSocketAddress]) => List[ActorRef]) + extends Actor + with ActorLogging { import AsyncDnsResolver._ @@ -39,7 +40,10 @@ private[io] final class AsyncDnsResolver( val nameServers = settings.NameServers - log.debug("Using name 
servers [{}] and search domains [{}] with ndots={}", nameServers, settings.SearchDomains, settings.NDots) + log.debug("Using name servers [{}] and search domains [{}] with ndots={}", + nameServers, + settings.SearchDomains, + settings.NDots) private var requestId: Short = 0 private def nextId(): Short = { @@ -56,17 +60,21 @@ private[io] final class AsyncDnsResolver( log.debug("{} cached {}", mode, resolved) sender() ! resolved case None => - resolveWithResolvers(name, mode, resolvers).map { resolved => - if (resolved.records.nonEmpty) { - val minTtl = resolved.records.minBy[Duration](_.ttl.value).ttl - cache.put((name, mode), resolved, minTtl) + resolveWithResolvers(name, mode, resolvers) + .map { resolved => + if (resolved.records.nonEmpty) { + val minTtl = resolved.records.minBy[Duration](_.ttl.value).ttl + cache.put((name, mode), resolved, minTtl) + } + resolved } - resolved - } pipeTo sender() + .pipeTo(sender()) } } - private def resolveWithResolvers(name: String, requestType: RequestType, resolvers: List[ActorRef]): Future[DnsProtocol.Resolved] = + private def resolveWithResolvers(name: String, + requestType: RequestType, + resolvers: List[ActorRef]): Future[DnsProtocol.Resolved] = if (isInetAddress(name)) { Future.fromTry { Try { @@ -82,21 +90,26 @@ private[io] final class AsyncDnsResolver( resolvers match { case Nil => Future.failed(ResolveFailedException(s"Failed to resolve $name with nameservers: $nameServers")) - case head :: tail => resolveWithSearch(name, requestType, head).recoverWith { - case NonFatal(t) => - log.error(t, "Resolve failed. Trying next name server") - resolveWithResolvers(name, requestType, tail) - } + case head :: tail => + resolveWithSearch(name, requestType, head).recoverWith { + case NonFatal(t) => + log.error(t, "Resolve failed. Trying next name server") + resolveWithResolvers(name, requestType, tail) + } } } private def sendQuestion(resolver: ActorRef, message: DnsQuestion): Future[Answer] = { val result = (resolver ? 
message).mapTo[Answer] - result.failed.foreach { _ => resolver ! DropRequest(message.id) } + result.failed.foreach { _ => + resolver ! DropRequest(message.id) + } result } - private def resolveWithSearch(name: String, requestType: RequestType, resolver: ActorRef): Future[DnsProtocol.Resolved] = { + private def resolveWithSearch(name: String, + requestType: RequestType, + resolver: ActorRef): Future[DnsProtocol.Resolved] = { if (settings.SearchDomains.nonEmpty) { val nameWithSearch = settings.SearchDomains.map(sd => name + "." + sd) // ndots is a heuristic used to try and work out whether the name passed in is a fully qualified domain name, @@ -118,7 +131,9 @@ private[io] final class AsyncDnsResolver( } } - private def resolveFirst(searchNames: List[String], requestType: RequestType, resolver: ActorRef): Future[DnsProtocol.Resolved] = { + private def resolveFirst(searchNames: List[String], + requestType: RequestType, + resolver: ActorRef): Future[DnsProtocol.Resolved] = { searchNames match { case searchName :: Nil => resolve(searchName, requestType, resolver) @@ -138,16 +153,17 @@ private[io] final class AsyncDnsResolver( val caseFoldedName = Helpers.toRootLowerCase(name) requestType match { case Ip(ipv4, ipv6) => + val ipv4Recs: Future[Answer] = + if (ipv4) + sendQuestion(resolver, Question4(nextId(), caseFoldedName)) + else + Empty - val ipv4Recs: Future[Answer] = if (ipv4) - sendQuestion(resolver, Question4(nextId(), caseFoldedName)) - else - Empty - - val ipv6Recs = if (ipv6) - sendQuestion(resolver, Question6(nextId(), caseFoldedName)) - else - Empty + val ipv6Recs = + if (ipv6) + sendQuestion(resolver, Question6(nextId(), caseFoldedName)) + else + Empty for { ipv4 <- ipv4Recs @@ -155,10 +171,9 @@ private[io] final class AsyncDnsResolver( } yield DnsProtocol.Resolved(name, ipv4.rrs ++ ipv6.rrs, ipv4.additionalRecs ++ ipv6.additionalRecs) case Srv => - sendQuestion(resolver, SrvQuestion(nextId(), caseFoldedName)) - .map(answer => { - DnsProtocol.Resolved(name, 
answer.rrs, answer.additionalRecs) - }) + sendQuestion(resolver, SrvQuestion(nextId(), caseFoldedName)).map(answer => { + DnsProtocol.Resolved(name, answer.rrs, answer.additionalRecs) + }) } } @@ -178,9 +193,10 @@ private[io] object AsyncDnsResolver { private def isInetAddress(name: String): Boolean = ipv4Address.findAllMatchIn(name).nonEmpty || - ipv6Address.findAllMatchIn(name).nonEmpty + ipv6Address.findAllMatchIn(name).nonEmpty - private val Empty = Future.successful(Answer(-1, immutable.Seq.empty[ResourceRecord], immutable.Seq.empty[ResourceRecord])) + private val Empty = + Future.successful(Answer(-1, immutable.Seq.empty[ResourceRecord], immutable.Seq.empty[ResourceRecord])) case class ResolveFailedException(msg: String) extends Exception(msg) } diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala b/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala index fd5ce11136..cfca83da78 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/DnsClient.scala @@ -27,7 +27,8 @@ import scala.concurrent.duration._ final case class SrvQuestion(id: Short, name: String) extends DnsQuestion final case class Question4(id: Short, name: String) extends DnsQuestion final case class Question6(id: Short, name: String) extends DnsQuestion - final case class Answer(id: Short, rrs: im.Seq[ResourceRecord], additionalRecs: im.Seq[ResourceRecord] = Nil) extends NoSerializationVerificationNeeded + final case class Answer(id: Short, rrs: im.Seq[ResourceRecord], additionalRecs: im.Seq[ResourceRecord] = Nil) + extends NoSerializationVerificationNeeded final case class DropRequest(id: Short) } @@ -123,7 +124,8 @@ import scala.concurrent.duration._ log.debug("Client for id {} not found. 
Discarding unsuccessful response.", msg.id) } } else { - val (recs, additionalRecs) = if (msg.flags.responseCode == ResponseCode.SUCCESS) (msg.answerRecs, msg.additionalRecs) else (Nil, Nil) + val (recs, additionalRecs) = + if (msg.flags.responseCode == ResponseCode.SUCCESS) (msg.answerRecs, msg.additionalRecs) else (Nil, Nil) self ! Answer(msg.id, recs, additionalRecs) } case response: Answer => @@ -139,12 +141,12 @@ import scala.concurrent.duration._ } def createTcpClient() = { - context.actorOf(BackoffSupervisor.props( - Props(classOf[TcpDnsClient], tcp, ns, self), - childName = "tcpDnsClient", - minBackoff = 10.millis, - maxBackoff = 20.seconds, - randomFactor = 0.1 - ), "tcpDnsClientSupervisor") + context.actorOf( + BackoffSupervisor.props(Props(classOf[TcpDnsClient], tcp, ns, self), + childName = "tcpDnsClient", + minBackoff = 10.millis, + maxBackoff = 20.seconds, + randomFactor = 0.1), + "tcpDnsClientSupervisor") } } diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala b/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala index e658484aae..e7e0e99a54 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/DnsMessage.scala @@ -76,11 +76,15 @@ private[internal] case class MessageFlags(flags: Short) extends AnyVal { */ @InternalApi private[internal] object MessageFlags { - def apply(answer: Boolean = false, opCode: OpCode.Value = OpCode.QUERY, authoritativeAnswer: Boolean = false, - truncated: Boolean = false, recursionDesired: Boolean = true, recursionAvailable: Boolean = false, + def apply(answer: Boolean = false, + opCode: OpCode.Value = OpCode.QUERY, + authoritativeAnswer: Boolean = false, + truncated: Boolean = false, + recursionDesired: Boolean = true, + recursionAvailable: Boolean = false, responseCode: ResponseCode.Value = ResponseCode.SUCCESS): MessageFlags = { - new MessageFlags(( - (if (answer) 0x8000 else 0) | + new MessageFlags( + ((if 
(answer) 0x8000 else 0) | (opCode.id << 11) | (if (authoritativeAnswer) 1 << 10 else 0) | (if (truncated) 1 << 9 else 0) | @@ -94,13 +98,12 @@ private[internal] object MessageFlags { * INTERNAL API */ @InternalApi -private[internal] case class Message( - id: Short, - flags: MessageFlags, - questions: Seq[Question] = Seq.empty, - answerRecs: Seq[ResourceRecord] = Seq.empty, - authorityRecs: Seq[ResourceRecord] = Seq.empty, - additionalRecs: Seq[ResourceRecord] = Seq.empty) { +private[internal] case class Message(id: Short, + flags: MessageFlags, + questions: Seq[Question] = Seq.empty, + answerRecs: Seq[ResourceRecord] = Seq.empty, + authorityRecs: Seq[ResourceRecord] = Seq.empty, + additionalRecs: Seq[ResourceRecord] = Seq.empty) { def write(): ByteString = { val ret = ByteString.newBuilder write(ret) @@ -108,7 +111,8 @@ private[internal] case class Message( } def write(ret: ByteStringBuilder): Unit = { - ret.putShort(id) + ret + .putShort(id) .putShort(flags.flags) .putShort(questions.size) // We only send questions, never answers with resource records in @@ -135,18 +139,27 @@ private[internal] object Message { val nsCount = it.getShort val arCount = it.getShort - val qs = (0 until qdCount).map { _ => Try(Question.parse(it, msg)) } - val ans = (0 until anCount).map { _ => Try(ResourceRecord.parse(it, msg)) } - val nss = (0 until nsCount).map { _ => Try(ResourceRecord.parse(it, msg)) } - val ars = (0 until arCount).map { _ => Try(ResourceRecord.parse(it, msg)) } + val qs = (0 until qdCount).map { _ => + Try(Question.parse(it, msg)) + } + val ans = (0 until anCount).map { _ => + Try(ResourceRecord.parse(it, msg)) + } + val nss = (0 until nsCount).map { _ => + Try(ResourceRecord.parse(it, msg)) + } + val ars = (0 until arCount).map { _ => + Try(ResourceRecord.parse(it, msg)) + } import scala.language.implicitConversions implicit def flattener[T](tried: Try[T]): GenTraversableOnce[T] = if (flags.isTruncated) tried.toOption - else tried match { - case Success(value) => 
Some(value) - case Failure(reason) => throw reason - } + else + tried match { + case Success(value) => Some(value) + case Failure(reason) => throw reason + } new Message(id, flags, qs.flatten, ans.flatten, nss.flatten, ars.flatten) } diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/Question.scala b/akka-actor/src/main/scala/akka/io/dns/internal/Question.scala index c2a37e37af..b3e4b6f829 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/Question.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/Question.scala @@ -19,6 +19,7 @@ private[akka] final case class Question(name: String, qType: RecordType, qClass: RecordClassSerializer.write(out, qClass) } } + /** * INTERNAL API */ diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/ResolvConfParser.scala b/akka-actor/src/main/scala/akka/io/dns/internal/ResolvConfParser.scala index 80376e48a5..f0551de066 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/ResolvConfParser.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/ResolvConfParser.scala @@ -35,7 +35,8 @@ private[dns] object ResolvConfParser { var search = List.empty[String] var ndots = 1 - lines.map(_.trim) + lines + .map(_.trim) .filter { line => // Ignore blank lines and comments line.nonEmpty && line(0) != ';' && line(0) != '#' diff --git a/akka-actor/src/main/scala/akka/io/dns/internal/TcpDnsClient.scala b/akka-actor/src/main/scala/akka/io/dns/internal/TcpDnsClient.scala index 71bf0cd000..db0264d88a 100644 --- a/akka-actor/src/main/scala/akka/io/dns/internal/TcpDnsClient.scala +++ b/akka-actor/src/main/scala/akka/io/dns/internal/TcpDnsClient.scala @@ -16,7 +16,10 @@ import akka.util.ByteString /** * INTERNAL API */ -@InternalApi private[akka] class TcpDnsClient(tcp: ActorRef, ns: InetSocketAddress, answerRecipient: ActorRef) extends Actor with ActorLogging with Stash { +@InternalApi private[akka] class TcpDnsClient(tcp: ActorRef, ns: InetSocketAddress, answerRecipient: ActorRef) + extends Actor + with 
ActorLogging + with Stash { import TcpDnsClient._ override def receive: Receive = idle @@ -76,7 +79,8 @@ import akka.util.ByteString if (msg.flags.isTruncated) { log.warning("TCP DNS response truncated") } - val (recs, additionalRecs) = if (msg.flags.responseCode == ResponseCode.SUCCESS) (msg.answerRecs, msg.additionalRecs) else (Nil, Nil) + val (recs, additionalRecs) = + if (msg.flags.responseCode == ResponseCode.SUCCESS) (msg.answerRecs, msg.additionalRecs) else (Nil, Nil) Answer(msg.id, recs, additionalRecs) } } diff --git a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala index d9a3f096ff..a02893e252 100644 --- a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala +++ b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala @@ -84,6 +84,7 @@ object Pair { */ @SerialVersionUID(1L) trait Creator[T] extends Serializable { + /** * This method must return a different instance upon every call. */ @@ -139,9 +140,16 @@ abstract class JavaPartialFunction[A, B] extends AbstractPartialFunction[A, B] { @throws(classOf[Exception]) def apply(x: A, isCheck: Boolean): B - final def isDefinedAt(x: A): Boolean = try { apply(x, true); true } catch { case NoMatch => false } - final override def apply(x: A): B = try apply(x, false) catch { case NoMatch => throw new MatchError(x) } - final override def applyOrElse[A1 <: A, B1 >: B](x: A1, default: A1 => B1): B1 = try apply(x, false) catch { case NoMatch => default(x) } + final def isDefinedAt(x: A): Boolean = + try { + apply(x, true); true + } catch { case NoMatch => false } + final override def apply(x: A): B = + try apply(x, false) + catch { case NoMatch => throw new MatchError(x) } + final override def applyOrElse[A1 <: A, B1 >: B](x: A1, default: A1 => B1): B1 = + try apply(x, false) + catch { case NoMatch => default(x) } } /** @@ -151,6 +159,7 @@ abstract class JavaPartialFunction[A, B] extends AbstractPartialFunction[A, B] { */ sealed abstract class Option[A] extends 
java.lang.Iterable[A] { def get: A + /** * Returns a if this is some(a) or defaultValue if * this is none. @@ -163,6 +172,7 @@ sealed abstract class Option[A] extends java.lang.Iterable[A] { } object Option { + /** * Option factory that creates Some */ diff --git a/akka-actor/src/main/scala/akka/japi/Throwables.scala b/akka-actor/src/main/scala/akka/japi/Throwables.scala index 94dc5fbc69..8c33b9b703 100644 --- a/akka-actor/src/main/scala/akka/japi/Throwables.scala +++ b/akka-actor/src/main/scala/akka/japi/Throwables.scala @@ -28,6 +28,7 @@ import scala.util.control.NonFatal * }}} */ object Throwables { + /** * Returns true if the provided `Throwable` is to be considered non-fatal, * or false if it is to be considered fatal diff --git a/akka-actor/src/main/scala/akka/japi/function/Function.scala b/akka-actor/src/main/scala/akka/japi/function/Function.scala index b9fc23d0c5..bd78ffd96a 100644 --- a/akka-actor/src/main/scala/akka/japi/function/Function.scala +++ b/akka-actor/src/main/scala/akka/japi/function/Function.scala @@ -65,10 +65,10 @@ trait Predicate[-T] extends java.io.Serializable { */ @SerialVersionUID(1L) trait Creator[+T] extends Serializable { + /** * This method must return a different instance upon every call. 
*/ @throws(classOf[Exception]) def create(): T } - diff --git a/akka-actor/src/main/scala/akka/japi/pf/CaseStatements.scala b/akka-actor/src/main/scala/akka/japi/pf/CaseStatements.scala index 96d71f6285..6835cc0f08 100644 --- a/akka-actor/src/main/scala/akka/japi/pf/CaseStatements.scala +++ b/akka-actor/src/main/scala/akka/japi/pf/CaseStatements.scala @@ -4,22 +4,20 @@ package akka.japi.pf -import FI.{ UnitApply, Apply, Predicate } +import FI.{ Apply, Predicate, UnitApply } private[pf] object CaseStatement { def empty[F, T](): PartialFunction[F, T] = PartialFunction.empty } -private[pf] class CaseStatement[-F, +P, T](predicate: Predicate, apply: Apply[P, T]) - extends PartialFunction[F, T] { +private[pf] class CaseStatement[-F, +P, T](predicate: Predicate, apply: Apply[P, T]) extends PartialFunction[F, T] { override def isDefinedAt(o: F) = predicate.defined(o) override def apply(o: F) = apply.apply(o.asInstanceOf[P]) } -private[pf] class UnitCaseStatement[F, P](predicate: Predicate, apply: UnitApply[P]) - extends PartialFunction[F, Unit] { +private[pf] class UnitCaseStatement[F, P](predicate: Predicate, apply: UnitApply[P]) extends PartialFunction[F, Unit] { override def isDefinedAt(o: F) = predicate.defined(o) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 1e9d6e18cb..28295231dc 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -193,7 +193,8 @@ trait ExplicitAskSupport { */ def ask(actorRef: ActorRef, messageFactory: ActorRef => Any)(implicit timeout: Timeout): Future[Any] = actorRef.internalAsk(messageFactory, timeout, ActorRef.noSender) - def ask(actorRef: ActorRef, messageFactory: ActorRef => Any, sender: ActorRef)(implicit timeout: Timeout): Future[Any] = + def ask(actorRef: ActorRef, messageFactory: ActorRef => Any, sender: ActorRef)( + implicit timeout: Timeout): Future[Any] = 
actorRef.internalAsk(messageFactory, timeout, sender) /** @@ -217,7 +218,8 @@ trait ExplicitAskSupport { * All of the above use a required implicit [[akka.util.Timeout]] and optional implicit * sender [[akka.actor.ActorRef]]. */ - implicit def ask(actorSelection: ActorSelection): ExplicitlyAskableActorSelection = new ExplicitlyAskableActorSelection(actorSelection) + implicit def ask(actorSelection: ActorSelection): ExplicitlyAskableActorSelection = + new ExplicitlyAskableActorSelection(actorSelection) /** * Sends a message asynchronously and returns a [[scala.concurrent.Future]] @@ -250,11 +252,13 @@ trait ExplicitAskSupport { */ def ask(actorSelection: ActorSelection, messageFactory: ActorRef => Any)(implicit timeout: Timeout): Future[Any] = actorSelection.internalAsk(messageFactory, timeout, ActorRef.noSender) - def ask(actorSelection: ActorSelection, messageFactory: ActorRef => Any, sender: ActorRef)(implicit timeout: Timeout): Future[Any] = + def ask(actorSelection: ActorSelection, messageFactory: ActorRef => Any, sender: ActorRef)( + implicit timeout: Timeout): Future[Any] = actorSelection.internalAsk(messageFactory, timeout, sender) } object AskableActorRef { + /** * INTERNAL API: for binary compatibility */ @@ -276,24 +280,33 @@ object AskableActorRef { /** * INTERNAL API */ - @InternalApi private[akka] def negativeTimeoutException(recipient: Any, message: Any, sender: ActorRef): IllegalArgumentException = { - new IllegalArgumentException(s"Timeout length must be positive, question not sent to [$recipient]. " + + @InternalApi private[akka] def negativeTimeoutException(recipient: Any, + message: Any, + sender: ActorRef): IllegalArgumentException = { + new IllegalArgumentException( + s"Timeout length must be positive, question not sent to [$recipient]. 
" + messagePartOfException(message, sender)) } /** * INTERNAL API */ - @InternalApi private[akka] def recipientTerminatedException(recipient: Any, message: Any, sender: ActorRef): AskTimeoutException = { - new AskTimeoutException(s"Recipient [$recipient] had already been terminated. " + + @InternalApi private[akka] def recipientTerminatedException(recipient: Any, + message: Any, + sender: ActorRef): AskTimeoutException = { + new AskTimeoutException( + s"Recipient [$recipient] had already been terminated. " + messagePartOfException(message, sender)) } /** * INTERNAL API */ - @InternalApi private[akka] def unsupportedRecipientType(recipient: Any, message: Any, sender: ActorRef): IllegalArgumentException = { - new IllegalArgumentException(s"Unsupported recipient type, question not sent to [$recipient]. " + + @InternalApi private[akka] def unsupportedRecipientType(recipient: Any, + message: Any, + sender: ActorRef): IllegalArgumentException = { + new IllegalArgumentException( + s"Unsupported recipient type, question not sent to [$recipient]. " + messagePartOfException(message, sender)) } } @@ -355,32 +368,37 @@ final class ExplicitlyAskableActorRef(val actorRef: ActorRef) extends AnyVal { /** * INTERNAL API: for binary compatibility */ - private[pattern] def internalAsk(messageFactory: ActorRef => Any, timeout: Timeout, sender: ActorRef): Future[Any] = actorRef match { - case ref: InternalActorRef if ref.isTerminated => - val message = messageFactory(ref.provider.deadLetters) - actorRef ! 
message - Future.failed[Any](AskableActorRef.recipientTerminatedException(actorRef, message, sender)) - case ref: InternalActorRef => - if (timeout.duration.length <= 0) { + private[pattern] def internalAsk(messageFactory: ActorRef => Any, timeout: Timeout, sender: ActorRef): Future[Any] = + actorRef match { + case ref: InternalActorRef if ref.isTerminated => val message = messageFactory(ref.provider.deadLetters) - Future.failed[Any](AskableActorRef.negativeTimeoutException(actorRef, message, sender)) - } else { - val a = PromiseActorRef(ref.provider, timeout, targetName = actorRef, "unknown", sender) - val message = messageFactory(a) - a.messageClassName = message.getClass.getName - actorRef.tell(message, a) - a.result.future - } - case _ if sender eq null => - Future.failed[Any](new IllegalArgumentException("No recipient for the reply was provided, " + - s"question not sent to [$actorRef].")) - case _ => - val message = if (sender == null) null else messageFactory(sender.asInstanceOf[InternalActorRef].provider.deadLetters) - Future.failed[Any](AskableActorRef.unsupportedRecipientType(actorRef, message, sender)) - } + actorRef ! 
message + Future.failed[Any](AskableActorRef.recipientTerminatedException(actorRef, message, sender)) + case ref: InternalActorRef => + if (timeout.duration.length <= 0) { + val message = messageFactory(ref.provider.deadLetters) + Future.failed[Any](AskableActorRef.negativeTimeoutException(actorRef, message, sender)) + } else { + val a = PromiseActorRef(ref.provider, timeout, targetName = actorRef, "unknown", sender) + val message = messageFactory(a) + a.messageClassName = message.getClass.getName + actorRef.tell(message, a) + a.result.future + } + case _ if sender eq null => + Future.failed[Any]( + new IllegalArgumentException( + "No recipient for the reply was provided, " + + s"question not sent to [$actorRef].")) + case _ => + val message = + if (sender == null) null else messageFactory(sender.asInstanceOf[InternalActorRef].provider.deadLetters) + Future.failed[Any](AskableActorRef.unsupportedRecipientType(actorRef, message, sender)) + } } object AskableActorSelection { + /** * INTERNAL API: for binary compatibility */ @@ -420,17 +438,18 @@ final class AskableActorSelection(val actorSel: ActorSelection) extends AnyVal { /** * INTERNAL API: for binary compatibility */ - private[pattern] def internalAsk(message: Any, timeout: Timeout, sender: ActorRef): Future[Any] = actorSel.anchor match { - case ref: InternalActorRef => - if (timeout.duration.length <= 0) - Future.failed[Any](AskableActorRef.negativeTimeoutException(actorSel, message, sender)) - else { - val a = PromiseActorRef(ref.provider, timeout, targetName = actorSel, message.getClass.getName, sender) - actorSel.tell(message, a) - a.result.future - } - case _ => Future.failed[Any](AskableActorRef.unsupportedRecipientType(actorSel, message, sender)) - } + private[pattern] def internalAsk(message: Any, timeout: Timeout, sender: ActorRef): Future[Any] = + actorSel.anchor match { + case ref: InternalActorRef => + if (timeout.duration.length <= 0) + 
Future.failed[Any](AskableActorRef.negativeTimeoutException(actorSel, message, sender)) + else { + val a = PromiseActorRef(ref.provider, timeout, targetName = actorSel, message.getClass.getName, sender) + actorSel.tell(message, a) + a.result.future + } + case _ => Future.failed[Any](AskableActorRef.unsupportedRecipientType(actorSel, message, sender)) + } } /* @@ -447,25 +466,29 @@ final class ExplicitlyAskableActorSelection(val actorSel: ActorSelection) extend /** * INTERNAL API: for binary compatibility */ - private[pattern] def internalAsk(messageFactory: ActorRef => Any, timeout: Timeout, sender: ActorRef): Future[Any] = actorSel.anchor match { - case ref: InternalActorRef => - if (timeout.duration.length <= 0) { - val message = messageFactory(ref.provider.deadLetters) - Future.failed[Any](AskableActorRef.negativeTimeoutException(actorSel, message, sender)) - } else { - val a = PromiseActorRef(ref.provider, timeout, targetName = actorSel, "unknown", sender) - val message = messageFactory(a) - a.messageClassName = message.getClass.getName - actorSel.tell(message, a) - a.result.future - } - case _ if sender eq null => - Future.failed[Any](new IllegalArgumentException("No recipient for the reply was provided, " + - s"question not sent to [$actorSel].")) - case _ => - val message = if (sender == null) null else messageFactory(sender.asInstanceOf[InternalActorRef].provider.deadLetters) - Future.failed[Any](AskableActorRef.unsupportedRecipientType(actorSel, message, sender)) - } + private[pattern] def internalAsk(messageFactory: ActorRef => Any, timeout: Timeout, sender: ActorRef): Future[Any] = + actorSel.anchor match { + case ref: InternalActorRef => + if (timeout.duration.length <= 0) { + val message = messageFactory(ref.provider.deadLetters) + Future.failed[Any](AskableActorRef.negativeTimeoutException(actorSel, message, sender)) + } else { + val a = PromiseActorRef(ref.provider, timeout, targetName = actorSel, "unknown", sender) + val message = messageFactory(a) 
+ a.messageClassName = message.getClass.getName + actorSel.tell(message, a) + a.result.future + } + case _ if sender eq null => + Future.failed[Any]( + new IllegalArgumentException( + "No recipient for the reply was provided, " + + s"question not sent to [$actorSel].")) + case _ => + val message = + if (sender == null) null else messageFactory(sender.asInstanceOf[InternalActorRef].provider.deadLetters) + Future.failed[Any](AskableActorRef.unsupportedRecipientType(actorSel, message, sender)) + } } /** @@ -474,8 +497,10 @@ final class ExplicitlyAskableActorSelection(val actorSel: ActorSelection) extend * * INTERNAL API */ -private[akka] final class PromiseActorRef private (val provider: ActorRefProvider, val result: Promise[Any], _mcn: String) - extends MinimalActorRef { +private[akka] final class PromiseActorRef private (val provider: ActorRefProvider, + val result: Promise[Any], + _mcn: String) + extends MinimalActorRef { import AbstractPromiseActorRef.{ stateOffset, watchedByOffset } import PromiseActorRef._ @@ -501,7 +526,8 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide private[this] var _watchedByDoNotCallMeDirectly: Set[ActorRef] = ActorCell.emptyActorRefSet @inline - private[this] def watchedBy: Set[ActorRef] = Unsafe.instance.getObjectVolatile(this, watchedByOffset).asInstanceOf[Set[ActorRef]] + private[this] def watchedBy: Set[ActorRef] = + Unsafe.instance.getObjectVolatile(this, watchedByOffset).asInstanceOf[Set[ActorRef]] @inline private[this] def updateWatchedBy(oldWatchedBy: Set[ActorRef], newWatchedBy: Set[ActorRef]): Boolean = @@ -554,11 +580,13 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide p = provider.tempPath() provider.registerTempActor(this, p) p - } finally { setState(p) } + } finally { + setState(p) + } } else path case p: ActorPath => p case StoppedWithPath(p) => p - case Stopped => + case Stopped => // even if we are already stopped we still need to produce a proper 
path updateState(Stopped, StoppedWithPath(provider.tempPath())) path @@ -569,12 +597,11 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide case Stopped | _: StoppedWithPath => provider.deadLetters ! message case _ => if (message == null) throw InvalidMessageException("Message is null") - if (!(result.tryComplete( - message match { - case Status.Success(r) => Success(r) - case Status.Failure(f) => Failure(f) - case other => Success(other) - }))) provider.deadLetters ! message + if (!(result.tryComplete(message match { + case Status.Success(r) => Success(r) + case Status.Failure(f) => Failure(f) + case other => Success(other) + }))) provider.deadLetters ! message } override def sendSystemMessage(message: SystemMessage): Unit = message match { @@ -584,7 +611,8 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide if (watchee == this && watcher != this) { if (!addWatcher(watcher)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - watcher.sendSystemMessage(DeathWatchNotification(watchee, existenceConfirmed = true, addressTerminated = false)) + watcher.sendSystemMessage( + DeathWatchNotification(watchee, existenceConfirmed = true, addressTerminated = false)) } else System.err.println("BUG: illegal Watch(%s,%s) for %s".format(watchee, watcher, this)) case Unwatch(watchee, watcher) => if (watchee == this && watcher != this) remWatcher(watcher) @@ -601,12 +629,13 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide @tailrec override def stop(): Unit = { def ensureCompleted(): Unit = { - result tryComplete ActorStopResult + result.tryComplete(ActorStopResult) val watchers = clearWatchers() if (!watchers.isEmpty) { - watchers foreach { watcher => + watchers.foreach { watcher => // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - watcher.asInstanceOf[InternalActorRef] + watcher + .asInstanceOf[InternalActorRef] 
.sendSystemMessage(DeathWatchNotification(this, existenceConfirmed = true, addressTerminated = false)) } } @@ -615,7 +644,10 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide case null => // if path was never queried nobody can possibly be watching us, so we don't have to publish termination either if (updateState(null, Stopped)) ensureCompleted() else stop() case p: ActorPath => - if (updateState(p, StoppedWithPath(p))) { try ensureCompleted() finally provider.unregisterTempActor(p) } else stop() + if (updateState(p, StoppedWithPath(p))) { + try ensureCompleted() + finally provider.unregisterTempActor(p) + } else stop() case Stopped | _: StoppedWithPath => // already stopped case Registering => stop() // spin until registration is completed before stopping } @@ -634,23 +666,31 @@ private[akka] object PromiseActorRef { private val ActorStopResult = Failure(ActorKilledException("Stopped")) private val defaultOnTimeout: String => Throwable = str => new AskTimeoutException(str) - def apply(provider: ActorRefProvider, timeout: Timeout, targetName: Any, messageClassName: String, - sender: ActorRef = Actor.noSender, onTimeout: String => Throwable = defaultOnTimeout): PromiseActorRef = { + def apply(provider: ActorRefProvider, + timeout: Timeout, + targetName: Any, + messageClassName: String, + sender: ActorRef = Actor.noSender, + onTimeout: String => Throwable = defaultOnTimeout): PromiseActorRef = { val result = Promise[Any]() val scheduler = provider.guardian.underlying.system.scheduler val a = new PromiseActorRef(provider, result, messageClassName) implicit val ec = a.internalCallingThreadExecutionContext val f = scheduler.scheduleOnce(timeout.duration) { - result tryComplete { + result.tryComplete { val wasSentBy = if (sender == ActorRef.noSender) "" else s" was sent by [$sender]" val messagePart = s"Message of type [${a.messageClassName}]$wasSentBy." 
Failure( - onTimeout(s"Ask timed out on [$targetName] after [${timeout.duration.toMillis} ms]. " + + onTimeout( + s"Ask timed out on [$targetName] after [${timeout.duration.toMillis} ms]. " + messagePart + " A typical reason for `AskTimeoutException` is that the recipient actor didn't send a reply.")) } } - result.future onComplete { _ => try a.stop() finally f.cancel() } + result.future.onComplete { _ => + try a.stop() + finally f.cancel() + } a } diff --git a/akka-actor/src/main/scala/akka/pattern/Backoff.scala b/akka-actor/src/main/scala/akka/pattern/Backoff.scala index f82d168d97..611016a3c9 100644 --- a/akka-actor/src/main/scala/akka/pattern/Backoff.scala +++ b/akka-actor/src/main/scala/akka/pattern/Backoff.scala @@ -17,6 +17,7 @@ import scala.concurrent.duration.{ Duration, FiniteDuration } @Deprecated @deprecated("Use new API from BackoffOpts object instead", since = "2.5.22") object Backoff { + /** * Back-off options for creating a back-off supervisor actor that expects a child actor to restart on failure. * @@ -68,14 +69,14 @@ object Backoff { * */ @deprecated("Use BackoffOpts.onFailure instead", "2.5.22") - def onFailure( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - maxNrOfRetries: Int): BackoffOptions = - BackoffOptionsImpl(RestartImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor).withMaxNrOfRetries(maxNrOfRetries) + def onFailure(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxNrOfRetries: Int): BackoffOptions = + BackoffOptionsImpl(RestartImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor) + .withMaxNrOfRetries(maxNrOfRetries) /** * Back-off options for creating a back-off supervisor actor that expects a child actor to restart on failure. @@ -124,12 +125,11 @@ object Backoff { * In order to skip this additional delay pass in `0`. 
*/ @deprecated("Use BackoffOpts.onFailure instead", "2.5.22") - def onFailure( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double): BackoffOptions = + def onFailure(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double): BackoffOptions = BackoffOptionsImpl(RestartImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor) /** @@ -183,13 +183,12 @@ object Backoff { */ @Deprecated @deprecated("Use BackoffOpts.onFailure instead", "2.5.22") - def onFailure( - childProps: Props, - childName: String, - minBackoff: java.time.Duration, - maxBackoff: java.time.Duration, - randomFactor: Double, - maxNrOfRetries: Int): BackoffOptions = + def onFailure(childProps: Props, + childName: String, + minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, + maxNrOfRetries: Int): BackoffOptions = onFailure(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, maxNrOfRetries) /** @@ -240,12 +239,11 @@ object Backoff { */ @Deprecated @deprecated("Use the overloaded one which accepts maxNrOfRetries instead.", "2.5.17") - def onFailure( - childProps: Props, - childName: String, - minBackoff: java.time.Duration, - maxBackoff: java.time.Duration, - randomFactor: Double): BackoffOptions = + def onFailure(childProps: Props, + childName: String, + minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double): BackoffOptions = onFailure(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, -1) /** @@ -305,14 +303,14 @@ object Backoff { * In order to restart infinitely pass in `-1`. 
*/ @deprecated("Use BackoffOpts.onStop instead", "2.5.22") - def onStop( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - maxNrOfRetries: Int): BackoffOptions = - BackoffOptionsImpl(StopImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor).withMaxNrOfRetries(maxNrOfRetries) + def onStop(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxNrOfRetries: Int): BackoffOptions = + BackoffOptionsImpl(StopImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor) + .withMaxNrOfRetries(maxNrOfRetries) /** * Back-off options for creating a back-off supervisor actor that expects a child actor to stop on failure. @@ -368,12 +366,11 @@ object Backoff { * In order to skip this additional delay pass in `0`. */ @deprecated("Use BackoffOpts.onStop instead", "2.5.22") - def onStop( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double): BackoffOptions = + def onStop(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double): BackoffOptions = BackoffOptionsImpl(StopImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor) /** @@ -434,13 +431,12 @@ object Backoff { */ @Deprecated @deprecated("Use BackoffOpts.onStop instead", "2.5.22") - def onStop( - childProps: Props, - childName: String, - minBackoff: java.time.Duration, - maxBackoff: java.time.Duration, - randomFactor: Double, - maxNrOfRetries: Int): BackoffOptions = + def onStop(childProps: Props, + childName: String, + minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, + maxNrOfRetries: Int): BackoffOptions = onStop(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, maxNrOfRetries) /** @@ -498,12 +494,11 @@ object 
Backoff { */ @Deprecated @deprecated("Use the overloaded one which accepts maxNrOfRetries instead.", "2.5.17") - def onStop( - childProps: Props, - childName: String, - minBackoff: java.time.Duration, - maxBackoff: java.time.Duration, - randomFactor: Double): BackoffOptions = + def onStop(childProps: Props, + childName: String, + minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double): BackoffOptions = onStop(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, -1) } @@ -562,27 +557,31 @@ trait BackoffOptions { private[akka] def props: Props } -private final case class BackoffOptionsImpl( - backoffType: BackoffType = RestartImpliesFailure, - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - reset: Option[BackoffReset] = None, - supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider), - replyWhileStopped: Option[Any] = None, - finalStopMessage: Option[Any => Boolean] = None -) extends akka.pattern.BackoffOptions { +private final case class BackoffOptionsImpl(backoffType: BackoffType = RestartImpliesFailure, + childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + reset: Option[BackoffReset] = None, + supervisorStrategy: OneForOneStrategy = + OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider), + replyWhileStopped: Option[Any] = None, + finalStopMessage: Option[Any => Boolean] = None) + extends akka.pattern.BackoffOptions { val backoffReset = reset.getOrElse(AutoReset(minBackoff)) def withAutoReset(resetBackoff: FiniteDuration) = copy(reset = Some(AutoReset(resetBackoff))) def withManualReset = copy(reset = Some(ManualReset)) def withSupervisorStrategy(supervisorStrategy: OneForOneStrategy) = copy(supervisorStrategy = supervisorStrategy) - def withDefaultStoppingStrategy = copy(supervisorStrategy = 
OneForOneStrategy(supervisorStrategy.maxNrOfRetries)(SupervisorStrategy.stoppingStrategy.decider)) + def withDefaultStoppingStrategy = + copy( + supervisorStrategy = + OneForOneStrategy(supervisorStrategy.maxNrOfRetries)(SupervisorStrategy.stoppingStrategy.decider)) def withReplyWhileStopped(replyWhileStopped: Any) = copy(replyWhileStopped = Some(replyWhileStopped)) - def withMaxNrOfRetries(maxNrOfRetries: Int) = copy(supervisorStrategy = supervisorStrategy.withMaxNrOfRetries(maxNrOfRetries)) + def withMaxNrOfRetries(maxNrOfRetries: Int) = + copy(supervisorStrategy = supervisorStrategy.withMaxNrOfRetries(maxNrOfRetries)) def withFinalStopMessage(action: Any => Boolean) = copy(finalStopMessage = Some(action)) def props = { @@ -598,10 +597,27 @@ private final case class BackoffOptionsImpl( backoffType match { //onFailure method in companion object case RestartImpliesFailure => - Props(new BackoffOnRestartSupervisor(childProps, childName, minBackoff, maxBackoff, backoffReset, randomFactor, supervisorStrategy, replyWhileStopped)) + Props( + new BackoffOnRestartSupervisor(childProps, + childName, + minBackoff, + maxBackoff, + backoffReset, + randomFactor, + supervisorStrategy, + replyWhileStopped)) //onStop method in companion object case StopImpliesFailure => - Props(new BackoffOnStopSupervisor(childProps, childName, minBackoff, maxBackoff, backoffReset, randomFactor, supervisorStrategy, replyWhileStopped, finalStopMessage)) + Props( + new BackoffOnStopSupervisor(childProps, + childName, + minBackoff, + maxBackoff, + backoffReset, + randomFactor, + supervisorStrategy, + replyWhileStopped, + finalStopMessage)) } } } diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala b/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala index f237479552..92098fe8b0 100644 --- a/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala +++ b/akka-actor/src/main/scala/akka/pattern/BackoffOptions.scala @@ -15,6 +15,7 @@ import scala.concurrent.duration.{ 
Duration, FiniteDuration } * Backoff options allow to specify a number of properties for backoff supervisors. */ object BackoffOpts { + /** * Back-off options for creating a back-off supervisor actor that expects a child actor to restart on failure. * @@ -61,12 +62,11 @@ object BackoffOpts { * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. * In order to skip this additional delay pass in `0`. */ - def onFailure( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double): BackoffOnFailureOptions = + def onFailure(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double): BackoffOnFailureOptions = BackoffOnFailureOptionsImpl(childProps, childName, minBackoff, maxBackoff, randomFactor) /** @@ -115,12 +115,11 @@ object BackoffOpts { * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. * In order to skip this additional delay pass in `0`. */ - def onFailure( - childProps: Props, - childName: String, - minBackoff: java.time.Duration, - maxBackoff: java.time.Duration, - randomFactor: Double): BackoffOnFailureOptions = + def onFailure(childProps: Props, + childName: String, + minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double): BackoffOnFailureOptions = onFailure(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor) /** @@ -176,12 +175,11 @@ object BackoffOpts { * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. * In order to skip this additional delay pass in `0`. 
*/ - def onStop( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double): BackoffOnStopOptions = + def onStop(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double): BackoffOnStopOptions = BackoffOnStopOptionsImpl(childProps, childName, minBackoff, maxBackoff, randomFactor) /** @@ -237,12 +235,11 @@ object BackoffOpts { * random delay based on this factor is added, e.g. `0.2` adds up to `20%` delay. * In order to skip this additional delay pass in `0`. */ - def onStop( - childProps: Props, - childName: String, - minBackoff: java.time.Duration, - maxBackoff: java.time.Duration, - randomFactor: Double): BackoffOnStopOptions = + def onStop(childProps: Props, + childName: String, + minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double): BackoffOnStopOptions = onStop(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor) } @@ -325,17 +322,17 @@ sealed trait BackoffOnStopOptions extends ExtendedBackoffOptions[BackoffOnStopOp @DoNotInherit sealed trait BackoffOnFailureOptions extends ExtendedBackoffOptions[BackoffOnFailureOptions] -private final case class BackoffOnStopOptionsImpl[T]( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - reset: Option[BackoffReset] = None, - supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider), - replyWhileStopped: Option[Any] = None, - finalStopMessage: Option[Any => Boolean] = None -) extends BackoffOnStopOptions { +private final case class BackoffOnStopOptionsImpl[T](childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + reset: Option[BackoffReset] = None, + supervisorStrategy: OneForOneStrategy = + 
OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider), + replyWhileStopped: Option[Any] = None, + finalStopMessage: Option[Any => Boolean] = None) + extends BackoffOnStopOptions { private val backoffReset = reset.getOrElse(AutoReset(minBackoff)) @@ -344,10 +341,14 @@ private final case class BackoffOnStopOptionsImpl[T]( def withManualReset = copy(reset = Some(ManualReset)) def withSupervisorStrategy(supervisorStrategy: OneForOneStrategy) = copy(supervisorStrategy = supervisorStrategy) def withReplyWhileStopped(replyWhileStopped: Any) = copy(replyWhileStopped = Some(replyWhileStopped)) - def withMaxNrOfRetries(maxNrOfRetries: Int) = copy(supervisorStrategy = supervisorStrategy.withMaxNrOfRetries(maxNrOfRetries)) + def withMaxNrOfRetries(maxNrOfRetries: Int) = + copy(supervisorStrategy = supervisorStrategy.withMaxNrOfRetries(maxNrOfRetries)) // additional - def withDefaultStoppingStrategy = copy(supervisorStrategy = OneForOneStrategy(supervisorStrategy.maxNrOfRetries)(SupervisorStrategy.stoppingStrategy.decider)) + def withDefaultStoppingStrategy = + copy( + supervisorStrategy = + OneForOneStrategy(supervisorStrategy.maxNrOfRetries)(SupervisorStrategy.stoppingStrategy.decider)) def withFinalStopMessage(action: Any => Boolean) = copy(finalStopMessage = Some(action)) def props: Props = { @@ -360,20 +361,29 @@ private final case class BackoffOnStopOptionsImpl[T]( case _ => // ignore } - Props(new BackoffOnStopSupervisor(childProps, childName, minBackoff, maxBackoff, backoffReset, randomFactor, supervisorStrategy, replyWhileStopped, finalStopMessage)) + Props( + new BackoffOnStopSupervisor(childProps, + childName, + minBackoff, + maxBackoff, + backoffReset, + randomFactor, + supervisorStrategy, + replyWhileStopped, + finalStopMessage)) } } private final case class BackoffOnFailureOptionsImpl[T]( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - reset: Option[BackoffReset] = None, - 
supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider), - replyWhileStopped: Option[Any] = None -) extends BackoffOnFailureOptions { + childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + reset: Option[BackoffReset] = None, + supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider), + replyWhileStopped: Option[Any] = None) + extends BackoffOnFailureOptions { private val backoffReset = reset.getOrElse(AutoReset(minBackoff)) @@ -382,7 +392,8 @@ private final case class BackoffOnFailureOptionsImpl[T]( def withManualReset = copy(reset = Some(ManualReset)) def withSupervisorStrategy(supervisorStrategy: OneForOneStrategy) = copy(supervisorStrategy = supervisorStrategy) def withReplyWhileStopped(replyWhileStopped: Any) = copy(replyWhileStopped = Some(replyWhileStopped)) - def withMaxNrOfRetries(maxNrOfRetries: Int) = copy(supervisorStrategy = supervisorStrategy.withMaxNrOfRetries(maxNrOfRetries)) + def withMaxNrOfRetries(maxNrOfRetries: Int) = + copy(supervisorStrategy = supervisorStrategy.withMaxNrOfRetries(maxNrOfRetries)) def props: Props = { require(minBackoff > Duration.Zero, "minBackoff must be > 0") @@ -394,7 +405,15 @@ private final case class BackoffOnFailureOptionsImpl[T]( case _ => // ignore } - Props(new BackoffOnRestartSupervisor(childProps, childName, minBackoff, maxBackoff, backoffReset, randomFactor, supervisorStrategy, replyWhileStopped)) + Props( + new BackoffOnRestartSupervisor(childProps, + childName, + minBackoff, + maxBackoff, + backoffReset, + randomFactor, + supervisorStrategy, + replyWhileStopped)) } } diff --git a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala index be16c27995..1700d0c235 100644 --- a/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala +++ 
b/akka-actor/src/main/scala/akka/pattern/BackoffSupervisor.scala @@ -34,13 +34,17 @@ object BackoffSupervisor { * In order to skip this additional delay pass in `0`. */ @deprecated("Use props with BackoffOpts instead", since = "2.5.22") - def props( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double): Props = { - propsWithSupervisorStrategy(childProps, childName, minBackoff, maxBackoff, randomFactor, SupervisorStrategy.defaultStrategy) + def props(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double): Props = { + propsWithSupervisorStrategy(childProps, + childName, + minBackoff, + maxBackoff, + randomFactor, + SupervisorStrategy.defaultStrategy) } /** @@ -64,13 +68,12 @@ object BackoffSupervisor { * In order to restart infinitely pass in `-1`. */ @deprecated("Use props with BackoffOpts instead", since = "2.5.22") - def props( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - maxNrOfRetries: Int): Props = { + def props(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxNrOfRetries: Int): Props = { val supervisionStrategy = SupervisorStrategy.defaultStrategy match { case oneForOne: OneForOneStrategy => oneForOne.withMaxNrOfRetries(maxNrOfRetries) case s => s @@ -96,12 +99,11 @@ object BackoffSupervisor { * In order to skip this additional delay pass in `0`. 
*/ @deprecated("Use props with BackoffOpts instead", since = "2.5.22") - def props( - childProps: Props, - childName: String, - minBackoff: java.time.Duration, - maxBackoff: java.time.Duration, - randomFactor: Double): Props = { + def props(childProps: Props, + childName: String, + minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double): Props = { props(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor) } @@ -126,13 +128,12 @@ object BackoffSupervisor { * In order to restart infinitely pass in `-1`. */ @deprecated("Use props with BackoffOpts instead", since = "2.5.22") - def props( - childProps: Props, - childName: String, - minBackoff: java.time.Duration, - maxBackoff: java.time.Duration, - randomFactor: Double, - maxNrOfRetries: Int): Props = { + def props(childProps: Props, + childName: String, + minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, + maxNrOfRetries: Int): Props = { props(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, maxNrOfRetries) } @@ -158,17 +159,25 @@ object BackoffSupervisor { * backoff process, only a [[OneForOneStrategy]] makes sense here. 
*/ @deprecated("Use props with BackoffOpts instead", since = "2.5.22") - def propsWithSupervisorStrategy( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - strategy: SupervisorStrategy): Props = { + def propsWithSupervisorStrategy(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + strategy: SupervisorStrategy): Props = { require(minBackoff > Duration.Zero, "minBackoff must be > 0") require(maxBackoff >= minBackoff, "maxBackoff must be >= minBackoff") require(0.0 <= randomFactor && randomFactor <= 1.0, "randomFactor must be between 0.0 and 1.0") - Props(new BackoffOnStopSupervisor(childProps, childName, minBackoff, maxBackoff, AutoReset(minBackoff), randomFactor, strategy, None, None)) + Props( + new BackoffOnStopSupervisor(childProps, + childName, + minBackoff, + maxBackoff, + AutoReset(minBackoff), + randomFactor, + strategy, + None, + None)) } /** @@ -193,13 +202,12 @@ object BackoffSupervisor { * backoff process, only a [[OneForOneStrategy]] makes sense here. */ @deprecated("Use props with BackoffOpts instead", since = "2.5.22") - def propsWithSupervisorStrategy( - childProps: Props, - childName: String, - minBackoff: java.time.Duration, - maxBackoff: java.time.Duration, - randomFactor: Double, - strategy: SupervisorStrategy): Props = { + def propsWithSupervisorStrategy(childProps: Props, + childName: String, + minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, + strategy: SupervisorStrategy): Props = { propsWithSupervisorStrategy(childProps, childName, minBackoff.asScala, maxBackoff.asScala, randomFactor, strategy) } @@ -242,6 +250,7 @@ object BackoffSupervisor { * [[BackoffSupervisor.CurrentChild]] containing the `ActorRef` of the current child, if any. 
*/ final case class CurrentChild(ref: Option[ActorRef]) { + /** * Java API: The `ActorRef` of the current child, if any */ @@ -285,11 +294,10 @@ object BackoffSupervisor { * * Calculates an exponential back off delay. */ - private[akka] def calculateDelay( - restartCount: Int, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double): FiniteDuration = { + private[akka] def calculateDelay(restartCount: Int, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double): FiniteDuration = { val rnd = 1.0 + ThreadLocalRandom.current().nextDouble() * randomFactor val calculatedDuration = Try(maxBackoff.min(minBackoff * math.pow(2, restartCount)) * rnd).getOrElse(maxBackoff) calculatedDuration match { @@ -301,45 +309,58 @@ object BackoffSupervisor { // for backwards compability @deprecated("Use `BackoffSupervisor.props` method instead", since = "2.5.22") -final class BackoffSupervisor( - override val childProps: Props, - override val childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - override val reset: BackoffReset, - randomFactor: Double, - strategy: SupervisorStrategy, - val replyWhileStopped: Option[Any], - val finalStopMessage: Option[Any => Boolean]) - extends BackoffOnStopSupervisor(childProps, childName, minBackoff, maxBackoff, reset, randomFactor, strategy, replyWhileStopped, finalStopMessage) { +final class BackoffSupervisor(override val childProps: Props, + override val childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + override val reset: BackoffReset, + randomFactor: Double, + strategy: SupervisorStrategy, + val replyWhileStopped: Option[Any], + val finalStopMessage: Option[Any => Boolean]) + extends BackoffOnStopSupervisor(childProps, + childName, + minBackoff, + maxBackoff, + reset, + randomFactor, + strategy, + replyWhileStopped, + finalStopMessage) { // for binary compatibility with 2.5.18 - def this( - childProps: Props, - childName: String, - 
minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - reset: BackoffReset, - randomFactor: Double, - strategy: SupervisorStrategy, - replyWhileStopped: Option[Any]) = this(childProps, childName, minBackoff, maxBackoff, reset, randomFactor, strategy, replyWhileStopped, None) + def this(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + reset: BackoffReset, + randomFactor: Double, + strategy: SupervisorStrategy, + replyWhileStopped: Option[Any]) = + this(childProps, childName, minBackoff, maxBackoff, reset, randomFactor, strategy, replyWhileStopped, None) // for binary compatibility with 2.4.1 - def this( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - supervisorStrategy: SupervisorStrategy) = - this(childProps, childName, minBackoff, maxBackoff, AutoReset(minBackoff), randomFactor, supervisorStrategy, None, None) + def this(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + supervisorStrategy: SupervisorStrategy) = + this(childProps, + childName, + minBackoff, + maxBackoff, + AutoReset(minBackoff), + randomFactor, + supervisorStrategy, + None, + None) // for binary compatibility with 2.4.0 - def this( - childProps: Props, - childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double) = + def this(childProps: Props, + childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double) = this(childProps, childName, minBackoff, maxBackoff, randomFactor, SupervisorStrategy.defaultStrategy) } diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala index cc016bfec6..eb5fb23852 100644 --- a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala +++ 
b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala @@ -43,7 +43,10 @@ object CircuitBreaker { * @param callTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to consider a call a failure * @param resetTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to attempt to close the circuit */ - def apply(scheduler: Scheduler, maxFailures: Int, callTimeout: FiniteDuration, resetTimeout: FiniteDuration): CircuitBreaker = + def apply(scheduler: Scheduler, + maxFailures: Int, + callTimeout: FiniteDuration, + resetTimeout: FiniteDuration): CircuitBreaker = new CircuitBreaker(scheduler, maxFailures, callTimeout, resetTimeout)(sameThreadExecutionContext) /** @@ -59,7 +62,10 @@ object CircuitBreaker { * @param resetTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to attempt to close the circuit */ @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def create(scheduler: Scheduler, maxFailures: Int, callTimeout: FiniteDuration, resetTimeout: FiniteDuration): CircuitBreaker = + def create(scheduler: Scheduler, + maxFailures: Int, + callTimeout: FiniteDuration, + resetTimeout: FiniteDuration): CircuitBreaker = apply(scheduler, maxFailures, callTimeout, resetTimeout) /** @@ -74,7 +80,10 @@ object CircuitBreaker { * @param callTimeout [[java.time.Duration]] of time after which to consider a call a failure * @param resetTimeout [[java.time.Duration]] of time after which to attempt to close the circuit */ - def create(scheduler: Scheduler, maxFailures: Int, callTimeout: java.time.Duration, resetTimeout: java.time.Duration): CircuitBreaker = + def create(scheduler: Scheduler, + maxFailures: Int, + callTimeout: java.time.Duration, + resetTimeout: java.time.Duration): CircuitBreaker = apply(scheduler, maxFailures, callTimeout.asScala, resetTimeout.asScala) private val exceptionAsFailure: Try[_] => Boolean = { @@ -92,7 +101,8 @@ object CircuitBreaker { } } - 
protected def convertJavaFailureFnToScala[T](javaFn: BiFunction[Optional[T], Optional[Throwable], java.lang.Boolean]): Try[T] => Boolean = { + protected def convertJavaFailureFnToScala[T]( + javaFn: BiFunction[Optional[T], Optional[Throwable], java.lang.Boolean]): Try[T] => Boolean = { val failureFnInScala: Try[T] => Boolean = { case Success(t) => javaFn(Optional.of(t), Optional.empty()) case Failure(err) => javaFn(Optional.empty(), Optional.of(err)) @@ -120,27 +130,36 @@ object CircuitBreaker { * @param resetTimeout [[scala.concurrent.duration.FiniteDuration]] of time after which to attempt to close the circuit * @param executor [[scala.concurrent.ExecutionContext]] used for execution of state transition listeners */ -class CircuitBreaker( - scheduler: Scheduler, - maxFailures: Int, - callTimeout: FiniteDuration, - val resetTimeout: FiniteDuration, - maxResetTimeout: FiniteDuration, - exponentialBackoffFactor: Double)(implicit executor: ExecutionContext) extends AbstractCircuitBreaker { +class CircuitBreaker(scheduler: Scheduler, + maxFailures: Int, + callTimeout: FiniteDuration, + val resetTimeout: FiniteDuration, + maxResetTimeout: FiniteDuration, + exponentialBackoffFactor: Double)(implicit executor: ExecutionContext) + extends AbstractCircuitBreaker { require(exponentialBackoffFactor >= 1.0, "factor must be >= 1.0") @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def this(executor: ExecutionContext, scheduler: Scheduler, maxFailures: Int, callTimeout: FiniteDuration, resetTimeout: FiniteDuration) = { + def this(executor: ExecutionContext, + scheduler: Scheduler, + maxFailures: Int, + callTimeout: FiniteDuration, + resetTimeout: FiniteDuration) = { this(scheduler, maxFailures, callTimeout, resetTimeout, 36500.days, 1.0)(executor) } - def this(executor: ExecutionContext, scheduler: Scheduler, maxFailures: Int, callTimeout: java.time.Duration, resetTimeout: java.time.Duration) = { + def this(executor: 
ExecutionContext, + scheduler: Scheduler, + maxFailures: Int, + callTimeout: java.time.Duration, + resetTimeout: java.time.Duration) = { this(scheduler, maxFailures, callTimeout.asScala, resetTimeout.asScala, 36500.days, 1.0)(executor) } // add the old constructor to make it binary compatible - def this(scheduler: Scheduler, maxFailures: Int, callTimeout: FiniteDuration, resetTimeout: FiniteDuration)(implicit executor: ExecutionContext) = { + def this(scheduler: Scheduler, maxFailures: Int, callTimeout: FiniteDuration, resetTimeout: FiniteDuration)( + implicit executor: ExecutionContext) = { this(scheduler, maxFailures, callTimeout, resetTimeout, 36500.days, 1.0)(executor) } @@ -201,7 +220,10 @@ class CircuitBreaker( */ @inline private[this] def swapResetTimeout(oldResetTimeout: FiniteDuration, newResetTimeout: FiniteDuration): Boolean = - Unsafe.instance.compareAndSwapObject(this, AbstractCircuitBreaker.resetTimeoutOffset, oldResetTimeout, newResetTimeout) + Unsafe.instance.compareAndSwapObject(this, + AbstractCircuitBreaker.resetTimeoutOffset, + oldResetTimeout, + newResetTimeout) /** * Helper method for accessing to the underlying resetTimeout via Unsafe @@ -229,7 +251,8 @@ class CircuitBreaker( * `scala.concurrent.TimeoutException` if the call timed out * */ - def withCircuitBreaker[T](body: => Future[T]): Future[T] = currentState.invoke(body, CircuitBreaker.exceptionAsFailure) + def withCircuitBreaker[T](body: => Future[T]): Future[T] = + currentState.invoke(body, CircuitBreaker.exceptionAsFailure) /** * Java API for [[#withCircuitBreaker]] @@ -249,7 +272,9 @@ class CircuitBreaker( * @return [[scala.concurrent.Future]] containing the call result or a * `scala.concurrent.TimeoutException` if the call timed out */ - def callWithCircuitBreaker[T](body: Callable[Future[T]], defineFailureFn: BiFunction[Optional[T], Optional[Throwable], java.lang.Boolean]): Future[T] = { + def callWithCircuitBreaker[T]( + body: Callable[Future[T]], + defineFailureFn: 
BiFunction[Optional[T], Optional[Throwable], java.lang.Boolean]): Future[T] = { val failureFnInScala = CircuitBreaker.convertJavaFailureFnToScala(defineFailureFn) withCircuitBreaker(body.call, failureFnInScala) @@ -274,8 +299,8 @@ class CircuitBreaker( * `scala.concurrent.TimeoutException` if the call timed out */ def callWithCircuitBreakerCS[T]( - body: Callable[CompletionStage[T]], - defineFailureFn: BiFunction[Optional[T], Optional[Throwable], java.lang.Boolean]): CompletionStage[T] = + body: Callable[CompletionStage[T]], + defineFailureFn: BiFunction[Optional[T], Optional[Throwable], java.lang.Boolean]): CompletionStage[T] = FutureConverters.toJava[T](callWithCircuitBreaker(new Callable[Future[T]] { override def call(): Future[T] = FutureConverters.toScala(body.call()) }, defineFailureFn)) @@ -309,11 +334,10 @@ class CircuitBreaker( * @return The result of the call */ def withSyncCircuitBreaker[T](body: => T, defineFailureFn: Try[T] => Boolean): T = - Await.result( - withCircuitBreaker( - try Future.successful(body) catch { case NonFatal(t) => Future.failed(t) }, - defineFailureFn), - callTimeout) + Await.result(withCircuitBreaker(try Future.successful(body) + catch { case NonFatal(t) => Future.failed(t) }, + defineFailureFn), + callTimeout) /** * Java API for [[#withSyncCircuitBreaker]]. Throws [[java.util.concurrent.TimeoutException]] if the call timed out. 
@@ -331,7 +355,9 @@ class CircuitBreaker( * @param defineFailureFn function that define what should be consider failure and thus increase failure count * @return The result of the call */ - def callWithSyncCircuitBreaker[T](body: Callable[T], defineFailureFn: BiFunction[Optional[T], Optional[Throwable], java.lang.Boolean]): T = { + def callWithSyncCircuitBreaker[T]( + body: Callable[T], + defineFailureFn: BiFunction[Optional[T], Optional[Throwable], java.lang.Boolean]): T = { val failureFnInScala = CircuitBreaker.convertJavaFailureFnToScala(defineFailureFn) withSyncCircuitBreaker(body.call, failureFnInScala) } @@ -410,7 +436,7 @@ class CircuitBreaker( * @return CircuitBreaker for fluent usage */ def addOnOpenListener(callback: Runnable): CircuitBreaker = { - Open addListener callback + Open.addListener(callback) this } @@ -439,7 +465,7 @@ class CircuitBreaker( * @return CircuitBreaker for fluent usage */ def addOnHalfOpenListener(callback: Runnable): CircuitBreaker = { - HalfOpen addListener callback + HalfOpen.addListener(callback) this } @@ -469,7 +495,7 @@ class CircuitBreaker( * @return CircuitBreaker for fluent usage */ def addOnCloseListener(callback: Runnable): CircuitBreaker = { - Closed addListener callback + Closed.addListener(callback) this } @@ -481,9 +507,10 @@ class CircuitBreaker( * @param callback Handler to be invoked on successful call, where passed value is elapsed time in nanoseconds. 
* @return CircuitBreaker for fluent usage */ - def onCallSuccess(callback: Long => Unit): CircuitBreaker = addOnCallSuccessListener(new Consumer[Long] { - def accept(result: Long): Unit = callback(result) - }) + def onCallSuccess(callback: Long => Unit): CircuitBreaker = + addOnCallSuccessListener(new Consumer[Long] { + def accept(result: Long): Unit = callback(result) + }) /** * JavaAPI for onCallSuccess @@ -492,7 +519,7 @@ class CircuitBreaker( * @return CircuitBreaker for fluent usage */ def addOnCallSuccessListener(callback: Consumer[Long]): CircuitBreaker = { - successListeners add callback + successListeners.add(callback) this } @@ -504,9 +531,10 @@ class CircuitBreaker( * @param callback Handler to be invoked on failed call, where passed value is elapsed time in nanoseconds. * @return CircuitBreaker for fluent usage */ - def onCallFailure(callback: Long => Unit): CircuitBreaker = addOnCallFailureListener(new Consumer[Long] { - def accept(result: Long): Unit = callback(result) - }) + def onCallFailure(callback: Long => Unit): CircuitBreaker = + addOnCallFailureListener(new Consumer[Long] { + def accept(result: Long): Unit = callback(result) + }) /** * JavaAPI for onCallFailure @@ -515,7 +543,7 @@ class CircuitBreaker( * @return CircuitBreaker for fluent usage */ def addOnCallFailureListener(callback: Consumer[Long]): CircuitBreaker = { - callFailureListeners add callback + callFailureListeners.add(callback) this } @@ -527,9 +555,10 @@ class CircuitBreaker( * @param callback Handler to be invoked on call finished with timeout, where passed value is elapsed time in nanoseconds. 
* @return CircuitBreaker for fluent usage */ - def onCallTimeout(callback: Long => Unit): CircuitBreaker = addOnCallTimeoutListener(new Consumer[Long] { - def accept(result: Long): Unit = callback(result) - }) + def onCallTimeout(callback: Long => Unit): CircuitBreaker = + addOnCallTimeoutListener(new Consumer[Long] { + def accept(result: Long): Unit = callback(result) + }) /** * JavaAPI for onCallTimeout @@ -538,7 +567,7 @@ class CircuitBreaker( * @return CircuitBreaker for fluent usage */ def addOnCallTimeoutListener(callback: Consumer[Long]): CircuitBreaker = { - callTimeoutListeners add callback + callTimeoutListeners.add(callback) this } @@ -550,7 +579,8 @@ class CircuitBreaker( * @param callback Handler to be invoked on call failed due to open breaker. * @return CircuitBreaker for fluent usage */ - def onCallBreakerOpen(callback: => Unit): CircuitBreaker = addOnCallBreakerOpenListener(new Runnable { def run = callback }) + def onCallBreakerOpen(callback: => Unit): CircuitBreaker = + addOnCallBreakerOpenListener(new Runnable { def run = callback }) /** * JavaAPI for onCallBreakerOpen. 
@@ -559,7 +589,7 @@ class CircuitBreaker( * @return CircuitBreaker for fluent usage */ def addOnCallBreakerOpenListener(callback: Runnable): CircuitBreaker = { - callBreakerOpenListeners add callback + callBreakerOpenListeners.add(callback) this } @@ -681,7 +711,7 @@ class CircuitBreaker( * * @param listener listener implementation */ - def addListener(listener: Runnable): Unit = listeners add listener + def addListener(listener: Runnable): Unit = listeners.add(listener) /** * Test for whether listeners exist @@ -715,7 +745,9 @@ class CircuitBreaker( */ def callThrough[T](body: => Future[T], defineFailureFn: Try[T] => Boolean): Future[T] = { - def materialize[U](value: => Future[U]): Future[U] = try value catch { case NonFatal(t) => Future.failed(t) } + def materialize[U](value: => Future[U]): Future[U] = + try value + catch { case NonFatal(t) => Future.failed(t) } if (callTimeout == Duration.Zero) { val start = System.nanoTime() @@ -747,7 +779,7 @@ class CircuitBreaker( } val timeout = scheduler.scheduleOnce(callTimeout) { - if (p tryFailure timeoutEx) { + if (p.tryFailure(timeoutEx)) { notifyCallTimeoutListeners(start) } } @@ -996,7 +1028,7 @@ class CircuitBreaker( * currently in half-open state. 
* @param message Defaults to "Circuit Breaker is open; calls are failing fast" */ -class CircuitBreakerOpenException( - val remainingDuration: FiniteDuration, - message: String = "Circuit Breaker is open; calls are failing fast") - extends AkkaException(message) with NoStackTrace +class CircuitBreakerOpenException(val remainingDuration: FiniteDuration, + message: String = "Circuit Breaker is open; calls are failing fast") + extends AkkaException(message) + with NoStackTrace diff --git a/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala b/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala index 99b7c003fa..d47db28d4f 100644 --- a/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala @@ -4,7 +4,7 @@ package akka.pattern -import scala.concurrent.{ ExecutionContext, Promise, Future } +import scala.concurrent.{ ExecutionContext, Future, Promise } import akka.actor._ import scala.util.control.NonFatal import scala.concurrent.duration.FiniteDuration @@ -14,16 +14,24 @@ import akka.dispatch.Futures import java.util.function.BiConsumer trait FutureTimeoutSupport { + /** * Returns a [[scala.concurrent.Future]] that will be completed with the success or failure of the provided value * after the specified duration. 
*/ - def after[T](duration: FiniteDuration, using: Scheduler)(value: => Future[T])(implicit ec: ExecutionContext): Future[T] = + def after[T](duration: FiniteDuration, using: Scheduler)(value: => Future[T])( + implicit ec: ExecutionContext): Future[T] = if (duration.isFinite && duration.length < 1) { - try value catch { case NonFatal(t) => Future.failed(t) } + try value + catch { case NonFatal(t) => Future.failed(t) } } else { val p = Promise[T]() - using.scheduleOnce(duration) { p completeWith { try value catch { case NonFatal(t) => Future.failed(t) } } } + using.scheduleOnce(duration) { + p.completeWith { + try value + catch { case NonFatal(t) => Future.failed(t) } + } + } p.future } @@ -31,9 +39,11 @@ trait FutureTimeoutSupport { * Returns a [[java.util.concurrent.CompletionStage]] that will be completed with the success or failure of the provided value * after the specified duration. */ - def afterCompletionStage[T](duration: FiniteDuration, using: Scheduler)(value: => CompletionStage[T])(implicit ec: ExecutionContext): CompletionStage[T] = + def afterCompletionStage[T](duration: FiniteDuration, using: Scheduler)(value: => CompletionStage[T])( + implicit ec: ExecutionContext): CompletionStage[T] = if (duration.isFinite && duration.length < 1) { - try value catch { case NonFatal(t) => Futures.failedCompletionStage(t) } + try value + catch { case NonFatal(t) => Futures.failedCompletionStage(t) } } else { val p = new CompletableFuture[T] using.scheduleOnce(duration) { diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 082f687e64..e192849e46 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -11,6 +11,7 @@ import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration trait GracefulStopSupport { + /** * Returns a [[scala.concurrent.Future]] that will 
be completed with success (value `true`) when * existing messages of the target actor has been processed and the actor has been @@ -48,11 +49,9 @@ trait GracefulStopSupport { val ref = PromiseActorRef(internalTarget.provider, Timeout(timeout), target, stopMessage.getClass.getName) internalTarget.sendSystemMessage(Watch(internalTarget, ref)) target.tell(stopMessage, Actor.noSender) - ref.result.future.transform( - { - case Terminated(t) if t.path == target.path => true - case _ => { internalTarget.sendSystemMessage(Unwatch(target, ref)); false } - }, - t => { internalTarget.sendSystemMessage(Unwatch(target, ref)); t })(ref.internalCallingThreadExecutionContext) + ref.result.future.transform({ + case Terminated(t) if t.path == target.path => true + case _ => { internalTarget.sendSystemMessage(Unwatch(target, ref)); false } + }, t => { internalTarget.sendSystemMessage(Unwatch(target, ref)); t })(ref.internalCallingThreadExecutionContext) } } diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index dc1ca06420..06ca1df74e 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -61,7 +61,8 @@ object Patterns { * }); * }}} */ - def ask(actor: ActorRef, message: Any, timeout: Timeout): Future[AnyRef] = scalaAsk(actor, message)(timeout).asInstanceOf[Future[AnyRef]] + def ask(actor: ActorRef, message: Any, timeout: Timeout): Future[AnyRef] = + scalaAsk(actor, message)(timeout).asInstanceOf[Future[AnyRef]] /** * Java API for `akka.pattern.ask`: @@ -122,7 +123,9 @@ object Patterns { * @param messageFactory function taking an actor ref and returning the message to be sent * @param timeout the timeout for the response before failing the returned completion stage */ - def askWithReplyTo(actor: ActorRef, messageFactory: japi.function.Function[ActorRef, Any], timeout: java.time.Duration): CompletionStage[AnyRef] = + def 
askWithReplyTo(actor: ActorRef, + messageFactory: japi.function.Function[ActorRef, Any], + timeout: java.time.Duration): CompletionStage[AnyRef] = extended.ask(actor, messageFactory.apply _)(Timeout.create(timeout)).toJava.asInstanceOf[CompletionStage[AnyRef]] /** @@ -170,7 +173,9 @@ object Patterns { * timeout); * }}} */ - def askWithReplyTo(actor: ActorRef, messageFactory: japi.Function[ActorRef, Any], timeoutMillis: Long): Future[AnyRef] = + def askWithReplyTo(actor: ActorRef, + messageFactory: japi.Function[ActorRef, Any], + timeoutMillis: Long): Future[AnyRef] = extended.ask(actor, messageFactory.apply _)(Timeout(timeoutMillis.millis)).asInstanceOf[Future[AnyRef]] /** @@ -282,7 +287,9 @@ object Patterns { * timeout); * }}} */ - def askWithReplyTo(selection: ActorSelection, messageFactory: japi.Function[ActorRef, Any], timeoutMillis: Long): Future[AnyRef] = + def askWithReplyTo(selection: ActorSelection, + messageFactory: japi.Function[ActorRef, Any], + timeoutMillis: Long): Future[AnyRef] = extended.ask(selection, messageFactory.apply _)(Timeout(timeoutMillis.millis)).asInstanceOf[Future[AnyRef]] /** @@ -296,7 +303,9 @@ object Patterns { * timeout); * }}} */ - def askWithReplyTo(selection: ActorSelection, messageFactory: japi.Function[ActorRef, Any], timeout: java.time.Duration): CompletionStage[AnyRef] = + def askWithReplyTo(selection: ActorSelection, + messageFactory: japi.Function[ActorRef, Any], + timeout: java.time.Duration): CompletionStage[AnyRef] = extended.ask(selection, messageFactory.apply _)(timeout.asScala).toJava.asInstanceOf[CompletionStage[AnyRef]] /** @@ -335,7 +344,8 @@ object Patterns { * Patterns.pipe(transformed, context).to(nextActor); * }}} */ - def pipe[T](future: CompletionStage[T], context: ExecutionContext): PipeableCompletionStage[T] = pipeCompletionStage(future)(context) + def pipe[T](future: CompletionStage[T], context: ExecutionContext): PipeableCompletionStage[T] = + pipeCompletionStage(future)(context) /** * Returns a 
[[scala.concurrent.Future]] that will be completed with success (value `true`) when @@ -392,21 +402,29 @@ object Patterns { * If the target actor isn't terminated within the timeout the [[java.util.concurrent.CompletionStage]] * is completed with failure [[akka.pattern.AskTimeoutException]]. */ - def gracefulStop(target: ActorRef, timeout: java.time.Duration, stopMessage: Any): CompletionStage[java.lang.Boolean] = + def gracefulStop(target: ActorRef, + timeout: java.time.Duration, + stopMessage: Any): CompletionStage[java.lang.Boolean] = scalaGracefulStop(target, timeout.asScala, stopMessage).toJava.asInstanceOf[CompletionStage[java.lang.Boolean]] /** * Returns a [[scala.concurrent.Future]] that will be completed with the success or failure of the provided Callable * after the specified duration. */ - def after[T](duration: FiniteDuration, scheduler: Scheduler, context: ExecutionContext, value: Callable[Future[T]]): Future[T] = + def after[T](duration: FiniteDuration, + scheduler: Scheduler, + context: ExecutionContext, + value: Callable[Future[T]]): Future[T] = scalaAfter(duration, scheduler)(value.call())(context) /** * Returns a [[java.util.concurrent.CompletionStage]] that will be completed with the success or failure of the provided Callable * after the specified duration. */ - def after[T](duration: java.time.Duration, scheduler: Scheduler, context: ExecutionContext, value: Callable[CompletionStage[T]]): CompletionStage[T] = + def after[T](duration: java.time.Duration, + scheduler: Scheduler, + context: ExecutionContext, + value: Callable[CompletionStage[T]]): CompletionStage[T] = afterCompletionStage(duration.asScala, scheduler)(value.call())(context) /** @@ -420,7 +438,10 @@ object Patterns { * Returns a [[java.util.concurrent.CompletionStage]] that will be completed with the success or failure of the provided value * after the specified duration. 
*/ - def after[T](duration: java.time.Duration, scheduler: Scheduler, context: ExecutionContext, value: CompletionStage[T]): CompletionStage[T] = + def after[T](duration: java.time.Duration, + scheduler: Scheduler, + context: ExecutionContext, + value: CompletionStage[T]): CompletionStage[T] = afterCompletionStage(duration.asScala, scheduler)(value)(context) /** @@ -431,7 +452,10 @@ object Patterns { * Note that the attempt function will be invoked on the given execution context for subsequent tries and * therefore must be thread safe (not touch unsafe mutable state). */ - def retry[T](attempt: Callable[Future[T]], attempts: Int, delay: FiniteDuration, scheduler: Scheduler, + def retry[T](attempt: Callable[Future[T]], + attempts: Int, + delay: FiniteDuration, + scheduler: Scheduler, context: ExecutionContext): Future[T] = scalaRetry(() => attempt.call, attempts, delay)(context, scheduler) @@ -443,7 +467,11 @@ object Patterns { * Note that the attempt function will be invoked on the given execution context for subsequent tries * and therefore must be thread safe (not touch unsafe mutable state). 
*/ - def retry[T](attempt: Callable[CompletionStage[T]], attempts: Int, delay: java.time.Duration, scheduler: Scheduler, ec: ExecutionContext): CompletionStage[T] = + def retry[T](attempt: Callable[CompletionStage[T]], + attempts: Int, + delay: java.time.Duration, + scheduler: Scheduler, + ec: ExecutionContext): CompletionStage[T] = scalaRetry(() => attempt.call().toScala, attempts, delay.asScala)(ec, scheduler).toJava } @@ -539,7 +567,9 @@ object PatternsCS { * @param timeout the timeout for the response before failing the returned completion operator */ @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.15") - def askWithReplyTo(actor: ActorRef, messageFactory: japi.function.Function[ActorRef, Any], timeout: Timeout): CompletionStage[AnyRef] = + def askWithReplyTo(actor: ActorRef, + messageFactory: japi.function.Function[ActorRef, Any], + timeout: Timeout): CompletionStage[AnyRef] = extended.ask(actor, messageFactory.apply _)(timeout).toJava.asInstanceOf[CompletionStage[AnyRef]] /** @@ -558,7 +588,9 @@ object PatternsCS { * @param timeout the timeout for the response before failing the returned completion stage */ @deprecated("Use Pattens.askWithReplyTo instead.", since = "2.5.19") - def askWithReplyTo(actor: ActorRef, messageFactory: japi.function.Function[ActorRef, Any], timeout: java.time.Duration): CompletionStage[AnyRef] = + def askWithReplyTo(actor: ActorRef, + messageFactory: japi.function.Function[ActorRef, Any], + timeout: java.time.Duration): CompletionStage[AnyRef] = extended.ask(actor, messageFactory.apply _)(Timeout.create(timeout)).toJava.asInstanceOf[CompletionStage[AnyRef]] /** @@ -590,7 +622,8 @@ object PatternsCS { */ @deprecated("Use Pattens.ask which accepts java.time.Duration instead.", since = "2.5.19") def ask(actor: ActorRef, message: Any, timeoutMillis: Long): CompletionStage[AnyRef] = - scalaAsk(actor, message)(new Timeout(timeoutMillis, 
TimeUnit.MILLISECONDS)).toJava.asInstanceOf[CompletionStage[AnyRef]] + scalaAsk(actor, message)(new Timeout(timeoutMillis, TimeUnit.MILLISECONDS)).toJava + .asInstanceOf[CompletionStage[AnyRef]] /** * A variation of ask which allows to implement "replyTo" pattern by including @@ -608,7 +641,9 @@ object PatternsCS { * @param timeoutMillis the timeout for the response before failing the returned completion operator */ @deprecated("Use Pattens.askWithReplyTo which accepts java.time.Duration instead.", since = "2.5.19") - def askWithReplyTo(actor: ActorRef, messageFactory: japi.function.Function[ActorRef, Any], timeoutMillis: Long): CompletionStage[AnyRef] = + def askWithReplyTo(actor: ActorRef, + messageFactory: japi.function.Function[ActorRef, Any], + timeoutMillis: Long): CompletionStage[AnyRef] = askWithReplyTo(actor, messageFactory, Timeout(timeoutMillis.millis)) /** @@ -702,7 +737,8 @@ object PatternsCS { */ @deprecated("Use Pattens.ask which accepts java.time.Duration instead.", since = "2.5.19") def ask(selection: ActorSelection, message: Any, timeoutMillis: Long): CompletionStage[AnyRef] = - scalaAsk(selection, message)(new Timeout(timeoutMillis, TimeUnit.MILLISECONDS)).toJava.asInstanceOf[CompletionStage[AnyRef]] + scalaAsk(selection, message)(new Timeout(timeoutMillis, TimeUnit.MILLISECONDS)).toJava + .asInstanceOf[CompletionStage[AnyRef]] /** * A variation of ask which allows to implement "replyTo" pattern by including @@ -716,8 +752,13 @@ object PatternsCS { * }}} */ @deprecated("Use Pattens.askWithReplyTo which accepts java.time.Duration instead.", since = "2.5.19") - def askWithReplyTo(selection: ActorSelection, messageFactory: japi.Function[ActorRef, Any], timeoutMillis: Long): CompletionStage[AnyRef] = - extended.ask(selection, messageFactory.apply _)(Timeout(timeoutMillis.millis)).toJava.asInstanceOf[CompletionStage[AnyRef]] + def askWithReplyTo(selection: ActorSelection, + messageFactory: japi.Function[ActorRef, Any], + timeoutMillis: Long): 
CompletionStage[AnyRef] = + extended + .ask(selection, messageFactory.apply _)(Timeout(timeoutMillis.millis)) + .toJava + .asInstanceOf[CompletionStage[AnyRef]] /** * When this [[java.util.concurrent.CompletionStage]] finishes, send its result to the given @@ -737,7 +778,8 @@ object PatternsCS { * }}} */ @deprecated("Use Patterns.pipe instead.", since = "2.5.19") - def pipe[T](future: CompletionStage[T], context: ExecutionContext): PipeableCompletionStage[T] = pipeCompletionStage(future)(context) + def pipe[T](future: CompletionStage[T], context: ExecutionContext): PipeableCompletionStage[T] = + pipeCompletionStage(future)(context) /** * Returns a [[java.util.concurrent.CompletionStage]] that will be completed with success (value `true`) when @@ -798,7 +840,9 @@ object PatternsCS { * is completed with failure [[akka.pattern.AskTimeoutException]]. */ @deprecated("Use Patterns.gracefulStop instead.", since = "2.5.19") - def gracefulStop(target: ActorRef, timeout: java.time.Duration, stopMessage: Any): CompletionStage[java.lang.Boolean] = + def gracefulStop(target: ActorRef, + timeout: java.time.Duration, + stopMessage: Any): CompletionStage[java.lang.Boolean] = scalaGracefulStop(target, timeout.asScala, stopMessage).toJava.asInstanceOf[CompletionStage[java.lang.Boolean]] /** @@ -806,7 +850,10 @@ object PatternsCS { * after the specified duration. */ @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def after[T](duration: FiniteDuration, scheduler: Scheduler, context: ExecutionContext, value: Callable[CompletionStage[T]]): CompletionStage[T] = + def after[T](duration: FiniteDuration, + scheduler: Scheduler, + context: ExecutionContext, + value: Callable[CompletionStage[T]]): CompletionStage[T] = afterCompletionStage(duration, scheduler)(value.call())(context) /** @@ -814,7 +861,10 @@ object PatternsCS { * after the specified duration. 
*/ @deprecated("Use Patterns.after instead.", since = "2.5.19") - def after[T](duration: java.time.Duration, scheduler: Scheduler, context: ExecutionContext, value: Callable[CompletionStage[T]]): CompletionStage[T] = + def after[T](duration: java.time.Duration, + scheduler: Scheduler, + context: ExecutionContext, + value: Callable[CompletionStage[T]]): CompletionStage[T] = afterCompletionStage(duration.asScala, scheduler)(value.call())(context) /** @@ -822,7 +872,10 @@ object PatternsCS { * after the specified duration. */ @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def after[T](duration: FiniteDuration, scheduler: Scheduler, context: ExecutionContext, value: CompletionStage[T]): CompletionStage[T] = + def after[T](duration: FiniteDuration, + scheduler: Scheduler, + context: ExecutionContext, + value: CompletionStage[T]): CompletionStage[T] = afterCompletionStage(duration, scheduler)(value)(context) /** @@ -830,7 +883,10 @@ object PatternsCS { * after the specified duration. */ @deprecated("Use Patterns.after instead.", since = "2.5.19") - def after[T](duration: java.time.Duration, scheduler: Scheduler, context: ExecutionContext, value: CompletionStage[T]): CompletionStage[T] = + def after[T](duration: java.time.Duration, + scheduler: Scheduler, + context: ExecutionContext, + value: CompletionStage[T]): CompletionStage[T] = afterCompletionStage(duration.asScala, scheduler)(value)(context) /** @@ -842,6 +898,10 @@ object PatternsCS { * and therefore must be thread safe (not touch unsafe mutable state). 
*/ @deprecated("Use Patterns.retry instead.", since = "2.5.19") - def retry[T](attempt: Callable[CompletionStage[T]], attempts: Int, delay: java.time.Duration, scheduler: Scheduler, ec: ExecutionContext): CompletionStage[T] = + def retry[T](attempt: Callable[CompletionStage[T]], + attempts: Int, + delay: java.time.Duration, + scheduler: Scheduler, + ec: ExecutionContext): CompletionStage[T] = scalaRetry(() => attempt.call().toScala, attempts, delay.asScala)(ec, scheduler).toJava } diff --git a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala index 13bb2ee3ed..6b327990bb 100644 --- a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala @@ -18,13 +18,13 @@ trait PipeToSupport { final class PipeableFuture[T](val future: Future[T])(implicit executionContext: ExecutionContext) { def pipeTo(recipient: ActorRef)(implicit sender: ActorRef = Actor.noSender): Future[T] = { - future andThen { + future.andThen { case Success(r) => recipient ! r case Failure(f) => recipient ! Status.Failure(f) } } def pipeToSelection(recipient: ActorSelection)(implicit sender: ActorRef = Actor.noSender): Future[T] = { - future andThen { + future.andThen { case Success(r) => recipient ! r case Failure(f) => recipient ! Status.Failure(f) } @@ -41,22 +41,23 @@ trait PipeToSupport { } } - final class PipeableCompletionStage[T](val future: CompletionStage[T])(implicit @unused executionContext: ExecutionContext) { + final class PipeableCompletionStage[T](val future: CompletionStage[T])( + implicit @unused executionContext: ExecutionContext) { def pipeTo(recipient: ActorRef)(implicit sender: ActorRef = Actor.noSender): CompletionStage[T] = { - future whenComplete new BiConsumer[T, Throwable] { + future.whenComplete(new BiConsumer[T, Throwable] { override def accept(t: T, ex: Throwable): Unit = { if (t != null) recipient ! t if (ex != null) recipient ! 
Status.Failure(ex) } - } + }) } def pipeToSelection(recipient: ActorSelection)(implicit sender: ActorRef = Actor.noSender): CompletionStage[T] = { - future whenComplete new BiConsumer[T, Throwable] { + future.whenComplete(new BiConsumer[T, Throwable] { override def accept(t: T, ex: Throwable): Unit = { if (t != null) recipient ! t if (ex != null) recipient ! Status.Failure(ex) } - } + }) } def to(recipient: ActorRef): PipeableCompletionStage[T] = to(recipient, Actor.noSender) def to(recipient: ActorRef, sender: ActorRef): PipeableCompletionStage[T] = { @@ -88,7 +89,8 @@ trait PipeToSupport { * The successful result of the future is sent as a message to the recipient, or * the failure is sent in a [[akka.actor.Status.Failure]] to the recipient. */ - implicit def pipe[T](future: Future[T])(implicit executionContext: ExecutionContext): PipeableFuture[T] = new PipeableFuture(future) + implicit def pipe[T](future: Future[T])(implicit executionContext: ExecutionContext): PipeableFuture[T] = + new PipeableFuture(future) /** * Import this implicit conversion to gain the `pipeTo` method on [[scala.concurrent.Future]]: @@ -108,5 +110,6 @@ trait PipeToSupport { * The successful result of the future is sent as a message to the recipient, or * the failure is sent in a [[akka.actor.Status.Failure]] to the recipient. 
*/ - implicit def pipeCompletionStage[T](future: CompletionStage[T])(implicit executionContext: ExecutionContext): PipeableCompletionStage[T] = new PipeableCompletionStage(future) + implicit def pipeCompletionStage[T](future: CompletionStage[T])( + implicit executionContext: ExecutionContext): PipeableCompletionStage[T] = new PipeableCompletionStage(future) } diff --git a/akka-actor/src/main/scala/akka/pattern/PromiseRef.scala b/akka-actor/src/main/scala/akka/pattern/PromiseRef.scala index 2eb1b04bbe..b963c5c4d4 100644 --- a/akka-actor/src/main/scala/akka/pattern/PromiseRef.scala +++ b/akka-actor/src/main/scala/akka/pattern/PromiseRef.scala @@ -14,6 +14,7 @@ import scala.concurrent.{ Future, Promise } * to an actor performing a task which will eventually resolve the Future. */ trait FutureRef[T] { + /** * ActorRef associated with this FutureRef. */ @@ -30,6 +31,7 @@ trait FutureRef[T] { * to an actor performing a task which will eventually resolve the Promise. */ trait PromiseRef[T] { this: FutureRef[T] => + /** * ActorRef associated with this PromiseRef. */ @@ -52,6 +54,7 @@ trait PromiseRef[T] { this: FutureRef[T] => } object PromiseRef { + /** * Wraps an ActorRef and a Promise into a PromiseRef. */ @@ -95,6 +98,7 @@ object PromiseRef { } object FutureRef { + /** * Wraps an ActorRef and a Future into a FutureRef. 
*/ @@ -137,15 +141,15 @@ object FutureRef { } private[akka] class PromiseRefImpl[T](val ref: ActorRef, val promise: Promise[T]) - extends PromiseRef[T] with FutureRef[T] { + extends PromiseRef[T] + with FutureRef[T] { def toFutureRef: FutureRef[T] = this } -private[akka] final class FutureRefImpl[T](val ref: ActorRef, val future: Future[T]) - extends FutureRef[T] +private[akka] final class FutureRefImpl[T](val ref: ActorRef, val future: Future[T]) extends FutureRef[T] private[akka] final class AskPromiseRef private (promiseActorRef: PromiseActorRef) - extends PromiseRefImpl[Any](promiseActorRef, promiseActorRef.result) + extends PromiseRefImpl[Any](promiseActorRef, promiseActorRef.result) private[akka] object AskPromiseRef { def apply(provider: ActorRefProvider, timeout: Timeout): AskPromiseRef = { @@ -157,4 +161,3 @@ private[akka] object AskPromiseRef { } } } - diff --git a/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala b/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala index 690cade024..90f92e91af 100644 --- a/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/RetrySupport.scala @@ -35,13 +35,15 @@ trait RetrySupport { * ) * }}} */ - def retry[T](attempt: () => Future[T], attempts: Int, delay: FiniteDuration)(implicit ec: ExecutionContext, scheduler: Scheduler): Future[T] = { + def retry[T](attempt: () => Future[T], attempts: Int, delay: FiniteDuration)(implicit ec: ExecutionContext, + scheduler: Scheduler): Future[T] = { try { if (attempts > 0) { - attempt() recoverWith { - case NonFatal(_) => after(delay, scheduler) { - retry(attempt, attempts - 1, delay) - } + attempt().recoverWith { + case NonFatal(_) => + after(delay, scheduler) { + retry(attempt, attempts - 1, delay) + } } } else { attempt() diff --git a/akka-actor/src/main/scala/akka/pattern/extended/package.scala b/akka-actor/src/main/scala/akka/pattern/extended/package.scala index eb7c349dd3..9819494b07 100644 --- 
a/akka-actor/src/main/scala/akka/pattern/extended/package.scala +++ b/akka-actor/src/main/scala/akka/pattern/extended/package.scala @@ -41,4 +41,3 @@ package pattern * }}} */ package object extended extends ExplicitAskSupport - diff --git a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala index 75283c899b..45c913648a 100644 --- a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala +++ b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnRestartSupervisor.scala @@ -18,53 +18,56 @@ import scala.concurrent.duration._ * This back-off supervisor is created by using ``akka.pattern.BackoffSupervisor.props`` * with ``akka.pattern.BackoffOpts.onFailure``. */ -@InternalApi private[pattern] class BackoffOnRestartSupervisor( - val childProps: Props, - val childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - val reset: BackoffReset, - randomFactor: Double, - strategy: OneForOneStrategy, - replyWhileStopped: Option[Any]) - extends Actor with HandleBackoff - with ActorLogging { +@InternalApi private[pattern] class BackoffOnRestartSupervisor(val childProps: Props, + val childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + val reset: BackoffReset, + randomFactor: Double, + strategy: OneForOneStrategy, + replyWhileStopped: Option[Any]) + extends Actor + with HandleBackoff + with ActorLogging { import BackoffSupervisor._ import context._ - override val supervisorStrategy = OneForOneStrategy(strategy.maxNrOfRetries, strategy.withinTimeRange, strategy.loggingEnabled) { - case ex => - val defaultDirective: Directive = - super.supervisorStrategy.decider.applyOrElse(ex, (_: Any) => Escalate) + override val supervisorStrategy = + OneForOneStrategy(strategy.maxNrOfRetries, strategy.withinTimeRange, strategy.loggingEnabled) { + case ex => + val defaultDirective: Directive = + 
super.supervisorStrategy.decider.applyOrElse(ex, (_: Any) => Escalate) - strategy.decider.applyOrElse(ex, (_: Any) => defaultDirective) match { + strategy.decider.applyOrElse(ex, (_: Any) => defaultDirective) match { - // Whatever the final Directive is, we will translate all Restarts - // to our own Restarts, which involves stopping the child. - case Restart => - if (strategy.withinTimeRange.isFinite && restartCount == 0) { - // If the user has defined a time range for the maxNrOfRetries, we'll schedule a message - // to ourselves every time that range elapses, to reset the restart counter. We hide it - // behind this conditional to avoid queuing the message unnecessarily - val finiteWithinTimeRange = strategy.withinTimeRange.asInstanceOf[FiniteDuration] - system.scheduler.scheduleOnce(finiteWithinTimeRange, self, ResetRestartCount(restartCount)) - } - val childRef = sender() - val nextRestartCount = restartCount + 1 - if (strategy.maxNrOfRetries >= 0 && nextRestartCount > strategy.maxNrOfRetries) { - // If we've exceeded the maximum # of retries allowed by the Strategy, die. - log.debug(s"Terminating on restart #{} which exceeds max allowed restarts ({})", nextRestartCount, strategy.maxNrOfRetries) - become(receive) - stop(self) - } else { - become(waitChildTerminatedBeforeBackoff(childRef) orElse handleBackoff) - } - Stop + // Whatever the final Directive is, we will translate all Restarts + // to our own Restarts, which involves stopping the child. + case Restart => + if (strategy.withinTimeRange.isFinite && restartCount == 0) { + // If the user has defined a time range for the maxNrOfRetries, we'll schedule a message + // to ourselves every time that range elapses, to reset the restart counter. 
We hide it + // behind this conditional to avoid queuing the message unnecessarily + val finiteWithinTimeRange = strategy.withinTimeRange.asInstanceOf[FiniteDuration] + system.scheduler.scheduleOnce(finiteWithinTimeRange, self, ResetRestartCount(restartCount)) + } + val childRef = sender() + val nextRestartCount = restartCount + 1 + if (strategy.maxNrOfRetries >= 0 && nextRestartCount > strategy.maxNrOfRetries) { + // If we've exceeded the maximum # of retries allowed by the Strategy, die. + log.debug(s"Terminating on restart #{} which exceeds max allowed restarts ({})", + nextRestartCount, + strategy.maxNrOfRetries) + become(receive) + stop(self) + } else { + become(waitChildTerminatedBeforeBackoff(childRef).orElse(handleBackoff)) + } + Stop - case other => other - } - } + case other => other + } + } def waitChildTerminatedBeforeBackoff(childRef: ActorRef): Receive = { case Terminated(`childRef`) => @@ -83,7 +86,7 @@ import scala.concurrent.duration._ stop(self) } - def receive: Receive = onTerminated orElse handleBackoff + def receive: Receive = onTerminated.orElse(handleBackoff) protected def handleMessageToChild(msg: Any): Unit = child match { case Some(c) => diff --git a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala index 5f59abfe5f..8180d7f630 100644 --- a/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala +++ b/akka-actor/src/main/scala/akka/pattern/internal/BackoffOnStopSupervisor.scala @@ -18,18 +18,18 @@ import scala.concurrent.duration.FiniteDuration * This back-off supervisor is created by using `akka.pattern.BackoffSupervisor.props` * with `BackoffOpts.onStop`. 
*/ -@InternalApi private[pattern] class BackoffOnStopSupervisor( - val childProps: Props, - val childName: String, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - val reset: BackoffReset, - randomFactor: Double, - strategy: SupervisorStrategy, - replyWhileStopped: Option[Any], - finalStopMessage: Option[Any => Boolean]) - extends Actor with HandleBackoff - with ActorLogging { +@InternalApi private[pattern] class BackoffOnStopSupervisor(val childProps: Props, + val childName: String, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + val reset: BackoffReset, + randomFactor: Double, + strategy: SupervisorStrategy, + replyWhileStopped: Option[Any], + finalStopMessage: Option[Any => Boolean]) + extends Actor + with HandleBackoff + with ActorLogging { import BackoffSupervisor._ import context.dispatcher @@ -64,14 +64,16 @@ import scala.concurrent.duration.FiniteDuration context.system.scheduler.scheduleOnce(restartDelay, self, StartChild) restartCount = nextRestartCount } else { - log.debug(s"Terminating on restart #{} which exceeds max allowed restarts ({})", nextRestartCount, maxNrOfRetries) + log.debug(s"Terminating on restart #{} which exceeds max allowed restarts ({})", + nextRestartCount, + maxNrOfRetries) context.stop(self) } } } - def receive: Receive = onTerminated orElse handleBackoff + def receive: Receive = onTerminated.orElse(handleBackoff) protected def handleMessageToChild(msg: Any): Unit = child match { case Some(c) => diff --git a/akka-actor/src/main/scala/akka/pattern/package.scala b/akka-actor/src/main/scala/akka/pattern/package.scala index e4bdcce20f..a4e6de784c 100644 --- a/akka-actor/src/main/scala/akka/pattern/package.scala +++ b/akka-actor/src/main/scala/akka/pattern/package.scala @@ -39,6 +39,9 @@ package akka * ask(actor, message); * }}} */ -package object pattern extends PipeToSupport with AskSupport with GracefulStopSupport with FutureTimeoutSupport - with RetrySupport - +package object pattern + extends PipeToSupport 
+ with AskSupport + with GracefulStopSupport + with FutureTimeoutSupport + with RetrySupport diff --git a/akka-actor/src/main/scala/akka/routing/Balancing.scala b/akka-actor/src/main/scala/akka/routing/Balancing.scala index cab477b4dc..ba16268114 100644 --- a/akka-actor/src/main/scala/akka/routing/Balancing.scala +++ b/akka-actor/src/main/scala/akka/routing/Balancing.scala @@ -66,11 +66,10 @@ private[akka] final class BalancingRoutingLogic extends RoutingLogic { * supervision, death watch and router management messages */ @SerialVersionUID(1L) -final case class BalancingPool( - val nrOfInstances: Int, - override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) - extends Pool { +final case class BalancingPool(val nrOfInstances: Int, + override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) + extends Pool { def this(config: Config) = this(nrOfInstances = config.getInt("nr-of-instances")) @@ -113,15 +112,14 @@ final case class BalancingPool( // dispatcher of this pool val deployDispatcherConfigPath = s"akka.actor.deployment.$deployPath.pool-dispatcher" val systemConfig = context.system.settings.config - val dispatcherConfig = context.system.dispatchers.config( - dispatcherId, - // use the user defined 'pool-dispatcher' config as fallback, if any - if (systemConfig.hasPath(deployDispatcherConfigPath)) systemConfig.getConfig(deployDispatcherConfigPath) - else ConfigFactory.empty) + val dispatcherConfig = context.system.dispatchers.config(dispatcherId, + // use the user defined 'pool-dispatcher' config as fallback, if any + if (systemConfig.hasPath(deployDispatcherConfigPath)) + systemConfig.getConfig(deployDispatcherConfigPath) + else ConfigFactory.empty) - dispatchers.registerConfigurator(dispatcherId, new BalancingDispatcherConfigurator( - dispatcherConfig, - 
dispatchers.prerequisites)) + dispatchers.registerConfigurator(dispatcherId, + new BalancingDispatcherConfigurator(dispatcherConfig, dispatchers.prerequisites)) } val routeePropsWithDispatcher = routeeProps.withDispatcher(dispatcherId) @@ -139,7 +137,7 @@ final case class BalancingPool( other match { case p: Pool => if ((this.supervisorStrategy eq Pool.defaultSupervisorStrategy) - && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) + && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) this.withSupervisorStrategy(p.supervisorStrategy) else this diff --git a/akka-actor/src/main/scala/akka/routing/Broadcast.scala b/akka-actor/src/main/scala/akka/routing/Broadcast.scala index 0542fff655..125167550c 100644 --- a/akka-actor/src/main/scala/akka/routing/Broadcast.scala +++ b/akka-actor/src/main/scala/akka/routing/Broadcast.scala @@ -56,18 +56,18 @@ final class BroadcastRoutingLogic extends RoutingLogic { * supervision, death watch and router management messages */ @SerialVersionUID(1L) -final case class BroadcastPool( - val nrOfInstances: Int, override val resizer: Option[Resizer] = None, - override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, - override val usePoolDispatcher: Boolean = false) - extends Pool with PoolOverrideUnsetConfig[BroadcastPool] { +final case class BroadcastPool(val nrOfInstances: Int, + override val resizer: Option[Resizer] = None, + override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + override val usePoolDispatcher: Boolean = false) + extends Pool + with PoolOverrideUnsetConfig[BroadcastPool] { def this(config: Config) = - this( - nrOfInstances = config.getInt("nr-of-instances"), - resizer = Resizer.fromConfig(config), - usePoolDispatcher = config.hasPath("pool-dispatcher")) + this(nrOfInstances = 
config.getInt("nr-of-instances"), + resizer = Resizer.fromConfig(config), + usePoolDispatcher = config.hasPath("pool-dispatcher")) /** * Java API @@ -118,10 +118,9 @@ final case class BroadcastPool( * router management messages */ @SerialVersionUID(1L) -final case class BroadcastGroup( - val paths: immutable.Iterable[String], - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) - extends Group { +final case class BroadcastGroup(val paths: immutable.Iterable[String], + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) + extends Group { def this(config: Config) = this(paths = immutableSeq(config.getStringList("routees.paths"))) diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala index 805bd5ed4e..53c156472b 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala @@ -39,9 +39,9 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v */ def :+(node: T): ConsistentHash[T] = { val nodeHash = hashFor(node.toString) - new ConsistentHash( - nodes ++ ((1 to virtualNodesFactor) map { r => (concatenateNodeHash(nodeHash, r) -> node) }), - virtualNodesFactor) + new ConsistentHash(nodes ++ ((1 to virtualNodesFactor).map { r => + (concatenateNodeHash(nodeHash, r) -> node) + }), virtualNodesFactor) } /** @@ -58,9 +58,9 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v */ def :-(node: T): ConsistentHash[T] = { val nodeHash = hashFor(node.toString) - new ConsistentHash( - nodes -- ((1 to virtualNodesFactor) map { r => concatenateNodeHash(nodeHash, r) }), - virtualNodesFactor) + new ConsistentHash(nodes -- ((1 to virtualNodesFactor).map { r => + concatenateNodeHash(nodeHash, r) + }), virtualNodesFactor) } /** @@ -87,7 +87,7 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v * 
otherwise throws `IllegalStateException` */ def nodeFor(key: Array[Byte]): T = { - if (isEmpty) throw new IllegalStateException("Can't get node for [%s] from an empty node ring" format key) + if (isEmpty) throw new IllegalStateException("Can't get node for [%s] from an empty node ring".format(key)) nodeRing(idx(Arrays.binarySearch(nodeHashRing, hashFor(key)))) } @@ -98,7 +98,7 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v * otherwise throws `IllegalStateException` */ def nodeFor(key: String): T = { - if (isEmpty) throw new IllegalStateException("Can't get node for [%s] from an empty node ring" format key) + if (isEmpty) throw new IllegalStateException("Can't get node for [%s] from an empty node ring".format(key)) nodeRing(idx(Arrays.binarySearch(nodeHashRing, hashFor(key)))) } @@ -112,14 +112,13 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v object ConsistentHash { def apply[T: ClassTag](nodes: Iterable[T], virtualNodesFactor: Int): ConsistentHash[T] = { - new ConsistentHash( - immutable.SortedMap.empty[Int, T] ++ - (for { - node <- nodes - nodeHash = hashFor(node.toString) - vnode <- 1 to virtualNodesFactor - } yield (concatenateNodeHash(nodeHash, vnode) -> node)), - virtualNodesFactor) + new ConsistentHash(immutable.SortedMap.empty[Int, T] ++ + (for { + node <- nodes + nodeHash = hashFor(node.toString) + vnode <- 1 to virtualNodesFactor + } yield (concatenateNodeHash(nodeHash, vnode) -> node)), + virtualNodesFactor) } /** diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala index 529c597dfe..94cc20bf80 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHashing.scala @@ -50,7 +50,8 @@ object ConsistentHashingRouter { */ @SerialVersionUID(1L) final case class ConsistentHashableEnvelope(message: Any, hashKey: Any) - extends 
ConsistentHashable with RouterEnvelope { + extends ConsistentHashable + with RouterEnvelope { override def consistentHashKey: Any = hashKey } @@ -99,6 +100,7 @@ object ConsistentHashingRouter { } object ConsistentHashingRoutingLogic { + /** * Address to use for the selfAddress parameter */ @@ -135,11 +137,11 @@ object ConsistentHashingRoutingLogic { * */ @SerialVersionUID(1L) -final case class ConsistentHashingRoutingLogic( - system: ActorSystem, - virtualNodesFactor: Int = 0, - hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping) - extends RoutingLogic { +final case class ConsistentHashingRoutingLogic(system: ActorSystem, + virtualNodesFactor: Int = 0, + hashMapping: ConsistentHashingRouter.ConsistentHashMapping = + ConsistentHashingRouter.emptyConsistentHashMapping) + extends RoutingLogic { import ConsistentHashingRouter._ @@ -179,7 +181,8 @@ final case class ConsistentHashingRoutingLogic( copy(hashMapping = ConsistentHashingRouter.hashMappingAdapter(mapper)) // tuple of routees and the ConsistentHash, updated together in updateConsistentHash - private val consistentHashRef = new AtomicReference[(immutable.IndexedSeq[Routee], ConsistentHash[ConsistentRoutee])]((null, null)) + private val consistentHashRef = + new AtomicReference[(immutable.IndexedSeq[Routee], ConsistentHash[ConsistentRoutee])]((null, null)) override def select(message: Any, routees: immutable.IndexedSeq[Routee]): Routee = if (routees.isEmpty) NoRoutee @@ -202,28 +205,30 @@ final case class ConsistentHashingRoutingLogic( } else oldConsistentHash } - def target(hashData: Any): Routee = try { - val currentConsistenHash = updateConsistentHash() - if (currentConsistenHash.isEmpty) NoRoutee - else hashData match { - case bytes: Array[Byte] => currentConsistenHash.nodeFor(bytes).routee - case str: String => currentConsistenHash.nodeFor(str).routee - case x: AnyRef => 
currentConsistenHash.nodeFor(SerializationExtension(system).serialize(x).get).routee + def target(hashData: Any): Routee = + try { + val currentConsistenHash = updateConsistentHash() + if (currentConsistenHash.isEmpty) NoRoutee + else + hashData match { + case bytes: Array[Byte] => currentConsistenHash.nodeFor(bytes).routee + case str: String => currentConsistenHash.nodeFor(str).routee + case x: AnyRef => currentConsistenHash.nodeFor(SerializationExtension(system).serialize(x).get).routee + } + } catch { + case NonFatal(e) => + log.warning("Couldn't route message with consistent hash key [{}] due to [{}]", hashData, e.getMessage) + NoRoutee } - } catch { - case NonFatal(e) => - log.warning("Couldn't route message with consistent hash key [{}] due to [{}]", hashData, e.getMessage) - NoRoutee - } message match { case _ if hashMapping.isDefinedAt(message) => target(hashMapping(message)) case hashable: ConsistentHashable => target(hashable.consistentHashKey) case _ => - log.warning( - "Message [{}] must be handled by hashMapping, or implement [{}] or be wrapped in [{}]", - message.getClass.getName, classOf[ConsistentHashable].getName, - classOf[ConsistentHashableEnvelope].getName) + log.warning("Message [{}] must be handled by hashMapping, or implement [{}] or be wrapped in [{}]", + message.getClass.getName, + classOf[ConsistentHashable].getName, + classOf[ConsistentHashableEnvelope].getName) NoRoutee } } @@ -268,20 +273,20 @@ final case class ConsistentHashingRoutingLogic( */ @SerialVersionUID(1L) final case class ConsistentHashingPool( - val nrOfInstances: Int, - override val resizer: Option[Resizer] = None, - val virtualNodesFactor: Int = 0, - val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping, - override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, - override val usePoolDispatcher: Boolean = 
false) - extends Pool with PoolOverrideUnsetConfig[ConsistentHashingPool] { + val nrOfInstances: Int, + override val resizer: Option[Resizer] = None, + val virtualNodesFactor: Int = 0, + val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping, + override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + override val usePoolDispatcher: Boolean = false) + extends Pool + with PoolOverrideUnsetConfig[ConsistentHashingPool] { def this(config: Config) = - this( - nrOfInstances = config.getInt("nr-of-instances"), - resizer = Resizer.fromConfig(config), - usePoolDispatcher = config.hasPath("pool-dispatcher")) + this(nrOfInstances = config.getInt("nr-of-instances"), + resizer = Resizer.fromConfig(config), + usePoolDispatcher = config.hasPath("pool-dispatcher")) /** * Java API @@ -355,12 +360,12 @@ final case class ConsistentHashingPool( * router management messages */ @SerialVersionUID(1L) -final case class ConsistentHashingGroup( - val paths: immutable.Iterable[String], - val virtualNodesFactor: Int = 0, - val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) - extends Group { +final case class ConsistentHashingGroup(val paths: immutable.Iterable[String], + val virtualNodesFactor: Int = 0, + val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = + ConsistentHashingRouter.emptyConsistentHashMapping, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) + extends Group { def this(config: Config) = this(paths = immutableSeq(config.getStringList("routees.paths"))) diff --git a/akka-actor/src/main/scala/akka/routing/Listeners.scala b/akka-actor/src/main/scala/akka/routing/Listeners.scala index 60374838c0..3fd7d73062 100644 --- 
a/akka-actor/src/main/scala/akka/routing/Listeners.scala +++ b/akka-actor/src/main/scala/akka/routing/Listeners.scala @@ -32,8 +32,8 @@ trait Listeners { self: Actor => * {{{ def receive = listenerManagement orElse … }}} */ protected def listenerManagement: Actor.Receive = { - case Listen(l) => listeners add l - case Deafen(l) => listeners remove l + case Listen(l) => listeners.add(l) + case Deafen(l) => listeners.remove(l) case WithListeners(f) => val i = listeners.iterator while (i.hasNext) f(i.next) diff --git a/akka-actor/src/main/scala/akka/routing/MurmurHash.scala b/akka-actor/src/main/scala/akka/routing/MurmurHash.scala index 752f114f4a..8838cd20ea 100644 --- a/akka-actor/src/main/scala/akka/routing/MurmurHash.scala +++ b/akka-actor/src/main/scala/akka/routing/MurmurHash.scala @@ -17,7 +17,6 @@ * @version 2.9 * @since 2.9 */ - package akka.routing import java.lang.Integer.{ rotateLeft => rotl } @@ -32,7 +31,6 @@ import akka.util.ccompat._ * incorporate a new integer) to update the values. Only one method * needs to be called to finalize the hash. */ - object MurmurHash { // Magic values used for MurmurHash's 32 bit hash. // Don't change these without consulting a hashing expert! diff --git a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala index ec116e8a88..a4a23e0295 100644 --- a/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala +++ b/akka-actor/src/main/scala/akka/routing/OptimalSizeExploringResizer.scala @@ -17,6 +17,7 @@ import OptimalSizeExploringResizer._ import akka.annotation.InternalApi trait OptimalSizeExploringResizer extends Resizer { + /** * Report the messageCount as well as current routees so that the * it can collect metrics. 
@@ -29,6 +30,7 @@ trait OptimalSizeExploringResizer extends Resizer { } case object OptimalSizeExploringResizer { + /** * INTERNAL API */ @@ -42,11 +44,10 @@ case object OptimalSizeExploringResizer { /** * INTERNAL API */ - private[routing] case class ResizeRecord( - underutilizationStreak: Option[UnderUtilizationStreak] = None, - messageCount: Long = 0, - totalQueueLength: Int = 0, - checkTime: Long = 0) + private[routing] case class ResizeRecord(underutilizationStreak: Option[UnderUtilizationStreak] = None, + messageCount: Long = 0, + totalQueueLength: Int = 0, + checkTime: Long = 0) /** * INTERNAL API @@ -54,17 +55,19 @@ case object OptimalSizeExploringResizer { private[routing] type PerformanceLog = Map[PoolSize, Duration] def apply(resizerCfg: Config): OptimalSizeExploringResizer = - DefaultOptimalSizeExploringResizer( - lowerBound = resizerCfg.getInt("lower-bound"), - upperBound = resizerCfg.getInt("upper-bound"), - chanceOfScalingDownWhenFull = resizerCfg.getDouble("chance-of-ramping-down-when-full"), - actionInterval = resizerCfg.getDuration("action-interval").asScala, - downsizeAfterUnderutilizedFor = resizerCfg.getDuration("downsize-after-underutilized-for").asScala, - numOfAdjacentSizesToConsiderDuringOptimization = resizerCfg.getInt("optimization-range"), - exploreStepSize = resizerCfg.getDouble("explore-step-size"), - explorationProbability = resizerCfg.getDouble("chance-of-exploration"), - weightOfLatestMetric = resizerCfg.getDouble("weight-of-latest-metric"), - downsizeRatio = resizerCfg.getDouble("downsize-ratio")) + DefaultOptimalSizeExploringResizer(lowerBound = resizerCfg.getInt("lower-bound"), + upperBound = resizerCfg.getInt("upper-bound"), + chanceOfScalingDownWhenFull = + resizerCfg.getDouble("chance-of-ramping-down-when-full"), + actionInterval = resizerCfg.getDuration("action-interval").asScala, + downsizeAfterUnderutilizedFor = + resizerCfg.getDuration("downsize-after-underutilized-for").asScala, + 
numOfAdjacentSizesToConsiderDuringOptimization = + resizerCfg.getInt("optimization-range"), + exploreStepSize = resizerCfg.getDouble("explore-step-size"), + explorationProbability = resizerCfg.getDouble("chance-of-exploration"), + weightOfLatestMetric = resizerCfg.getDouble("weight-of-latest-metric"), + downsizeRatio = resizerCfg.getDouble("downsize-ratio")) } @@ -114,17 +117,18 @@ case object OptimalSizeExploringResizer { * */ @SerialVersionUID(1L) -case class DefaultOptimalSizeExploringResizer( - lowerBound: PoolSize = 1, - upperBound: PoolSize = 30, - chanceOfScalingDownWhenFull: Double = 0.2, - actionInterval: Duration = 5.seconds, - numOfAdjacentSizesToConsiderDuringOptimization: Int = 16, - exploreStepSize: Double = 0.1, - downsizeRatio: Double = 0.8, - downsizeAfterUnderutilizedFor: Duration = 72.hours, - explorationProbability: Double = 0.4, - weightOfLatestMetric: Double = 0.5) extends OptimalSizeExploringResizer { +case class DefaultOptimalSizeExploringResizer(lowerBound: PoolSize = 1, + upperBound: PoolSize = 30, + chanceOfScalingDownWhenFull: Double = 0.2, + actionInterval: Duration = 5.seconds, + numOfAdjacentSizesToConsiderDuringOptimization: Int = 16, + exploreStepSize: Double = 0.1, + downsizeRatio: Double = 0.8, + downsizeAfterUnderutilizedFor: Duration = 72.hours, + explorationProbability: Double = 0.4, + weightOfLatestMetric: Double = 0.5) + extends OptimalSizeExploringResizer { + /** * INTERNAL API * @@ -132,6 +136,7 @@ case class DefaultOptimalSizeExploringResizer( */ @InternalApi private[routing] var performanceLog: PerformanceLog = Map.empty + /** * INTERNAL API * @@ -151,20 +156,28 @@ case class DefaultOptimalSizeExploringResizer( private def random = ThreadLocalRandom.current() private def checkParamAsProbability(value: Double, paramName: String): Unit = - if (value < 0 || value > 1) throw new IllegalArgumentException(s"$paramName must be between 0 and 1 (inclusive), was: [%s]".format(value)) + if (value < 0 || value > 1) + throw new 
IllegalArgumentException(s"$paramName must be between 0 and 1 (inclusive), was: [%s]".format(value)) - private def checkParamAsPositiveNum(value: Double, paramName: String): Unit = checkParamLowerBound(value, 0, paramName) + private def checkParamAsPositiveNum(value: Double, paramName: String): Unit = + checkParamLowerBound(value, 0, paramName) private def checkParamLowerBound(value: Double, lowerBound: Double, paramName: String): Unit = - if (value < lowerBound) throw new IllegalArgumentException(s"$paramName must be >= $lowerBound, was: [%s]".format(value)) + if (value < lowerBound) + throw new IllegalArgumentException(s"$paramName must be >= $lowerBound, was: [%s]".format(value)) checkParamAsPositiveNum(lowerBound, "lowerBound") checkParamAsPositiveNum(upperBound, "upperBound") - if (upperBound < lowerBound) throw new IllegalArgumentException("upperBound must be >= lowerBound, was: [%s] < [%s]".format(upperBound, lowerBound)) + if (upperBound < lowerBound) + throw new IllegalArgumentException( + "upperBound must be >= lowerBound, was: [%s] < [%s]".format(upperBound, lowerBound)) - checkParamLowerBound(numOfAdjacentSizesToConsiderDuringOptimization, 2, "numOfAdjacentSizesToConsiderDuringOptimization") + checkParamLowerBound(numOfAdjacentSizesToConsiderDuringOptimization, + 2, + "numOfAdjacentSizesToConsiderDuringOptimization") checkParamAsProbability(chanceOfScalingDownWhenFull, "chanceOfScalingDownWhenFull") - checkParamAsPositiveNum(numOfAdjacentSizesToConsiderDuringOptimization, "numOfAdjacentSizesToConsiderDuringOptimization") + checkParamAsPositiveNum(numOfAdjacentSizesToConsiderDuringOptimization, + "numOfAdjacentSizesToConsiderDuringOptimization") checkParamAsPositiveNum(exploreStepSize, "exploreStepSize") checkParamAsPositiveNum(downsizeRatio, "downsizeRatio") checkParamAsProbability(explorationProbability, "explorationProbability") @@ -183,11 +196,12 @@ case class DefaultOptimalSizeExploringResizer( record = newRecord } - private[routing] def 
updatedStats(currentRoutees: immutable.IndexedSeq[Routee], messageCounter: Long): (PerformanceLog, ResizeRecord) = { + private[routing] def updatedStats(currentRoutees: immutable.IndexedSeq[Routee], + messageCounter: Long): (PerformanceLog, ResizeRecord) = { val now = LocalDateTime.now val currentSize = currentRoutees.length - val messagesInRoutees = currentRoutees map { + val messagesInRoutees = currentRoutees.map { case ActorRefRoutee(a: ActorRefWithCell) => a.underlying match { case cell: ActorCell => @@ -206,9 +220,9 @@ case class DefaultOptimalSizeExploringResizer( if (fullyUtilized) None else - Some(UnderUtilizationStreak( - record.underutilizationStreak.fold(now)(_.start), - Math.max(record.underutilizationStreak.fold(0)(_.highestUtilization), utilized))) + Some( + UnderUtilizationStreak(record.underutilizationStreak.fold(now)(_.start), + Math.max(record.underutilizationStreak.fold(0)(_.highestUtilization), utilized))) val newPerformanceLog: PerformanceLog = if (fullyUtilized && record.underutilizationStreak.isEmpty && record.checkTime > 0) { @@ -226,11 +240,10 @@ case class DefaultOptimalSizeExploringResizer( } else performanceLog } else performanceLog - val newRecord = record.copy( - underutilizationStreak = newUnderutilizationStreak, - messageCount = messageCounter, - totalQueueLength = totalQueueLength, - checkTime = System.nanoTime()) + val newRecord = record.copy(underutilizationStreak = newUnderutilizationStreak, + messageCount = messageCounter, + totalQueueLength = totalQueueLength, + checkTime = System.nanoTime()) (newPerformanceLog, newRecord) @@ -260,8 +273,10 @@ case class DefaultOptimalSizeExploringResizer( def adjacency = (size: Int) => Math.abs(currentSize - size) val sizes = performanceLog.keys.toSeq val numOfSizesEachSide = numOfAdjacentSizesToConsiderDuringOptimization / 2 - val leftBoundary = sizes.filter(_ < currentSize).sortBy(adjacency).take(numOfSizesEachSide).lastOption.getOrElse(currentSize) - val rightBoundary = sizes.filter(_ >= 
currentSize).sortBy(adjacency).take(numOfSizesEachSide).lastOption.getOrElse(currentSize) + val leftBoundary = + sizes.filter(_ < currentSize).sortBy(adjacency).take(numOfSizesEachSide).lastOption.getOrElse(currentSize) + val rightBoundary = + sizes.filter(_ >= currentSize).sortBy(adjacency).take(numOfSizesEachSide).lastOption.getOrElse(currentSize) performanceLog.filter { case (size, _) => size >= leftBoundary && size <= rightBoundary } } diff --git a/akka-actor/src/main/scala/akka/routing/Random.scala b/akka-actor/src/main/scala/akka/routing/Random.scala index e08888e5d9..7f905acf2e 100644 --- a/akka-actor/src/main/scala/akka/routing/Random.scala +++ b/akka-actor/src/main/scala/akka/routing/Random.scala @@ -57,18 +57,18 @@ final class RandomRoutingLogic extends RoutingLogic { * supervision, death watch and router management messages */ @SerialVersionUID(1L) -final case class RandomPool( - val nrOfInstances: Int, override val resizer: Option[Resizer] = None, - override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, - override val usePoolDispatcher: Boolean = false) - extends Pool with PoolOverrideUnsetConfig[RandomPool] { +final case class RandomPool(val nrOfInstances: Int, + override val resizer: Option[Resizer] = None, + override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + override val usePoolDispatcher: Boolean = false) + extends Pool + with PoolOverrideUnsetConfig[RandomPool] { def this(config: Config) = - this( - nrOfInstances = config.getInt("nr-of-instances"), - resizer = Resizer.fromConfig(config), - usePoolDispatcher = config.hasPath("pool-dispatcher")) + this(nrOfInstances = config.getInt("nr-of-instances"), + resizer = Resizer.fromConfig(config), + usePoolDispatcher = config.hasPath("pool-dispatcher")) /** * Java API @@ -119,10 +119,9 @@ 
final case class RandomPool( * router management messages */ @SerialVersionUID(1L) -final case class RandomGroup( - val paths: immutable.Iterable[String], - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) - extends Group { +final case class RandomGroup(val paths: immutable.Iterable[String], + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) + extends Group { def this(config: Config) = this(paths = immutableSeq(config.getStringList("routees.paths"))) diff --git a/akka-actor/src/main/scala/akka/routing/Resizer.scala b/akka-actor/src/main/scala/akka/routing/Resizer.scala index b964800f0c..a695721070 100644 --- a/akka-actor/src/main/scala/akka/routing/Resizer.scala +++ b/akka-actor/src/main/scala/akka/routing/Resizer.scala @@ -29,6 +29,7 @@ import akka.dispatch.MessageDispatcher * implementation in the [[akka.routing.Pool]] configuration. */ trait Resizer { + /** * Is it time for resizing. Typically implemented with modulo of nth message, but * could be based on elapsed time or something else. 
The messageCounter starts with 0 @@ -78,14 +79,13 @@ case object DefaultResizer { * Creates a new DefaultResizer from the given configuration */ def apply(resizerConfig: Config): DefaultResizer = - DefaultResizer( - lowerBound = resizerConfig.getInt("lower-bound"), - upperBound = resizerConfig.getInt("upper-bound"), - pressureThreshold = resizerConfig.getInt("pressure-threshold"), - rampupRate = resizerConfig.getDouble("rampup-rate"), - backoffThreshold = resizerConfig.getDouble("backoff-threshold"), - backoffRate = resizerConfig.getDouble("backoff-rate"), - messagesPerResize = resizerConfig.getInt("messages-per-resize")) + DefaultResizer(lowerBound = resizerConfig.getInt("lower-bound"), + upperBound = resizerConfig.getInt("upper-bound"), + pressureThreshold = resizerConfig.getInt("pressure-threshold"), + rampupRate = resizerConfig.getDouble("rampup-rate"), + backoffThreshold = resizerConfig.getDouble("backoff-threshold"), + backoffRate = resizerConfig.getDouble("backoff-rate"), + messagesPerResize = resizerConfig.getInt("messages-per-resize")) def fromConfig(resizerConfig: Config): Option[DefaultResizer] = if (resizerConfig.getBoolean("resizer.enabled")) @@ -126,14 +126,14 @@ case object DefaultResizer { * Use 1 to resize before each message. */ @SerialVersionUID(1L) -case class DefaultResizer( - val lowerBound: Int = 1, - val upperBound: Int = 10, - val pressureThreshold: Int = 1, - val rampupRate: Double = 0.2, - val backoffThreshold: Double = 0.3, - val backoffRate: Double = 0.1, - val messagesPerResize: Int = 10) extends Resizer { +case class DefaultResizer(val lowerBound: Int = 1, + val upperBound: Int = 10, + val pressureThreshold: Int = 1, + val rampupRate: Double = 0.2, + val backoffThreshold: Double = 0.3, + val backoffRate: Double = 0.1, + val messagesPerResize: Int = 10) + extends Resizer { /** * Java API constructor for default values except bounds. 
@@ -142,11 +142,15 @@ case class DefaultResizer( if (lowerBound < 0) throw new IllegalArgumentException("lowerBound must be >= 0, was: [%s]".format(lowerBound)) if (upperBound < 0) throw new IllegalArgumentException("upperBound must be >= 0, was: [%s]".format(upperBound)) - if (upperBound < lowerBound) throw new IllegalArgumentException("upperBound must be >= lowerBound, was: [%s] < [%s]".format(upperBound, lowerBound)) + if (upperBound < lowerBound) + throw new IllegalArgumentException( + "upperBound must be >= lowerBound, was: [%s] < [%s]".format(upperBound, lowerBound)) if (rampupRate < 0.0) throw new IllegalArgumentException("rampupRate must be >= 0.0, was [%s]".format(rampupRate)) - if (backoffThreshold > 1.0) throw new IllegalArgumentException("backoffThreshold must be <= 1.0, was [%s]".format(backoffThreshold)) + if (backoffThreshold > 1.0) + throw new IllegalArgumentException("backoffThreshold must be <= 1.0, was [%s]".format(backoffThreshold)) if (backoffRate < 0.0) throw new IllegalArgumentException("backoffRate must be >= 0.0, was [%s]".format(backoffRate)) - if (messagesPerResize <= 0) throw new IllegalArgumentException("messagesPerResize must be > 0, was [%s]".format(messagesPerResize)) + if (messagesPerResize <= 0) + throw new IllegalArgumentException("messagesPerResize must be > 0, was [%s]".format(messagesPerResize)) def isTimeForResize(messageCounter: Long): Boolean = (messageCounter % messagesPerResize == 0) @@ -189,7 +193,7 @@ case class DefaultResizer( * @return number of busy routees, between 0 and routees.size */ def pressure(routees: immutable.IndexedSeq[Routee]): Int = { - routees count { + routees.count { case ActorRefRoutee(a: ActorRefWithCell) => a.underlying match { case cell: ActorCell => @@ -246,15 +250,14 @@ case class DefaultResizer( /** * INTERNAL API */ -private[akka] final class ResizablePoolCell( - _system: ActorSystemImpl, - _ref: InternalActorRef, - _routerProps: Props, - _routerDispatcher: MessageDispatcher, - _routeeProps: 
Props, - _supervisor: InternalActorRef, - val pool: Pool) - extends RoutedActorCell(_system, _ref, _routerProps, _routerDispatcher, _routeeProps, _supervisor) { +private[akka] final class ResizablePoolCell(_system: ActorSystemImpl, + _ref: InternalActorRef, + _routerProps: Props, + _routerDispatcher: MessageDispatcher, + _routeeProps: Props, + _supervisor: InternalActorRef, + val pool: Pool) + extends RoutedActorCell(_system, _ref, _routerProps, _routerDispatcher, _routeeProps, _supervisor) { require(pool.resizer.isDefined, "RouterConfig must be a Pool with defined resizer") val resizer = pool.resizer.get @@ -270,7 +273,7 @@ private[akka] final class ResizablePoolCell( override def sendMessage(envelope: Envelope): Unit = { if (!routerConfig.isManagementMessage(envelope.message) && - resizer.isTimeForResize(resizeCounter.getAndIncrement()) && resizeInProgress.compareAndSet(false, true)) { + resizer.isTimeForResize(resizeCounter.getAndIncrement()) && resizeInProgress.compareAndSet(false, true)) { super.sendMessage(Envelope(ResizablePoolActor.Resize, self, system)) } @@ -315,18 +318,20 @@ private[akka] object ResizablePoolActor { * INTERNAL API */ private[akka] class ResizablePoolActor(supervisorStrategy: SupervisorStrategy) - extends RouterPoolActor(supervisorStrategy) { + extends RouterPoolActor(supervisorStrategy) { import ResizablePoolActor._ val resizerCell = context match { case x: ResizablePoolCell => x case _ => - throw ActorInitializationException("Resizable router actor can only be used when resizer is defined, not in " + context.getClass) + throw ActorInitializationException( + "Resizable router actor can only be used when resizer is defined, not in " + context.getClass) } - override def receive = ({ - case Resize => - resizerCell.resize(initial = false) - }: Actor.Receive) orElse super.receive + override def receive = + ({ + case Resize => + resizerCell.resize(initial = false) + }: Actor.Receive).orElse(super.receive) } diff --git 
a/akka-actor/src/main/scala/akka/routing/RoundRobin.scala b/akka-actor/src/main/scala/akka/routing/RoundRobin.scala index e78a685e6e..ae0a0b2e38 100644 --- a/akka-actor/src/main/scala/akka/routing/RoundRobin.scala +++ b/akka-actor/src/main/scala/akka/routing/RoundRobin.scala @@ -65,18 +65,18 @@ final class RoundRobinRoutingLogic extends RoutingLogic { * supervision, death watch and router management messages */ @SerialVersionUID(1L) -final case class RoundRobinPool( - val nrOfInstances: Int, override val resizer: Option[Resizer] = None, - override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, - override val usePoolDispatcher: Boolean = false) - extends Pool with PoolOverrideUnsetConfig[RoundRobinPool] { +final case class RoundRobinPool(val nrOfInstances: Int, + override val resizer: Option[Resizer] = None, + override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + override val usePoolDispatcher: Boolean = false) + extends Pool + with PoolOverrideUnsetConfig[RoundRobinPool] { def this(config: Config) = - this( - nrOfInstances = config.getInt("nr-of-instances"), - resizer = Resizer.fromConfig(config), - usePoolDispatcher = config.hasPath("pool-dispatcher")) + this(nrOfInstances = config.getInt("nr-of-instances"), + resizer = Resizer.fromConfig(config), + usePoolDispatcher = config.hasPath("pool-dispatcher")) /** * Java API @@ -128,10 +128,9 @@ final case class RoundRobinPool( * router management messages */ @SerialVersionUID(1L) -final case class RoundRobinGroup( - val paths: immutable.Iterable[String], - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) - extends Group { +final case class RoundRobinGroup(val paths: immutable.Iterable[String], + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) + extends Group { 
def this(config: Config) = this(paths = immutableSeq(config.getStringList("routees.paths"))) diff --git a/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala b/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala index 4608965c8c..90bb69b39f 100644 --- a/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala +++ b/akka-actor/src/main/scala/akka/routing/RoutedActorCell.scala @@ -36,14 +36,13 @@ private[akka] object RoutedActorCell { /** * INTERNAL API */ -private[akka] class RoutedActorCell( - _system: ActorSystemImpl, - _ref: InternalActorRef, - _routerProps: Props, - _routerDispatcher: MessageDispatcher, - val routeeProps: Props, - _supervisor: InternalActorRef) - extends ActorCell(_system, _ref, _routerProps, _routerDispatcher, _supervisor) { +private[akka] class RoutedActorCell(_system: ActorSystemImpl, + _ref: InternalActorRef, + _routerProps: Props, + _routerDispatcher: MessageDispatcher, + val routeeProps: Props, + _supervisor: InternalActorRef) + extends ActorCell(_system, _ref, _routerProps, _routerDispatcher, _supervisor) { private[akka] val routerConfig = _routerProps.routerConfig @@ -58,7 +57,7 @@ private[akka] class RoutedActorCell( * the old `Router` instance containing the old routees. 
*/ def addRoutees(routees: immutable.Iterable[Routee]): Unit = { - routees foreach watch + routees.foreach(watch) val r = _router _router = r.withRoutees(r.routees ++ routees) } @@ -72,9 +71,11 @@ private[akka] class RoutedActorCell( */ def removeRoutees(routees: immutable.Iterable[Routee], stopChild: Boolean): Unit = { val r = _router - val newRoutees = routees.foldLeft(r.routees) { (xs, x) => unwatch(x); xs.filterNot(_ == x) } + val newRoutees = routees.foldLeft(r.routees) { (xs, x) => + unwatch(x); xs.filterNot(_ == x) + } _router = r.withRoutees(newRoutees) - if (stopChild) routees foreach stopIfChild + if (stopChild) routees.foreach(stopIfChild) } private def watch(routee: Routee): Unit = routee match { @@ -88,14 +89,15 @@ private[akka] class RoutedActorCell( } private def stopIfChild(routee: Routee): Unit = routee match { - case ActorRefRoutee(ref) => child(ref.path.name) match { - case Some(`ref`) => - // The reason for the delay is to give concurrent - // messages a chance to be placed in mailbox before sending PoisonPill, - // best effort. - system.scheduler.scheduleOnce(100.milliseconds, ref, PoisonPill)(dispatcher) - case _ => - } + case ActorRefRoutee(ref) => + child(ref.path.name) match { + case Some(`ref`) => + // The reason for the delay is to give concurrent + // messages a chance to be placed in mailbox before sending PoisonPill, + // best effort. 
+ system.scheduler.scheduleOnce(100.milliseconds, ref, PoisonPill)(dispatcher) + case _ => + } case _ => } @@ -149,10 +151,9 @@ private[akka] class RouterActor extends Actor { throw ActorInitializationException("Router actor can only be used in RoutedActorRef, not in " + context.getClass) } - val routingLogicController: Option[ActorRef] = cell.routerConfig.routingLogicController( - cell.router.logic).map(props => context.actorOf( - props.withDispatcher(context.props.dispatcher), - name = "routingLogicController")) + val routingLogicController: Option[ActorRef] = cell.routerConfig + .routingLogicController(cell.router.logic) + .map(props => context.actorOf(props.withDispatcher(context.props.dispatcher), name = "routingLogicController")) def receive = { case GetRoutees => @@ -189,17 +190,17 @@ private[akka] class RouterPoolActor(override val supervisorStrategy: SupervisorS throw ActorInitializationException("RouterPoolActor can only be used with Pool, not " + other.getClass) } - override def receive = ({ - case AdjustPoolSize(change: Int) => - if (change > 0) { - val newRoutees = Vector.fill(change)(pool.newRoutee(cell.routeeProps, context)) - cell.addRoutees(newRoutees) - } else if (change < 0) { - val currentRoutees = cell.router.routees - val abandon = currentRoutees.drop(currentRoutees.length + change) - cell.removeRoutees(abandon, stopChild = true) - } - }: Actor.Receive) orElse super.receive + override def receive = + ({ + case AdjustPoolSize(change: Int) => + if (change > 0) { + val newRoutees = Vector.fill(change)(pool.newRoutee(cell.routeeProps, context)) + cell.addRoutees(newRoutees) + } else if (change < 0) { + val currentRoutees = cell.router.routees + val abandon = currentRoutees.drop(currentRoutees.length + change) + cell.removeRoutees(abandon, stopChild = true) + } + }: Actor.Receive).orElse(super.receive) } - diff --git a/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala b/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala index 
b512902628..477bcf41a0 100644 --- a/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala +++ b/akka-actor/src/main/scala/akka/routing/RoutedActorRef.scala @@ -22,21 +22,20 @@ import akka.dispatch.MessageDispatcher * A RoutedActorRef is an ActorRef that has a set of connected ActorRef and it uses a Router to * send a message to one (or more) of these actors. */ -private[akka] class RoutedActorRef( - _system: ActorSystemImpl, - _routerProps: Props, - _routerDispatcher: MessageDispatcher, - _routerMailbox: MailboxType, - _routeeProps: Props, - _supervisor: InternalActorRef, - _path: ActorPath) - extends RepointableActorRef(_system, _routerProps, _routerDispatcher, _routerMailbox, _supervisor, _path) { +private[akka] class RoutedActorRef(_system: ActorSystemImpl, + _routerProps: Props, + _routerDispatcher: MessageDispatcher, + _routerMailbox: MailboxType, + _routeeProps: Props, + _supervisor: InternalActorRef, + _path: ActorPath) + extends RepointableActorRef(_system, _routerProps, _routerDispatcher, _routerMailbox, _supervisor, _path) { // verify that a BalancingDispatcher is not used with a Router if (_routerProps.routerConfig != NoRouter && _routerDispatcher.isInstanceOf[BalancingDispatcher]) { throw new ConfigurationException( "Configuration for " + this + - " is invalid - you can not use a 'BalancingDispatcher' as a Router's dispatcher, you can however use it for the routees.") + " is invalid - you can not use a 'BalancingDispatcher' as a Router's dispatcher, you can however use it for the routees.") } else _routerProps.routerConfig.verifyConfig(_path) override def newCell(old: UnstartedCell): Cell = { diff --git a/akka-actor/src/main/scala/akka/routing/Router.scala b/akka-actor/src/main/scala/akka/routing/Router.scala index 2ef2337faa..8267157309 100644 --- a/akka-actor/src/main/scala/akka/routing/Router.scala +++ b/akka-actor/src/main/scala/akka/routing/Router.scala @@ -18,6 +18,7 @@ import akka.actor.NoSerializationVerificationNeeded * The implementation 
must be thread safe. */ trait RoutingLogic extends NoSerializationVerificationNeeded { + /** * Pick the destination for a given message. Normally it picks one of the * passed `routees`, but in the end it is up to the implementation to @@ -185,4 +186,3 @@ final case class Broadcast(message: Any) extends RouterEnvelope trait RouterEnvelope { def message: Any } - diff --git a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala index 793d9ffb9c..e8bff85efb 100644 --- a/akka-actor/src/main/scala/akka/routing/RouterConfig.scala +++ b/akka-actor/src/main/scala/akka/routing/RouterConfig.scala @@ -67,7 +67,7 @@ trait RouterConfig extends Serializable { */ def isManagementMessage(msg: Any): Boolean = msg match { case _: AutoReceivedMessage | _: Terminated | _: RouterManagementMesssage => true - case _ => false + case _ => false } /* @@ -109,7 +109,7 @@ private[akka] trait PoolOverrideUnsetConfig[T <: Pool] extends Pool { case p: Pool => val wssConf: PoolOverrideUnsetConfig[T] = if ((this.supervisorStrategy eq Pool.defaultSupervisorStrategy) - && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) + && (p.supervisorStrategy ne Pool.defaultSupervisorStrategy)) this.withSupervisorStrategy(p.supervisorStrategy).asInstanceOf[PoolOverrideUnsetConfig[T]] else this @@ -204,7 +204,8 @@ trait Pool extends RouterConfig { */ private[akka] def enrichWithPoolDispatcher(routeeProps: Props, context: ActorContext): Props = if (usePoolDispatcher && routeeProps.dispatcher == Dispatchers.DefaultDispatcherId) - routeeProps.withDispatcher("akka.actor.deployment." + context.self.path.elements.drop(1).mkString("/", "/", "") + routeeProps.withDispatcher( + "akka.actor.deployment." + context.self.path.elements.drop(1).mkString("/", "/", "") + ".pool-dispatcher") else routeeProps @@ -251,6 +252,7 @@ trait Pool extends RouterConfig { * a [[Pool]] it may extend this base class. 
*/ abstract class CustomRouterConfig extends RouterConfig { + /** * INTERNAL API */ @@ -265,14 +267,14 @@ abstract class CustomRouterConfig extends RouterConfig { * in the configuration. */ case object FromConfig extends FromConfig { + /** * Java API: get the singleton instance */ def getInstance = this - @inline final def apply( - resizer: Option[Resizer] = None, - supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - routerDispatcher: String = Dispatchers.DefaultDispatcherId) = + @inline final def apply(resizer: Option[Resizer] = None, + supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, + routerDispatcher: String = Dispatchers.DefaultDispatcherId) = new FromConfig(resizer, supervisorStrategy, routerDispatcher) @inline final def unapply(fc: FromConfig): Option[String] = Some(fc.routerDispatcher) @@ -287,10 +289,10 @@ case object FromConfig extends FromConfig { * (defaults to default-dispatcher). */ @SerialVersionUID(1L) -class FromConfig( - override val resizer: Option[Resizer], - override val supervisorStrategy: SupervisorStrategy, - override val routerDispatcher: String) extends Pool { +class FromConfig(override val resizer: Option[Resizer], + override val supervisorStrategy: SupervisorStrategy, + override val routerDispatcher: String) + extends Pool { def this() = this(None, Pool.defaultSupervisorStrategy, Dispatchers.DefaultDispatcherId) @@ -345,7 +347,9 @@ class FromConfig( abstract class NoRouter extends RouterConfig case object NoRouter extends NoRouter { - override def createRouter(system: ActorSystem): Router = throw new UnsupportedOperationException("NoRouter has no Router") + override def createRouter(system: ActorSystem): Router = + throw new UnsupportedOperationException("NoRouter has no Router") + /** * INTERNAL API */ @@ -376,6 +380,7 @@ case object NoRouter extends NoRouter { @SerialVersionUID(1L) abstract class GetRoutees extends RouterManagementMesssage @SerialVersionUID(1L) case object GetRoutees 
extends GetRoutees { + /** * Java API: get the singleton instance */ @@ -387,6 +392,7 @@ case object NoRouter extends NoRouter { */ @SerialVersionUID(1L) final case class Routees(routees: immutable.IndexedSeq[Routee]) { + /** * Java API */ diff --git a/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala b/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala index 4147fe77a6..a5fb5b9970 100644 --- a/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala +++ b/akka-actor/src/main/scala/akka/routing/ScatterGatherFirstCompleted.scala @@ -38,8 +38,9 @@ final case class ScatterGatherFirstCompletedRoutingLogic(within: FiniteDuration) * INTERNAL API */ @SerialVersionUID(1L) -private[akka] final case class ScatterGatherFirstCompletedRoutees( - routees: immutable.IndexedSeq[Routee], within: FiniteDuration) extends Routee { +private[akka] final case class ScatterGatherFirstCompletedRoutees(routees: immutable.IndexedSeq[Routee], + within: FiniteDuration) + extends Routee { override def send(message: Any, sender: ActorRef): Unit = if (routees.isEmpty) { @@ -97,19 +98,20 @@ private[akka] final case class ScatterGatherFirstCompletedRoutees( */ @SerialVersionUID(1L) final case class ScatterGatherFirstCompletedPool( - val nrOfInstances: Int, override val resizer: Option[Resizer] = None, - within: FiniteDuration, - override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, - override val usePoolDispatcher: Boolean = false) - extends Pool with PoolOverrideUnsetConfig[ScatterGatherFirstCompletedPool] { + val nrOfInstances: Int, + override val resizer: Option[Resizer] = None, + within: FiniteDuration, + override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + override val usePoolDispatcher: Boolean = false) + extends Pool + 
with PoolOverrideUnsetConfig[ScatterGatherFirstCompletedPool] { def this(config: Config) = - this( - nrOfInstances = config.getInt("nr-of-instances"), - within = config.getMillisDuration("within"), - resizer = Resizer.fromConfig(config), - usePoolDispatcher = config.hasPath("pool-dispatcher")) + this(nrOfInstances = config.getInt("nr-of-instances"), + within = config.getMillisDuration("within"), + resizer = Resizer.fromConfig(config), + usePoolDispatcher = config.hasPath("pool-dispatcher")) /** * Java API @@ -134,7 +136,8 @@ final case class ScatterGatherFirstCompletedPool( /** * Setting the supervisor strategy to be used for the “head” Router actor. */ - def withSupervisorStrategy(strategy: SupervisorStrategy): ScatterGatherFirstCompletedPool = copy(supervisorStrategy = strategy) + def withSupervisorStrategy(strategy: SupervisorStrategy): ScatterGatherFirstCompletedPool = + copy(supervisorStrategy = strategy) /** * Setting the resizer to be used. @@ -173,16 +176,14 @@ final case class ScatterGatherFirstCompletedPool( * router management messages */ @SerialVersionUID(1L) -final case class ScatterGatherFirstCompletedGroup( - val paths: immutable.Iterable[String], - within: FiniteDuration, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) - extends Group { +final case class ScatterGatherFirstCompletedGroup(val paths: immutable.Iterable[String], + within: FiniteDuration, + override val routerDispatcher: String = + Dispatchers.DefaultDispatcherId) + extends Group { def this(config: Config) = - this( - paths = immutableSeq(config.getStringList("routees.paths")), - within = config.getMillisDuration("within")) + this(paths = immutableSeq(config.getStringList("routees.paths")), within = config.getMillisDuration("within")) /** * Java API diff --git a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala index a36daf344d..dafc92602d 100644 --- 
a/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala +++ b/akka-actor/src/main/scala/akka/routing/SmallestMailbox.scala @@ -46,12 +46,11 @@ class SmallestMailboxRoutingLogic extends RoutingLogic { // 4. An ActorRef with unknown mailbox size that isn't processing anything // 5. An ActorRef with a known mailbox size // 6. An ActorRef without any messages - @tailrec private def selectNext( - targets: immutable.IndexedSeq[Routee], - proposedTarget: Routee = NoRoutee, - currentScore: Long = Long.MaxValue, - at: Int = 0, - deep: Boolean = false): Routee = { + @tailrec private def selectNext(targets: immutable.IndexedSeq[Routee], + proposedTarget: Routee = NoRoutee, + currentScore: Long = Long.MaxValue, + at: Int = 0, + deep: Boolean = false): Routee = { if (targets.isEmpty) NoRoutee else if (at >= targets.size) { @@ -61,12 +60,14 @@ class SmallestMailboxRoutingLogic extends RoutingLogic { } else { val target = targets(at) val newScore: Long = - if (isSuspended(target)) Long.MaxValue - 1 else { //Just about better than the DeadLetters - (if (isProcessingMessage(target)) 1l else 0l) + - (if (!hasMessages(target)) 0l else { //Race between hasMessages and numberOfMessages here, unfortunate the numberOfMessages returns 0 if unknown - val noOfMsgs: Long = if (deep) numberOfMessages(target) else 0 - if (noOfMsgs > 0) noOfMsgs else Long.MaxValue - 3 //Just better than a suspended actorref - }) + if (isSuspended(target)) Long.MaxValue - 1 + else { //Just about better than the DeadLetters + (if (isProcessingMessage(target)) 1L else 0L) + + (if (!hasMessages(target)) 0L + else { //Race between hasMessages and numberOfMessages here, unfortunate the numberOfMessages returns 0 if unknown + val noOfMsgs: Long = if (deep) numberOfMessages(target) else 0 + if (noOfMsgs > 0) noOfMsgs else Long.MaxValue - 3 //Just better than a suspended actorref + }) } if (newScore == 0) target @@ -173,18 +174,19 @@ class SmallestMailboxRoutingLogic extends RoutingLogic { * supervision, death 
watch and router management messages */ @SerialVersionUID(1L) -final case class SmallestMailboxPool( - val nrOfInstances: Int, override val resizer: Option[Resizer] = None, - override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, - override val usePoolDispatcher: Boolean = false) - extends Pool with PoolOverrideUnsetConfig[SmallestMailboxPool] { +final case class SmallestMailboxPool(val nrOfInstances: Int, + override val resizer: Option[Resizer] = None, + override val supervisorStrategy: SupervisorStrategy = + Pool.defaultSupervisorStrategy, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + override val usePoolDispatcher: Boolean = false) + extends Pool + with PoolOverrideUnsetConfig[SmallestMailboxPool] { def this(config: Config) = - this( - nrOfInstances = config.getInt("nr-of-instances"), - resizer = Resizer.fromConfig(config), - usePoolDispatcher = config.hasPath("pool-dispatcher")) + this(nrOfInstances = config.getInt("nr-of-instances"), + resizer = Resizer.fromConfig(config), + usePoolDispatcher = config.hasPath("pool-dispatcher")) /** * Java API diff --git a/akka-actor/src/main/scala/akka/routing/TailChopping.scala b/akka-actor/src/main/scala/akka/routing/TailChopping.scala index a2b70c5849..a0a4654174 100644 --- a/akka-actor/src/main/scala/akka/routing/TailChopping.scala +++ b/akka-actor/src/main/scala/akka/routing/TailChopping.scala @@ -12,7 +12,7 @@ import akka.dispatch.Dispatchers import com.typesafe.config.Config import akka.japi.Util.immutableSeq import scala.concurrent.{ ExecutionContext, Promise } -import akka.pattern.{ AskTimeoutException, ask, pipe } +import akka.pattern.{ ask, pipe, AskTimeoutException } import scala.concurrent.duration._ import akka.util.JavaDurationConverters._ import akka.util.Timeout @@ -46,8 +46,11 @@ import scala.util.Random * @param context execution context used by scheduler */ 
@SerialVersionUID(1L) -final case class TailChoppingRoutingLogic(scheduler: Scheduler, within: FiniteDuration, - interval: FiniteDuration, context: ExecutionContext) extends RoutingLogic { +final case class TailChoppingRoutingLogic(scheduler: Scheduler, + within: FiniteDuration, + interval: FiniteDuration, + context: ExecutionContext) + extends RoutingLogic { override def select(message: Any, routees: immutable.IndexedSeq[Routee]): Routee = { if (routees.isEmpty) NoRoutee else TailChoppingRoutees(scheduler, routees, within, interval)(context) @@ -58,9 +61,11 @@ final case class TailChoppingRoutingLogic(scheduler: Scheduler, within: FiniteDu * INTERNAL API */ @SerialVersionUID(1L) -private[akka] final case class TailChoppingRoutees( - scheduler: Scheduler, routees: immutable.IndexedSeq[Routee], - within: FiniteDuration, interval: FiniteDuration)(implicit ec: ExecutionContext) extends Routee { +private[akka] final case class TailChoppingRoutees(scheduler: Scheduler, + routees: immutable.IndexedSeq[Routee], + within: FiniteDuration, + interval: FiniteDuration)(implicit ec: ExecutionContext) + extends Routee { override def send(message: Any, sender: ActorRef): Unit = { implicit val timeout = Timeout(within) @@ -83,8 +88,8 @@ private[akka] final case class TailChoppingRoutees( } } - val sendTimeout = scheduler.scheduleOnce(within)(promise.tryFailure( - new AskTimeoutException(s"Ask timed out on [$sender] after [$within.toMillis} ms]"))) + val sendTimeout = scheduler.scheduleOnce(within)( + promise.tryFailure(new AskTimeoutException(s"Ask timed out on [$sender] after [$within.toMillis} ms]"))) val f = promise.future f.onComplete { @@ -142,22 +147,22 @@ private[akka] final case class TailChoppingRoutees( * supervision, death watch and router management messages */ @SerialVersionUID(1L) -final case class TailChoppingPool( - val nrOfInstances: Int, override val resizer: Option[Resizer] = None, - within: FiniteDuration, - interval: FiniteDuration, - override val 
supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, - override val usePoolDispatcher: Boolean = false) - extends Pool with PoolOverrideUnsetConfig[TailChoppingPool] { +final case class TailChoppingPool(val nrOfInstances: Int, + override val resizer: Option[Resizer] = None, + within: FiniteDuration, + interval: FiniteDuration, + override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + override val usePoolDispatcher: Boolean = false) + extends Pool + with PoolOverrideUnsetConfig[TailChoppingPool] { def this(config: Config) = - this( - nrOfInstances = config.getInt("nr-of-instances"), - within = config.getMillisDuration("within"), - interval = config.getMillisDuration("tail-chopping-router.interval"), - resizer = Resizer.fromConfig(config), - usePoolDispatcher = config.hasPath("pool-dispatcher")) + this(nrOfInstances = config.getInt("nr-of-instances"), + within = config.getMillisDuration("within"), + interval = config.getMillisDuration("tail-chopping-router.interval"), + resizer = Resizer.fromConfig(config), + usePoolDispatcher = config.hasPath("pool-dispatcher")) /** * Java API @@ -180,8 +185,8 @@ final case class TailChoppingPool( this(nr, within.asScala, interval.asScala) override def createRouter(system: ActorSystem): Router = - new Router(TailChoppingRoutingLogic(system.scheduler, within, - interval, system.dispatchers.lookup(routerDispatcher))) + new Router( + TailChoppingRoutingLogic(system.scheduler, within, interval, system.dispatchers.lookup(routerDispatcher))) override def nrOfInstances(sys: ActorSystem) = this.nrOfInstances @@ -238,17 +243,16 @@ final case class TailChoppingPool( * @param routerDispatcher dispatcher to use for the router head actor, which handles * router management messages */ -final case class TailChoppingGroup( - val paths: 
immutable.Iterable[String], - within: FiniteDuration, - interval: FiniteDuration, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends Group { +final case class TailChoppingGroup(val paths: immutable.Iterable[String], + within: FiniteDuration, + interval: FiniteDuration, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) + extends Group { def this(config: Config) = - this( - paths = immutableSeq(config.getStringList("routees.paths")), - within = config.getMillisDuration("within"), - interval = config.getMillisDuration("tail-chopping-router.interval")) + this(paths = immutableSeq(config.getStringList("routees.paths")), + within = config.getMillisDuration("within"), + interval = config.getMillisDuration("tail-chopping-router.interval")) /** * Java API @@ -273,7 +277,8 @@ final case class TailChoppingGroup( this(immutableSeq(routeePaths), within.asScala, interval.asScala) override def createRouter(system: ActorSystem): Router = - new Router(TailChoppingRoutingLogic(system.scheduler, within, interval, system.dispatchers.lookup(routerDispatcher))) + new Router( + TailChoppingRoutingLogic(system.scheduler, within, interval, system.dispatchers.lookup(routerDispatcher))) override def paths(system: ActorSystem): immutable.Iterable[String] = this.paths diff --git a/akka-actor/src/main/scala/akka/serialization/AsyncSerializer.scala b/akka-actor/src/main/scala/akka/serialization/AsyncSerializer.scala index f5f50c7576..2ae2fd11f6 100644 --- a/akka-actor/src/main/scala/akka/serialization/AsyncSerializer.scala +++ b/akka-actor/src/main/scala/akka/serialization/AsyncSerializer.scala @@ -19,6 +19,7 @@ import scala.concurrent.{ Await, Future } * [[AsyncSerializerWithStringManifestCS]] that delegates synchronous calls to their async equivalents. 
*/ trait AsyncSerializer { + /** * Serializes the given object into an Array of Byte */ @@ -34,14 +35,20 @@ trait AsyncSerializer { * Scala API: Async serializer with string manifest that delegates synchronous calls to the asynchronous calls * and blocks. */ -abstract class AsyncSerializerWithStringManifest(system: ExtendedActorSystem) extends SerializerWithStringManifest with AsyncSerializer { +abstract class AsyncSerializerWithStringManifest(system: ExtendedActorSystem) + extends SerializerWithStringManifest + with AsyncSerializer { final override def toBinary(o: AnyRef): Array[Byte] = { - system.log.warning("Async serializer called synchronously. This will block. Async serializers should only be used for akka persistence plugins that support them. Class: {}", o.getClass) + system.log.warning( + "Async serializer called synchronously. This will block. Async serializers should only be used for akka persistence plugins that support them. Class: {}", + o.getClass) Await.result(toBinaryAsync(o), Duration.Inf) } final override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = { - system.log.warning("Async serializer called synchronously. This will block. Async serializers should only be used for akka persistence plugins that support them. Manifest: [{}]", manifest) + system.log.warning( + "Async serializer called synchronously. This will block. Async serializers should only be used for akka persistence plugins that support them. Manifest: [{}]", + manifest) Await.result(fromBinaryAsync(bytes, manifest), Duration.Inf) } } @@ -50,7 +57,8 @@ abstract class AsyncSerializerWithStringManifest(system: ExtendedActorSystem) ex * Java API: Async serializer with string manifest that delegates synchronous calls to the asynchronous calls * and blocks. 
*/ -abstract class AsyncSerializerWithStringManifestCS(system: ExtendedActorSystem) extends AsyncSerializerWithStringManifest(system) { +abstract class AsyncSerializerWithStringManifestCS(system: ExtendedActorSystem) + extends AsyncSerializerWithStringManifest(system) { import scala.compat.java8.FutureConverters._ def toBinaryAsyncCS(o: AnyRef): CompletionStage[Array[Byte]] @@ -69,4 +77,3 @@ abstract class AsyncSerializerWithStringManifestCS(system: ExtendedActorSystem) def fromBinaryAsync(bytes: Array[Byte], manifest: String): Future[AnyRef] = fromBinaryAsyncCS(bytes, manifest).toScala } - diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index fbedca50b3..81d33a83f7 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -44,8 +44,8 @@ object Serialization { val defaultBindings = config.getConfig("akka.actor.serialization-bindings") val bindings = { if (config.getBoolean("akka.actor.enable-additional-serialization-bindings") || - !config.getBoolean("akka.actor.allow-java-serialization") || - config.hasPath("akka.remote.artery.enabled") && config.getBoolean("akka.remote.artery.enabled")) { + !config.getBoolean("akka.actor.allow-java-serialization") || + config.hasPath("akka.remote.artery.enabled") && config.getBoolean("akka.remote.artery.enabled")) { val bs = defaultBindings.withFallback(config.getConfig("akka.actor.additional-serialization-bindings")) @@ -63,7 +63,7 @@ object Serialization { private final def configToMap(cfg: Config): Map[String, String] = { import scala.collection.JavaConverters._ - cfg.root.unwrapped.asScala.toMap map { case (k, v) => (k -> v.toString) } + cfg.root.unwrapped.asScala.toMap.map { case (k, v) => (k -> v.toString) } } } @@ -80,18 +80,20 @@ object Serialization { case _ => null } Serialization.currentTransportInformation.value match { - case 
null => originalSystem match { - case null => path.toSerializationFormat - case system => - try path.toSerializationFormatWithAddress(system.provider.getDefaultAddress) - catch { case NonFatal(_) => path.toSerializationFormat } - } + case null => + originalSystem match { + case null => path.toSerializationFormat + case system => + try path.toSerializationFormatWithAddress(system.provider.getDefaultAddress) + catch { case NonFatal(_) => path.toSerializationFormat } + } case Information(address, system) => if (originalSystem == null || originalSystem == system) path.toSerializationFormatWithAddress(address) else { val provider = originalSystem.provider - path.toSerializationFormatWithAddress(provider.getExternalAddressFor(address).getOrElse(provider.getDefaultAddress)) + path.toSerializationFormatWithAddress( + provider.getExternalAddressFor(address).getOrElse(provider.getDefaultAddress)) } } } @@ -135,8 +137,9 @@ object Serialization { */ def getCurrentTransportInformation(): Information = { Serialization.currentTransportInformation.value match { - case null => throw new IllegalStateException( - "currentTransportInformation is not set, use Serialization.withTransportInformation") + case null => + throw new IllegalStateException( + "currentTransportInformation is not set, use Serialization.withTransportInformation") case t => t } } @@ -187,9 +190,11 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { */ def deserialize[T](bytes: Array[Byte], serializerId: Int, clazz: Option[Class[_ <: T]]): Try[T] = Try { - val serializer = try getSerializerById(serializerId) catch { - case _: NoSuchElementException => throw new NotSerializableException( - s"Cannot find serializer with id [$serializerId]${clazz.map(c => " (class [" + c.getName + "])").getOrElse("")}. 
" + + val serializer = try getSerializerById(serializerId) + catch { + case _: NoSuchElementException => + throw new NotSerializableException( + s"Cannot find serializer with id [$serializerId]${clazz.map(c => " (class [" + c.getName + "])").getOrElse("")}. " + "The most probable reason is that the configuration entry " + "akka.actor.serializers is not in sync between the two systems.") } @@ -205,9 +210,11 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { */ def deserialize(bytes: Array[Byte], serializerId: Int, manifest: String): Try[AnyRef] = Try { - val serializer = try getSerializerById(serializerId) catch { - case _: NoSuchElementException => throw new NotSerializableException( - s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). The most probable reason is that the configuration entry " + + val serializer = try getSerializerById(serializerId) + catch { + case _: NoSuchElementException => + throw new NotSerializableException( + s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). 
The most probable reason is that the configuration entry " + "akka.actor.serializers is not in sync between the two systems.") } deserializeByteArray(bytes, serializer, manifest) @@ -217,7 +224,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { @tailrec def updateCache(cache: Map[String, Option[Class[_]]], key: String, value: Option[Class[_]]): Boolean = { manifestCache.compareAndSet(cache, cache.updated(key, value)) || - updateCache(manifestCache.get, key, value) // recursive, try again + updateCache(manifestCache.get, key, value) // recursive, try again } withTransportInformation { () => @@ -253,9 +260,11 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { */ @throws(classOf[NotSerializableException]) def deserializeByteBuffer(buf: ByteBuffer, serializerId: Int, manifest: String): AnyRef = { - val serializer = try getSerializerById(serializerId) catch { - case _: NoSuchElementException => throw new NotSerializableException( - s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). The most probable reason is that the configuration entry " + + val serializer = try getSerializerById(serializerId) + catch { + case _: NoSuchElementException => + throw new NotSerializableException( + s"Cannot find serializer with id [$serializerId] (manifest [$manifest]). 
The most probable reason is that the configuration entry " + "akka.actor.serializers is not in synch between the two systems.") } @@ -311,12 +320,12 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { case null => // bindings are ordered from most specific to least specific def unique(possibilities: immutable.Seq[(Class[_], Serializer)]): Boolean = possibilities.size == 1 || - (possibilities forall (_._1 isAssignableFrom possibilities(0)._1)) || - (possibilities forall (_._2 == possibilities(0)._2)) + (possibilities.forall(_._1.isAssignableFrom(possibilities(0)._1))) || + (possibilities.forall(_._2 == possibilities(0)._2)) val ser = { bindings.filter { - case (c, _) => c isAssignableFrom clazz + case (c, _) => c.isAssignableFrom(clazz) } match { case immutable.Seq() => throw new NotSerializableException(s"No configured serialization-bindings for class [${clazz.getName}]") @@ -332,13 +341,17 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { } if (possibilitiesWithoutJavaSerializer.isEmpty) { // shouldn't happen - throw new NotSerializableException(s"More than one JavaSerializer configured for class [${clazz.getName}]") + throw new NotSerializableException( + s"More than one JavaSerializer configured for class [${clazz.getName}]") } if (!unique(possibilitiesWithoutJavaSerializer)) { - _log.warning(LogMarker.Security, "Multiple serializers found for [{}], choosing first of: [{}]", - clazz.getName, - possibilitiesWithoutJavaSerializer.map { case (_, s) => s.getClass.getName }.mkString(", ")) + _log.warning(LogMarker.Security, + "Multiple serializers found for [{}], choosing first of: [{}]", + clazz.getName, + possibilitiesWithoutJavaSerializer + .map { case (_, s) => s.getClass.getName } + .mkString(", ")) } possibilitiesWithoutJavaSerializer.head._2 @@ -350,9 +363,11 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { serializerMap.putIfAbsent(clazz, ser) match { case null => if 
(shouldWarnAboutJavaSerializer(clazz, ser)) { - _log.warning(LogMarker.Security, "Using the default Java serializer for class [{}] which is not recommended because of " + - "performance implications. Use another serializer or disable this warning using the setting " + - "'akka.actor.warn-about-java-serializer-usage'", clazz.getName) + _log.warning(LogMarker.Security, + "Using the default Java serializer for class [{}] which is not recommended because of " + + "performance implications. Use another serializer or disable this warning using the setting " + + "'akka.actor.warn-about-java-serializer-usage'", + clazz.getName) } if (!warnUnexpectedNonAkkaSerializer(clazz, ser)) @@ -372,12 +387,13 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { // We override each instantiation of the JavaSerializer with the "disabled" serializer which will log warnings if used. val fqn = if (!system.settings.AllowJavaSerialization && serializerFQN == classOf[JavaSerializer].getName) { - log.debug("Replacing JavaSerializer with DisabledJavaSerializer, " + + log.debug( + "Replacing JavaSerializer with DisabledJavaSerializer, " + "due to `akka.actor.allow-java-serialization = off`.") classOf[DisabledJavaSerializer].getName } else serializerFQN - system.dynamicAccess.createInstanceFor[Serializer](fqn, List(classOf[ExtendedActorSystem] -> system)) recoverWith { + system.dynamicAccess.createInstanceFor[Serializer](fqn, List(classOf[ExtendedActorSystem] -> system)).recoverWith { case _: NoSuchMethodException => system.dynamicAccess.createInstanceFor[Serializer](fqn, Nil) // FIXME only needed on 2.13.0-M5 due to https://github.com/scala/bug/issues/11242 @@ -392,9 +408,10 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { (system.settings.setup.get[SerializationSetup] match { case None => Vector.empty case Some(setting) => setting.createSerializers(system) - }) collect { + }).collect { case det: SerializerDetails if 
isDisallowedJavaSerializer(det.serializer) => - log.debug("Replacing JavaSerializer with DisabledJavaSerializer, " + + log.debug( + "Replacing JavaSerializer with DisabledJavaSerializer, " + "due to `akka.actor.allow-java-serialization = off`.") SerializerDetails(det.alias, new DisabledJavaSerializer(system), det.useFor) case det => det @@ -440,8 +457,10 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { private def warnUnexpectedNonAkkaSerializer(clazz: Class[_], ser: Serializer): Boolean = { if (clazz.getName.startsWith("akka.") && !ser.getClass.getName.startsWith("akka.")) { log.warning("Using serializer [{}] for message [{}]. Note that this serializer " + - "is not implemented by Akka. It's not recommended to replace serializers for messages " + - "provided by Akka.", ser.getClass.getName, clazz.getName) + "is not implemented by Akka. It's not recommended to replace serializers for messages " + + "provided by Akka.", + ser.getClass.getName, + clazz.getName) true } else false } @@ -458,13 +477,15 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { * obeying any order between unrelated subtypes (insert sort). 
*/ private def sort(in: Iterable[ClassSerializer]): immutable.Seq[ClassSerializer] = - (in.foldLeft(new ArrayBuffer[ClassSerializer](in.size)) { (buf, ca) => - buf.indexWhere(_._1 isAssignableFrom ca._1) match { - case -1 => buf append ca - case x => buf insert (x, ca) - } - buf - }).to(immutable.Seq) + (in + .foldLeft(new ArrayBuffer[ClassSerializer](in.size)) { (buf, ca) => + buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { + case -1 => buf.append(ca) + case x => buf.insert(x, ca) + } + buf + }) + .to(immutable.Seq) /** * serializerMap is a Map whose keys is the class that is serializable and values is the serializer @@ -504,8 +525,10 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { serializerByIdentity(id) } - private val isJavaSerializationWarningEnabled = settings.config.getBoolean("akka.actor.warn-about-java-serializer-usage") - private val isWarningOnNoVerificationEnabled = settings.config.getBoolean("akka.actor.warn-on-no-serialization-verification") + private val isJavaSerializationWarningEnabled = + settings.config.getBoolean("akka.actor.warn-about-java-serializer-usage") + private val isWarningOnNoVerificationEnabled = + settings.config.getBoolean("akka.actor.warn-on-no-serialization-verification") private def isDisallowedJavaSerializer(serializer: Serializer): Boolean = { serializer.isInstanceOf[JavaSerializer] && !system.settings.AllowJavaSerialization @@ -522,10 +545,9 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { } isJavaSerializationWarningEnabled && - (serializer.isInstanceOf[JavaSerializer] || serializer.isInstanceOf[DisabledJavaSerializer]) && - !serializedClass.getName.startsWith("akka.") && - !serializedClass.getName.startsWith("java.lang.") && - !suppressWarningOnNonSerializationVerification(serializedClass) + (serializer.isInstanceOf[JavaSerializer] || serializer.isInstanceOf[DisabledJavaSerializer]) && + !serializedClass.getName.startsWith("akka.") && + 
!serializedClass.getName.startsWith("java.lang.") && + !suppressWarningOnNonSerializationVerification(serializedClass) } } - diff --git a/akka-actor/src/main/scala/akka/serialization/SerializationExtension.scala b/akka-actor/src/main/scala/akka/serialization/SerializationExtension.scala index 5633709398..74fc0e8c11 100644 --- a/akka-actor/src/main/scala/akka/serialization/SerializationExtension.scala +++ b/akka-actor/src/main/scala/akka/serialization/SerializationExtension.scala @@ -4,7 +4,7 @@ package akka.serialization -import akka.actor.{ ActorSystem, ExtensionId, ExtensionIdProvider, ExtendedActorSystem } +import akka.actor.{ ActorSystem, ExtendedActorSystem, ExtensionId, ExtensionIdProvider } /** * SerializationExtension is an Akka Extension to interact with the Serialization diff --git a/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala b/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala index a3afd82229..382d66e3e9 100644 --- a/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala +++ b/akka-actor/src/main/scala/akka/serialization/SerializationSetup.scala @@ -24,8 +24,8 @@ object SerializationSetup { * Java API: Programmatic definition of serializers * @param createSerializers create pairs of serializer and the set of classes it should be used for */ - def create( - createSerializers: akka.japi.Function[ExtendedActorSystem, java.util.List[SerializerDetails]]): SerializationSetup = + def create(createSerializers: akka.japi.Function[ExtendedActorSystem, java.util.List[SerializerDetails]]) + : SerializationSetup = apply(sys => createSerializers(sys).asScala.toVector) } @@ -33,11 +33,11 @@ object SerializationSetup { /** * Setup for the serialization subsystem, constructor is *Internal API*, use factories in [[SerializationSetup()]] */ -final class SerializationSetup private ( - val createSerializers: ExtendedActorSystem => immutable.Seq[SerializerDetails] -) extends Setup +final class SerializationSetup 
private (val createSerializers: ExtendedActorSystem => immutable.Seq[SerializerDetails]) + extends Setup object SerializerDetails { + /** * Scala API: factory for details about one programmatically setup serializer * @@ -64,7 +64,6 @@ object SerializerDetails { * Constructor is internal API: Use the factories [[SerializerDetails#create]] or [[SerializerDetails#apply]] * to construct */ -final class SerializerDetails private ( - val alias: String, - val serializer: Serializer, - val useFor: immutable.Seq[Class[_]]) +final class SerializerDetails private (val alias: String, + val serializer: Serializer, + val useFor: immutable.Seq[Class[_]]) diff --git a/akka-actor/src/main/scala/akka/serialization/Serializer.scala b/akka-actor/src/main/scala/akka/serialization/Serializer.scala index 44ac5e9b3c..7fa1c4a891 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serializer.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serializer.scala @@ -213,6 +213,7 @@ trait ByteBufferSerializer { * when globally unique serialization identifier is configured in the `reference.conf`. */ trait BaseSerializer extends Serializer { + /** * Actor system which is required by most serializer implementations. */ @@ -243,6 +244,7 @@ trait BaseSerializer extends Serializer { BaseSerializer.identifierFromConfig(getClass, system) } object BaseSerializer { + /** * Configuration namespace of serialization identifiers in the `reference.conf`. * @@ -298,6 +300,7 @@ object JavaSerializer { */ val currentSystem = new CurrentSystem final class CurrentSystem extends DynamicVariable[ExtendedActorSystem](null) { + /** * Java API: invoke the callable with the current system being set to the given value for this thread. 
* @@ -314,7 +317,8 @@ object JavaSerializer { */ class JavaSerializer(val system: ExtendedActorSystem) extends BaseSerializer { if (!system.settings.AllowJavaSerialization) - throw new DisabledJavaSerializer.JavaSerializationException("Attempted creation of `JavaSerializer` while `akka.actor.allow-java-serialization = off` was set!") + throw new DisabledJavaSerializer.JavaSerializationException( + "Attempted creation of `JavaSerializer` while `akka.actor.allow-java-serialization = off` was set!") def includeManifest: Boolean = false @@ -351,21 +355,28 @@ final case class DisabledJavaSerializer(system: ExtendedActorSystem) extends Ser def includeManifest: Boolean = false override def toBinary(o: AnyRef, buf: ByteBuffer): Unit = { - log.warning(LogMarker.Security, "Outgoing message attempted to use Java Serialization even though `akka.actor.allow-java-serialization = off` was set! " + - "Message type was: [{}]", o.getClass) + log.warning( + LogMarker.Security, + "Outgoing message attempted to use Java Serialization even though `akka.actor.allow-java-serialization = off` was set! " + + "Message type was: [{}]", + o.getClass) throw IllegalSerialization } @throws(classOf[NotSerializableException]) override def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { - log.warning(LogMarker.Security, "Incoming message attempted to use Java Serialization even though `akka.actor.allow-java-serialization = off` was set!") + log.warning( + LogMarker.Security, + "Incoming message attempted to use Java Serialization even though `akka.actor.allow-java-serialization = off` was set!") throw IllegalDeserialization } @throws(classOf[NotSerializableException]) override def fromBinary(buf: ByteBuffer, manifest: String): AnyRef = { // we don't capture the manifest or mention it in the log as the default setting for includeManifest is set to false. 
- log.warning(LogMarker.Security, "Incoming message attempted to use Java Serialization even though `akka.actor.allow-java-serialization = off` was set!") + log.warning( + LogMarker.Security, + "Incoming message attempted to use Java Serialization even though `akka.actor.allow-java-serialization = off` was set!") throw IllegalDeserialization } @@ -378,8 +389,10 @@ final case class DisabledJavaSerializer(system: ExtendedActorSystem) extends Ser object DisabledJavaSerializer { final class JavaSerializationException(msg: String) extends RuntimeException(msg) with NoStackTrace - final val IllegalSerialization = new JavaSerializationException("Attempted to serialize message using Java serialization while `akka.actor.allow-java-serialization` was disabled. Check WARNING logs for more details.") - final val IllegalDeserialization = new JavaSerializationException("Attempted to deserialize message using Java serialization while `akka.actor.allow-java-serialization` was disabled. Check WARNING logs for more details.") + final val IllegalSerialization = new JavaSerializationException( + "Attempted to serialize message using Java serialization while `akka.actor.allow-java-serialization` was disabled. Check WARNING logs for more details.") + final val IllegalDeserialization = new JavaSerializationException( + "Attempted to deserialize message using Java serialization while `akka.actor.allow-java-serialization` was disabled. 
Check WARNING logs for more details.") } /** @@ -404,8 +417,9 @@ class ByteArraySerializer(val system: ExtendedActorSystem) extends BaseSerialize def toBinary(o: AnyRef): Array[Byte] = o match { case null => null case o: Array[Byte] => o - case other => throw new IllegalArgumentException( - s"${getClass.getName} only serializes byte arrays, not [${other.getClass.getName}]") + case other => + throw new IllegalArgumentException( + s"${getClass.getName} only serializes byte arrays, not [${other.getClass.getName}]") } @throws(classOf[NotSerializableException]) @@ -415,8 +429,9 @@ class ByteArraySerializer(val system: ExtendedActorSystem) extends BaseSerialize o match { case null => case bytes: Array[Byte] => buf.put(bytes) - case other => throw new IllegalArgumentException( - s"${getClass.getName} only serializes byte arrays, not [${other.getClass.getName}]") + case other => + throw new IllegalArgumentException( + s"${getClass.getName} only serializes byte arrays, not [${other.getClass.getName}]") } @throws(classOf[NotSerializableException]) diff --git a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala index c3e662a17e..6e41a2e958 100644 --- a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala +++ b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala @@ -5,8 +5,8 @@ package akka.util import java.util.concurrent.locks.{ Condition, ReentrantLock } -import java.util.concurrent.{ TimeUnit, BlockingQueue } -import java.util.{ AbstractQueue, Queue, Collection, Iterator } +import java.util.concurrent.{ BlockingQueue, TimeUnit } +import java.util.{ AbstractQueue, Collection, Iterator, Queue } import annotation.tailrec @@ -15,8 +15,9 @@ import annotation.tailrec * @param maxCapacity - the maximum capacity of this Queue, needs to be > 0 * @param backing - the backing Queue */ -class BoundedBlockingQueue[E <: AnyRef]( - val maxCapacity: Int, private val backing: Queue[E]) extends 
AbstractQueue[E] with BlockingQueue[E] { +class BoundedBlockingQueue[E <: AnyRef](val maxCapacity: Int, private val backing: Queue[E]) + extends AbstractQueue[E] + with BlockingQueue[E] { backing match { case null => throw new IllegalArgumentException("Backing Queue may not be null") @@ -135,7 +136,7 @@ class BoundedBlockingQueue[E <: AnyRef]( if (e eq null) throw new NullPointerException lock.lock() try { - if (backing remove e) { + if (backing.remove(e)) { notFull.signal() true } else false @@ -145,7 +146,8 @@ class BoundedBlockingQueue[E <: AnyRef]( override def contains(e: AnyRef): Boolean = { if (e eq null) throw new NullPointerException lock.lock() - try backing.contains(e) finally lock.unlock() + try backing.contains(e) + finally lock.unlock() } override def clear(): Unit = { @@ -165,12 +167,14 @@ class BoundedBlockingQueue[E <: AnyRef]( def size(): Int = { lock.lock() - try backing.size() finally lock.unlock() + try backing.size() + finally lock.unlock() } def peek(): E = { lock.lock() - try backing.peek() finally lock.unlock() + try backing.peek() + finally lock.unlock() } def drainTo(c: Collection[_ >: E]): Int = drainTo(c, Int.MaxValue) @@ -187,7 +191,7 @@ class BoundedBlockingQueue[E <: AnyRef]( if (n < maxElements) { backing.poll() match { case null => n - case e => c add e; drainOne(n + 1) + case e => c.add(e); drainOne(n + 1) } } else n } @@ -200,7 +204,8 @@ class BoundedBlockingQueue[E <: AnyRef]( override def containsAll(c: Collection[_]): Boolean = { lock.lock() - try backing.containsAll(c) finally lock.unlock() + try backing.containsAll(c) + finally lock.unlock() } override def removeAll(c: Collection[_]): Boolean = { @@ -267,16 +272,19 @@ class BoundedBlockingQueue[E <: AnyRef]( override def toArray(): Array[AnyRef] = { lock.lock() - try backing.toArray finally lock.unlock() + try backing.toArray + finally lock.unlock() } override def isEmpty(): Boolean = { lock.lock() - try backing.isEmpty() finally lock.unlock() + try backing.isEmpty() + 
finally lock.unlock() } override def toArray[X](a: Array[X with AnyRef]) = { lock.lock() - try backing.toArray[X](a) finally lock.unlock() + try backing.toArray[X](a) + finally lock.unlock() } } diff --git a/akka-actor/src/main/scala/akka/util/BoxedType.scala b/akka-actor/src/main/scala/akka/util/BoxedType.scala index 769ae04ed9..4472a4979f 100644 --- a/akka-actor/src/main/scala/akka/util/BoxedType.scala +++ b/akka-actor/src/main/scala/akka/util/BoxedType.scala @@ -7,16 +7,15 @@ package akka.util object BoxedType { import java.{ lang => jl } - private val toBoxed = Map[Class[_], Class[_]]( - classOf[Boolean] -> classOf[jl.Boolean], - classOf[Byte] -> classOf[jl.Byte], - classOf[Char] -> classOf[jl.Character], - classOf[Short] -> classOf[jl.Short], - classOf[Int] -> classOf[jl.Integer], - classOf[Long] -> classOf[jl.Long], - classOf[Float] -> classOf[jl.Float], - classOf[Double] -> classOf[jl.Double], - classOf[Unit] -> classOf[scala.runtime.BoxedUnit]) + private val toBoxed = Map[Class[_], Class[_]](classOf[Boolean] -> classOf[jl.Boolean], + classOf[Byte] -> classOf[jl.Byte], + classOf[Char] -> classOf[jl.Character], + classOf[Short] -> classOf[jl.Short], + classOf[Int] -> classOf[jl.Integer], + classOf[Long] -> classOf[jl.Long], + classOf[Float] -> classOf[jl.Float], + classOf[Double] -> classOf[jl.Double], + classOf[Unit] -> classOf[scala.runtime.BoxedUnit]) final def apply(c: Class[_]): Class[_] = if (c.isPrimitive) toBoxed(c) else c } diff --git a/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala b/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala index 3f35dcdbd9..988905a217 100644 --- a/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala +++ b/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala @@ -15,7 +15,8 @@ import java.io.{ InputStream, ObjectInputStream, ObjectStreamClass } */ class ClassLoaderObjectInputStream(classLoader: ClassLoader, is: InputStream) extends 
ObjectInputStream(is) { override protected def resolveClass(objectStreamClass: ObjectStreamClass): Class[_] = - try Class.forName(objectStreamClass.getName, false, classLoader) catch { + try Class.forName(objectStreamClass.getName, false, classLoader) + catch { case _: ClassNotFoundException => super.resolveClass(objectStreamClass) } } diff --git a/akka-actor/src/main/scala/akka/util/Collections.scala b/akka-actor/src/main/scala/akka/util/Collections.scala index 5131622566..3dc5f17f87 100644 --- a/akka-actor/src/main/scala/akka/util/Collections.scala +++ b/akka-actor/src/main/scala/akka/util/Collections.scala @@ -49,7 +49,7 @@ private[akka] object Collections { } override lazy val size: Int = iterator.size - override def foreach[C](f: To => C) = iterator foreach f + override def foreach[C](f: To => C) = iterator.foreach(f) } } diff --git a/akka-actor/src/main/scala/akka/util/HashCode.scala b/akka-actor/src/main/scala/akka/util/HashCode.scala index 272bdef54a..b078f1b18d 100644 --- a/akka-actor/src/main/scala/akka/util/HashCode.scala +++ b/akka-actor/src/main/scala/akka/util/HashCode.scala @@ -52,4 +52,3 @@ object HashCode { private def isArray(anyRef: AnyRef): Boolean = anyRef.getClass.isArray private val PRIME = 37 } - diff --git a/akka-actor/src/main/scala/akka/util/Helpers.scala b/akka-actor/src/main/scala/akka/util/Helpers.scala index bc082b44bc..b3f46b366b 100644 --- a/akka-actor/src/main/scala/akka/util/Helpers.scala +++ b/akka-actor/src/main/scala/akka/util/Helpers.scala @@ -12,7 +12,7 @@ import scala.concurrent.duration.FiniteDuration import scala.concurrent.duration.Duration import java.util.concurrent.TimeUnit import java.util.Locale -import java.time.{ Instant, ZoneId, LocalDateTime } +import java.time.{ Instant, LocalDateTime, ZoneId } import java.time.format.DateTimeFormatter object Helpers { @@ -21,7 +21,8 @@ object Helpers { val isWindows: Boolean = toRootLowerCase(System.getProperty("os.name", "")).indexOf("win") >= 0 - def makePattern(s: String): 
Pattern = Pattern.compile("^\\Q" + s.replace("?", "\\E.\\Q").replace("*", "\\E.*\\Q") + "\\E$") + def makePattern(s: String): Pattern = + Pattern.compile("^\\Q" + s.replace("?", "\\E.\\Q").replace("*", "\\E.*\\Q") + "\\E$") def compareIdentityHash(a: AnyRef, b: AnyRef): Int = { /* @@ -29,7 +30,7 @@ object Helpers { * that the ordering is actually consistent and you cannot have a * sequence which cyclically is monotone without end. */ - val diff = ((System.identityHashCode(a) & 0xffffffffL) - (System.identityHashCode(b) & 0xffffffffL)) + val diff = ((System.identityHashCode(a) & 0XFFFFFFFFL) - (System.identityHashCode(b) & 0XFFFFFFFFL)) if (diff > 0) 1 else if (diff < 0) -1 else 0 } @@ -76,7 +77,7 @@ object Helpers { @tailrec def base64(l: Long, sb: java.lang.StringBuilder = new java.lang.StringBuilder("$")): String = { - sb append base64chars.charAt(l.toInt & 63) + sb.append(base64chars.charAt(l.toInt & 63)) val next = l >>> 6 if (next == 0) sb.toString else base64(next, sb) @@ -101,6 +102,7 @@ object Helpers { * @param value The value to check. */ @inline final implicit class Requiring[A](val value: A) extends AnyVal { + /** * Check that a condition is true. If true, return `value`, otherwise throw * an `IllegalArgumentException` with the given message. diff --git a/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala b/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala index dc52042145..dbf3238aa6 100644 --- a/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala +++ b/akka-actor/src/main/scala/akka/util/ImmutableIntMap.scala @@ -20,7 +20,8 @@ import scala.annotation.tailrec * Keys and values are encoded consecutively in a single Int array and does copy-on-write with no * structural sharing, it's intended for rather small maps (<1000 elements). 
*/ -@InternalApi private[akka] final class ImmutableIntMap private (private final val kvs: Array[Int], final val size: Int) { +@InternalApi private[akka] final class ImmutableIntMap private (private final val kvs: Array[Int], + final val size: Int) { private final def this(key: Int, value: Int) = { this(new Array[Int](2), 1) @@ -55,7 +56,8 @@ import scala.annotation.tailrec if (lo <= hi) { val lohi = lo + hi // Since we search in half the array we don't need to div by 2 to find the real index of key val k = kvs(lohi & ~1) // Since keys are in even slots, we get the key idx from lo+hi by removing the lowest bit if set (odd) - if (k == key) kvs(lohi | 1) // lohi, if odd, already points to the value-index, if even, we set the lowest bit to add 1 + if (k == key) + kvs(lohi | 1) // lohi, if odd, already points to the value-index, if even, we set the lowest bit to add 1 else if (k < key) find((lohi >>> 1) + 1, hi) else /* if (k > key) */ find(lo, (lohi >>> 1) - 1) } else Int.MinValue @@ -135,12 +137,17 @@ import scala.annotation.tailrec override final def toString: String = if (size < 1) "ImmutableIntMap()" - else Iterator.range(0, kvs.length - 1, 2).map(i => s"${kvs(i)} -> ${kvs(i + 1)}").mkString("ImmutableIntMap(", ", ", ")") + else + Iterator + .range(0, kvs.length - 1, 2) + .map(i => s"${kvs(i)} -> ${kvs(i + 1)}") + .mkString("ImmutableIntMap(", ", ", ")") override final def hashCode: Int = Arrays.hashCode(kvs) override final def equals(obj: Any): Boolean = obj match { - case other: ImmutableIntMap => Arrays.equals(kvs, other.kvs) // No need to test `this eq obj` since this is done for the kvs arrays anyway - case _ => false + case other: ImmutableIntMap => + Arrays.equals(kvs, other.kvs) // No need to test `this eq obj` since this is done for the kvs arrays anyway + case _ => false } } diff --git a/akka-actor/src/main/scala/akka/util/Index.scala b/akka-actor/src/main/scala/akka/util/Index.scala index b024aee039..ec152e5868 100644 --- 
a/akka-actor/src/main/scala/akka/util/Index.scala +++ b/akka-actor/src/main/scala/akka/util/Index.scala @@ -6,7 +6,7 @@ package akka.util import annotation.tailrec -import java.util.concurrent.{ ConcurrentSkipListSet, ConcurrentHashMap } +import java.util.concurrent.{ ConcurrentHashMap, ConcurrentSkipListSet } import java.util.Comparator import scala.collection.JavaConverters.{ asScalaIteratorConverter, collectionAsScalaIterableConverter } @@ -17,9 +17,10 @@ import scala.collection.JavaConverters.{ asScalaIteratorConverter, collectionAsS */ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { - def this(mapSize: Int, cmp: (V, V) => Int) = this(mapSize, new Comparator[V] { - def compare(a: V, b: V): Int = cmp(a, b) - }) + def this(mapSize: Int, cmp: (V, V) => Int) = + this(mapSize, new Comparator[V] { + def compare(a: V, b: V): Int = cmp(a, b) + }) private val container = new ConcurrentHashMap[K, ConcurrentSkipListSet[V]](mapSize) private val emptySet = new ConcurrentSkipListSet[V] @@ -34,19 +35,19 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { def spinPut(k: K, v: V): Boolean = { var retry = false var added = false - val set = container get k + val set = container.get(k) if (set ne null) { set.synchronized { if (set.isEmpty) retry = true //IF the set is empty then it has been removed, so signal retry else { //Else add the value to the set and signal that retry is not needed - added = set add v + added = set.add(v) retry = false } } } else { val newSet = new ConcurrentSkipListSet[V](valueComparator) - newSet add v + newSet.add(v) // Parry for two simultaneous putIfAbsent(id,newSet) val oldSet = container.putIfAbsent(k, newSet) @@ -54,7 +55,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { oldSet.synchronized { if (oldSet.isEmpty) retry = true //IF the set is empty then it has been removed, so signal retry else { //Else try to add the value to the set and signal that retry is not needed - 
added = oldSet add v + added = oldSet.add(v) retry = false } } @@ -73,9 +74,9 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { * if no matches it returns None */ def findValue(key: K)(f: (V) => Boolean): Option[V] = - container get key match { + container.get(key) match { case null => None - case set => set.iterator.asScala find f + case set => set.iterator.asScala.find(f) } /** @@ -92,7 +93,9 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { * Applies the supplied function to all keys and their values */ def foreach(fun: (K, V) => Unit): Unit = - container.entrySet.iterator.asScala foreach { e => e.getValue.iterator.asScala.foreach(fun(e.getKey, _)) } + container.entrySet.iterator.asScala.foreach { e => + e.getValue.iterator.asScala.foreach(fun(e.getKey, _)) + } /** * Returns the union of all value sets. @@ -116,7 +119,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { * @return true if the value was disassociated from the key and false if it wasn't previously associated with the key */ def remove(key: K, value: V): Boolean = { - val set = container get key + val set = container.get(key) if (set ne null) { set.synchronized { @@ -135,7 +138,7 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { * @return None if the key wasn't associated at all, or Some(scala.Iterable[V]) if it was associated */ def remove(key: K): Option[Iterable[V]] = { - val set = container get key + val set = container.get(key) if (set ne null) { set.synchronized { @@ -180,7 +183,9 @@ class Index[K, V](val mapSize: Int, val valueComparator: Comparator[V]) { while (i.hasNext) { val e = i.next() val set = e.getValue() - if (set ne null) { set.synchronized { set.clear(); container.remove(e.getKey, emptySet) } } + if (set ne null) { + set.synchronized { set.clear(); container.remove(e.getKey, emptySet) } + } } } } @@ -190,4 +195,5 @@ class Index[K, V](val mapSize: Int, val valueComparator: 
Comparator[V]) { * Adds/remove is serialized over the specified key * Reads are fully concurrent <-- el-cheapo */ -class ConcurrentMultiMap[K, V](mapSize: Int, valueComparator: Comparator[V]) extends Index[K, V](mapSize, valueComparator) +class ConcurrentMultiMap[K, V](mapSize: Int, valueComparator: Comparator[V]) + extends Index[K, V](mapSize, valueComparator) diff --git a/akka-actor/src/main/scala/akka/util/JavaDurationConverters.scala b/akka-actor/src/main/scala/akka/util/JavaDurationConverters.scala index 6718528317..78da58d03d 100644 --- a/akka-actor/src/main/scala/akka/util/JavaDurationConverters.scala +++ b/akka-actor/src/main/scala/akka/util/JavaDurationConverters.scala @@ -6,6 +6,7 @@ package akka.util import java.time.{ Duration => JDuration } import scala.concurrent.duration.{ Duration, FiniteDuration } + /** * INTERNAL API */ diff --git a/akka-actor/src/main/scala/akka/util/LineNumbers.scala b/akka-actor/src/main/scala/akka/util/LineNumbers.scala index 5f1b3479f7..6924b70c1a 100644 --- a/akka-actor/src/main/scala/akka/util/LineNumbers.scala +++ b/akka-actor/src/main/scala/akka/util/LineNumbers.scala @@ -90,7 +90,7 @@ object LineNumbers { def apply(idx: Int): String = _fwd(idx) def apply(str: String): Int = _rev(str) - def resolve(): Unit = _xref foreach (p => put(p._1, apply(p._2))) + def resolve(): Unit = _xref.foreach(p => put(p._1, apply(p._2))) def contains(str: String): Boolean = _rev contains str private def put(idx: Int, str: String): Unit = { @@ -161,7 +161,8 @@ object LineNumbers { skipID(dis) skipVersion(dis) implicit val constants = getConstants(dis) - if (debug) println(s"LNB: fwd(${constants.fwd.size}) rev(${constants.rev.size}) ${constants.fwd.keys.toList.sorted}") + if (debug) + println(s"LNB: fwd(${constants.fwd.size}) rev(${constants.rev.size}) ${constants.fwd.keys.toList.sorted}") skipClassInfo(dis) skipInterfaceInfo(dis) skipFields(dis) @@ -169,15 +170,17 @@ object LineNumbers { val source = readAttributes(dis) if (source.isEmpty) 
NoSourceInfo - else lines match { - case None => SourceFile(source.get) - case Some((from, to)) => SourceFileLines(source.get, from, to) - } + else + lines match { + case None => SourceFile(source.get) + case Some((from, to)) => SourceFileLines(source.get, from, to) + } } catch { case NonFatal(ex) => UnknownSourceFormat(s"parse error: ${ex.getMessage}") } finally { - try dis.close() catch { + try dis.close() + catch { case ex: InterruptedException => throw ex case NonFatal(_) => // ignore } @@ -199,7 +202,8 @@ object LineNumbers { writeReplace.setAccessible(true) writeReplace.invoke(l) match { case serialized: SerializedLambda => - if (debug) println(s"LNB: found Lambda implemented in ${serialized.getImplClass}:${serialized.getImplMethodName}") + if (debug) + println(s"LNB: found Lambda implemented in ${serialized.getImplClass}:${serialized.getImplMethodName}") Option(c.getClassLoader.getResourceAsStream(serialized.getImplClass + ".class")) .map(_ -> Some(serialized.getImplMethodName)) case _ => None @@ -271,9 +275,12 @@ object LineNumbers { val count = d.readUnsignedShort() if (debug) println(s"LNB: reading $count methods") if (c.contains("Code") && c.contains("LineNumberTable")) { - (1 to count).map(_ => readMethod(d, c("Code"), c("LineNumberTable"), filter)).flatten.foldLeft(Int.MaxValue -> 0) { - case ((low, high), (start, end)) => (Math.min(low, start), Math.max(high, end)) - } match { + (1 to count) + .map(_ => readMethod(d, c("Code"), c("LineNumberTable"), filter)) + .flatten + .foldLeft(Int.MaxValue -> 0) { + case ((low, high), (start, end)) => (Math.min(low, start), Math.max(high, end)) + } match { case (Int.MaxValue, 0) => None case other => Some(other) } @@ -284,11 +291,8 @@ object LineNumbers { } } - private def readMethod( - d: DataInputStream, - codeTag: Int, - lineNumberTableTag: Int, - filter: Option[String])(implicit c: Constants): Option[(Int, Int)] = { + private def readMethod(d: DataInputStream, codeTag: Int, lineNumberTableTag: Int, filter: 
Option[String])( + implicit c: Constants): Option[(Int, Int)] = { skip(d, 2) // access flags val name = d.readUnsignedShort() // name skip(d, 2) // signature diff --git a/akka-actor/src/main/scala/akka/util/LockUtil.scala b/akka-actor/src/main/scala/akka/util/LockUtil.scala index 3204104f79..967dc5decd 100644 --- a/akka-actor/src/main/scala/akka/util/LockUtil.scala +++ b/akka-actor/src/main/scala/akka/util/LockUtil.scala @@ -12,7 +12,8 @@ final class ReentrantGuard extends ReentrantLock { @inline final def withGuard[T](body: => T): T = { lock() - try body finally unlock() + try body + finally unlock() } } @@ -24,7 +25,8 @@ class Switch(startAsOn: Boolean = false) { protected def transcend(from: Boolean, action: => Unit): Boolean = synchronized { if (switch.compareAndSet(from, !from)) { - try action catch { + try action + catch { case t: Throwable => switch.compareAndSet(!from, from) // revert status throw t diff --git a/akka-actor/src/main/scala/akka/util/ManifestInfo.scala b/akka-actor/src/main/scala/akka/util/ManifestInfo.scala index 27b8342560..7038477fca 100644 --- a/akka-actor/src/main/scala/akka/util/ManifestInfo.scala +++ b/akka-actor/src/main/scala/akka/util/ManifestInfo.scala @@ -30,14 +30,12 @@ object ManifestInfo extends ExtensionId[ManifestInfo] with ExtensionIdProvider { private val BundleVersion = "Bundle-Version" private val BundleVendor = "Bundle-Vendor" - private val knownVendors = Set( - "com.typesafe.akka", - "com.lightbend.akka", - "Lightbend Inc.", - "Lightbend", - "com.lightbend.lagom", - "com.typesafe.play" - ) + private val knownVendors = Set("com.typesafe.akka", + "com.lightbend.akka", + "Lightbend Inc.", + "Lightbend", + "com.lightbend.lagom", + "com.typesafe.play") override def get(system: ActorSystem): ManifestInfo = super.get(system) @@ -143,9 +141,9 @@ final class ManifestInfo(val system: ExtendedActorSystem) extends Extension { } if (title != null - && version != null - && vendor != null - && knownVendors(vendor)) { + && version != 
null + && vendor != null + && knownVendors(vendor)) { manifests = manifests.updated(title, new Version(version)) } } finally { @@ -172,11 +170,13 @@ final class ManifestInfo(val system: ExtendedActorSystem) extends Extension { val highestVersion = values.max Logging(system, getClass).warning( "Detected possible incompatible versions on the classpath. " + - s"Please note that a given $productName version MUST be the same across all modules of $productName " + - "that you are using, e.g. if you use [{}] all other modules that are released together MUST be of the " + - "same version. Make sure you're using a compatible set of libraries. " + - "Possibly conflicting versions [{}] in libraries [{}]", - highestVersion, conflictingVersions, fullInfo) + s"Please note that a given $productName version MUST be the same across all modules of $productName " + + "that you are using, e.g. if you use [{}] all other modules that are released together MUST be of the " + + "same version. Make sure you're using a compatible set of libraries. " + + "Possibly conflicting versions [{}] in libraries [{}]", + highestVersion, + conflictingVersions, + fullInfo) } false } else diff --git a/akka-actor/src/main/scala/akka/util/MessageBuffer.scala b/akka-actor/src/main/scala/akka/util/MessageBuffer.scala index 1c7404072b..971d02256c 100644 --- a/akka-actor/src/main/scala/akka/util/MessageBuffer.scala +++ b/akka-actor/src/main/scala/akka/util/MessageBuffer.scala @@ -10,9 +10,7 @@ import akka.japi.function.Procedure2 /** * A non thread safe mutable message buffer that can be used to buffer messages inside actors. 
*/ -final class MessageBuffer private ( - private var _head: MessageBuffer.Node, - private var _tail: MessageBuffer.Node) { +final class MessageBuffer private (private var _head: MessageBuffer.Node, private var _tail: MessageBuffer.Node) { import MessageBuffer._ private var _size: Int = if (_head eq null) 0 else 1 diff --git a/akka-actor/src/main/scala/akka/util/OptionVal.scala b/akka-actor/src/main/scala/akka/util/OptionVal.scala index 1c073d7ae8..0f29b9db9f 100644 --- a/akka-actor/src/main/scala/akka/util/OptionVal.scala +++ b/akka-actor/src/main/scala/akka/util/OptionVal.scala @@ -59,7 +59,7 @@ private[akka] final class OptionVal[+A](val x: A) extends AnyVal { /** * Returns the option's value if it is nonempty, or `null` if it is empty. */ - def orNull[A1 >: A](implicit ev: Null <:< A1): A1 = this getOrElse ev(null) + def orNull[A1 >: A](implicit ev: Null <:< A1): A1 = this.getOrElse(ev(null)) /** * Returns the option's value. diff --git a/akka-actor/src/main/scala/akka/util/PrettyByteString.scala b/akka-actor/src/main/scala/akka/util/PrettyByteString.scala index def0db0e69..5b2989cbfe 100644 --- a/akka-actor/src/main/scala/akka/util/PrettyByteString.scala +++ b/akka-actor/src/main/scala/akka/util/PrettyByteString.scala @@ -17,7 +17,7 @@ private[akka] object PrettyByteString { } def formatBytes(bs: ByteString, maxBytes: Int = 16 * 5): Iterator[String] = { - def asHex(b: Byte): String = b formatted "%02X" + def asHex(b: Byte): String = b.formatted("%02X") def asASCII(b: Byte): Char = if (b >= 0x20 && b < 0x7f) b.toChar else '.' @@ -35,11 +35,10 @@ private[akka] object PrettyByteString { if (bs.size <= maxBytes) Iterator(prefix + "\n", formatBytes(bs)) else - Iterator( - s"$prefix first + last $maxBytes:\n", - formatBytes(bs.take(maxBytes)), - s"\n$indent ... [${bs.size - maxBytes} bytes omitted] ...\n", - formatBytes(bs.takeRight(maxBytes))) + Iterator(s"$prefix first + last $maxBytes:\n", + formatBytes(bs.take(maxBytes)), + s"\n$indent ... 
[${bs.size - maxBytes} bytes omitted] ...\n", + formatBytes(bs.takeRight(maxBytes))) } } diff --git a/akka-actor/src/main/scala/akka/util/PrettyDuration.scala b/akka-actor/src/main/scala/akka/util/PrettyDuration.scala index 0c9280e586..cc6bb50e62 100644 --- a/akka-actor/src/main/scala/akka/util/PrettyDuration.scala +++ b/akka-actor/src/main/scala/akka/util/PrettyDuration.scala @@ -21,7 +21,8 @@ private[akka] object PrettyDuration { * JAVA API * Selects most appropriate TimeUnit for given duration and formats it accordingly */ - def format(duration: Duration, includeNanos: Boolean, precision: Int): String = duration.pretty(includeNanos, precision) + def format(duration: Duration, includeNanos: Boolean, precision: Int): String = + duration.pretty(includeNanos, precision) implicit class PrettyPrintableDuration(val duration: Duration) extends AnyVal { @@ -38,7 +39,10 @@ private[akka] object PrettyDuration { val unit = chooseUnit(nanos) val value = nanos.toDouble / NANOSECONDS.convert(1, unit) - s"%.${precision}g %s%s".formatLocal(Locale.ROOT, value, abbreviate(unit), if (includeNanos) s" ($nanos ns)" else "") + s"%.${precision}g %s%s".formatLocal(Locale.ROOT, + value, + abbreviate(unit), + if (includeNanos) s" ($nanos ns)" else "") case Duration.MinusInf => s"-∞ (minus infinity)" case Duration.Inf => s"∞ (infinity)" diff --git a/akka-actor/src/main/scala/akka/util/Reflect.scala b/akka-actor/src/main/scala/akka/util/Reflect.scala index 16c94ae591..dc7dc5e3ef 100644 --- a/akka-actor/src/main/scala/akka/util/Reflect.scala +++ b/akka-actor/src/main/scala/akka/util/Reflect.scala @@ -42,12 +42,14 @@ private[akka] object Reflect { * @param clazz the class which to instantiate an instance of * @return a new instance from the default constructor of the given class */ - private[akka] def instantiate[T](clazz: Class[T]): T = try clazz.newInstance catch { - case _: IllegalAccessException => - val ctor = clazz.getDeclaredConstructor() - ctor.setAccessible(true) - ctor.newInstance() 
- } + private[akka] def instantiate[T](clazz: Class[T]): T = + try clazz.newInstance + catch { + case _: IllegalAccessException => + val ctor = clazz.getDeclaredConstructor() + ctor.setAccessible(true) + ctor.newInstance() + } /** * INTERNAL API @@ -66,7 +68,7 @@ private[akka] object Reflect { try constructor.newInstance(args.asInstanceOf[Seq[AnyRef]]: _*) catch { case e: IllegalArgumentException => - val argString = args map safeGetClass mkString ("[", ", ", "]") + val argString = args.map(safeGetClass).mkString("[", ", ", "]") throw new IllegalArgumentException(s"constructor $constructor is incompatible with arguments $argString", e) } } @@ -78,23 +80,23 @@ private[akka] object Reflect { */ private[akka] def findConstructor[T](clazz: Class[T], args: immutable.Seq[Any]): Constructor[T] = { def error(msg: String): Nothing = { - val argClasses = args map safeGetClass mkString ", " + val argClasses = args.map(safeGetClass).mkString(", ") throw new IllegalArgumentException(s"$msg found on $clazz for arguments [$argClasses]") } val constructor: Constructor[T] = - if (args.isEmpty) Try { clazz.getDeclaredConstructor() } getOrElse (null) + if (args.isEmpty) Try { clazz.getDeclaredConstructor() }.getOrElse(null) else { val length = args.length val candidates = - clazz.getDeclaredConstructors.asInstanceOf[Array[Constructor[T]]].iterator filter { c => + clazz.getDeclaredConstructors.asInstanceOf[Array[Constructor[T]]].iterator.filter { c => val parameterTypes = c.getParameterTypes parameterTypes.length == length && - (parameterTypes.iterator zip args.iterator forall { - case (found, required) => - found.isInstance(required) || BoxedType(found).isInstance(required) || - (required == null && !found.isPrimitive) - }) + (parameterTypes.iterator.zip(args.iterator).forall { + case (found, required) => + found.isInstance(required) || BoxedType(found).isInstance(required) || + (required == null && !found.isPrimitive) + }) } if (candidates.hasNext) { val cstrtr = candidates.next() 
@@ -120,15 +122,16 @@ private[akka] object Reflect { def findMarker(root: Class[_], marker: Class[_]): Type = { @tailrec def rec(curr: Class[_]): Type = { if (curr.getSuperclass != null && marker.isAssignableFrom(curr.getSuperclass)) rec(curr.getSuperclass) - else curr.getGenericInterfaces collectFirst { - case c: Class[_] if marker isAssignableFrom c => c - case t: ParameterizedType if marker isAssignableFrom t.getRawType.asInstanceOf[Class[_]] => t - } match { - case None => throw new IllegalArgumentException(s"cannot find [$marker] in ancestors of [$root]") - case Some(c: Class[_]) => if (c == marker) c else rec(c) - case Some(t: ParameterizedType) => if (t.getRawType == marker) t else rec(t.getRawType.asInstanceOf[Class[_]]) - case _ => ??? // cannot happen due to collectFirst - } + else + curr.getGenericInterfaces.collectFirst { + case c: Class[_] if marker.isAssignableFrom(c) => c + case t: ParameterizedType if marker.isAssignableFrom(t.getRawType.asInstanceOf[Class[_]]) => t + } match { + case None => throw new IllegalArgumentException(s"cannot find [$marker] in ancestors of [$root]") + case Some(c: Class[_]) => if (c == marker) c else rec(c) + case Some(t: ParameterizedType) => if (t.getRawType == marker) t else rec(t.getRawType.asInstanceOf[Class[_]]) + case _ => ??? // cannot happen due to collectFirst + } } rec(root) } @@ -137,7 +140,10 @@ private[akka] object Reflect { * INTERNAL API * Set a val inside a class. 
*/ - @tailrec protected[akka] final def lookupAndSetField(clazz: Class[_], instance: AnyRef, name: String, value: Any): Boolean = { + @tailrec protected[akka] final def lookupAndSetField(clazz: Class[_], + instance: AnyRef, + name: String, + value: Any): Boolean = { @tailrec def clearFirst(fields: Array[java.lang.reflect.Field], idx: Int): Boolean = if (idx < fields.length) { val field = fields(idx) @@ -161,19 +167,23 @@ private[akka] object Reflect { */ private[akka] def findClassLoader(): ClassLoader = { def findCaller(get: Int => Class[_]): ClassLoader = - Iterator.from(2 /*is the magic number, promise*/ ).map(get) dropWhile { c => - c != null && + Iterator + .from(2 /*is the magic number, promise*/ ) + .map(get) + .dropWhile { c => + c != null && (c.getName.startsWith("akka.actor.ActorSystem") || - c.getName.startsWith("scala.Option") || - c.getName.startsWith("scala.collection.Iterator") || - c.getName.startsWith("akka.util.Reflect")) - } next () match { + c.getName.startsWith("scala.Option") || + c.getName.startsWith("scala.collection.Iterator") || + c.getName.startsWith("akka.util.Reflect")) + } + .next() match { case null => getClass.getClassLoader case c => c.getClassLoader } - Option(Thread.currentThread.getContextClassLoader) orElse - (Reflect.getCallerClass map findCaller) getOrElse - getClass.getClassLoader + Option(Thread.currentThread.getContextClassLoader) + .orElse(Reflect.getCallerClass.map(findCaller)) + .getOrElse(getClass.getClassLoader) } } diff --git a/akka-actor/src/main/scala/akka/util/SerializedSuspendableExecutionContext.scala b/akka-actor/src/main/scala/akka/util/SerializedSuspendableExecutionContext.scala index 647e10ba83..57b8fb085c 100644 --- a/akka-actor/src/main/scala/akka/util/SerializedSuspendableExecutionContext.scala +++ b/akka-actor/src/main/scala/akka/util/SerializedSuspendableExecutionContext.scala @@ -7,7 +7,7 @@ package akka.util import java.util.concurrent.atomic.AtomicInteger import scala.concurrent.ExecutionContext 
import scala.util.control.NonFatal -import scala.annotation.{ tailrec, switch } +import scala.annotation.{ switch, tailrec } import akka.dispatch.AbstractNodeQueue private[akka] object SerializedSuspendableExecutionContext { @@ -32,9 +32,12 @@ private[akka] object SerializedSuspendableExecutionContext { * @param context the underlying context which will be used to actually execute the submitted tasks */ private[akka] final class SerializedSuspendableExecutionContext(throughput: Int)(val context: ExecutionContext) - extends AbstractNodeQueue[Runnable] with Runnable with ExecutionContext { + extends AbstractNodeQueue[Runnable] + with Runnable + with ExecutionContext { import SerializedSuspendableExecutionContext._ - require(throughput > 0, s"SerializedSuspendableExecutionContext.throughput must be greater than 0 but was $throughput") + require(throughput > 0, + s"SerializedSuspendableExecutionContext.throughput must be greater than 0 but was $throughput") private final val state = new AtomicInteger(Off) @tailrec private final def addState(newState: Int): Boolean = { @@ -65,16 +68,20 @@ private[akka] final class SerializedSuspendableExecutionContext(throughput: Int) poll() match { case null => () case some => - try some.run() catch { case NonFatal(t) => context reportFailure t } + try some.run() + catch { case NonFatal(t) => context.reportFailure(t) } run(done + 1) } } - try run(0) finally remState(On) + try run(0) + finally remState(On) } - final def attach(): Unit = if (!isEmpty() && state.compareAndSet(Off, On)) context execute this - override final def execute(task: Runnable): Unit = try add(task) finally attach() - override final def reportFailure(t: Throwable): Unit = context reportFailure t + final def attach(): Unit = if (!isEmpty() && state.compareAndSet(Off, On)) context.execute(this) + override final def execute(task: Runnable): Unit = + try add(task) + finally attach() + override final def reportFailure(t: Throwable): Unit = context.reportFailure(t) /** * 
O(N) diff --git a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala index 8f37162664..8f8778985b 100644 --- a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala +++ b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala @@ -11,10 +11,12 @@ import akka.util.ccompat._ * Typeclass which describes a classification hierarchy. Observe the contract between `isEqual` and `isSubclass`! */ trait Subclassification[K] { + /** * True if and only if x and y are of the same class. */ def isEqual(x: K, y: K): Boolean + /** * True if and only if x is a subclass of y; equal classes must be considered sub-classes! */ @@ -23,7 +25,9 @@ trait Subclassification[K] { private[akka] object SubclassifiedIndex { - class Nonroot[K, V](override val root: SubclassifiedIndex[K, V], val key: K, _values: Set[V])(implicit sc: Subclassification[K]) extends SubclassifiedIndex[K, V](_values) { + class Nonroot[K, V](override val root: SubclassifiedIndex[K, V], val key: K, _values: Set[V])( + implicit sc: Subclassification[K]) + extends SubclassifiedIndex[K, V](_values) { override def innerAddValue(key: K, value: V): Changes = { // break the recursion on super when key is found and transition to recursive add-to-set @@ -31,7 +35,7 @@ private[akka] object SubclassifiedIndex { } private def addValue(value: V): Changes = { - val kids = subkeys flatMap (_ addValue value) + val kids = subkeys.flatMap(_.addValue(value)) if (!(values contains value)) { values += value kids :+ ((key, Set(value))) @@ -45,7 +49,7 @@ private[akka] object SubclassifiedIndex { } override def removeValue(value: V): Changes = { - val kids = subkeys flatMap (_ removeValue value) + val kids = subkeys.flatMap(_.removeValue(value)) if (values contains value) { values -= value kids :+ ((key, Set(value))) @@ -96,7 +100,7 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[ protected def innerAddKey(key: K): Changes = { 
var found = false - val ch = subkeys flatMap { n => + val ch = subkeys.flatMap { n => if (sc.isEqual(key, n.key)) { found = true Nil @@ -120,7 +124,7 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[ protected def innerAddValue(key: K, value: V): Changes = { var found = false - val ch = subkeys flatMap { n => + val ch = subkeys.flatMap { n => if (sc.isSubclass(key, n.key)) { found = true n.innerAddValue(key, value) @@ -142,14 +146,14 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[ // the reason for not using the values in the returned diff is that we need to // go through the whole tree to find all values for the "changed" keys in other // parts of the tree as well, since new nodes might have been created - mergeChangesByKey(innerRemoveValue(key, value)) map { + mergeChangesByKey(innerRemoveValue(key, value)).map { case (k, _) => (k, findValues(k)) } // this will return the keys and values to be removed from the cache protected def innerRemoveValue(key: K, value: V): Changes = { var found = false - val ch = subkeys flatMap { n => + val ch = subkeys.flatMap { n => if (sc.isSubclass(key, n.key)) { found = true n.innerRemoveValue(key, value) @@ -166,7 +170,7 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[ * * @return the diff that should be removed from the cache */ - def removeValue(value: V): Changes = mergeChangesByKey(subkeys flatMap (_ removeValue value)) + def removeValue(value: V): Changes = mergeChangesByKey(subkeys.flatMap(_.removeValue(value))) /** * Find all values for a given key in the index. @@ -183,16 +187,18 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[ /** * Find all subkeys of a given key in the index excluding some subkeys. 
*/ - protected final def findSubKeysExcept(key: K, except: Vector[Nonroot[K, V]]): Set[K] = root.innerFindSubKeys(key, except) + protected final def findSubKeysExcept(key: K, except: Vector[Nonroot[K, V]]): Set[K] = + root.innerFindSubKeys(key, except) protected def innerFindSubKeys(key: K, except: Vector[Nonroot[K, V]]): Set[K] = subkeys.foldLeft(Set.empty[K]) { (s, n) => if (sc.isEqual(key, n.key)) s - else n.innerFindSubKeys(key, except) ++ { - if (sc.isSubclass(n.key, key) && !except.exists(e => sc.isEqual(key, e.key))) - s + n.key - else - s - } + else + n.innerFindSubKeys(key, except) ++ { + if (sc.isSubclass(n.key, key) && !except.exists(e => sc.isEqual(key, e.key))) + s + n.key + else + s + } } override def toString = subkeys.mkString("SubclassifiedIndex(" + values + ",\n", ",\n", ")") @@ -202,7 +208,7 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[ * Also needs to find subkeys in other parts of the tree to compensate for multiple inheritance. 
*/ private def integrate(n: Nonroot[K, V]): Changes = { - val (subsub, sub) = subkeys partition (k => sc.isSubclass(k.key, n.key)) + val (subsub, sub) = subkeys.partition(k => sc.isSubclass(k.key, n.key)) subkeys = sub :+ n n.subkeys = if (subsub.nonEmpty) subsub else n.subkeys n.subkeys ++= findSubKeysExcept(n.key, n.subkeys).map(k => new Nonroot(root, k, values)) @@ -210,7 +216,9 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[ } private def mergeChangesByKey(changes: Changes): Changes = - changes.foldLeft(emptyMergeMap[K, V]) { - case (m, (k, s)) => m.updated(k, m(k) ++ s) - }.to(immutable.Seq) + changes + .foldLeft(emptyMergeMap[K, V]) { + case (m, (k, s)) => m.updated(k, m(k) ++ s) + } + .to(immutable.Seq) } diff --git a/akka-actor/src/main/scala/akka/util/WildcardIndex.scala b/akka-actor/src/main/scala/akka/util/WildcardIndex.scala index dc79f9f468..b7ba93da85 100644 --- a/akka-actor/src/main/scala/akka/util/WildcardIndex.scala +++ b/akka-actor/src/main/scala/akka/util/WildcardIndex.scala @@ -7,7 +7,8 @@ package akka.util import scala.annotation.tailrec import scala.collection.immutable.HashMap -private[akka] final case class WildcardIndex[T](wildcardTree: WildcardTree[T] = WildcardTree[T](), doubleWildcardTree: WildcardTree[T] = WildcardTree[T]()) { +private[akka] final case class WildcardIndex[T](wildcardTree: WildcardTree[T] = WildcardTree[T](), + doubleWildcardTree: WildcardTree[T] = WildcardTree[T]()) { def insert(elems: Array[String], d: T): WildcardIndex[T] = elems.lastOption match { case Some("**") => copy(doubleWildcardTree = doubleWildcardTree.insert(elems.iterator, d)) @@ -17,19 +18,19 @@ private[akka] final case class WildcardIndex[T](wildcardTree: WildcardTree[T] = def find(elems: Iterable[String]): Option[T] = (if (wildcardTree.isEmpty) { - if (doubleWildcardTree.isEmpty) { - WildcardTree[T]() // empty - } else { - doubleWildcardTree.findWithTerminalDoubleWildcard(elems.iterator) - } - } else { - val 
withSingleWildcard = wildcardTree.findWithSingleWildcard(elems.iterator) - if (withSingleWildcard.isEmpty) { - doubleWildcardTree.findWithTerminalDoubleWildcard(elems.iterator) - } else { - withSingleWildcard - } - }).data + if (doubleWildcardTree.isEmpty) { + WildcardTree[T]() // empty + } else { + doubleWildcardTree.findWithTerminalDoubleWildcard(elems.iterator) + } + } else { + val withSingleWildcard = wildcardTree.findWithSingleWildcard(elems.iterator) + if (withSingleWildcard.isEmpty) { + doubleWildcardTree.findWithTerminalDoubleWildcard(elems.iterator) + } else { + withSingleWildcard + } + }).data def isEmpty: Boolean = wildcardTree.isEmpty && doubleWildcardTree.isEmpty @@ -40,7 +41,9 @@ private[akka] object WildcardTree { def apply[T](): WildcardTree[T] = empty.asInstanceOf[WildcardTree[T]] } -private[akka] final case class WildcardTree[T](data: Option[T] = None, children: Map[String, WildcardTree[T]] = HashMap[String, WildcardTree[T]]()) { +private[akka] final case class WildcardTree[T](data: Option[T] = None, + children: Map[String, WildcardTree[T]] = + HashMap[String, WildcardTree[T]]()) { def isEmpty: Boolean = data.isEmpty && children.isEmpty @@ -57,25 +60,27 @@ private[akka] final case class WildcardTree[T](data: Option[T] = None, children: else { children.get(elems.next()) match { case Some(branch) => branch.findWithSingleWildcard(elems) - case None => children.get("*") match { - case Some(branch) => branch.findWithSingleWildcard(elems) - case None => WildcardTree[T]() - } + case None => + children.get("*") match { + case Some(branch) => branch.findWithSingleWildcard(elems) + case None => WildcardTree[T]() + } } } - @tailrec def findWithTerminalDoubleWildcard(elems: Iterator[String], alt: WildcardTree[T] = WildcardTree[T]()): WildcardTree[T] = { + @tailrec def findWithTerminalDoubleWildcard(elems: Iterator[String], + alt: WildcardTree[T] = WildcardTree[T]()): WildcardTree[T] = { if (!elems.hasNext) this else { val newAlt = children.getOrElse("**", 
alt) children.get(elems.next()) match { case Some(branch) => branch.findWithTerminalDoubleWildcard(elems, newAlt) - case None => children.get("*") match { - case Some(branch) => branch.findWithTerminalDoubleWildcard(elems, newAlt) - case None => newAlt - } + case None => + children.get("*") match { + case Some(branch) => branch.findWithTerminalDoubleWildcard(elems, newAlt) + case None => newAlt + } } } } } - diff --git a/akka-agent/src/main/scala/akka/agent/Agent.scala b/akka-agent/src/main/scala/akka/agent/Agent.scala index b8ece0e6b3..6cd55cf7e9 100644 --- a/akka-agent/src/main/scala/akka/agent/Agent.scala +++ b/akka-agent/src/main/scala/akka/agent/Agent.scala @@ -8,12 +8,15 @@ import scala.concurrent.stm._ import scala.concurrent.{ ExecutionContext, Future, Promise } import akka.util.SerializedSuspendableExecutionContext -@deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.", since = "2.5.0") +@deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.", + since = "2.5.0") object Agent { + /** * Factory method for creating an Agent. 
*/ - @deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.", since = "2.5.0") + @deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.", + since = "2.5.0") def apply[T](initialValue: T)(implicit context: ExecutionContext): Agent[T] = new SecretAgent(initialValue, context) /** @@ -21,7 +24,8 @@ object Agent { * @deprecated Agents are deprecated and scheduled for removal in the next major version, use Actors instead.i */ @Deprecated - @deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.", since = "2.5.0") + @deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.", + since = "2.5.0") def create[T](initialValue: T, context: ExecutionContext): Agent[T] = Agent(initialValue)(context) /** @@ -37,10 +41,15 @@ object Agent { def send(f: T => T): Unit = withinTransaction(new Runnable { def run = ref.single.transform(f) }) - def sendOff(f: T => T)(implicit ec: ExecutionContext): Unit = withinTransaction( - new Runnable { + def sendOff(f: T => T)(implicit ec: ExecutionContext): Unit = + withinTransaction(new Runnable { def run = - try updater.suspend() finally ec.execute(new Runnable { def run = try ref.single.transform(f) finally updater.resume() }) + try updater.suspend() + finally ec.execute(new Runnable { + def run = + try ref.single.transform(f) + finally updater.resume() + }) }) def alter(newValue: T): Future[T] = doAlter({ ref.single.update(newValue); newValue }) @@ -52,7 +61,9 @@ object Agent { withinTransaction(new Runnable { def run = { updater.suspend() - result completeWith Future(try ref.single.transformAndGet(f) finally updater.resume()) + result.completeWith( + Future(try ref.single.transformAndGet(f) + finally updater.resume())) } }) result.future @@ -75,7 +86,7 @@ object Agent { Txn.findCurrent match { case Some(txn) => val result = Promise[T]() - 
Txn.afterCommit(status => result completeWith Future(f)(updater))(txn) + Txn.afterCommit(status => result.completeWith(Future(f)(updater)))(txn) result.future case _ => Future(f)(updater) } @@ -159,7 +170,8 @@ object Agent { * * @deprecated Agents are deprecated and scheduled for removal in the next major version, use Actors instead. */ -@deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.", since = "2.5.0") +@deprecated("Agents are deprecated and scheduled for removal in the next major version, use Actors instead.", + since = "2.5.0") abstract class Agent[T] { /** diff --git a/akka-agent/src/test/scala/akka/agent/AgentSpec.scala b/akka-agent/src/test/scala/akka/agent/AgentSpec.scala index 1cb89a008c..fd4763cd02 100644 --- a/akka-agent/src/test/scala/akka/agent/AgentSpec.scala +++ b/akka-agent/src/test/scala/akka/agent/AgentSpec.scala @@ -29,10 +29,10 @@ class AgentSpec extends AkkaSpec { val countDown = new CountDownFunction[String] val agent = Agent("a") - agent send (_ + "b") - agent send (_ + "c") - agent send (_ + "d") - agent send countDown + agent.send(_ + "b") + agent.send(_ + "c") + agent.send(_ + "d") + agent.send(countDown) countDown.await(5 seconds) agent() should ===("abcd") @@ -42,11 +42,11 @@ class AgentSpec extends AkkaSpec { val countDown = new CountDownFunction[String] val l1, l2 = new TestLatch(1) val agent = Agent("a") - agent send (_ + "b") + agent.send(_ + "b") agent.sendOff((s: String) => { l1.countDown; Await.ready(l2, timeout.duration); s + "c" }) Await.ready(l1, timeout.duration) - agent send (_ + "d") - agent send countDown + agent.send(_ + "d") + agent.send(countDown) l2.countDown countDown.await(5 seconds) agent() should ===("abcd") @@ -78,10 +78,10 @@ class AgentSpec extends AkkaSpec { Await.ready(readLatch, readTimeout) i + 5 } - agent send f1 + agent.send(f1) val read = agent() readLatch.countDown() - agent send countDown + agent.send(countDown) countDown.await(5 seconds) read 
should ===(5) @@ -90,7 +90,9 @@ class AgentSpec extends AkkaSpec { "be readable within a transaction" in { val agent = Agent(5) - val value = atomic { t => agent() } + val value = atomic { t => + agent() + } value should ===(5) } @@ -99,9 +101,9 @@ class AgentSpec extends AkkaSpec { val agent = Agent(5) atomic { t => - agent send (_ * 2) + agent.send(_ * 2) } - agent send countDown + agent.send(countDown) countDown.await(5 seconds) agent() should ===(10) @@ -114,12 +116,12 @@ class AgentSpec extends AkkaSpec { try { atomic { t => - agent send (_ * 2) + agent.send(_ * 2) throw new RuntimeException("Expected failure") } } catch { case NonFatal(_) => } - agent send countDown + agent.send(countDown) countDown.await(5 seconds) agent() should ===(5) @@ -127,23 +129,23 @@ class AgentSpec extends AkkaSpec { "be able to return a 'queued' future" in { val agent = Agent("a") - agent send (_ + "b") - agent send (_ + "c") + agent.send(_ + "b") + agent.send(_ + "c") Await.result(agent.future, timeout.duration) should ===("abc") } "be able to await the value after updates have completed" in { val agent = Agent("a") - agent send (_ + "b") - agent send (_ + "c") + agent.send(_ + "b") + agent.send(_ + "c") Await.result(agent.future, timeout.duration) should ===("abc") } "be able to be mapped" in { val agent1 = Agent(5) - val agent2 = agent1 map (_ * 2) + val agent2 = agent1.map(_ * 2) agent1() should ===(5) agent2() should ===(10) @@ -183,4 +185,3 @@ class AgentSpec extends AkkaSpec { } } } - diff --git a/akka-bench-jmh-typed/src/main/scala/akka/BenchRunner.scala b/akka-bench-jmh-typed/src/main/scala/akka/BenchRunner.scala index 9b2b04a615..d5632bc741 100644 --- a/akka-bench-jmh-typed/src/main/scala/akka/BenchRunner.scala +++ b/akka-bench-jmh-typed/src/main/scala/akka/BenchRunner.scala @@ -24,7 +24,8 @@ object BenchRunner { val report = results.asScala.map { result: RunResult => val bench = result.getParams.getBenchmark - val params = result.getParams.getParamsKeys.asScala.map(key 
=> s"$key=${result.getParams.getParam(key)}").mkString("_") + val params = + result.getParams.getParamsKeys.asScala.map(key => s"$key=${result.getParams.getParam(key)}").mkString("_") val score = result.getAggregatedResult.getPrimaryResult.getScore.round val unit = result.getAggregatedResult.getPrimaryResult.getScoreUnit s"\t${bench}_${params}\t$score\t$unit" diff --git a/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedActorBenchmark.scala b/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedActorBenchmark.scala index 148c0ebbd1..f422b38ef4 100644 --- a/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedActorBenchmark.scala +++ b/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedActorBenchmark.scala @@ -56,8 +56,7 @@ class TypedActorBenchmark { system = ActorSystem( TypedBenchmarkActors.echoActorsSupervisor(numMessagesPerActorPair, numActors, dispatcher, batchSize, timeout), "TypedActorBenchmark", - ConfigFactory.parseString( - s""" + ConfigFactory.parseString(s""" akka.actor { default-mailbox.mailbox-capacity = 512 @@ -86,8 +85,7 @@ class TypedActorBenchmark { mailbox-type = "$mailbox" } } - """ - )) + """)) } @TearDown(Level.Trial) @@ -103,4 +101,3 @@ class TypedActorBenchmark { } } - diff --git a/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala b/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala index 195c4c8b1f..ded76e8684 100644 --- a/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala +++ b/akka-bench-jmh-typed/src/main/scala/akka/actor/typed/TypedBenchmarkActors.scala @@ -14,12 +14,16 @@ object TypedBenchmarkActors { // we pass the respondTo actor ref into the behavior final case object Message - private def echoBehavior(respondTo: ActorRef[Message.type]): Behavior[Message.type] = Behaviors.receive { (ctx, msg) => - respondTo ! 
Message - Behaviors.same + private def echoBehavior(respondTo: ActorRef[Message.type]): Behavior[Message.type] = Behaviors.receive { + (ctx, msg) => + respondTo ! Message + Behaviors.same } - private def echoSender(messagesPerPair: Int, onDone: ActorRef[Done], batchSize: Int, childProps: Props): Behavior[Message.type] = + private def echoSender(messagesPerPair: Int, + onDone: ActorRef[Done], + batchSize: Int, + childProps: Props): Behavior[Message.type] = Behaviors.setup { ctx => val echo = ctx.spawn(echoBehavior(ctx.self), "echo", childProps) var left = messagesPerPair / 2 @@ -53,50 +57,60 @@ object TypedBenchmarkActors { case class Start(respondTo: ActorRef[Completed]) case class Completed(startNanoTime: Long) - def echoActorsSupervisor(numMessagesPerActorPair: Int, numActors: Int, dispatcher: String, batchSize: Int, + def echoActorsSupervisor(numMessagesPerActorPair: Int, + numActors: Int, + dispatcher: String, + batchSize: Int, shutdownTimeout: FiniteDuration): Behavior[Start] = Behaviors.receive { (ctx, msg) => msg match { case Start(respondTo) => // note: no protection against accidentally running bench sessions in parallel - val sessionBehavior = startEchoBenchSession(numMessagesPerActorPair, numActors, dispatcher, batchSize, respondTo) + val sessionBehavior = + startEchoBenchSession(numMessagesPerActorPair, numActors, dispatcher, batchSize, respondTo) ctx.spawnAnonymous(sessionBehavior) Behaviors.same } } - private def startEchoBenchSession(messagesPerPair: Int, numActors: Int, dispatcher: String, - batchSize: Int, respondTo: ActorRef[Completed]): Behavior[Unit] = { + private def startEchoBenchSession(messagesPerPair: Int, + numActors: Int, + dispatcher: String, + batchSize: Int, + respondTo: ActorRef[Completed]): Behavior[Unit] = { val numPairs = numActors / 2 - Behaviors.setup[Any] { ctx => - val props = Props.empty.withDispatcherFromConfig("akka.actor." 
+ dispatcher) - val pairs = (1 to numPairs).map { _ => - ctx.spawnAnonymous(echoSender(messagesPerPair, ctx.self.narrow[Done], batchSize, props), props) - } - val startNanoTime = System.nanoTime() - pairs.foreach(_ ! Message) - var interactionsLeft = numPairs - Behaviors.receiveMessage { - case Done => - interactionsLeft -= 1 - if (interactionsLeft == 0) { - val totalNumMessages = numPairs * messagesPerPair - printProgress(totalNumMessages, numActors, startNanoTime) - respondTo ! Completed(startNanoTime) - Behaviors.stopped - } else { - Behaviors.same - } + Behaviors + .setup[Any] { ctx => + val props = Props.empty.withDispatcherFromConfig("akka.actor." + dispatcher) + val pairs = (1 to numPairs).map { _ => + ctx.spawnAnonymous(echoSender(messagesPerPair, ctx.self.narrow[Done], batchSize, props), props) + } + val startNanoTime = System.nanoTime() + pairs.foreach(_ ! Message) + var interactionsLeft = numPairs + Behaviors.receiveMessage { + case Done => + interactionsLeft -= 1 + if (interactionsLeft == 0) { + val totalNumMessages = numPairs * messagesPerPair + printProgress(totalNumMessages, numActors, startNanoTime) + respondTo ! 
Completed(startNanoTime) + Behaviors.stopped + } else { + Behaviors.same + } + } } - }.narrow[Unit] + .narrow[Unit] } private def printProgress(totalMessages: Long, numActors: Int, startNanoTime: Long) = { val durationMicros = (System.nanoTime() - startNanoTime) / 1000 - println(f" $totalMessages messages by $numActors actors took ${durationMicros / 1000} ms, " + + println( + f" $totalMessages messages by $numActors actors took ${durationMicros / 1000} ms, " + f"${totalMessages.toDouble / durationMicros}%,.2f M msg/s") } } diff --git a/akka-bench-jmh/src/main/scala/akka/BenchRunner.scala b/akka-bench-jmh/src/main/scala/akka/BenchRunner.scala index 9b2b04a615..d5632bc741 100644 --- a/akka-bench-jmh/src/main/scala/akka/BenchRunner.scala +++ b/akka-bench-jmh/src/main/scala/akka/BenchRunner.scala @@ -24,7 +24,8 @@ object BenchRunner { val report = results.asScala.map { result: RunResult => val bench = result.getParams.getBenchmark - val params = result.getParams.getParamsKeys.asScala.map(key => s"$key=${result.getParams.getParam(key)}").mkString("_") + val params = + result.getParams.getParamsKeys.asScala.map(key => s"$key=${result.getParams.getParam(key)}").mkString("_") val score = result.getAggregatedResult.getPrimaryResult.getScore.round val unit = result.getAggregatedResult.getPrimaryResult.getScoreUnit s"\t${bench}_${params}\t$score\t$unit" diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala index 9a896d17cd..05b7720265 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/ActorBenchmark.scala @@ -36,7 +36,10 @@ class ActorBenchmark { var batchSize = 0 //@Param(Array("akka.actor.ManyToOneArrayMailbox")) - @Param(Array("akka.dispatch.SingleConsumerOnlyUnboundedMailbox", "akka.actor.ManyToOneArrayMailbox", "akka.actor.JCToolsMailbox")) + @Param( + Array("akka.dispatch.SingleConsumerOnlyUnboundedMailbox", + 
"akka.actor.ManyToOneArrayMailbox", + "akka.actor.JCToolsMailbox")) var mailbox = "" @Param(Array("fjp-dispatcher")) // @Param(Array("fjp-dispatcher", "affinity-dispatcher")) @@ -49,8 +52,8 @@ class ActorBenchmark { requireRightNumberOfCores(threads) - system = ActorSystem("ActorBenchmark", ConfigFactory.parseString( - s""" + system = ActorSystem("ActorBenchmark", + ConfigFactory.parseString(s""" akka.actor { default-mailbox.mailbox-capacity = 512 @@ -79,8 +82,7 @@ class ActorBenchmark { mailbox-type = "$mailbox" } } - """ - )) + """)) } @TearDown(Level.Trial) @@ -95,4 +97,3 @@ class ActorBenchmark { benchmarkEchoActors(numMessagesPerActorPair, numActors, dispatcher, batchSize, timeout) } - diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala index 33c2b0b8a2..789533e644 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/ActorCreationBenchmark.scala @@ -17,7 +17,7 @@ hand checking: [info] a.a.ActorCreationBenchmark.synchronousStarting ss 120000 21.496 0.502 us -*/ + */ @State(Scope.Benchmark) @BenchmarkMode(Array(Mode.SingleShotTime)) @Fork(5) diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala index 44dd34119e..5647cf87c2 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolComparativeBenchmark.scala @@ -45,8 +45,8 @@ class AffinityPoolComparativeBenchmark { s"""default-mailbox.mailbox-type = "${classOf[akka.dispatch.SingleConsumerOnlyUnboundedMailbox].getName}"""" } - system = ActorSystem("AffinityPoolComparativeBenchmark", ConfigFactory.parseString( - s"""| akka { + system = ActorSystem("AffinityPoolComparativeBenchmark", + ConfigFactory.parseString(s"""| akka { | 
log-dead-letters = off | actor { | default-fj-dispatcher { @@ -82,8 +82,7 @@ class AffinityPoolComparativeBenchmark { | $mailboxConf | } | } - """.stripMargin - )) + """.stripMargin)) } @TearDown(Level.Trial) diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolIdleCPULevelBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolIdleCPULevelBenchmark.scala index 02c917ef59..714775f41a 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolIdleCPULevelBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolIdleCPULevelBenchmark.scala @@ -35,8 +35,8 @@ class AffinityPoolIdleCPULevelBenchmark { requireRightNumberOfCores(numThreads) - system = ActorSystem("AffinityPoolWaitingStrategyBenchmark", ConfigFactory.parseString( - s""" | akka { + system = ActorSystem("AffinityPoolWaitingStrategyBenchmark", + ConfigFactory.parseString(s""" | akka { | log-dead-letters = off | actor { | affinity-dispatcher { @@ -54,8 +54,7 @@ class AffinityPoolIdleCPULevelBenchmark { | | } | } - """.stripMargin - )) + """.stripMargin)) } @TearDown(Level.Trial) @@ -64,6 +63,7 @@ class AffinityPoolIdleCPULevelBenchmark { @Benchmark @OutputTimeUnit(TimeUnit.NANOSECONDS) @OperationsPerInvocation(8000000) - def pingPong(): Unit = benchmarkPingPongActors(numMessagesPerActorPair, numActors, "affinity-dispatcher", throughPut, timeout) + def pingPong(): Unit = + benchmarkPingPongActors(numMessagesPerActorPair, numActors, "affinity-dispatcher", throughPut, timeout) } diff --git a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala index fbb4f6d2a9..652d4e5f51 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/AffinityPoolRequestResponseBenchmark.scala @@ -49,8 +49,8 @@ class AffinityPoolRequestResponseBenchmark { 
s"""default-mailbox.mailbox-type = "${classOf[akka.dispatch.SingleConsumerOnlyUnboundedMailbox].getName}"""" } - system = ActorSystem("AffinityPoolComparativeBenchmark", ConfigFactory.parseString( - s"""| akka { + system = ActorSystem("AffinityPoolComparativeBenchmark", + ConfigFactory.parseString(s"""| akka { | log-dead-letters = off | actor { | default-fj-dispatcher { @@ -86,8 +86,7 @@ class AffinityPoolRequestResponseBenchmark { | $mailboxConf | } | } - """.stripMargin - )) + """.stripMargin)) } @TearDown(Level.Trial) @@ -95,7 +94,8 @@ class AffinityPoolRequestResponseBenchmark { @Setup(Level.Invocation) def setupActors(): Unit = { - val (_actors, _latch) = RequestResponseActors.startUserQueryActorPairs(numActors, numQueriesPerActor, numUsersInDB, dispatcher) + val (_actors, _latch) = + RequestResponseActors.startUserQueryActorPairs(numActors, numQueriesPerActor, numUsersInDB, dispatcher) actors = _actors latch = _latch } diff --git a/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala b/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala index 33d536a038..28bf2ad2cf 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/BenchmarkActors.scala @@ -21,10 +21,9 @@ object BenchmarkActors { var left = messagesPerPair / 2 def receive = { case Message => - if (left == 0) { latch.countDown() - context stop self + context.stop(self) } sender() ! 
Message @@ -83,10 +82,10 @@ object BenchmarkActors { class Pipe(next: Option[ActorRef]) extends Actor { def receive = { case Message => - if (next.isDefined) next.get forward Message + if (next.isDefined) next.get.forward(Message) case Stop => - context stop self - if (next.isDefined) next.get forward Stop + context.stop(self) + if (next.isDefined) next.get.forward(Stop) } } @@ -94,7 +93,8 @@ object BenchmarkActors { def props(next: Option[ActorRef]) = Props(new Pipe(next)) } - private def startPingPongActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String)(implicit system: ActorSystem) = { + private def startPingPongActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String)( + implicit system: ActorSystem) = { val fullPathToDispatcher = "akka.actor." + dispatcher val latch = new CountDownLatch(numPairs * 2) val actors = for { @@ -116,8 +116,8 @@ object BenchmarkActors { } } - private def startEchoActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String, - batchSize: Int)(implicit system: ActorSystem) = { + private def startEchoActorPairs(messagesPerPair: Int, numPairs: Int, dispatcher: String, batchSize: Int)( + implicit system: ActorSystem) = { val fullPathToDispatcher = "akka.actor." 
+ dispatcher val latch = new CountDownLatch(numPairs) @@ -133,17 +133,20 @@ object BenchmarkActors { def printProgress(totalMessages: Long, numActors: Int, startNanoTime: Long) = { val durationMicros = (System.nanoTime() - startNanoTime) / 1000 - println(f" $totalMessages messages by $numActors actors took ${durationMicros / 1000} ms, " + + println( + f" $totalMessages messages by $numActors actors took ${durationMicros / 1000} ms, " + f"${totalMessages.toDouble / durationMicros}%,.2f M msg/s") } def requireRightNumberOfCores(numCores: Int) = - require( - Runtime.getRuntime.availableProcessors == numCores, - s"Update the cores constant to ${Runtime.getRuntime.availableProcessors}" - ) + require(Runtime.getRuntime.availableProcessors == numCores, + s"Update the cores constant to ${Runtime.getRuntime.availableProcessors}") - def benchmarkPingPongActors(numMessagesPerActorPair: Int, numActors: Int, dispatcher: String, throughPut: Int, shutdownTimeout: Duration)(implicit system: ActorSystem): Unit = { + def benchmarkPingPongActors(numMessagesPerActorPair: Int, + numActors: Int, + dispatcher: String, + throughPut: Int, + shutdownTimeout: Duration)(implicit system: ActorSystem): Unit = { val numPairs = numActors / 2 val totalNumMessages = numPairs * numMessagesPerActorPair val (actors, latch) = startPingPongActorPairs(numMessagesPerActorPair, numPairs, dispatcher) @@ -153,7 +156,11 @@ object BenchmarkActors { printProgress(totalNumMessages, numActors, startNanoTime) } - def benchmarkEchoActors(numMessagesPerActorPair: Int, numActors: Int, dispatcher: String, batchSize: Int, shutdownTimeout: Duration)(implicit system: ActorSystem): Unit = { + def benchmarkEchoActors(numMessagesPerActorPair: Int, + numActors: Int, + dispatcher: String, + batchSize: Int, + shutdownTimeout: Duration)(implicit system: ActorSystem): Unit = { val numPairs = numActors / 2 val totalNumMessages = numPairs * numMessagesPerActorPair val (actors, latch) = startEchoActorPairs(numMessagesPerActorPair, 
numPairs, dispatcher, batchSize) @@ -169,4 +176,3 @@ object BenchmarkActors { } } - diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala index fe324a0276..8a17b62ffd 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/ForkJoinActorBenchmark.scala @@ -28,7 +28,10 @@ class ForkJoinActorBenchmark { @Param(Array(coresStr)) // coresStr, cores2xStr, cores4xStr var threads = "" - @Param(Array("akka.dispatch.SingleConsumerOnlyUnboundedMailbox", "akka.actor.ManyToOneArrayMailbox", "akka.actor.JCToolsMailbox")) + @Param( + Array("akka.dispatch.SingleConsumerOnlyUnboundedMailbox", + "akka.actor.ManyToOneArrayMailbox", + "akka.actor.JCToolsMailbox")) var mailbox = "" implicit var system: ActorSystem = _ @@ -38,8 +41,8 @@ class ForkJoinActorBenchmark { requireRightNumberOfCores(cores) - system = ActorSystem("ForkJoinActorBenchmark", ConfigFactory.parseString( - s""" + system = ActorSystem("ForkJoinActorBenchmark", + ConfigFactory.parseString(s""" akka { log-dead-letters = off default-mailbox.mailbox-capacity = 512 @@ -56,8 +59,7 @@ class ForkJoinActorBenchmark { } } } - """ - )) + """)) } @TearDown(Level.Trial) @@ -72,7 +74,8 @@ class ForkJoinActorBenchmark { @Benchmark @OperationsPerInvocation(totalMessagesLessThanCores) - def pingPongLessActorsThanCores(): Unit = benchmarkPingPongActors(messages, lessThanCoresActors, "fjp-dispatcher", tpt, timeout) + def pingPongLessActorsThanCores(): Unit = + benchmarkPingPongActors(messages, lessThanCoresActors, "fjp-dispatcher", tpt, timeout) // @Benchmark // @OperationsPerInvocation(totalMessagesSameAsCores) @@ -80,7 +83,8 @@ class ForkJoinActorBenchmark { @Benchmark @OperationsPerInvocation(totalMessagesMoreThanCores) - def pingPongMoreActorsThanCores(): Unit = benchmarkPingPongActors(messages, moreThanCoresActors, "fjp-dispatcher", tpt, timeout) + def 
pingPongMoreActorsThanCores(): Unit = + benchmarkPingPongActors(messages, moreThanCoresActors, "fjp-dispatcher", tpt, timeout) // @Benchmark // @Measurement(timeUnit = TimeUnit.MILLISECONDS) diff --git a/akka-bench-jmh/src/main/scala/akka/actor/JCToolsMailbox.scala b/akka-bench-jmh/src/main/scala/akka/actor/JCToolsMailbox.scala index 524f2cb520..f107b1d204 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/JCToolsMailbox.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/JCToolsMailbox.scala @@ -25,14 +25,19 @@ case class JCToolsMailbox(val capacity: Int) extends MailboxType with ProducesMe new JCToolsMessageQueue(capacity) } -class JCToolsMessageQueue(capacity: Int) extends MpscGrowableArrayQueue[Envelope](capacity) with MessageQueue with BoundedMessageQueueSemantics { +class JCToolsMessageQueue(capacity: Int) + extends MpscGrowableArrayQueue[Envelope](capacity) + with MessageQueue + with BoundedMessageQueueSemantics { final def pushTimeOut: Duration = Duration.Undefined final def enqueue(receiver: ActorRef, handle: Envelope): Unit = if (!offer(handle)) - receiver.asInstanceOf[InternalActorRef].provider.deadLetters.tell( - DeadLetter(handle.message, handle.sender, receiver), handle.sender - ) + receiver + .asInstanceOf[InternalActorRef] + .provider + .deadLetters + .tell(DeadLetter(handle.message, handle.sender, receiver), handle.sender) final def dequeue(): Envelope = poll() diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ManyToOneArrayMailbox.scala b/akka-bench-jmh/src/main/scala/akka/actor/ManyToOneArrayMailbox.scala index 24df0b3214..204a939a76 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/ManyToOneArrayMailbox.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/ManyToOneArrayMailbox.scala @@ -23,7 +23,9 @@ import scala.annotation.tailrec * * NOTE: ManyToOneArrayMailbox does not use `mailbox-push-timeout-time` as it is non-blocking. 
*/ -case class ManyToOneArrayMailbox(val capacity: Int) extends MailboxType with ProducesMessageQueue[BoundedNodeMessageQueue] { +case class ManyToOneArrayMailbox(val capacity: Int) + extends MailboxType + with ProducesMessageQueue[BoundedNodeMessageQueue] { def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity")) @@ -45,9 +47,11 @@ class ManyToOneArrayMessageQueue(capacity: Int) extends MessageQueue with Bounde final def enqueue(receiver: ActorRef, handle: Envelope): Unit = if (!queue.add(handle)) - receiver.asInstanceOf[InternalActorRef].provider.deadLetters.tell( - DeadLetter(handle.message, handle.sender, receiver), handle.sender - ) + receiver + .asInstanceOf[InternalActorRef] + .provider + .deadLetters + .tell(DeadLetter(handle.message, handle.sender, receiver), handle.sender) final def dequeue(): Envelope = queue.poll() diff --git a/akka-bench-jmh/src/main/scala/akka/actor/RequestResponseActors.scala b/akka-bench-jmh/src/main/scala/akka/actor/RequestResponseActors.scala index 737a752e6d..9279e12064 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/RequestResponseActors.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/RequestResponseActors.scala @@ -25,7 +25,7 @@ object RequestResponseActors { receivedUsers.put(u.userId, u) if (left == 0) { latch.countDown() - context stop self + context.stop(self) } else { sender() ! Request(randGenerator.nextInt(numUsersInDB)) } @@ -50,7 +50,7 @@ object RequestResponseActors { } if (left == 0) { latch.countDown() - context stop self + context.stop(self) } left -= 1 } @@ -71,13 +71,16 @@ object RequestResponseActors { } } - def startUserQueryActorPairs(numActors: Int, numQueriesPerActor: Int, numUsersInDBPerActor: Int, dispatcher: String)(implicit system: ActorSystem) = { + def startUserQueryActorPairs(numActors: Int, numQueriesPerActor: Int, numUsersInDBPerActor: Int, dispatcher: String)( + implicit system: ActorSystem) = { val fullPathToDispatcher = "akka.actor." 
+ dispatcher val latch = new CountDownLatch(numActors) val actorsPairs = for { i <- (1 to (numActors / 2)).toVector - userQueryActor = system.actorOf(UserQueryActor.props(latch, numQueriesPerActor, numUsersInDBPerActor).withDispatcher(fullPathToDispatcher)) - userServiceActor = system.actorOf(UserServiceActor.props(latch, numQueriesPerActor, numUsersInDBPerActor).withDispatcher(fullPathToDispatcher)) + userQueryActor = system.actorOf( + UserQueryActor.props(latch, numQueriesPerActor, numUsersInDBPerActor).withDispatcher(fullPathToDispatcher)) + userServiceActor = system.actorOf( + UserServiceActor.props(latch, numQueriesPerActor, numUsersInDBPerActor).withDispatcher(fullPathToDispatcher)) } yield (userQueryActor, userServiceActor) (actorsPairs, latch) } diff --git a/akka-bench-jmh/src/main/scala/akka/actor/RouterPoolCreationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/RouterPoolCreationBenchmark.scala index cde2ef9f31..e76804846d 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/RouterPoolCreationBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/RouterPoolCreationBenchmark.scala @@ -41,4 +41,3 @@ class RouterPoolCreationBenchmark { true } } - diff --git a/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala index 0aa9be291f..99ab061c1f 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/ScheduleBenchmark.scala @@ -86,12 +86,14 @@ class ScheduleBenchmark { @Benchmark def multipleScheduleOnce(): Unit = { - val tryWithNext = (1 to to).foldLeft(0.millis -> List[Cancellable]()) { - case ((interv, c), idx) => - (interv + interval, scheduler.scheduleOnce(interv) { - op(idx) - } :: c) - }._2 + val tryWithNext = (1 to to) + .foldLeft(0.millis -> List[Cancellable]()) { + case ((interv, c), idx) => + (interv + interval, scheduler.scheduleOnce(interv) { + op(idx) + } :: c) + } + ._2 
promise.future.onComplete { case _ => tryWithNext.foreach(_.cancel()) diff --git a/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala index b784b56d62..81840ecaef 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/StashCreationBenchmark.scala @@ -59,4 +59,3 @@ class StashCreationBenchmark { true } } - diff --git a/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala index b6e94b1ea0..c6689f9574 100644 --- a/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/actor/TellOnlyBenchmark.scala @@ -26,8 +26,8 @@ class TellOnlyBenchmark { @Setup(Level.Trial) def setup(): Unit = { - system = ActorSystem("TellOnlyBenchmark", ConfigFactory.parseString( - s"""| akka { + system = ActorSystem("TellOnlyBenchmark", + ConfigFactory.parseString(s"""| akka { | log-dead-letters = off | actor { | default-dispatcher { @@ -46,8 +46,7 @@ class TellOnlyBenchmark { | type = "akka.actor.TellOnlyBenchmark$$DroppingDispatcherConfigurator" | mailbox-type = "akka.actor.TellOnlyBenchmark$$UnboundedDroppingMailbox" | } - | """.stripMargin - )) + | """.stripMargin)) } @TearDown(Level.Trial) @@ -98,7 +97,7 @@ object TellOnlyBenchmark { class Echo extends Actor { def receive = { case s @ `stop` => - context stop self + context.stop(self) case m => sender ! 
m } } @@ -120,15 +119,18 @@ object TellOnlyBenchmark { new DroppingMessageQueue } - class DroppingDispatcher( - _configurator: MessageDispatcherConfigurator, - _id: String, - _throughput: Int, - _throughputDeadlineTime: Duration, - _executorServiceFactoryProvider: ExecutorServiceFactoryProvider, - _shutdownTimeout: FiniteDuration - ) - extends Dispatcher(_configurator, _id, _throughput, _throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) { + class DroppingDispatcher(_configurator: MessageDispatcherConfigurator, + _id: String, + _throughput: Int, + _throughputDeadlineTime: Duration, + _executorServiceFactoryProvider: ExecutorServiceFactoryProvider, + _shutdownTimeout: FiniteDuration) + extends Dispatcher(_configurator, + _id, + _throughput, + _throughputDeadlineTime, + _executorServiceFactoryProvider, + _shutdownTimeout) { override protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope): Unit = { val mbox = receiver.mailbox @@ -141,15 +143,14 @@ object TellOnlyBenchmark { } class DroppingDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) - extends MessageDispatcherConfigurator(config, prerequisites) { + extends MessageDispatcherConfigurator(config, prerequisites) { - override def dispatcher(): MessageDispatcher = new DroppingDispatcher( - this, - config.getString("id"), - config.getInt("throughput"), - config.getNanosDuration("throughput-deadline-time"), - configureExecutor(), - config.getMillisDuration("shutdown-timeout") - ) + override def dispatcher(): MessageDispatcher = + new DroppingDispatcher(this, + config.getString("id"), + config.getInt("throughput"), + config.getNanosDuration("throughput-deadline-time"), + configureExecutor(), + config.getMillisDuration("shutdown-timeout")) } } diff --git a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetSerializationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetSerializationBenchmark.scala index a24604dd98..d9fb57a9f0 
100644 --- a/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetSerializationBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/cluster/ddata/ORSetSerializationBenchmark.scala @@ -35,8 +35,7 @@ import org.openjdk.jmh.annotations.{ Scope => JmhScope } @OutputTimeUnit(TimeUnit.SECONDS) class ORSetSerializationBenchmark { - private val config = ConfigFactory.parseString( - """ + private val config = ConfigFactory.parseString(""" akka.actor.provider=cluster akka.remote.netty.tcp.port=0 akka.remote.artery.canonical.port = 0 @@ -44,8 +43,7 @@ class ORSetSerializationBenchmark { serialize-messages = off allow-java-serialization = off } - """ - ) + """) private val system1 = ActorSystem("ORSetSerializationBenchmark", config) private val system2 = ActorSystem("ORSetSerializationBenchmark", config) diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala index b9c789c5d3..946d425bf1 100644 --- a/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/persistence/LevelDbBatchingBenchmark.scala @@ -99,11 +99,10 @@ class LevelDbBatchingBenchmark { // TOOLS private def deleteStorage(sys: ActorSystem): Unit = { - val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir" - ).map(s => new File(sys.settings.config.getString(s))) + val storageLocations = + List("akka.persistence.journal.leveldb.dir", + "akka.persistence.journal.leveldb-shared.store.dir", + "akka.persistence.snapshot-store.local.dir").map(s => new File(sys.settings.config.getString(s))) storageLocations.foreach(FileUtils.deleteDirectory) } diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala 
index 71821b6e3a..43082f3ddc 100644 --- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistenceActorDeferBenchmark.scala @@ -30,11 +30,10 @@ class PersistentActorDeferBenchmark { val config = PersistenceSpec.config("leveldb", "benchmark") - lazy val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir" - ).map(s => new File(system.settings.config.getString(s))) + lazy val storageLocations = + List("akka.persistence.journal.leveldb.dir", + "akka.persistence.journal.leveldb-shared.store.dir", + "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) var system: ActorSystem = _ @@ -52,7 +51,8 @@ class PersistentActorDeferBenchmark { storageLocations.foreach(FileUtils.deleteDirectory) persistAsync_defer = system.actorOf(Props(classOf[`persistAsync, defer`], data10k.last), "a-1") - persistAsync_defer_replyASAP = system.actorOf(Props(classOf[`persistAsync, defer, respond ASAP`], data10k.last), "a-2") + persistAsync_defer_replyASAP = + system.actorOf(Props(classOf[`persistAsync, defer, respond ASAP`], data10k.last), "a-2") } @TearDown @@ -87,8 +87,11 @@ class `persistAsync, defer`(respondAfter: Int) extends PersistentActor { override def receiveCommand = { case n: Int => - persistAsync(Evt(n)) { e => } - deferAsync(Evt(n)) { e => if (e.i == respondAfter) sender() ! e.i } + persistAsync(Evt(n)) { e => + } + deferAsync(Evt(n)) { e => + if (e.i == respondAfter) sender() ! 
e.i + } } override def receiveRecover = { case _ => // do nothing @@ -100,8 +103,10 @@ class `persistAsync, defer, respond ASAP`(respondAfter: Int) extends PersistentA override def receiveCommand = { case n: Int => - persistAsync(Evt(n)) { e => } - deferAsync(Evt(n)) { e => } + persistAsync(Evt(n)) { e => + } + deferAsync(Evt(n)) { e => + } if (n == respondAfter) sender() ! n } override def receiveRecover = { diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala index 344a1bd05e..872959f9f4 100644 --- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorBenchmark.scala @@ -19,11 +19,10 @@ class PersistentActorThroughputBenchmark { val config = PersistenceSpec.config("leveldb", "benchmark") - lazy val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir" - ).map(s => new File(system.settings.config.getString(s))) + lazy val storageLocations = + List("akka.persistence.journal.leveldb.dir", + "akka.persistence.journal.leveldb-shared.store.dir", + "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) var system: ActorSystem = _ @@ -50,7 +49,8 @@ class PersistentActorThroughputBenchmark { persistPersistentActor = system.actorOf(Props(classOf[PersistPersistentActor], data10k.last), "ep-1") persistAsync1PersistentActor = system.actorOf(Props(classOf[PersistAsyncPersistentActor], data10k.last), "epa-1") - persistAsyncQuickReplyPersistentActor = system.actorOf(Props(classOf[PersistAsyncQuickReplyPersistentActor], data10k.last), "epa-2") + persistAsyncQuickReplyPersistentActor = + system.actorOf(Props(classOf[PersistAsyncQuickReplyPersistentActor], data10k.last), "epa-2") } @TearDown @@ -120,7 +120,10 @@ 
class PersistPersistentActor(respondAfter: Int) extends PersistentActor { override def persistenceId: String = self.path.name override def receiveCommand = { - case n: Int => persist(Evt(n)) { e => if (e.i == respondAfter) sender() ! e } + case n: Int => + persist(Evt(n)) { e => + if (e.i == respondAfter) sender() ! e + } } override def receiveRecover = { case _ => // do nothing @@ -133,7 +136,9 @@ class PersistAsyncPersistentActor(respondAfter: Int) extends PersistentActor { override def receiveCommand = { case n: Int => - persistAsync(Evt(n)) { e => if (e.i == respondAfter) sender() ! e } + persistAsync(Evt(n)) { e => + if (e.i == respondAfter) sender() ! e + } } override def receiveRecover = { case _ => // do nothing diff --git a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala index 9d09c7c8fe..0ca931c90d 100644 --- a/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/persistence/PersistentActorWithAtLeastOnceDeliveryBenchmark.scala @@ -20,11 +20,10 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark { val config = PersistenceSpec.config("leveldb", "benchmark") - lazy val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir" - ).map(s => new File(system.settings.config.getString(s))) + lazy val storageLocations = + List("akka.persistence.journal.leveldb.dir", + "akka.persistence.journal.leveldb-shared.store.dir", + "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) var system: ActorSystem = _ @@ -47,9 +46,15 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark { destinationActor = system.actorOf(Props[DestinationActor], "destination") - 
noPersistPersistentActorWithAtLeastOnceDelivery = system.actorOf(Props(classOf[NoPersistPersistentActorWithAtLeastOnceDelivery], dataCount, probe.ref, destinationActor.path), "nop-1") - persistPersistentActorWithAtLeastOnceDelivery = system.actorOf(Props(classOf[PersistPersistentActorWithAtLeastOnceDelivery], dataCount, probe.ref, destinationActor.path), "ep-1") - persistAsyncPersistentActorWithAtLeastOnceDelivery = system.actorOf(Props(classOf[PersistAsyncPersistentActorWithAtLeastOnceDelivery], dataCount, probe.ref, destinationActor.path), "epa-1") + noPersistPersistentActorWithAtLeastOnceDelivery = system.actorOf( + Props(classOf[NoPersistPersistentActorWithAtLeastOnceDelivery], dataCount, probe.ref, destinationActor.path), + "nop-1") + persistPersistentActorWithAtLeastOnceDelivery = system.actorOf( + Props(classOf[PersistPersistentActorWithAtLeastOnceDelivery], dataCount, probe.ref, destinationActor.path), + "ep-1") + persistAsyncPersistentActorWithAtLeastOnceDelivery = system.actorOf( + Props(classOf[PersistAsyncPersistentActorWithAtLeastOnceDelivery], dataCount, probe.ref, destinationActor.path), + "epa-1") } @TearDown @@ -85,7 +90,11 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark { } } -class NoPersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, val upStream: ActorRef, val downStream: ActorPath) extends PersistentActor with AtLeastOnceDelivery { +class NoPersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, + val upStream: ActorRef, + val downStream: ActorPath) + extends PersistentActor + with AtLeastOnceDelivery { override def redeliverInterval = 100.milliseconds @@ -117,7 +126,11 @@ class NoPersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, val upS } } -class PersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, val upStream: ActorRef, val downStream: ActorPath) extends PersistentActor with AtLeastOnceDelivery { +class PersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, + val upStream: 
ActorRef, + val downStream: ActorPath) + extends PersistentActor + with AtLeastOnceDelivery { override def redeliverInterval = 100.milliseconds @@ -151,7 +164,11 @@ class PersistPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, val upStr } } -class PersistAsyncPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, val upStream: ActorRef, val downStream: ActorPath) extends PersistentActor with AtLeastOnceDelivery { +class PersistAsyncPersistentActorWithAtLeastOnceDelivery(respondAfter: Int, + val upStream: ActorRef, + val downStream: ActorPath) + extends PersistentActor + with AtLeastOnceDelivery { override def redeliverInterval = 100.milliseconds diff --git a/akka-bench-jmh/src/main/scala/akka/remote/artery/CodecBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/remote/artery/CodecBenchmark.scala index 498fe6a9bf..b301b0c058 100644 --- a/akka-bench-jmh/src/main/scala/akka/remote/artery/CodecBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/remote/artery/CodecBenchmark.scala @@ -77,8 +77,7 @@ class CodecBenchmark { @Setup(Level.Trial) def setupTrial(): Unit = { - val commonConfig = ConfigFactory.parseString( - s""" + val commonConfig = ConfigFactory.parseString(s""" akka { loglevel = WARNING actor.provider = remote @@ -89,13 +88,12 @@ class CodecBenchmark { actor.serialization-identifiers { "${classOf[DummyMessageSerializer].getName}" = 4711 } actor.serialization-bindings {"${classOf[DummyMessage].getName}" = codec-benchmark } } - """ - ) + """) val config = configType match { case RemoteInstrument => - ConfigFactory.parseString( - s"""akka.remote.artery.advanced.instruments = [ "${classOf[DummyRemoteInstrument].getName}" ]""" - ).withFallback(commonConfig) + ConfigFactory + .parseString(s"""akka.remote.artery.advanced.instruments = [ "${classOf[DummyRemoteInstrument].getName}" ]""") + .withFallback(commonConfig) case _ => commonConfig } @@ -106,10 +104,8 @@ class CodecBenchmark { val settings = ActorMaterializerSettings(system) materializer = 
ActorMaterializer(settings)(system) - uniqueLocalAddress = UniqueAddress( - system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress, - AddressUidExtension(system).longAddressUid - ) + uniqueLocalAddress = UniqueAddress(system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress, + AddressUidExtension(system).longAddressUid) val actorOnSystemA = system.actorOf(Props.empty, "a") senderStringA = actorOnSystemA.path.toSerializationFormatWithAddress(uniqueLocalAddress.address) @@ -117,9 +113,9 @@ class CodecBenchmark { val actorOnSystemB = systemB.actorOf(Props.empty, "b") val addressB = systemB.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress val rootB = RootActorPath(addressB) - remoteRefB = - Await.result(system.actorSelection(rootB / "user" / "b").resolveOne(5.seconds), 5.seconds) - .asInstanceOf[RemoteActorRef] + remoteRefB = Await + .result(system.actorSelection(rootB / "user" / "b").resolveOne(5.seconds), 5.seconds) + .asInstanceOf[RemoteActorRef] resolvedRef = actorOnSystemA.asInstanceOf[InternalActorRef] recipientStringB = remoteRefB.path.toSerializationFormatWithAddress(addressB) @@ -128,56 +124,57 @@ class CodecBenchmark { } else null val envelope = new EnvelopeBuffer(envelopeTemplateBuffer) val outboundEnvelope = OutboundEnvelope(OptionVal.None, payload, OptionVal.None) - headerIn setVersion ArteryTransport.HighestVersion - headerIn setUid 42 - headerIn setSenderActorRef actorOnSystemA - headerIn setRecipientActorRef remoteRefB - headerIn setManifest "" - headerIn setRemoteInstruments remoteInstruments + headerIn.setVersion(ArteryTransport.HighestVersion) + headerIn.setUid(42) + headerIn.setSenderActorRef(actorOnSystemA) + headerIn.setRecipientActorRef(remoteRefB) + headerIn.setManifest("") + headerIn.setRemoteInstruments(remoteInstruments) MessageSerializer.serializeForArtery(SerializationExtension(system), outboundEnvelope, headerIn, envelope) envelope.byteBuffer.flip() // Now build up the graphs val encoder: 
Flow[OutboundEnvelope, EnvelopeBuffer, Encoder.OutboundCompressionAccess] = - Flow.fromGraph(new Encoder(uniqueLocalAddress, system.asInstanceOf[ExtendedActorSystem], outboundEnvelopePool, - envelopePool, streamId = 1, debugLogSend = false, version = ArteryTransport.HighestVersion)) + Flow.fromGraph( + new Encoder(uniqueLocalAddress, + system.asInstanceOf[ExtendedActorSystem], + outboundEnvelopePool, + envelopePool, + streamId = 1, + debugLogSend = false, + version = ArteryTransport.HighestVersion)) val encoderInput: Flow[String, OutboundEnvelope, NotUsed] = Flow[String].map(msg => outboundEnvelopePool.acquire().init(OptionVal.None, payload, OptionVal.Some(remoteRefB))) val compressions = new InboundCompressionsImpl(system, inboundContext, inboundContext.settings.Advanced.Compression) val decoder: Flow[EnvelopeBuffer, InboundEnvelope, InboundCompressionAccess] = - Flow.fromGraph(new Decoder(inboundContext, system.asInstanceOf[ExtendedActorSystem], - uniqueLocalAddress, inboundContext.settings, compressions, inboundEnvelopePool)) + Flow.fromGraph( + new Decoder(inboundContext, + system.asInstanceOf[ExtendedActorSystem], + uniqueLocalAddress, + inboundContext.settings, + compressions, + inboundEnvelopePool)) val deserializer: Flow[InboundEnvelope, InboundEnvelope, NotUsed] = Flow.fromGraph(new Deserializer(inboundContext, system.asInstanceOf[ExtendedActorSystem], envelopePool)) - val decoderInput: Flow[String, EnvelopeBuffer, NotUsed] = Flow[String] - .map { _ => - val envelope = envelopePool.acquire() - envelopeTemplateBuffer.rewind() - envelope.byteBuffer.put(envelopeTemplateBuffer) - envelope.byteBuffer.flip() - envelope - } + val decoderInput: Flow[String, EnvelopeBuffer, NotUsed] = Flow[String].map { _ => + val envelope = envelopePool.acquire() + envelopeTemplateBuffer.rewind() + envelope.byteBuffer.put(envelopeTemplateBuffer) + envelope.byteBuffer.flip() + envelope + } - encodeGraph = encoderInput - .via(encoder) - .map(envelope => 
envelopePool.release(envelope)) + encodeGraph = encoderInput.via(encoder).map(envelope => envelopePool.release(envelope)) - decodeGraph = decoderInput - .via(decoder) - .via(deserializer) - .map { - case env: ReusableInboundEnvelope => inboundEnvelopePool.release(env) - case _ => - } + decodeGraph = decoderInput.via(decoder).via(deserializer).map { + case env: ReusableInboundEnvelope => inboundEnvelopePool.release(env) + case _ => + } - encodeDecodeGraph = encoderInput - .via(encoder) - .via(decoder) - .via(deserializer) - .map { - case env: ReusableInboundEnvelope => inboundEnvelopePool.release(env) - case _ => - } + encodeDecodeGraph = encoderInput.via(encoder).via(decoder).via(deserializer).map { + case env: ReusableInboundEnvelope => inboundEnvelopePool.release(env) + case _ => + } } @TearDown(Level.Trial) @@ -192,8 +189,7 @@ class CodecBenchmark { } @TearDown(Level.Iteration) - def tearDownIteration(): Unit = { - } + def tearDownIteration(): Unit = {} @Benchmark @OperationsPerInvocation(OperationsPerInvocation) @@ -201,8 +197,7 @@ class CodecBenchmark { val latch = new CountDownLatch(1) val N = OperationsPerInvocation - Source.fromGraph(new BenchTestSourceSameElement(N, "elem")) - .runWith(new LatchSink(N, latch))(materializer) + Source.fromGraph(new BenchTestSourceSameElement(N, "elem")).runWith(new LatchSink(N, latch))(materializer) if (!latch.await(30, TimeUnit.SECONDS)) throw new RuntimeException("Latch didn't complete in time") @@ -214,7 +209,8 @@ class CodecBenchmark { val latch = new CountDownLatch(1) val N = OperationsPerInvocation - Source.fromGraph(new BenchTestSourceSameElement(N, "elem")) + Source + .fromGraph(new BenchTestSourceSameElement(N, "elem")) .via(encodeGraph) .runWith(new LatchSink(N, latch))(materializer) @@ -228,7 +224,8 @@ class CodecBenchmark { val latch = new CountDownLatch(1) val N = OperationsPerInvocation - Source.fromGraph(new BenchTestSourceSameElement(N, "elem")) + Source + .fromGraph(new BenchTestSourceSameElement(N, "elem")) 
.via(decodeGraph) .runWith(new LatchSink(N, latch))(materializer) @@ -242,7 +239,8 @@ class CodecBenchmark { val latch = new CountDownLatch(1) val N = OperationsPerInvocation - Source.fromGraph(new BenchTestSourceSameElement(N, "elem")) + Source + .fromGraph(new BenchTestSourceSameElement(N, "elem")) .via(encodeDecodeGraph) .runWith(new LatchSink(N, latch))(materializer) @@ -297,12 +295,18 @@ object CodecBenchmark { override def identifier: Byte = 7 // Lucky number slevin - override def remoteWriteMetadata(recipient: ActorRef, message: Object, sender: ActorRef, buffer: ByteBuffer): Unit = { + override def remoteWriteMetadata(recipient: ActorRef, + message: Object, + sender: ActorRef, + buffer: ByteBuffer): Unit = { buffer.putInt(Metadata.length) buffer.put(Metadata) } - override def remoteReadMetadata(recipient: ActorRef, message: Object, sender: ActorRef, buffer: ByteBuffer): Unit = { + override def remoteReadMetadata(recipient: ActorRef, + message: Object, + sender: ActorRef, + buffer: ByteBuffer): Unit = { val length = Metadata.length val metaLength = buffer.getInt @tailrec @@ -315,8 +319,16 @@ object CodecBenchmark { throw new IOException(s"DummyInstrument deserialization error. 
Expected ${Metadata.toString}") } - override def remoteMessageSent(recipient: ActorRef, message: Object, sender: ActorRef, size: Int, time: Long): Unit = () + override def remoteMessageSent(recipient: ActorRef, + message: Object, + sender: ActorRef, + size: Int, + time: Long): Unit = () - override def remoteMessageReceived(recipient: ActorRef, message: Object, sender: ActorRef, size: Int, time: Long): Unit = () + override def remoteMessageReceived(recipient: ActorRef, + message: Object, + sender: ActorRef, + size: Int, + time: Long): Unit = () } } diff --git a/akka-bench-jmh/src/main/scala/akka/remote/artery/FlightRecorderBench.scala b/akka-bench-jmh/src/main/scala/akka/remote/artery/FlightRecorderBench.scala index 0f3ce9da4d..0b79050202 100644 --- a/akka-bench-jmh/src/main/scala/akka/remote/artery/FlightRecorderBench.scala +++ b/akka-bench-jmh/src/main/scala/akka/remote/artery/FlightRecorderBench.scala @@ -30,7 +30,8 @@ class FlightRecorderBench { def setup(): Unit = { file = File.createTempFile("akka-flightrecorder", "dat") file.deleteOnExit() - fileChannel = FileChannel.open(file.toPath, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.READ) + fileChannel = + FileChannel.open(file.toPath, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.READ) recorder = new FlightRecorder(fileChannel) } diff --git a/akka-bench-jmh/src/main/scala/akka/remote/artery/LatchSink.scala b/akka-bench-jmh/src/main/scala/akka/remote/artery/LatchSink.scala index f82fa6f999..143feddaaa 100644 --- a/akka-bench-jmh/src/main/scala/akka/remote/artery/LatchSink.scala +++ b/akka-bench-jmh/src/main/scala/akka/remote/artery/LatchSink.scala @@ -43,7 +43,7 @@ class LatchSink(countDownAfter: Int, latch: CountDownLatch) extends GraphStage[S } class BarrierSink(countDownAfter: Int, latch: CountDownLatch, barrierAfter: Int, barrier: CyclicBarrier) - extends GraphStage[SinkShape[Any]] { + extends GraphStage[SinkShape[Any]] { val in: Inlet[Any] = 
Inlet("BarrierSink") override val shape: SinkShape[Any] = SinkShape(in) diff --git a/akka-bench-jmh/src/main/scala/akka/remote/artery/SendQueueBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/remote/artery/SendQueueBenchmark.scala index dc41041255..aef6deeedb 100644 --- a/akka-bench-jmh/src/main/scala/akka/remote/artery/SendQueueBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/remote/artery/SendQueueBenchmark.scala @@ -27,10 +27,8 @@ import org.agrona.concurrent.ManyToOneConcurrentArrayQueue @Measurement(iterations = 10) class SendQueueBenchmark { - val config = ConfigFactory.parseString( - """ - """ - ) + val config = ConfigFactory.parseString(""" + """) implicit val system = ActorSystem("SendQueueBenchmark", config) @@ -57,8 +55,10 @@ class SendQueueBenchmark { val source = Source.queue[Int](1024, OverflowStrategy.dropBuffer) - val (queue, killSwitch) = source.viaMat(KillSwitches.single)(Keep.both) - .toMat(new BarrierSink(N, latch, burstSize, barrier))(Keep.left).run()(materializer) + val (queue, killSwitch) = source + .viaMat(KillSwitches.single)(Keep.both) + .toMat(new BarrierSink(N, latch, burstSize, barrier))(Keep.left) + .run()(materializer) var n = 1 while (n <= N) { @@ -84,8 +84,10 @@ class SendQueueBenchmark { val source = Source.actorRef(1024, OverflowStrategy.dropBuffer) - val (ref, killSwitch) = source.viaMat(KillSwitches.single)(Keep.both) - .toMat(new BarrierSink(N, latch, burstSize, barrier))(Keep.left).run()(materializer) + val (ref, killSwitch) = source + .viaMat(KillSwitches.single)(Keep.both) + .toMat(new BarrierSink(N, latch, burstSize, barrier))(Keep.left) + .run()(materializer) var n = 1 while (n <= N) { @@ -112,8 +114,10 @@ class SendQueueBenchmark { val queue = new ManyToOneConcurrentArrayQueue[Int](1024) val source = Source.fromGraph(new SendQueue[Int](_ => ())) - val (sendQueue, killSwitch) = source.viaMat(KillSwitches.single)(Keep.both) - .toMat(new BarrierSink(N, latch, burstSize, barrier))(Keep.left).run()(materializer) + 
val (sendQueue, killSwitch) = source + .viaMat(KillSwitches.single)(Keep.both) + .toMat(new BarrierSink(N, latch, burstSize, barrier))(Keep.left) + .run()(materializer) sendQueue.inject(queue) var n = 1 diff --git a/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala index 62ffed449c..b4ec60085f 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/EmptySourceBenchmark.scala @@ -40,5 +40,5 @@ class EmptySourceBenchmark { Rewrite to GraphStage: [info] EmptySourceBenchmark.empty thrpt 10 17.556 ± 2.865 ops/ms - */ + */ } diff --git a/akka-bench-jmh/src/main/scala/akka/stream/FlatMapConcatBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/FlatMapConcatBenchmark.scala index db2564e3b2..64bc3cd82d 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/FlatMapConcatBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/FlatMapConcatBenchmark.scala @@ -28,16 +28,14 @@ object FlatMapConcatBenchmark { class FlatMapConcatBenchmark { import FlatMapConcatBenchmark._ - private val config = ConfigFactory.parseString( - """ + private val config = ConfigFactory.parseString(""" akka.actor.default-dispatcher { executor = "fork-join-executor" fork-join-executor { parallelism-factor = 1 } } - """ - ) + """) private implicit val system: ActorSystem = ActorSystem("FlatMapConcatBenchmark", config) @@ -63,9 +61,7 @@ class FlatMapConcatBenchmark { def sourceDotSingle(): Unit = { val latch = new CountDownLatch(1) - testSource - .flatMapConcat(Source.single) - .runWith(new LatchSink(OperationsPerInvocation, latch))(materializer) + testSource.flatMapConcat(Source.single).runWith(new LatchSink(OperationsPerInvocation, latch))(materializer) awaitLatch(latch) } @@ -87,9 +83,7 @@ class FlatMapConcatBenchmark { def oneElementList(): Unit = { val latch = new CountDownLatch(1) - testSource - .flatMapConcat(n => Source(n 
:: Nil)) - .runWith(new LatchSink(OperationsPerInvocation, latch))(materializer) + testSource.flatMapConcat(n => Source(n :: Nil)).runWith(new LatchSink(OperationsPerInvocation, latch))(materializer) awaitLatch(latch) } @@ -99,9 +93,7 @@ class FlatMapConcatBenchmark { def mapBaseline(): Unit = { val latch = new CountDownLatch(1) - testSource - .map(elem => elem) - .runWith(new LatchSink(OperationsPerInvocation, latch))(materializer) + testSource.map(elem => elem).runWith(new LatchSink(OperationsPerInvocation, latch))(materializer) awaitLatch(latch) } diff --git a/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala index bb68ed9912..dbf41643cd 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/FlowMapBenchmark.scala @@ -22,8 +22,7 @@ import scala.concurrent.duration._ @BenchmarkMode(Array(Mode.Throughput)) class FlowMapBenchmark { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka { log-config-on-start = off log-dead-letters-during-shutdown = off @@ -47,8 +46,7 @@ class FlowMapBenchmark { type = akka.testkit.CallingThreadDispatcherConfigurator } } - }""".stripMargin - ).withFallback(ConfigFactory.load()) + }""".stripMargin).withFallback(ConfigFactory.load()) implicit val system = ActorSystem("test", config) @@ -71,8 +69,7 @@ class FlowMapBenchmark { @Setup def setup(): Unit = { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialInputBufferSize, initialInputBufferSize) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialInputBufferSize, initialInputBufferSize) materializer = ActorMaterializer(settings) diff --git a/akka-bench-jmh/src/main/scala/akka/stream/FramingBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/FramingBenchmark.scala index b9da316347..f371d2f067 100644 --- 
a/akka-bench-jmh/src/main/scala/akka/stream/FramingBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/FramingBenchmark.scala @@ -22,8 +22,7 @@ import scala.util.Random @BenchmarkMode(Array(Mode.Throughput)) class FramingBenchmark { - val config: Config = ConfigFactory.parseString( - """ + val config: Config = ConfigFactory.parseString(""" akka { log-config-on-start = off log-dead-letters-during-shutdown = off @@ -45,8 +44,7 @@ class FramingBenchmark { type = akka.testkit.CallingThreadDispatcherConfigurator } } - }""".stripMargin - ).withFallback(ConfigFactory.load()) + }""".stripMargin).withFallback(ConfigFactory.load()) implicit val system: ActorSystem = ActorSystem("test", config) @@ -66,7 +64,9 @@ class FramingBenchmark { materializer = ActorMaterializer() val frame = List.range(0, messageSize, 1).map(_ => Random.nextPrintableChar()).mkString + "\n" - flow = Source.repeat(ByteString(List.range(0, framePerSeq, 1).map(_ => frame).mkString)).take(100000) + flow = Source + .repeat(ByteString(List.range(0, framePerSeq, 1).map(_ => frame).mkString)) + .take(100000) .via(Framing.delimiter(ByteString("\n"), Int.MaxValue)) } diff --git a/akka-bench-jmh/src/main/scala/akka/stream/FusedGraphsBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/FusedGraphsBenchmark.scala index 3f89f5329c..d9ed74bb09 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/FusedGraphsBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/FusedGraphsBenchmark.scala @@ -29,18 +29,19 @@ class TestSource(elems: Array[MutableElement]) extends GraphStage[SourceShape[Mu val out = Outlet[MutableElement]("TestSource.out") override val shape = SourceShape(out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { - private[this] var left = FusedGraphsBenchmark.ElementCount - 1 + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with OutHandler { + 
private[this] var left = FusedGraphsBenchmark.ElementCount - 1 - override def onPull(): Unit = { - if (left >= 0) { - push(out, elems(left)) - left -= 1 - } else completeStage() + override def onPull(): Unit = { + if (left >= 0) { + push(out, elems(left)) + left -= 1 + } else completeStage() + } + + setHandler(out, this) } - - setHandler(out, this) - } } class JitSafeCompletionLatch extends GraphStageWithMaterializedValue[SinkShape[MutableElement], CountDownLatch] { @@ -77,12 +78,13 @@ class IdentityStage extends GraphStage[FlowShape[MutableElement, MutableElement] val out = Outlet[MutableElement]("Identity.out") override val shape = FlowShape(in, out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - override def onPush(): Unit = push(out, grab(in)) - override def onPull(): Unit = pull(in) + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + override def onPush(): Unit = push(out, grab(in)) + override def onPull(): Unit = pull(in) - setHandlers(in, out, this) - } + setHandlers(in, out, this) + } } @State(Scope.Benchmark) @@ -126,130 +128,94 @@ class FusedGraphsBenchmark { val identityStage = new IdentityStage - singleIdentity = - fuse( - testSource - .via(identityStage) - .toMat(testSink)(Keep.right) - ) + singleIdentity = fuse(testSource.via(identityStage).toMat(testSink)(Keep.right)) - chainOfIdentities = - fuse( - testSource - .via(identityStage) - .via(identityStage) - .via(identityStage) - .via(identityStage) - .via(identityStage) - .via(identityStage) - .via(identityStage) - .via(identityStage) - .via(identityStage) - .via(identityStage) - .toMat(testSink)(Keep.right) - ) + chainOfIdentities = fuse( + testSource + .via(identityStage) + .via(identityStage) + .via(identityStage) + .via(identityStage) + .via(identityStage) + .via(identityStage) + .via(identityStage) + 
.via(identityStage) + .via(identityStage) + .via(identityStage) + .toMat(testSink)(Keep.right)) - singleMap = - fuse( - testSource - .map(addFunc) - .toMat(testSink)(Keep.right) - ) + singleMap = fuse(testSource.map(addFunc).toMat(testSink)(Keep.right)) - chainOfMaps = - fuse( - testSource - .map(addFunc) - .map(addFunc) - .map(addFunc) - .map(addFunc) - .map(addFunc) - .map(addFunc) - .map(addFunc) - .map(addFunc) - .map(addFunc) - .map(addFunc) - .toMat(testSink)(Keep.right) - ) + chainOfMaps = fuse( + testSource + .map(addFunc) + .map(addFunc) + .map(addFunc) + .map(addFunc) + .map(addFunc) + .map(addFunc) + .map(addFunc) + .map(addFunc) + .map(addFunc) + .map(addFunc) + .toMat(testSink)(Keep.right)) - repeatTakeMapAndFold = - fuse( - Source.repeat(new MutableElement(0)) - .take(ElementCount) - .map(addFunc) - .map(addFunc) - .fold(new MutableElement(0))((acc, x) => { acc.value += x.value; acc }) - .toMat(testSink)(Keep.right) - ) + repeatTakeMapAndFold = fuse( + Source + .repeat(new MutableElement(0)) + .take(ElementCount) + .map(addFunc) + .map(addFunc) + .fold(new MutableElement(0))((acc, x) => { acc.value += x.value; acc }) + .toMat(testSink)(Keep.right)) - singleBuffer = - fuse( - testSource - .buffer(10, OverflowStrategy.backpressure) - .toMat(testSink)(Keep.right) - ) + singleBuffer = fuse(testSource.buffer(10, OverflowStrategy.backpressure).toMat(testSink)(Keep.right)) - chainOfBuffers = - fuse( - testSource - .buffer(10, OverflowStrategy.backpressure) - .buffer(10, OverflowStrategy.backpressure) - .buffer(10, OverflowStrategy.backpressure) - .buffer(10, OverflowStrategy.backpressure) - .buffer(10, OverflowStrategy.backpressure) - .buffer(10, OverflowStrategy.backpressure) - .buffer(10, OverflowStrategy.backpressure) - .buffer(10, OverflowStrategy.backpressure) - .buffer(10, OverflowStrategy.backpressure) - .buffer(10, OverflowStrategy.backpressure) - .toMat(testSink)(Keep.right) - ) + chainOfBuffers = fuse( + testSource + .buffer(10, 
OverflowStrategy.backpressure) + .buffer(10, OverflowStrategy.backpressure) + .buffer(10, OverflowStrategy.backpressure) + .buffer(10, OverflowStrategy.backpressure) + .buffer(10, OverflowStrategy.backpressure) + .buffer(10, OverflowStrategy.backpressure) + .buffer(10, OverflowStrategy.backpressure) + .buffer(10, OverflowStrategy.backpressure) + .buffer(10, OverflowStrategy.backpressure) + .buffer(10, OverflowStrategy.backpressure) + .toMat(testSink)(Keep.right)) - val broadcastZipFlow: Flow[MutableElement, MutableElement, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ + val broadcastZipFlow: Flow[MutableElement, MutableElement, NotUsed] = Flow.fromGraph(GraphDSL.create() { + implicit b => + import GraphDSL.Implicits._ - val bcast = b.add(Broadcast[MutableElement](2)) - val zip = b.add(Zip[MutableElement, MutableElement]()) + val bcast = b.add(Broadcast[MutableElement](2)) + val zip = b.add(Zip[MutableElement, MutableElement]()) - bcast ~> zip.in0 - bcast ~> zip.in1 + bcast ~> zip.in0 + bcast ~> zip.in1 - FlowShape(bcast.in, zip.out.map(_._1).outlet) + FlowShape(bcast.in, zip.out.map(_._1).outlet) }) - val balanceMergeFlow: Flow[MutableElement, MutableElement, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ + val balanceMergeFlow: Flow[MutableElement, MutableElement, NotUsed] = Flow.fromGraph(GraphDSL.create() { + implicit b => + import GraphDSL.Implicits._ - val balance = b.add(Balance[MutableElement](2)) - val merge = b.add(Merge[MutableElement](2)) + val balance = b.add(Balance[MutableElement](2)) + val merge = b.add(Merge[MutableElement](2)) - balance ~> merge - balance ~> merge + balance ~> merge + balance ~> merge - FlowShape(balance.in, merge.out) + FlowShape(balance.in, merge.out) }) - broadcastZip = - fuse( - testSource - .via(broadcastZipFlow) - .toMat(testSink)(Keep.right) - ) + broadcastZip = fuse(testSource.via(broadcastZipFlow).toMat(testSink)(Keep.right)) - 
balanceMerge = - fuse( - testSource - .via(balanceMergeFlow) - .toMat(testSink)(Keep.right) - ) + balanceMerge = fuse(testSource.via(balanceMergeFlow).toMat(testSink)(Keep.right)) - broadcastZipBalanceMerge = - fuse( - testSource - .via(broadcastZipFlow) - .via(balanceMergeFlow) - .toMat(testSink)(Keep.right) - ) + broadcastZipBalanceMerge = fuse(testSource.via(broadcastZipFlow).via(balanceMergeFlow).toMat(testSink)(Keep.right)) } @Benchmark diff --git a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala index c3bd93565d..a4606cdc63 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/InterpreterBenchmark.scala @@ -34,9 +34,7 @@ class InterpreterBenchmark { val source = new GraphDataSource("source", data100k) val sink = new GraphDataSink[Int]("sink", data100k.size) - val b = builder(identities: _*) - .connect(source, identities.head.in) - .connect(identities.last.out, sink) + val b = builder(identities: _*).connect(source, identities.head.in).connect(identities.last.out, sink) // FIXME: This should not be here, this is pure setup overhead for (i <- (0 until identities.size - 1)) { @@ -58,20 +56,22 @@ object InterpreterBenchmark { override val out: akka.stream.Outlet[T] = Outlet[T]("out") out.id = 0 - setHandler(out, new OutHandler { - override def onPull(): Unit = { - if (idx < data.size) { - push(out, data(idx)) - idx += 1 - } else { - completeStage() - } - } - override def onDownstreamFinish(): Unit = completeStage() - }) + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + if (idx < data.size) { + push(out, data(idx)) + idx += 1 + } else { + completeStage() + } + } + override def onDownstreamFinish(): Unit = completeStage() + }) } - case class GraphDataSink[T](override val toString: String, var expected: Int) extends DownstreamBoundaryStageLogic[T] { + case class 
GraphDataSink[T](override val toString: String, var expected: Int) + extends DownstreamBoundaryStageLogic[T] { override val in: akka.stream.Inlet[T] = Inlet[T]("in") in.id = 0 diff --git a/akka-bench-jmh/src/main/scala/akka/stream/InvokeWithFeedbackBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/InvokeWithFeedbackBenchmark.scala index 8f6413597d..bd6b509684 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/InvokeWithFeedbackBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/InvokeWithFeedbackBenchmark.scala @@ -33,7 +33,8 @@ class InvokeWithFeedbackBenchmark { // these are currently the only two built in stages using invokeWithFeedback val (in, out) = - Source.queue[Int](bufferSize = 1, overflowStrategy = OverflowStrategy.backpressure) + Source + .queue[Int](bufferSize = 1, overflowStrategy = OverflowStrategy.backpressure) .toMat(Sink.queue[Int]())(Keep.both) .run() diff --git a/akka-bench-jmh/src/main/scala/akka/stream/JsonFramingBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/JsonFramingBenchmark.scala index 07d059d255..cef9afd026 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/JsonFramingBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/JsonFramingBenchmark.scala @@ -16,23 +16,18 @@ import org.openjdk.jmh.annotations._ class JsonFramingBenchmark { val json = - ByteString( - """{"fname":"Frank","name":"Smith","age":42,"id":1337,"boardMember":false}""" - ) + ByteString("""{"fname":"Frank","name":"Smith","age":42,"id":1337,"boardMember":false}""") val json5 = - ByteString( - """|{"fname":"Frank","name":"Smith","age":42,"id":1337,"boardMember":false}, + ByteString("""|{"fname":"Frank","name":"Smith","age":42,"id":1337,"boardMember":false}, |{"fname":"Bob","name":"Smith","age":42,"id":1337,"boardMember":false}, |{"fname":"Bob","name":"Smith","age":42,"id":1337,"boardMember":false}, |{"fname":"Bob","name":"Smith","age":42,"id":1337,"boardMember":false}, - 
|{"fname":"Hank","name":"Smith","age":42,"id":1337,"boardMember":false}""".stripMargin - ) + |{"fname":"Hank","name":"Smith","age":42,"id":1337,"boardMember":false}""".stripMargin) val jsonLong = ByteString( - s"""{"fname":"Frank","name":"Smith","age":42,"id":1337,"boardMember":false,"description":"${"a" * 1000000}"}""" - ) + s"""{"fname":"Frank","name":"Smith","age":42,"id":1337,"boardMember":false,"description":"${"a" * 1000000}"}""") val bracket = new JsonObjectParser diff --git a/akka-bench-jmh/src/main/scala/akka/stream/MapAsyncBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/MapAsyncBenchmark.scala index dc8766bf9b..209d52ffb8 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/MapAsyncBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/MapAsyncBenchmark.scala @@ -27,16 +27,14 @@ object MapAsyncBenchmark { class MapAsyncBenchmark { import MapAsyncBenchmark._ - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.actor.default-dispatcher { executor = "fork-join-executor" fork-join-executor { parallelism-factor = 1 } } - """ - ) + """) implicit val system = ActorSystem("MapAsyncBenchmark", config) import system.dispatcher diff --git a/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala index 09ac0e134a..6fcb0ebd4d 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/MaterializationBenchmark.scala @@ -83,10 +83,7 @@ object MaterializationBenchmark { flow } - Source.repeat(Source.single(())) - .take(subStreamCount) - .flatMapConcat(_.via(subFlow)) - .toMat(Sink.last)(Keep.right) + Source.repeat(Source.single(())).take(subStreamCount).flatMapConcat(_.via(subFlow)).toMat(Sink.last)(Keep.right) } } diff --git a/akka-bench-jmh/src/main/scala/akka/stream/NewLayoutBenchmark.scala 
b/akka-bench-jmh/src/main/scala/akka/stream/NewLayoutBenchmark.scala index ef520adc7d..c61dc8c3dd 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/NewLayoutBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/NewLayoutBenchmark.scala @@ -362,4 +362,4 @@ class NewLayoutBenchmark { } } -*/ + */ diff --git a/akka-bench-jmh/src/main/scala/akka/stream/PartitionHubBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/PartitionHubBenchmark.scala index e27551581e..72475fd945 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/PartitionHubBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/PartitionHubBenchmark.scala @@ -27,16 +27,14 @@ object PartitionHubBenchmark { class PartitionHubBenchmark { import PartitionHubBenchmark._ - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.actor.default-dispatcher { executor = "fork-join-executor" fork-join-executor { parallelism-factor = 1 } } - """ - ) + """) implicit val system = ActorSystem("PartitionHubBenchmark", config) @@ -69,11 +67,10 @@ class PartitionHubBenchmark { val N = OperationsPerInvocation val latch = new CountDownLatch(NumberOfStreams) - val source = testSource - .runWith(PartitionHub.sink[java.lang.Integer]( - (size, elem) => elem.intValue % NumberOfStreams, - startAfterNrOfConsumers = NumberOfStreams, bufferSize = BufferSize - ))(materializer) + val source = testSource.runWith( + PartitionHub.sink[java.lang.Integer]((size, elem) => elem.intValue % NumberOfStreams, + startAfterNrOfConsumers = NumberOfStreams, + bufferSize = BufferSize))(materializer) for (_ <- 0 until NumberOfStreams) source.runWith(new LatchSink(N / NumberOfStreams, latch))(materializer) @@ -90,13 +87,10 @@ class PartitionHubBenchmark { val N = OperationsPerInvocation val latch = new CountDownLatch(NumberOfStreams) - val source = testSource - .runWith( - Sink.fromGraph(new FixedSizePartitionHub( - _.intValue % NumberOfStreams, - lanes = NumberOfStreams, bufferSize = 
BufferSize - )) - )(materializer) + val source = testSource.runWith( + Sink.fromGraph( + new FixedSizePartitionHub(_.intValue % NumberOfStreams, lanes = NumberOfStreams, bufferSize = BufferSize)))( + materializer) for (_ <- 0 until NumberOfStreams) source.runWith(new LatchSink(N / NumberOfStreams, latch))(materializer) diff --git a/akka-bench-jmh/src/main/scala/akka/stream/SourceRefBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/SourceRefBenchmark.scala index fb53861f66..5393393f37 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/SourceRefBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/SourceRefBenchmark.scala @@ -25,14 +25,12 @@ import scala.util.Success @BenchmarkMode(Array(Mode.Throughput)) class SourceRefBenchmark { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka { log-config-on-start = off log-dead-letters-during-shutdown = off loglevel = "WARNING" - }""".stripMargin - ).withFallback(ConfigFactory.load()) + }""".stripMargin).withFallback(ConfigFactory.load()) implicit val system = ActorSystem("test", config) diff --git a/akka-bench-jmh/src/main/scala/akka/stream/impl/OutputStreamSourceStageBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/impl/OutputStreamSourceStageBenchmark.scala index 68f140d0bd..26655089b3 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/impl/OutputStreamSourceStageBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/impl/OutputStreamSourceStageBenchmark.scala @@ -36,9 +36,7 @@ class OutputStreamSourceStageBenchmark { @Benchmark @OperationsPerInvocation(WritesPerBench) def consumeWrites(): Unit = { - val (os, done) = StreamConverters.asOutputStream() - .toMat(Sink.ignore)(Keep.both) - .run() + val (os, done) = StreamConverters.asOutputStream().toMat(Sink.ignore)(Keep.both).run() new Thread(new Runnable { def run(): Unit = { var counter = 0 diff --git a/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesBenchmark.scala 
b/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesBenchmark.scala index fbd9feaf37..3783cc3641 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesBenchmark.scala @@ -8,12 +8,12 @@ import java.nio.file.{ Files, Path } import java.util.concurrent.TimeUnit import akka.{ Done, NotUsed } import akka.actor.ActorSystem -import akka.stream.{ Attributes, ActorMaterializer } +import akka.stream.{ ActorMaterializer, Attributes } import akka.stream.scaladsl._ import akka.util.ByteString import org.openjdk.jmh.annotations._ import scala.concurrent.duration._ -import scala.concurrent.{ Promise, Await, Future } +import scala.concurrent.{ Await, Future, Promise } import akka.stream.IOResult /** @@ -33,7 +33,8 @@ class FileSourcesBenchmark { val f = Files.createTempFile(getClass.getName, ".bench.tmp") - val ft = Source.fromIterator(() => Iterator.continually(line)) + val ft = Source + .fromIterator(() => Iterator.continually(line)) .take(10 * 39062) // adjust as needed .runWith(FileIO.toPath(f)) Await.result(ft, 30.seconds) @@ -52,7 +53,8 @@ class FileSourcesBenchmark { def setup(): Unit = { fileChannelSource = FileIO.fromPath(file, bufSize) fileInputStreamSource = StreamConverters.fromInputStream(() => Files.newInputStream(file), bufSize) - ioSourceLinesIterator = Source.fromIterator(() => scala.io.Source.fromFile(file.toFile).getLines()).map(ByteString(_)) + ioSourceLinesIterator = + Source.fromIterator(() => scala.io.Source.fromFile(file.toFile).getLines()).map(ByteString(_)) } @TearDown diff --git a/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesScaleBenchmark.scala b/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesScaleBenchmark.scala index 0a51500ef0..aedf05484c 100644 --- a/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesScaleBenchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/stream/io/FileSourcesScaleBenchmark.scala @@ -23,6 +23,7 @@ import 
scala.concurrent.{ Await, Future } @Warmup(iterations = 5, timeUnit = TimeUnit.SECONDS, batchSize = 1) @Measurement(iterations = 10, timeUnit = TimeUnit.SECONDS, batchSize = 1) class FileSourcesScaleBenchmark { + /** * Benchmark (bufSize) Mode Cnt Score Error Units * FileSourcesScaleBenchmark.flatMapMerge 2048 avgt 10 1.587 ± 0.118 s/op @@ -36,7 +37,8 @@ class FileSourcesScaleBenchmark { (1 to FILES_NUMBER).map(i => { val f = Files.createTempFile(getClass.getName, i + ".bench.tmp") - val ft = Source.fromIterator(() => Iterator.continually(line)) + val ft = Source + .fromIterator(() => Iterator.continually(line)) .take(20000) // adjust as needed .runWith(FileIO.toPath(f)) Await.result(ft, 300.seconds) @@ -66,16 +68,20 @@ class FileSourcesScaleBenchmark { @Benchmark def flatMapMerge(): Unit = { - val h = Source.fromIterator(() => files.iterator) - .flatMapMerge(FILES_NUMBER, path => FileIO.fromPath(path, bufSize)).runWith(Sink.ignore) + val h = Source + .fromIterator(() => files.iterator) + .flatMapMerge(FILES_NUMBER, path => FileIO.fromPath(path, bufSize)) + .runWith(Sink.ignore) Await.result(h, 300.seconds) } @Benchmark def mapAsync(): Unit = { - val h = Source.fromIterator(() => files.iterator) - .mapAsync(FILES_NUMBER)(path => FileIO.fromPath(path, bufSize).runWith(Sink.ignore)).runWith(Sink.ignore) + val h = Source + .fromIterator(() => files.iterator) + .mapAsync(FILES_NUMBER)(path => FileIO.fromPath(path, bufSize).runWith(Sink.ignore)) + .runWith(Sink.ignore) Await.result(h, 300.seconds) } diff --git a/akka-bench-jmh/src/main/scala/akka/util/ByteString_indexOf_Benchmark.scala b/akka-bench-jmh/src/main/scala/akka/util/ByteString_indexOf_Benchmark.scala index fd13e2fb2a..ee578fa182 100644 --- a/akka-bench-jmh/src/main/scala/akka/util/ByteString_indexOf_Benchmark.scala +++ b/akka-bench-jmh/src/main/scala/akka/util/ByteString_indexOf_Benchmark.scala @@ -29,7 +29,7 @@ class ByteString_indexOf_Benchmark { ByteString_indexOf_Benchmark.bss_indexOf_from_far_index_case 
thrpt 20 14282036.963 ± 529652.214 ops/s ByteString_indexOf_Benchmark.bss_indexOf_from_worst_case thrpt 20 7815676.051 ± 323031.073 ops/s - */ + */ @Benchmark def bss_indexOf_from_worst_case: Int = bss.indexOf('z', 1) diff --git a/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala b/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala index 62d370a813..4a930c1403 100644 --- a/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala +++ b/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala @@ -3,11 +3,12 @@ */ package akka.camel + /** * Thrown to indicate that the actor referenced by an endpoint URI cannot be * found in the actor system. * */ class ActorNotRegisteredException(uri: String) extends RuntimeException { - override def getMessage: String = "Actor [%s] doesn't exist" format uri + override def getMessage: String = "Actor [%s] doesn't exist".format(uri) } diff --git a/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala b/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala index 8876039cc2..2ff096bc0b 100644 --- a/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala +++ b/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala @@ -25,6 +25,7 @@ import scala.concurrent.duration.Duration * @param definition the processor definition */ class ActorRouteDefinition[T <: ProcessorDefinition[T]](definition: ProcessorDefinition[T]) { + /** * Sends the message to an ActorRef endpoint. * @param actorRef the actorRef to the actor. @@ -43,6 +44,6 @@ class ActorRouteDefinition[T <: ProcessorDefinition[T]](definition: ProcessorDef * This setting is used for out-capable, in-only, manually acknowledged communication. 
* @return the path to the actor, as a camel uri String */ - def to(actorRef: ActorRef, autoAck: Boolean, replyTimeout: Duration): T = definition.to(CamelPath.toUri(actorRef, autoAck, replyTimeout)) + def to(actorRef: ActorRef, autoAck: Boolean, replyTimeout: Duration): T = + definition.to(CamelPath.toUri(actorRef, autoAck, replyTimeout)) } - diff --git a/akka-camel/src/main/scala/akka/camel/Camel.scala b/akka-camel/src/main/scala/akka/camel/Camel.scala index e9dfe0ed7a..3adf147507 100644 --- a/akka-camel/src/main/scala/akka/camel/Camel.scala +++ b/akka-camel/src/main/scala/akka/camel/Camel.scala @@ -20,6 +20,7 @@ import scala.collection.immutable * This trait can be obtained through the [[akka.camel.CamelExtension]] object. */ trait Camel extends Extension with Activation { + /** * Underlying camel context. * @@ -97,21 +98,30 @@ class CamelSettings private[camel] (config: Config, dynamicAccess: DynamicAccess } val conversions = specifiedConversions.foldLeft(Map[String, Class[_ <: AnyRef]]()) { case (m, (key, fqcn)) => - m.updated(key, dynamicAccess.getClassFor[AnyRef](fqcn).recover { - case e => throw new ConfigurationException("Could not find/load Camel Converter class [" + fqcn + "]", e) - }.get) + m.updated(key, + dynamicAccess + .getClassFor[AnyRef](fqcn) + .recover { + case e => + throw new ConfigurationException("Could not find/load Camel Converter class [" + fqcn + "]", e) + } + .get) } (s: String, r: RouteDefinition) => conversions.get(s).fold(r)(r.convertBodyTo) } + /** * Configured setting, determine the class used to load/retrieve the instance of the Camel Context */ final val ContextProvider: ContextProvider = { val fqcn = config.getString("akka.camel.context-provider") - dynamicAccess.createInstanceFor[ContextProvider](fqcn, immutable.Seq.empty).recover { - case e => throw new ConfigurationException("Could not find/load Context Provider class [" + fqcn + "]", e) - }.get + dynamicAccess + .createInstanceFor[ContextProvider](fqcn, immutable.Seq.empty) + 
.recover { + case e => throw new ConfigurationException("Could not find/load Context Provider class [" + fqcn + "]", e) + } + .get } } diff --git a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala index 42a689099d..e27833b12b 100644 --- a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala @@ -17,20 +17,27 @@ import scala.collection.JavaConverters._ /** * An immutable representation of a Camel message. */ -@deprecated("Akka Camel is deprecated in favour of 'Alpakka', the Akka Streams based collection of integrations to various endpoints (including Camel).", since = "2.5.0") -class CamelMessage(val body: Any, val headers: Map[String, Any], val attachments: Map[String, DataHandler]) extends Serializable with Product { - def this(body: Any, headers: JMap[String, Any]) = this(body, headers.asScala.toMap, Map.empty[String, DataHandler]) //Java - def this(body: Any, headers: JMap[String, Any], attachments: JMap[String, DataHandler]) = this(body, headers.asScala.toMap, attachments.asScala.toMap) //Java +@deprecated( + "Akka Camel is deprecated in favour of 'Alpakka', the Akka Streams based collection of integrations to various endpoints (including Camel).", + since = "2.5.0") +class CamelMessage(val body: Any, val headers: Map[String, Any], val attachments: Map[String, DataHandler]) + extends Serializable + with Product { + def this(body: Any, headers: JMap[String, Any]) = + this(body, headers.asScala.toMap, Map.empty[String, DataHandler]) //Java + def this(body: Any, headers: JMap[String, Any], attachments: JMap[String, DataHandler]) = + this(body, headers.asScala.toMap, attachments.asScala.toMap) //Java def this(body: Any, headers: Map[String, Any]) = this(body, headers.toMap, Map.empty[String, DataHandler]) - def copy(body: Any = this.body, headers: Map[String, Any] = this.headers): CamelMessage = CamelMessage(body, headers, 
this.attachments) + def copy(body: Any = this.body, headers: Map[String, Any] = this.headers): CamelMessage = + CamelMessage(body, headers, this.attachments) - override def toString: String = "CamelMessage(%s, %s, %s)" format (body, headers, attachments) + override def toString: String = "CamelMessage(%s, %s, %s)".format(body, headers, attachments) /** * Returns those headers from this message whose name is contained in names. */ - def headers(names: Set[String]): Map[String, Any] = (headers filterKeys names).toMap + def headers(names: Set[String]): Map[String, Any] = headers.filterKeys(names).toMap /** * Java API: Returns those headers from this message whose name is contained in names. @@ -61,7 +68,11 @@ class CamelMessage(val body: Any, val headers: Map[String, Any], val attachments * */ def headerAs[T](name: String)(implicit t: ClassTag[T], camelContext: CamelContext): Try[T] = - Try(headers.get(name).map(camelContext.getTypeConverter.mandatoryConvertTo[T](t.runtimeClass.asInstanceOf[Class[T]], _)).getOrElse(throw new NoSuchElementException(name))) + Try( + headers + .get(name) + .map(camelContext.getTypeConverter.mandatoryConvertTo[T](t.runtimeClass.asInstanceOf[Class[T]], _)) + .getOrElse(throw new NoSuchElementException(name))) /** * Java API: Returns the header by given name parameter. The header is converted to type T as defined by the clazz parameter. @@ -70,7 +81,8 @@ class CamelMessage(val body: Any, val headers: Map[String, Any], val attachments * The CamelContext is accessible in a [[akka.camel.javaapi.UntypedConsumerActor]] and [[akka.camel.javaapi.UntypedProducerActor]] * using the `getCamelContext` method, and is available on the [[akka.camel.CamelExtension]]. 
*/ - def getHeaderAs[T](name: String, clazz: Class[T], camelContext: CamelContext): T = headerAs[T](name)(ClassTag(clazz), camelContext).get + def getHeaderAs[T](name: String, clazz: Class[T], camelContext: CamelContext): T = + headerAs[T](name)(ClassTag(clazz), camelContext).get /** * Returns a new CamelMessage with a transformed body using a transformer function. @@ -90,7 +102,8 @@ class CamelMessage(val body: Any, val headers: Map[String, Any], val attachments * The CamelContext is accessible in a [[akka.camel.javaapi.UntypedConsumerActor]] and [[akka.camel.javaapi.UntypedProducerActor]] * using the `getCamelContext` method, and is available on the [[akka.camel.CamelExtension]]. */ - def bodyAs[T](implicit t: ClassTag[T], camelContext: CamelContext): T = getBodyAs(t.runtimeClass.asInstanceOf[Class[T]], camelContext) + def bodyAs[T](implicit t: ClassTag[T], camelContext: CamelContext): T = + getBodyAs(t.runtimeClass.asInstanceOf[Class[T]], camelContext) /** * Java API: Returns the body of the message converted to the type as given by the clazz @@ -120,12 +133,14 @@ class CamelMessage(val body: Any, val headers: Map[String, Any], val attachments * Java API: Returns a new CamelMessage with a new body, while keeping the same headers. */ def withBody[T](body: T): CamelMessage = copy(body = body) + /** * Creates a CamelMessage with current body converted to type T. * The CamelContext is accessible in a [[akka.camel.javaapi.UntypedConsumerActor]] and [[akka.camel.javaapi.UntypedProducerActor]] * using the `getCamelContext` method, and is available on the [[akka.camel.CamelExtension]]. */ - def withBodyAs[T](implicit t: ClassTag[T], camelContext: CamelContext): CamelMessage = withBodyAs(t.runtimeClass.asInstanceOf[Class[T]]) + def withBodyAs[T](implicit t: ClassTag[T], camelContext: CamelContext): CamelMessage = + withBodyAs(t.runtimeClass.asInstanceOf[Class[T]]) /** * Java API: Creates a CamelMessage with current body converted to type clazz. 
@@ -133,12 +148,13 @@ class CamelMessage(val body: Any, val headers: Map[String, Any], val attachments * The CamelContext is accessible in a [[akka.camel.javaapi.UntypedConsumerActor]] and [[akka.camel.javaapi.UntypedProducerActor]] * using the `getCamelContext` method, and is available on the [[akka.camel.CamelExtension]]. */ - def withBodyAs[T](clazz: Class[T])(implicit camelContext: CamelContext): CamelMessage = copy(body = getBodyAs(clazz, camelContext)) + def withBodyAs[T](clazz: Class[T])(implicit camelContext: CamelContext): CamelMessage = + copy(body = getBodyAs(clazz, camelContext)) /** * Returns those attachments from this message whose name is contained in names. */ - def attachments(names: Set[String]): Map[String, DataHandler] = (attachments filterKeys names).toMap + def attachments(names: Set[String]): Map[String, DataHandler] = attachments.filterKeys(names).toMap /** * Java API: Returns those attachments from this message whose name is contained in names. @@ -157,12 +173,14 @@ class CamelMessage(val body: Any, val headers: Map[String, Any], val attachments /** * Java API: Creates a new CamelMessage with given attachments. A copy of the attachments map is made. */ - def withAttachments(attachments: JMap[String, DataHandler]): CamelMessage = CamelMessage(this.body, this.headers, attachments.asScala.toMap) + def withAttachments(attachments: JMap[String, DataHandler]): CamelMessage = + CamelMessage(this.body, this.headers, attachments.asScala.toMap) /** * SCALA API: Creates a new CamelMessage with given attachments. */ - def withAttachments(attachments: Map[String, DataHandler]): CamelMessage = CamelMessage(this.body, this.headers, attachments) + def withAttachments(attachments: Map[String, DataHandler]): CamelMessage = + CamelMessage(this.body, this.headers, attachments) /** * Indicates whether some other object is "equal to" this one. 
@@ -171,8 +189,8 @@ class CamelMessage(val body: Any, val headers: Map[String, Any], val attachments that match { case that: CamelMessage if canEqual(that) => this.body == that.body && - this.headers == that.headers && - this.attachments == that.attachments + this.headers == that.headers && + this.attachments == that.attachments case _ => false } @@ -213,17 +231,20 @@ object CamelMessage extends ((Any, Map[String, Any]) => CamelMessage) { /** * Returns a new CamelMessage based on the body and headers. */ - def apply(body: Any, headers: Map[String, Any]): CamelMessage = new CamelMessage(body, headers, Map.empty[String, DataHandler]) + def apply(body: Any, headers: Map[String, Any]): CamelMessage = + new CamelMessage(body, headers, Map.empty[String, DataHandler]) /** * Returns a new CamelMessage based on the body, headers and attachments. */ - def apply(body: Any, headers: Map[String, Any], attachments: Map[String, DataHandler]): CamelMessage = new CamelMessage(body, headers, attachments) + def apply(body: Any, headers: Map[String, Any], attachments: Map[String, DataHandler]): CamelMessage = + new CamelMessage(body, headers, attachments) /** * Returns Some(body, headers). */ - def unapply(camelMessage: CamelMessage): Option[(Any, Map[String, Any])] = Some((camelMessage.body, camelMessage.headers)) + def unapply(camelMessage: CamelMessage): Option[(Any, Map[String, Any])] = + Some((camelMessage.body, camelMessage.headers)) /** * CamelMessage header to correlate request with response messages. Applications that send @@ -250,7 +271,9 @@ object CamelMessage extends ((Any, Map[String, Any]) => CamelMessage) { * in the Camel message. 
*/ private[camel] def from(camelMessage: JCamelMessage, headers: Map[String, Any]): CamelMessage = - CamelMessage(camelMessage.getBody, headers ++ camelMessage.getHeaders.asScala, camelMessage.getAttachments.asScala.toMap) + CamelMessage(camelMessage.getBody, + headers ++ camelMessage.getHeaders.asScala, + camelMessage.getAttachments.asScala.toMap) /** * Creates a new CamelMessageWithAttachments object from the Camel message. @@ -260,8 +283,12 @@ object CamelMessage extends ((Any, Map[String, Any]) => CamelMessage) { * @param attachments additional attachments to set on the created CamelMessageWithAttachments in addition to those * in the Camel message. */ - private[camel] def from(camelMessage: JCamelMessage, headers: Map[String, Any], attachments: Map[String, DataHandler]): CamelMessage = - CamelMessage(camelMessage.getBody, headers ++ camelMessage.getHeaders.asScala, attachments ++ camelMessage.getAttachments.asScala) + private[camel] def from(camelMessage: JCamelMessage, + headers: Map[String, Any], + attachments: Map[String, DataHandler]): CamelMessage = + CamelMessage(camelMessage.getBody, + headers ++ camelMessage.getHeaders.asScala, + attachments ++ camelMessage.getAttachments.asScala) /** * INTERNAL API @@ -279,6 +306,7 @@ object CamelMessage extends ((Any, Map[String, Any]) => CamelMessage) { * When `autoAck` is set to false in the [[akka.camel.Consumer]], you can send an `Ack` to the sender of the CamelMessage. */ case object Ack { + /** Java API to get the Ack singleton */ def getInstance = this } @@ -289,6 +317,6 @@ case object Ack { * message or Exchange.getOut message, depending on the exchange pattern. 
*/ class AkkaCamelException private[akka] (cause: Throwable, val headers: Map[String, Any]) - extends AkkaException(cause.getMessage, cause) { + extends AkkaException(cause.getMessage, cause) { def this(cause: Throwable) = this(cause, Map.empty) } diff --git a/akka-camel/src/main/scala/akka/camel/Consumer.scala b/akka-camel/src/main/scala/akka/camel/Consumer.scala index 74effbe041..269327b26b 100644 --- a/akka-camel/src/main/scala/akka/camel/Consumer.scala +++ b/akka-camel/src/main/scala/akka/camel/Consumer.scala @@ -5,7 +5,7 @@ package akka.camel import akka.camel.internal.CamelSupervisor.Register -import org.apache.camel.model.{ RouteDefinition, ProcessorDefinition } +import org.apache.camel.model.{ ProcessorDefinition, RouteDefinition } import akka.actor._ import scala.concurrent.duration._ import akka.dispatch.Mapper @@ -13,9 +13,12 @@ import akka.dispatch.Mapper /** * Mixed in by Actor implementations that consume message from Camel endpoints. */ -@deprecated("Akka Camel is deprecated in favour of 'Alpakka', the Akka Streams based collection of integrations to various endpoints (including Camel).", since = "2.5.0") +@deprecated( + "Akka Camel is deprecated in favour of 'Alpakka', the Akka Streams based collection of integrations to various endpoints (including Camel).", + since = "2.5.0") trait Consumer extends Actor with CamelSupport { import Consumer._ + /** * Must return the Camel endpoint URI that the consumer wants to consume messages from. */ @@ -35,7 +38,9 @@ trait Consumer extends Actor with CamelSupport { } private[this] def register(): Unit = { - camel.supervisor ! Register(self, endpointUri, Some(ConsumerConfig(activationTimeout, replyTimeout, autoAck, onRouteDefinition))) + camel.supervisor ! 
Register(self, + endpointUri, + Some(ConsumerConfig(activationTimeout, replyTimeout, autoAck, onRouteDefinition))) } /** @@ -93,10 +98,17 @@ private[camel] object Consumer { * * Was a case class but has been split up as a workaround for SI-8283 */ -private[camel] class ConsumerConfig(val activationTimeout: FiniteDuration, val replyTimeout: FiniteDuration, val autoAck: Boolean, val onRouteDefinition: RouteDefinition => ProcessorDefinition[_]) extends NoSerializationVerificationNeeded - with scala.Serializable +private[camel] class ConsumerConfig(val activationTimeout: FiniteDuration, + val replyTimeout: FiniteDuration, + val autoAck: Boolean, + val onRouteDefinition: RouteDefinition => ProcessorDefinition[_]) + extends NoSerializationVerificationNeeded + with scala.Serializable private[camel] object ConsumerConfig { - def apply(activationTimeout: FiniteDuration, replyTimeout: FiniteDuration, autoAck: Boolean, onRouteDefinition: RouteDefinition => ProcessorDefinition[_]): ConsumerConfig = + def apply(activationTimeout: FiniteDuration, + replyTimeout: FiniteDuration, + autoAck: Boolean, + onRouteDefinition: RouteDefinition => ProcessorDefinition[_]): ConsumerConfig = new ConsumerConfig(activationTimeout, replyTimeout, autoAck, onRouteDefinition) } diff --git a/akka-camel/src/main/scala/akka/camel/ContextProvider.scala b/akka-camel/src/main/scala/akka/camel/ContextProvider.scala index 276f0a6d19..656b43edac 100644 --- a/akka-camel/src/main/scala/akka/camel/ContextProvider.scala +++ b/akka-camel/src/main/scala/akka/camel/ContextProvider.scala @@ -12,6 +12,7 @@ import org.apache.camel.impl.DefaultCamelContext * An instance of this class must be instantiable using a no-arg constructor. 
*/ trait ContextProvider { + /** * Retrieve or create a Camel Context for the given actor system * Called once per actor system diff --git a/akka-camel/src/main/scala/akka/camel/Producer.scala b/akka-camel/src/main/scala/akka/camel/Producer.scala index ad7bcfc8bc..e6078dacee 100644 --- a/akka-camel/src/main/scala/akka/camel/Producer.scala +++ b/akka-camel/src/main/scala/akka/camel/Producer.scala @@ -4,11 +4,11 @@ package akka.camel -import akka.actor.{ Props, NoSerializationVerificationNeeded, ActorRef, Actor } +import akka.actor.{ Actor, ActorRef, NoSerializationVerificationNeeded, Props } import internal.CamelSupervisor.{ CamelProducerObjects, Register } import internal.CamelExchangeAdapter import akka.actor.Status.Failure -import org.apache.camel.{ Endpoint, ExchangePattern, AsyncCallback } +import org.apache.camel.{ AsyncCallback, Endpoint, ExchangePattern } import org.apache.camel.processor.SendProcessor /** @@ -65,10 +65,8 @@ trait ProducerSupport extends Actor with CamelSupport { } producerChild = Some(context.actorOf(Props(new ProducerChild(endpoint, processor)).withDispatcher(disp))) messages = { - for ( - child <- producerChild; - (snd, msg) <- messages - ) child.tell(transformOutgoingMessage(msg), snd) + for (child <- producerChild; + (snd, msg) <- messages) child.tell(transformOutgoingMessage(msg), snd) Vector.empty } } @@ -80,7 +78,7 @@ trait ProducerSupport extends Actor with CamelSupport { case msg => producerChild match { - case Some(child) => child forward transformOutgoingMessage(msg) + case Some(child) => child.forward(transformOutgoingMessage(msg)) case None => messages :+= ((sender(), msg)) } } @@ -106,14 +104,14 @@ trait ProducerSupport extends Actor with CamelSupport { * done. This method may be overridden by subtraits or subclasses (e.g. to forward responses to another * actor). */ - protected def routeResponse(msg: Any): Unit = if (!oneway) sender() ! 
transformResponse(msg) private class ProducerChild(endpoint: Endpoint, processor: SendProcessor) extends Actor { def receive = { - case msg @ (_: FailureResult | _: MessageResult) => context.parent forward msg + case msg @ (_: FailureResult | _: MessageResult) => context.parent.forward(msg) case msg => produce(endpoint, processor, msg, if (oneway) ExchangePattern.InOnly else ExchangePattern.InOut) } + /** * Initiates a message exchange of given pattern with the endpoint specified by * endpointUri. The in-message of the initiated exchange is the canonical form @@ -138,19 +136,25 @@ trait ProducerSupport extends Actor with CamelSupport { val cmsg = CamelMessage.canonicalize(msg) xchg.setRequest(cmsg) - processor.process(xchg.exchange, new AsyncCallback { - // Ignoring doneSync, sending back async uniformly. - def done(doneSync: Boolean): Unit = producer.tell( - if (xchg.exchange.isFailed) xchg.toFailureResult(cmsg.headers(headersToCopy)) - else MessageResult(xchg.toResponseMessage(cmsg.headers(headersToCopy))), originalSender) - }) + processor.process(xchg.exchange, + new AsyncCallback { + // Ignoring doneSync, sending back async uniformly. + def done(doneSync: Boolean): Unit = + producer.tell( + if (xchg.exchange.isFailed) xchg.toFailureResult(cmsg.headers(headersToCopy)) + else MessageResult(xchg.toResponseMessage(cmsg.headers(headersToCopy))), + originalSender) + }) } } } + /** * Mixed in by Actor implementations to produce messages to Camel endpoints. 
*/ -@deprecated("Akka Camel is deprecated in favour of 'Alpakka', the Akka Streams based collection of integrations to various endpoints (including Camel).", since = "2.5.0") +@deprecated( + "Akka Camel is deprecated in favour of 'Alpakka', the Akka Streams based collection of integrations to various endpoints (including Camel).", + since = "2.5.0") trait Producer extends ProducerSupport { this: Actor => /** @@ -168,15 +172,17 @@ private final case class MessageResult(message: CamelMessage) extends NoSerializ /** * INTERNAL API */ -private final case class FailureResult(cause: Throwable, headers: Map[String, Any] = Map.empty) extends NoSerializationVerificationNeeded +private final case class FailureResult(cause: Throwable, headers: Map[String, Any] = Map.empty) + extends NoSerializationVerificationNeeded /** * A one-way producer. * * */ -@deprecated("Akka Camel is deprecated in favour of 'Alpakka', the Akka Streams based collection of integrations to various endpoints (including Camel).", since = "2.5.0") +@deprecated( + "Akka Camel is deprecated in favour of 'Alpakka', the Akka Streams based collection of integrations to various endpoints (including Camel).", + since = "2.5.0") trait Oneway extends Producer { this: Actor => override def oneway: Boolean = true } - diff --git a/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala b/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala index ec6e8506b4..1b4edb61d8 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala @@ -7,6 +7,7 @@ package akka.camel.internal import akka.actor.ActorRef private[camel] object ActivationProtocol { + /** * Super class of all activation messages. Registration of the Camel [[akka.camel.Consumer]]s and [[akka.camel.Producer]]s * is done asynchronously. 
Activation messages are sent in the Camel extension when endpoints are diff --git a/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala b/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala index f898c93a72..824c3021b0 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala @@ -15,6 +15,7 @@ import akka.camel.internal.ActivationProtocol._ private[camel] class ActivationTracker extends Actor with ActorLogging { val activations = new WeakHashMap[ActorRef, ActivationStateMachine] + /** * A state machine that keeps track of the endpoint activation status of an actor. */ @@ -22,6 +23,7 @@ private[camel] class ActivationTracker extends Actor with ActorLogging { type State = PartialFunction[ActivationMessage, Unit] var receive: State = notActivated() + /** * Not activated state * @return a partial function that handles messages in the 'not activated' state @@ -54,10 +56,10 @@ private[camel] class ActivationTracker extends Actor with ActorLogging { case AwaitActivation(ref) => sender() ! EndpointActivated(ref) case AwaitDeActivation(ref) => awaitingDeActivation ::= sender() case msg @ EndpointDeActivated(ref) => - awaitingDeActivation foreach (_ ! msg) + awaitingDeActivation.foreach(_ ! msg) receive = deactivated case msg @ EndpointFailedToDeActivate(ref, cause) => - awaitingDeActivation foreach (_ ! msg) + awaitingDeActivation.foreach(_ ! 
msg) receive = failedToDeActivate(cause) } } @@ -101,11 +103,12 @@ private[camel] class ActivationTracker extends Actor with ActorLogging { override def receive = { case msg @ ActivationMessage(ref) => - (activations.getOrElseUpdate(ref, new ActivationStateMachine).receive orElse logStateWarning(ref))(msg) + activations.getOrElseUpdate(ref, new ActivationStateMachine).receive.orElse(logStateWarning(ref))(msg) } - private[this] def logStateWarning(actorRef: ActorRef): Receive = - { case msg => log.warning("Message [{}] not expected in current state of actor [{}]", msg, actorRef) } + private[this] def logStateWarning(actorRef: ActorRef): Receive = { + case msg => log.warning("Message [{}] not expected in current state of actor [{}]", msg, actorRef) + } } /** diff --git a/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala b/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala index d9d948c30a..87b643b666 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala @@ -6,7 +6,7 @@ package akka.camel.internal import org.apache.camel.util.ExchangeHelper import org.apache.camel.{ Exchange, Message => JCamelMessage } -import akka.camel.{ FailureResult, AkkaCamelException, CamelMessage } +import akka.camel.{ AkkaCamelException, CamelMessage, FailureResult } /** * INTERNAL API @@ -16,6 +16,7 @@ import akka.camel.{ FailureResult, AkkaCamelException, CamelMessage } * to org.apache.camel.Message when using Camel. 
*/ private[camel] class CamelExchangeAdapter(val exchange: Exchange) { + /** * Returns the exchange id */ diff --git a/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala b/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala index bdf1ddba21..0acdb8827d 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala @@ -32,8 +32,8 @@ private[camel] class CamelSupervisor extends Actor with CamelSupport { def receive = { case AddWatch(actorRef) => context.watch(actorRef) case Terminated(actorRef) => registry ! DeRegister(actorRef) - case msg: ActivationMessage => activationTracker forward msg - case msg => registry forward (msg) + case msg: ActivationMessage => activationTracker.forward(msg) + case msg => registry.forward(msg) } } @@ -50,7 +50,8 @@ private[camel] object CamelSupervisor { * INTERNAL API * Registers a consumer or a producer. */ - final case class Register(actorRef: ActorRef, endpointUri: String, config: Option[ConsumerConfig] = None) extends NoSerializationVerificationNeeded + final case class Register(actorRef: ActorRef, endpointUri: String, config: Option[ConsumerConfig] = None) + extends NoSerializationVerificationNeeded /** * INTERNAL API @@ -70,35 +71,42 @@ private[camel] object CamelSupervisor { * INTERNAL API * Provides a Producer with the required camel objects to function. */ - final case class CamelProducerObjects(endpoint: Endpoint, processor: SendProcessor) extends NoSerializationVerificationNeeded + final case class CamelProducerObjects(endpoint: Endpoint, processor: SendProcessor) + extends NoSerializationVerificationNeeded } /** * INTERNAL API * Thrown by registrars to indicate that the actor could not be de-activated. 
*/ -private[camel] class ActorDeActivationException(val actorRef: ActorRef, cause: Throwable) extends AkkaException(s"$actorRef failed to de-activate", cause) +private[camel] class ActorDeActivationException(val actorRef: ActorRef, cause: Throwable) + extends AkkaException(s"$actorRef failed to de-activate", cause) /** * INTERNAL API * Thrown by the registrars to indicate that the actor could not be activated. */ -private[camel] class ActorActivationException(val actorRef: ActorRef, cause: Throwable) extends AkkaException(s"$actorRef failed to activate", cause) +private[camel] class ActorActivationException(val actorRef: ActorRef, cause: Throwable) + extends AkkaException(s"$actorRef failed to activate", cause) /** * INTERNAL API * Registry for Camel Consumers and Producers. Supervises the registrars. */ private[camel] class Registry(activationTracker: ActorRef) extends Actor with CamelSupport { - import context.{ stop, parent } + import context.{ parent, stop } - private val producerRegistrar = context.actorOf(Props(classOf[ProducerRegistrar], activationTracker), "producerRegistrar") - private val consumerRegistrar = context.actorOf(Props(classOf[ConsumerRegistrar], activationTracker), "consumerRegistrar") + private val producerRegistrar = + context.actorOf(Props(classOf[ProducerRegistrar], activationTracker), "producerRegistrar") + private val consumerRegistrar = + context.actorOf(Props(classOf[ConsumerRegistrar], activationTracker), "consumerRegistrar") private var producers = Set[ActorRef]() private var consumers = Set[ActorRef]() class RegistryLogStrategy()(_decider: SupervisorStrategy.Decider) extends OneForOneStrategy()(_decider) { - override def logFailure(context: ActorContext, child: ActorRef, cause: Throwable, + override def logFailure(context: ActorContext, + child: ActorRef, + cause: Throwable, decision: SupervisorStrategy.Directive): Unit = cause match { case _: ActorActivationException | _: ActorDeActivationException => @@ -126,7 +134,7 @@ 
private[camel] class Registry(activationTracker: ActorRef) extends Actor with Ca case msg @ Register(consumer, _, Some(_)) => if (!consumers(consumer)) { consumers += consumer - consumerRegistrar forward msg + consumerRegistrar.forward(msg) parent ! AddWatch(consumer) } case msg @ Register(producer, _, None) => @@ -134,7 +142,7 @@ private[camel] class Registry(activationTracker: ActorRef) extends Actor with Ca producers += producer parent ! AddWatch(producer) } - producerRegistrar forward msg + producerRegistrar.forward(msg) case DeRegister(actorRef) => producers.find(_ == actorRef).foreach { p => deRegisterProducer(p) @@ -173,10 +181,12 @@ private[camel] class ProducerRegistrar(activationTracker: ActorRef) extends Acto case NonFatal(e) => throw new ActorActivationException(producer, e) } } else { - camelObjects.get(producer) foreach { case (endpoint, processor) => producer ! CamelProducerObjects(endpoint, processor) } + camelObjects.get(producer).foreach { + case (endpoint, processor) => producer ! 
CamelProducerObjects(endpoint, processor) + } } case DeRegister(producer) => - camelObjects.get(producer) foreach { + camelObjects.get(producer).foreach { case (_, processor) => try { camelObjects.get(producer).foreach(_._2.stop()) diff --git a/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala b/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala index 727821adfd..9b5688a140 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala @@ -20,15 +20,18 @@ import scala.language.existentials * * */ -private[camel] class ConsumerActorRouteBuilder(endpointUri: String, consumer: ActorRef, config: ConsumerConfig, settings: CamelSettings) extends RouteBuilder { +private[camel] class ConsumerActorRouteBuilder(endpointUri: String, + consumer: ActorRef, + config: ConsumerConfig, + settings: CamelSettings) + extends RouteBuilder { protected def targetActorUri = CamelPath.toUri(consumer, config.autoAck, config.replyTimeout) def configure(): Unit = applyUserRouteCustomization( - settings.Conversions.apply( - endpointUri take endpointUri.indexOf(":"), // e.g. "http" from "http://whatever/..." - from(endpointUri).routeId(consumer.path.toString))).to(targetActorUri) + settings.Conversions.apply(endpointUri.take(endpointUri.indexOf(":")), // e.g. "http" from "http://whatever/..." 
+ from(endpointUri).routeId(consumer.path.toString))).to(targetActorUri) def applyUserRouteCustomization(rd: RouteDefinition) = config.onRouteDefinition(rd) } diff --git a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala index 841b8953da..0a04aa32e2 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala @@ -4,19 +4,19 @@ package akka.camel.internal -import akka.camel.internal.component.{ DurationTypeConverter, ActorComponent } +import akka.camel.internal.component.{ ActorComponent, DurationTypeConverter } import org.apache.camel.impl.DefaultCamelContext import scala.Predef._ import akka.event.Logging -import akka.camel.{ CamelSettings, Camel } +import akka.camel.{ Camel, CamelSettings } import akka.camel.internal.ActivationProtocol._ import scala.util.control.NonFatal import scala.concurrent.duration._ import org.apache.camel.ProducerTemplate -import scala.concurrent.{ Future, ExecutionContext } +import scala.concurrent.{ ExecutionContext, Future } import akka.util.Timeout import akka.pattern.ask -import akka.actor.{ ExtendedActorSystem, ActorRef, Props } +import akka.actor.{ ActorRef, ExtendedActorSystem, Props } /** * INTERNAL API @@ -52,7 +52,8 @@ private[camel] class DefaultCamel(val system: ExtendedActorSystem) extends Camel */ def start(): this.type = { context.start() - try template.start() catch { case NonFatal(e) => context.stop(); throw e } + try template.start() + catch { case NonFatal(e) => context.stop(); throw e } log.debug("Started CamelContext[{}] for ActorSystem[{}]", context.getName, system.name) this } @@ -65,8 +66,12 @@ private[camel] class DefaultCamel(val system: ExtendedActorSystem) extends Camel * @see akka.camel.internal.DefaultCamel#start */ def shutdown(): Unit = { - try context.stop() finally { - try template.stop() catch { case NonFatal(e) => 
log.debug("Swallowing non-fatal exception [{}] on stopping Camel producer template", e) } + try context.stop() + finally { + try template.stop() + catch { + case NonFatal(e) => log.debug("Swallowing non-fatal exception [{}] on stopping Camel producer template", e) + } } log.debug("Stopped CamelContext[{}] for ActorSystem[{}]", context.getName, system.name) } @@ -79,11 +84,12 @@ private[camel] class DefaultCamel(val system: ExtendedActorSystem) extends Camel * @param timeout the timeout for the Future */ def activationFutureFor(endpoint: ActorRef)(implicit timeout: Timeout, executor: ExecutionContext): Future[ActorRef] = - - (supervisor.ask(AwaitActivation(endpoint))(timeout)).map[ActorRef]({ - case EndpointActivated(`endpoint`) => endpoint - case EndpointFailedToActivate(`endpoint`, cause) => throw cause - }) + (supervisor + .ask(AwaitActivation(endpoint))(timeout)) + .map[ActorRef]({ + case EndpointActivated(`endpoint`) => endpoint + case EndpointFailedToActivate(`endpoint`, cause) => throw cause + }) /** * Produces a Future which will be completed when the given endpoint has been deactivated or @@ -92,9 +98,12 @@ private[camel] class DefaultCamel(val system: ExtendedActorSystem) extends Camel * @param endpoint the endpoint to be deactivated * @param timeout the timeout of the Future */ - def deactivationFutureFor(endpoint: ActorRef)(implicit timeout: Timeout, executor: ExecutionContext): Future[ActorRef] = - (supervisor.ask(AwaitDeActivation(endpoint))(timeout)).map[ActorRef]({ - case EndpointDeActivated(`endpoint`) => endpoint - case EndpointFailedToDeActivate(`endpoint`, cause) => throw cause - }) + def deactivationFutureFor(endpoint: ActorRef)(implicit timeout: Timeout, + executor: ExecutionContext): Future[ActorRef] = + (supervisor + .ask(AwaitDeActivation(endpoint))(timeout)) + .map[ActorRef]({ + case EndpointDeActivated(`endpoint`) => endpoint + case EndpointFailedToDeActivate(`endpoint`, cause) => throw cause + }) } diff --git 
a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala index a24dcd709f..3dd6458fd7 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala @@ -6,17 +6,17 @@ package akka.camel.internal.component import java.util.{ Map => JMap } import org.apache.camel._ -import org.apache.camel.impl.{ DefaultProducer, DefaultEndpoint, DefaultComponent } +import org.apache.camel.impl.{ DefaultComponent, DefaultEndpoint, DefaultProducer } import akka.actor._ import akka.pattern._ import scala.beans.BeanProperty import scala.concurrent.duration._ import scala.concurrent.{ Future } import scala.util.control.NonFatal -import java.util.concurrent.{ TimeoutException, CountDownLatch } +import java.util.concurrent.{ CountDownLatch, TimeoutException } import akka.util.Timeout import akka.camel.internal.CamelExchangeAdapter -import akka.camel.{ ActorNotRegisteredException, Camel, Ack, FailureResult, CamelMessage } +import akka.camel.{ Ack, ActorNotRegisteredException, Camel, CamelMessage, FailureResult } import support.TypeConverterSupport import scala.util.{ Failure, Success, Try } @@ -31,6 +31,7 @@ import scala.util.{ Failure, Success, Try } * this component provides. */ private[camel] class ActorComponent(camel: Camel, system: ActorSystem) extends DefaultComponent { + /** * @see org.apache.camel.Component */ @@ -49,11 +50,9 @@ private[camel] class ActorComponent(camel: Camel, system: ActorSystem) extends D * [actorPath]?[options]%s, * where [actorPath] refers to the actor path to the actor. 
*/ -private[camel] class ActorEndpoint( - uri: String, - comp: ActorComponent, - val path: ActorEndpointPath, - val camel: Camel) extends DefaultEndpoint(uri, comp) with ActorEndpointConfig { +private[camel] class ActorEndpoint(uri: String, comp: ActorComponent, val path: ActorEndpointPath, val camel: Camel) + extends DefaultEndpoint(uri, comp) + with ActorEndpointConfig { /** * The ActorEndpoint only supports receiving messages from Camel. @@ -99,7 +98,10 @@ private[camel] trait ActorEndpointConfig { * @see akka.camel.internal.component.ActorComponent * @see akka.camel.internal.component.ActorEndpoint */ -private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) extends DefaultProducer(endpoint) with AsyncProcessor { +private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) + extends DefaultProducer(endpoint) + with AsyncProcessor { + /** * Processes the exchange. * Calls the synchronous version of the method and waits for the result (blocking). @@ -117,7 +119,8 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex * The callback should therefore be careful of starting recursive loop. 
* @return (doneSync) true to continue execute synchronously, false to continue being executed asynchronously */ - def process(exchange: Exchange, callback: AsyncCallback): Boolean = processExchangeAdapter(new CamelExchangeAdapter(exchange), callback) + def process(exchange: Exchange, callback: AsyncCallback): Boolean = + processExchangeAdapter(new CamelExchangeAdapter(exchange), callback) /** * INTERNAL API @@ -148,20 +151,37 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex if (exchange.isOutCapable) { case Success(failure: FailureResult) => exchange.setFailure(failure) case Success(msg) => exchange.setResponse(CamelMessage.canonicalize(msg)) - case Failure(e: TimeoutException) => exchange.setFailure(FailureResult(new TimeoutException("Failed to get response from the actor [%s] within timeout [%s]. Check replyTimeout and blocking settings [%s]" format (endpoint.path, endpoint.replyTimeout, endpoint)))) - case Failure(throwable) => exchange.setFailure(FailureResult(throwable)) + case Failure(e: TimeoutException) => + exchange.setFailure( + FailureResult( + new TimeoutException( + "Failed to get response from the actor [%s] within timeout [%s]. Check replyTimeout and blocking settings [%s]" + .format(endpoint.path, endpoint.replyTimeout, endpoint)))) + case Failure(throwable) => exchange.setFailure(FailureResult(throwable)) } else { case Success(Ack) => () /* no response message to set */ case Success(failure: FailureResult) => exchange.setFailure(failure) - case Success(msg) => exchange.setFailure(FailureResult(new IllegalArgumentException("Expected Ack or Failure message, but got: [%s] from actor [%s]" format (msg, endpoint.path)))) - case Failure(e: TimeoutException) => exchange.setFailure(FailureResult(new TimeoutException("Failed to get Ack or Failure response from the actor [%s] within timeout [%s]. 
Check replyTimeout and blocking settings [%s]" format (endpoint.path, endpoint.replyTimeout, endpoint)))) - case Failure(throwable) => exchange.setFailure(FailureResult(throwable)) + case Success(msg) => + exchange.setFailure( + FailureResult( + new IllegalArgumentException( + "Expected Ack or Failure message, but got: [%s] from actor [%s]".format(msg, endpoint.path)))) + case Failure(e: TimeoutException) => + exchange.setFailure( + FailureResult( + new TimeoutException( + "Failed to get Ack or Failure response from the actor [%s] within timeout [%s]. Check replyTimeout and blocking settings [%s]" + .format(endpoint.path, endpoint.replyTimeout, endpoint)))) + case Failure(throwable) => exchange.setFailure(FailureResult(throwable)) } // FIXME #3074 how do we solve this with actorSelection? - val async = try actorFor(endpoint.path).ask(messageFor(exchange))(Timeout(endpoint.replyTimeout)) catch { case NonFatal(e) => Future.failed(e) } + val async = try actorFor(endpoint.path).ask(messageFor(exchange))(Timeout(endpoint.replyTimeout)) + catch { case NonFatal(e) => Future.failed(e) } implicit val ec = camel.system.dispatcher // FIXME which ExecutionContext should be used here? - async.onComplete(action andThen { _ => callback.done(false) }) + async.onComplete(action.andThen { _ => + callback.done(false) + }) false } @@ -169,10 +189,12 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex // FIXME #3074 how do we solve this with actorSelection? private def fireAndForget(message: CamelMessage, exchange: CamelExchangeAdapter): Unit = - try { actorFor(endpoint.path) ! message } catch { case NonFatal(e) => exchange.setFailure(new FailureResult(e)) } + try { + actorFor(endpoint.path) ! 
message + } catch { case NonFatal(e) => exchange.setFailure(new FailureResult(e)) } private[this] def actorFor(path: ActorEndpointPath): ActorRef = - path.findActorIn(camel.system) getOrElse (throw new ActorNotRegisteredException(path.actorPath)) + path.findActorIn(camel.system).getOrElse(throw new ActorNotRegisteredException(path.actorPath)) private[this] def messageFor(exchange: CamelExchangeAdapter) = exchange.toRequestMessage(Map(CamelMessage.MessageExchangeId -> exchange.getExchangeId)) @@ -185,12 +207,13 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex private[camel] object DurationTypeConverter extends TypeConverterSupport { @throws(classOf[TypeConversionException]) - def convertTo[T](valueType: Class[T], exchange: Exchange, value: AnyRef): T = valueType.cast(try { - val d = Duration(value.toString) - if (valueType.isInstance(d)) d else null - } catch { - case NonFatal(throwable) => throw new TypeConversionException(value, valueType, throwable) - }) + def convertTo[T](valueType: Class[T], exchange: Exchange, value: AnyRef): T = + valueType.cast(try { + val d = Duration(value.toString) + if (valueType.isInstance(d)) d else null + } catch { + case NonFatal(throwable) => throw new TypeConversionException(value, valueType, throwable) + }) } /** @@ -217,6 +240,7 @@ private[camel] case class ActorEndpointPath private (actorPath: String) { * The URI to the actor is exactly the same as the string representation of the ActorPath, except that it can also have optional URI parameters to configure the Consumer Actor. */ object CamelPath { + /** * Converts the actorRef to a Camel URI (string) which can be used in custom routes. * The created URI will have no parameters, it is purely the string representation of the actor's path. 
@@ -235,7 +259,8 @@ object CamelPath { * @param replyTimeout parameter for a Consumer Actor, see [[akka.camel.ConsumerConfig]] * @return the Camel URI to the Consumer Actor, including the parameters for auto acknowledgement and replyTimeout. */ - def toUri(actorRef: ActorRef, autoAck: Boolean, replyTimeout: Duration): String = "%s?autoAck=%s&replyTimeout=%s".format(actorRef.path.toString, autoAck, replyTimeout.toString) + def toUri(actorRef: ActorRef, autoAck: Boolean, replyTimeout: Duration): String = + "%s?autoAck=%s&replyTimeout=%s".format(actorRef.path.toString, autoAck, replyTimeout.toString) } /** @@ -252,7 +277,10 @@ private[camel] case object ActorEndpointPath { * parameters can be optionally added to the actor path to indicate auto-acknowledgement and replyTimeout for a [[akka.camel.Consumer]] actor. */ def fromCamelPath(camelPath: String): ActorEndpointPath = camelPath match { - case id if id startsWith "akka://" => new ActorEndpointPath(id.split('?')(0)) - case _ => throw new IllegalArgumentException("Invalid path: [%s] - should be an actorPath starting with 'akka://', optionally followed by options" format camelPath) + case id if id.startsWith("akka://") => new ActorEndpointPath(id.split('?')(0)) + case _ => + throw new IllegalArgumentException( + "Invalid path: [%s] - should be an actorPath starting with 'akka://', optionally followed by options".format( + camelPath)) } } diff --git a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala index 68b87813c6..fcbfe0dabb 100644 --- a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala +++ b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala @@ -16,6 +16,7 @@ import org.apache.camel.impl.DefaultCamelContext */ @Deprecated abstract class UntypedProducerActor extends UntypedActor with ProducerSupport { + /** * Called before the message is sent to the endpoint specified by 
getEndpointUri. The original * message is passed as argument. By default, this method simply returns the argument but may be overridden diff --git a/akka-camel/src/main/scala/akka/package.scala b/akka-camel/src/main/scala/akka/package.scala index ba9b15662b..d53f77b108 100644 --- a/akka-camel/src/main/scala/akka/package.scala +++ b/akka-camel/src/main/scala/akka/package.scala @@ -9,6 +9,7 @@ import language.implicitConversions import org.apache.camel.model.ProcessorDefinition package object camel { + /** * To allow using Actors with the Camel Route DSL: * @@ -16,5 +17,6 @@ package object camel { * from("file://data/input/CamelConsumer").to(actor) * }}} */ - implicit def toActorRouteDefinition[T <: ProcessorDefinition[T]](definition: ProcessorDefinition[T]) = new ActorRouteDefinition(definition) + implicit def toActorRouteDefinition[T <: ProcessorDefinition[T]](definition: ProcessorDefinition[T]) = + new ActorRouteDefinition(definition) } diff --git a/akka-camel/src/test/scala/akka/camel/CamelConfigSpec.scala b/akka-camel/src/test/scala/akka/camel/CamelConfigSpec.scala index b742060ca9..e34b6bf3df 100644 --- a/akka-camel/src/test/scala/akka/camel/CamelConfigSpec.scala +++ b/akka-camel/src/test/scala/akka/camel/CamelConfigSpec.scala @@ -50,4 +50,3 @@ class CamelConfigSpec extends WordSpec with Matchers { } } } - diff --git a/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala b/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala index 264dd77aff..0e1d40bedb 100644 --- a/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala @@ -6,10 +6,10 @@ package akka.camel import org.scalatest.WordSpec import org.scalatest.Matchers -import scala.concurrent.{ Promise, Await, Future } +import scala.concurrent.{ Await, Future, Promise } import scala.collection.immutable import akka.camel.TestSupport.NonSharedCamelSystem -import akka.actor.{ ActorRef, Props, Actor } 
+import akka.actor.{ Actor, ActorRef, Props } import akka.routing.BroadcastGroup import scala.concurrent.duration._ import akka.testkit._ @@ -51,7 +51,7 @@ class ConcurrentActivationTest extends WordSpec with Matchers with NonSharedCame // map over all futures, put all futures in one list of activated and deactivated actor refs. futureRegistrarLists.map { case (futureActivations, futureDeactivations) => - futureActivations zip futureDeactivations map { + futureActivations.zip(futureDeactivations).map { case (activations, deactivations) => promiseAllRefs.success((activations.flatten, deactivations.flatten)) } @@ -61,7 +61,8 @@ class ConcurrentActivationTest extends WordSpec with Matchers with NonSharedCame activations.size should ===(2 * number * number) // should be the size of the activated activated producers and consumers deactivations.size should ===(2 * number * number) - def partitionNames(refs: immutable.Seq[ActorRef]) = refs.map(_.path.name).partition(_.startsWith("concurrent-test-echo-consumer")) + def partitionNames(refs: immutable.Seq[ActorRef]) = + refs.map(_.path.name).partition(_.startsWith("concurrent-test-echo-consumer")) def assertContainsSameElements(lists: (Seq[_], Seq[_])): Unit = { val (a, b) = lists a.intersect(b).size should ===(a.size) @@ -93,7 +94,9 @@ class ConsumerBroadcast(promise: Promise[(Future[List[List[ActorRef]]], Future[L allActivationFutures = allActivationFutures :+ activationListFuture allDeactivationFutures = allDeactivationFutures :+ deactivationListFuture - val routee = context.actorOf(Props(classOf[Registrar], i, number, activationListPromise, deactivationListPromise), "registrar-" + i) + val routee = + context.actorOf(Props(classOf[Registrar], i, number, activationListPromise, deactivationListPromise), + "registrar-" + i) routee.path.toString } promise.success(Future.sequence(allActivationFutures) -> Future.sequence(allDeactivationFutures)) @@ -110,8 +113,12 @@ final case class DeRegisterConsumersAndProducers() final case 
class Activations() final case class DeActivations() -class Registrar(val start: Int, val number: Int, activationsPromise: Promise[List[ActorRef]], - deActivationsPromise: Promise[List[ActorRef]]) extends Actor with ActorLogging { +class Registrar(val start: Int, + val number: Int, + activationsPromise: Promise[List[ActorRef]], + deActivationsPromise: Promise[List[ActorRef]]) + extends Actor + with ActorLogging { private var actorRefs = Set[ActorRef]() private var activations = Set[Future[ActorRef]]() private var deActivations = Set[Future[ActorRef]]() @@ -128,18 +135,18 @@ class Registrar(val start: Int, val number: Int, activationsPromise: Promise[Lis add(new TestProducer(endpoint), "concurrent-test-producer-" + start + "-" + i) index = index + 1 if (activations.size == number * 2) { - Future.sequence(activations.toList) map activationsPromise.success + Future.sequence(activations.toList).map(activationsPromise.success) } case reg: DeRegisterConsumersAndProducers => actorRefs.foreach { aref => context.stop(aref) val result = camel.deactivationFutureFor(aref) - result.failed.foreach { - e => log.error("deactivationFutureFor {} failed: {}", aref, e.getMessage) + result.failed.foreach { e => + log.error("deactivationFutureFor {} failed: {}", aref, e.getMessage) } deActivations += result if (deActivations.size == number * 2) { - Future.sequence(deActivations.toList) map deActivationsPromise.success + Future.sequence(deActivations.toList).map(deActivationsPromise.success) } } } @@ -148,8 +155,8 @@ class Registrar(val start: Int, val number: Int, activationsPromise: Promise[Lis val ref = context.actorOf(Props(actor), name) actorRefs = actorRefs + ref val result = camel.activationFutureFor(ref) - result.failed.foreach { - e => log.error("activationFutureFor {} failed: {}", ref, e.getMessage) + result.failed.foreach { e => + log.error("activationFutureFor {} failed: {}", ref, e.getMessage) } activations += result } @@ -168,7 +175,8 @@ class EchoConsumer(endpoint: String) 
extends Actor with Consumer { * By default it returns an identity function, override this method to * return a custom route definition handler. */ - override def onRouteDefinition = (rd: RouteDefinition) => rd.onException(classOf[Exception]).handled(true).transform(Builder.exceptionMessage).end + override def onRouteDefinition = + (rd: RouteDefinition) => rd.onException(classOf[Exception]).handled(true).transform(Builder.exceptionMessage).end } class TestProducer(uri: String) extends Actor with Producer { diff --git a/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala b/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala index d310a59eb4..5bde3e862c 100644 --- a/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala @@ -12,11 +12,11 @@ import org.scalatest.WordSpec import akka.camel.TestSupport._ import org.apache.camel.model.{ RouteDefinition } import org.apache.camel.builder.Builder -import org.apache.camel.{ FailedToCreateRouteException, CamelExecutionException } +import org.apache.camel.{ CamelExecutionException, FailedToCreateRouteException } import java.util.concurrent.{ ExecutionException, TimeUnit, TimeoutException } import akka.actor.Status.Failure import scala.concurrent.duration._ -import scala.concurrent.{ ExecutionContext, Await } +import scala.concurrent.{ Await, ExecutionContext } import akka.testkit._ import akka.util.Timeout @@ -138,7 +138,9 @@ class ConsumerIntegrationTest extends WordSpec with Matchers with NonSharedCamel def endpointUri = "direct:manual-ack" def receive = { case _ => sender() ! 
Ack } }, name = "direct-manual-ack-1") - camel.template.asyncSendBody("direct:manual-ack", "some message").get(defaultTimeoutDuration.toSeconds, TimeUnit.SECONDS) should ===(null) //should not timeout + camel.template + .asyncSendBody("direct:manual-ack", "some message") + .get(defaultTimeoutDuration.toSeconds, TimeUnit.SECONDS) should ===(null) //should not timeout stop(ref) } @@ -150,7 +152,9 @@ class ConsumerIntegrationTest extends WordSpec with Matchers with NonSharedCamel }, name = "direct-manual-ack-2") intercept[ExecutionException] { - camel.template.asyncSendBody("direct:manual-ack", "some message").get(defaultTimeoutDuration.toSeconds, TimeUnit.SECONDS) + camel.template + .asyncSendBody("direct:manual-ack", "some message") + .get(defaultTimeoutDuration.toSeconds, TimeUnit.SECONDS) }.getCause.getCause should ===(someException) stop(ref) } @@ -163,7 +167,9 @@ class ConsumerIntegrationTest extends WordSpec with Matchers with NonSharedCamel }, name = "direct-manual-ack-3") intercept[ExecutionException] { - camel.template.asyncSendBody("direct:manual-ack", "some message").get(defaultTimeoutDuration.toSeconds, TimeUnit.SECONDS) + camel.template + .asyncSendBody("direct:manual-ack", "some message") + .get(defaultTimeoutDuration.toSeconds, TimeUnit.SECONDS) }.getCause.getCause.getMessage should include("Failed to get Ack") stop(ref) } @@ -180,7 +186,7 @@ class ConsumerIntegrationTest extends WordSpec with Matchers with NonSharedCamel class ErrorThrowingConsumer(override val endpointUri: String) extends Consumer { def receive = { - case msg: CamelMessage => throw new TestException("error: %s" format msg.body) + case msg: CamelMessage => throw new TestException("error: %s".format(msg.body)) } override def preRestart(reason: Throwable, message: Option[Any]): Unit = { super.preRestart(reason, message) @@ -208,9 +214,9 @@ class FailingOnceConsumer(override val endpointUri: String) extends Consumer { def receive = { case msg: CamelMessage => if 
(msg.headerAs[Boolean]("CamelRedelivered").getOrElse(false)) - sender() ! ("accepted: %s" format msg.body) + sender() ! ("accepted: %s".format(msg.body)) else - throw new TestException("rejected: %s" format msg.body) + throw new TestException("rejected: %s".format(msg.body)) } final override def preRestart(reason: Throwable, message: Option[Any]): Unit = { diff --git a/akka-camel/src/test/scala/akka/camel/DefaultCamelTest.scala b/akka-camel/src/test/scala/akka/camel/DefaultCamelTest.scala index c330d33e3d..00f88a0216 100644 --- a/akka-camel/src/test/scala/akka/camel/DefaultCamelTest.scala +++ b/akka-camel/src/test/scala/akka/camel/DefaultCamelTest.scala @@ -18,12 +18,12 @@ import akka.actor.ExtendedActorSystem class DefaultCamelTest extends WordSpec with SharedCamelSystem with Matchers with MockitoSugar { - import org.mockito.Mockito.{ when, verify } + import org.mockito.Mockito.{ verify, when } val sys = mock[ExtendedActorSystem] val config = ConfigFactory.defaultReference() - when(sys.dynamicAccess) thenReturn system.asInstanceOf[ExtendedActorSystem].dynamicAccess - when(sys.settings) thenReturn (new Settings(this.getClass.getClassLoader, config, "mocksystem")) - when(sys.name) thenReturn ("mocksystem") + when(sys.dynamicAccess).thenReturn(system.asInstanceOf[ExtendedActorSystem].dynamicAccess) + when(sys.settings).thenReturn(new Settings(this.getClass.getClassLoader, config, "mocksystem")) + when(sys.name).thenReturn("mocksystem") def camelWithMocks = new DefaultCamel(sys) { override val log = mock[MarkerLoggingAdapter] @@ -35,8 +35,8 @@ class DefaultCamelTest extends WordSpec with SharedCamelSystem with Matchers wit "during shutdown, when both context and template fail to shutdown" when { val camel = camelWithMocks - when(camel.context.stop()) thenThrow new RuntimeException("context") - when(camel.template.stop()) thenThrow new RuntimeException("template") + when(camel.context.stop()).thenThrow(new RuntimeException("context")) + 
when(camel.template.stop()).thenThrow(new RuntimeException("template")) val exception = intercept[RuntimeException] { camel.shutdown() } @@ -55,7 +55,7 @@ class DefaultCamelTest extends WordSpec with SharedCamelSystem with Matchers wit "during start, if template fails to start, it will stop the context" in { val camel = camelWithMocks - when(camel.template.start()) thenThrow new RuntimeException + when(camel.template.start()).thenThrow(new RuntimeException) intercept[RuntimeException] { camel.start diff --git a/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala b/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala index f39e03f81c..42a17106fa 100644 --- a/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala +++ b/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala @@ -34,7 +34,8 @@ class MessageScalaTest extends FunSuite with Matchers with SharedCamelSystem { } test("mustTransformBodyAndPreserveHeaders") { - CamelMessage("a", Map("A" -> "1")).mapBody((body: String) => body + "b") should ===(CamelMessage("ab", Map("A" -> "1"))) + CamelMessage("a", Map("A" -> "1")).mapBody((body: String) => body + "b") should ===( + CamelMessage("ab", Map("A" -> "1"))) } test("mustConvertBodyAndPreserveHeaders") { @@ -42,8 +43,7 @@ class MessageScalaTest extends FunSuite with Matchers with SharedCamelSystem { } test("mustSetBodyAndPreserveHeaders") { - CamelMessage("test1", Map("A" -> "1")).copy(body = "test2") should ===( - CamelMessage("test2", Map("A" -> "1"))) + CamelMessage("test1", Map("A" -> "1")).copy(body = "test2") should ===(CamelMessage("test2", Map("A" -> "1"))) } test("mustSetHeadersAndPreserveBody") { diff --git a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala index 3e4cb9f744..79e5252005 100644 --- a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala @@ -11,7 +11,7 @@ import 
org.apache.camel.builder.RouteBuilder import org.apache.camel.component.mock.MockEndpoint import scala.concurrent.Await import akka.actor.SupervisorStrategy.Stop -import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll, WordSpecLike } +import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach, WordSpecLike } import akka.actor._ import scala.concurrent.duration._ import akka.util.Timeout @@ -22,7 +22,12 @@ import akka.actor.Status.Failure /** * Tests the features of the Camel Producer. */ -class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", AkkaSpec.testConf)) with WordSpecLike with BeforeAndAfterAll with BeforeAndAfterEach with Matchers { +class ProducerFeatureTest + extends TestKit(ActorSystem("ProducerFeatureTest", AkkaSpec.testConf)) + with WordSpecLike + with BeforeAndAfterAll + with BeforeAndAfterEach + with Matchers { import ProducerFeatureTest._ implicit def camel = CamelExtension(system) @@ -44,7 +49,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "A Producer on a sync Camel route" must { "01 produce a message and receive normal response" in { - val producer = system.actorOf(Props(new TestProducer("direct:producer-test-2", true)), name = "01-direct-producer-2") + val producer = + system.actorOf(Props(new TestProducer("direct:producer-test-2", true)), name = "01-direct-producer-2") val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) producer.tell(message, testActor) expectMsg(CamelMessage("received TEST", Map(CamelMessage.MessageExchangeId -> "123"))) @@ -53,22 +59,24 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "02 produce a message and receive failure response" in { val latch = TestLatch() var deadActor: Option[ActorRef] = None - val supervisor = system.actorOf(Props(new Actor { - def receive = { - case p: Props => { - val producer = context.actorOf(p) - context.watch(producer) - sender() ! 
producer + val supervisor = system.actorOf( + Props(new Actor { + def receive = { + case p: Props => { + val producer = context.actorOf(p) + context.watch(producer) + sender() ! producer + } + case Terminated(actorRef) => { + deadActor = Some(actorRef) + latch.countDown() + } } - case Terminated(actorRef) => { - deadActor = Some(actorRef) - latch.countDown() + override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) { + case _: AkkaCamelException => Stop } - } - override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) { - case _: AkkaCamelException => Stop - } - }), name = "02-prod-anonymous-supervisor") + }), + name = "02-prod-anonymous-supervisor") supervisor.tell(Props(new TestProducer("direct:producer-test-2")), testActor) val producer = receiveOne(timeoutDuration).asInstanceOf[ActorRef] @@ -86,7 +94,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk } "03 produce a message oneway" in { - val producer = system.actorOf(Props(new TestProducer("direct:producer-test-1", true) with Oneway), name = "03-direct-producer-1-oneway") + val producer = system.actorOf(Props(new TestProducer("direct:producer-test-1", true) with Oneway), + name = "03-direct-producer-1-oneway") mockEndpoint.expectedBodiesReceived("TEST") producer ! CamelMessage("test", Map()) mockEndpoint.assertIsSatisfied() @@ -95,7 +104,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "04 produces message twoway without sender reference" in { // this test causes a dead letter which can be ignored. 
The producer is two-way but a oneway tell is used // to communicate with it and the response is ignored, which ends up in a dead letter - val producer = system.actorOf(Props(new TestProducer("direct:producer-test-1")), name = "04-ignore-this-deadletter-direct-producer-test-no-sender") + val producer = system.actorOf(Props(new TestProducer("direct:producer-test-1")), + name = "04-ignore-this-deadletter-direct-producer-test-no-sender") mockEndpoint.expectedBodiesReceived("test") producer ! CamelMessage("test", Map()) mockEndpoint.assertIsSatisfied() @@ -105,14 +115,16 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "A Producer on an async Camel route" must { "10 produce message to direct:producer-test-3 and receive normal response" in { - val producer = system.actorOf(Props(new TestProducer("direct:producer-test-3")), name = "10-direct-producer-test-3") + val producer = + system.actorOf(Props(new TestProducer("direct:producer-test-3")), name = "10-direct-producer-test-3") val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) producer.tell(message, testActor) expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123"))) } "11 produce message to direct:producer-test-3 and receive failure response" in { - val producer = system.actorOf(Props(new TestProducer("direct:producer-test-3")), name = "11-direct-producer-test-3-receive-failure") + val producer = system.actorOf(Props(new TestProducer("direct:producer-test-3")), + name = "11-direct-producer-test-3-receive-failure") val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { @@ -127,7 +139,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "12 produce message, forward normal response of direct:producer-test-2 to a replying target actor and receive response" in { val target = 
system.actorOf(Props[ReplyingForwardTarget], name = "12-reply-forwarding-target") - val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), name = "12-direct-producer-test-2-forwarder") + val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), + name = "12-direct-producer-test-2-forwarder") val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) producer.tell(message, testActor) expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123", "test" -> "result"))) @@ -135,7 +148,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "13 produce message, forward failure response of direct:producer-test-2 to a replying target actor and receive response" in { val target = system.actorOf(Props[ReplyingForwardTarget], name = "13-reply-forwarding-target") - val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), name = "13-direct-producer-test-2-forwarder-failure") + val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), + name = "13-direct-producer-test-2-forwarder-failure") val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { @@ -150,7 +164,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "14 produce message, forward normal response to a producing target actor and produce response to direct:forward-test-1" in { val target = system.actorOf(Props[ProducingForwardTarget], name = "14-producer-forwarding-target") - val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), name = "14-direct-producer-test-2-forwarder-to-producing-target") + val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), + name = "14-direct-producer-test-2-forwarder-to-producing-target") 
mockEndpoint.expectedBodiesReceived("received test") producer.tell(CamelMessage("test", Map()), producer) mockEndpoint.assertIsSatisfied() @@ -158,7 +173,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "15 produce message, forward failure response to a producing target actor and produce response to direct:forward-test-1" in { val target = system.actorOf(Props[ProducingForwardTarget], name = "15-producer-forwarding-target-failure") - val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), name = "15-direct-producer-test-2-forward-failure") + val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), + name = "15-direct-producer-test-2-forward-failure") filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { mockEndpoint.expectedMessageCount(1) mockEndpoint.message(0).body().isInstanceOf(classOf[akka.actor.Status.Failure]) @@ -169,7 +185,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "16 produce message, forward normal response from direct:producer-test-3 to a replying target actor and receive response" in { val target = system.actorOf(Props[ReplyingForwardTarget], name = "16-reply-forwarding-target") - val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), name = "16-direct-producer-test-3-to-replying-actor") + val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), + name = "16-direct-producer-test-3-to-replying-actor") val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) producer.tell(message, testActor) @@ -178,7 +195,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "17 produce message, forward failure response from direct:producer-test-3 to a replying target actor and receive response" in { val target = system.actorOf(Props[ReplyingForwardTarget], name = 
"17-reply-forwarding-target") - val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), name = "17-direct-producer-test-3-forward-failure") + val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), + name = "17-direct-producer-test-3-forward-failure") val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { @@ -193,7 +211,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "18 produce message, forward normal response from direct:producer-test-3 to a producing target actor and produce response to direct:forward-test-1" in { val target = system.actorOf(Props[ProducingForwardTarget], "18-producing-forward-target-normal") - val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), name = "18-direct-producer-test-3-forward-normal") + val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), + name = "18-direct-producer-test-3-forward-normal") mockEndpoint.expectedBodiesReceived("received test") producer.tell(CamelMessage("test", Map()), producer) mockEndpoint.assertIsSatisfied() @@ -201,7 +220,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "19 produce message, forward failure response from direct:producer-test-3 to a producing target actor and produce response to direct:forward-test-1" in { val target = system.actorOf(Props[ProducingForwardTarget], "19-producing-forward-target-failure") - val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), name = "19-direct-producer-test-3-forward-failure-producing-target") + val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), + name = "19-direct-producer-test-3-forward-failure-producing-target") filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { 
mockEndpoint.expectedMessageCount(1) mockEndpoint.message(0).body().isInstanceOf(classOf[akka.actor.Status.Failure]) @@ -212,7 +232,8 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "20 keep producing messages after error" in { import TestSupport._ - val consumer = start(new IntermittentErrorConsumer("direct:intermittentTest-1"), "20-intermittentTest-error-consumer") + val consumer = + start(new IntermittentErrorConsumer("direct:intermittentTest-1"), "20-intermittentTest-error-consumer") val producer = start(new SimpleProducer("direct:intermittentTest-1"), "20-intermittentTest-producer") filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { val futureFailed = producer.tell("fail", testActor) @@ -230,7 +251,9 @@ class ProducerFeatureTest extends TestKit(ActorSystem("ProducerFeatureTest", Akk "21 be able to transform outgoing messages and have a valid sender reference" in { import TestSupport._ filterEvents(EventFilter[Exception](occurrences = 1)) { - val producerSupervisor = system.actorOf(Props(new ProducerSupervisor(Props(new ChildProducer("mock:mock", true)))), "21-ignore-deadletter-sender-ref-test") + val producerSupervisor = + system.actorOf(Props(new ProducerSupervisor(Props(new ChildProducer("mock:mock", true)))), + "21-ignore-deadletter-sender-ref-test") mockEndpoint.reset() producerSupervisor.tell(CamelMessage("test", Map()), testActor) producerSupervisor.tell(CamelMessage("err", Map()), testActor) @@ -255,7 +278,7 @@ object ProducerFeatureTest { Await.ready(CamelExtension(context.system).activationFutureFor(child), timeout.duration) def receive = { case msg: CamelMessage => - child forward (msg) + child.forward(msg) case (aref: ActorRef, msg: String) => aref ! 
msg } @@ -269,14 +292,13 @@ object ProducerFeatureTest { def endpointUri = uri override def transformOutgoingMessage(msg: Any) = msg match { - case msg: CamelMessage => if (upper) msg.mapBody { - body: String => + case msg: CamelMessage => + if (upper) msg.mapBody { body: String => if (body == "err") throw new Exception("Crash!") val upperMsg = body.toUpperCase lastSender = Some(sender()) lastMessage = Some(upperMsg) - } - else msg + } else msg } override def postStop(): Unit = { @@ -294,10 +316,10 @@ object ProducerFeatureTest { } override protected def transformOutgoingMessage(msg: Any) = msg match { - case msg: CamelMessage => if (upper) msg.mapBody { - body: String => body.toUpperCase - } - else msg + case msg: CamelMessage => + if (upper) msg.mapBody { body: String => + body.toUpperCase + } else msg } } @@ -306,18 +328,20 @@ object ProducerFeatureTest { override def headersToCopy = Set(CamelMessage.MessageExchangeId, "test") - override def routeResponse(msg: Any): Unit = target forward msg + override def routeResponse(msg: Any): Unit = target.forward(msg) } class TestResponder extends Actor { def receive = { - case msg: CamelMessage => msg.body match { - case "fail" => context.sender() ! akka.actor.Status.Failure(new AkkaCamelException(new Exception("failure"), msg.headers)) - case _ => - context.sender() ! (msg.mapBody { - body: String => "received %s" format body - }) - } + case msg: CamelMessage => + msg.body match { + case "fail" => + context.sender() ! akka.actor.Status.Failure(new AkkaCamelException(new Exception("failure"), msg.headers)) + case _ => + context.sender() ! (msg.mapBody { body: String => + "received %s".format(body) + }) + } } } @@ -327,7 +351,8 @@ object ProducerFeatureTest { context.sender() ! (msg.copy(headers = msg.headers + ("test" -> "result"))) case msg: akka.actor.Status.Failure => msg.cause match { - case e: AkkaCamelException => context.sender() ! 
Status.Failure(new AkkaCamelException(e, e.headers + ("test" -> "failure"))) + case e: AkkaCamelException => + context.sender() ! Status.Failure(new AkkaCamelException(e, e.headers + ("test" -> "failure"))) } } } @@ -350,7 +375,7 @@ object ProducerFeatureTest { def process(exchange: Exchange) = { exchange.getIn.getBody match { case "fail" => throw new Exception("failure") - case body => exchange.getOut.setBody("received %s" format body) + case body => exchange.getOut.setBody("received %s".format(body)) } } }) diff --git a/akka-camel/src/test/scala/akka/camel/TestSupport.scala b/akka-camel/src/test/scala/akka/camel/TestSupport.scala index b1d00d0475..dc66c5692e 100644 --- a/akka-camel/src/test/scala/akka/camel/TestSupport.scala +++ b/akka-camel/src/test/scala/akka/camel/TestSupport.scala @@ -8,18 +8,20 @@ import language.postfixOps import language.implicitConversions import scala.concurrent.duration._ -import java.util.concurrent.{ TimeoutException, ExecutionException, TimeUnit } -import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll, Suite } -import org.scalatest.matchers.{ BePropertyMatcher, BePropertyMatchResult } +import java.util.concurrent.{ ExecutionException, TimeUnit, TimeoutException } +import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach, Suite } +import org.scalatest.matchers.{ BePropertyMatchResult, BePropertyMatcher } import scala.reflect.ClassTag -import akka.actor.{ ActorRef, Props, ActorSystem, Actor } +import akka.actor.{ Actor, ActorRef, ActorSystem, Props } import scala.concurrent.Await import akka.util.Timeout -import akka.testkit.{ TestKit, AkkaSpec } +import akka.testkit.{ AkkaSpec, TestKit } private[camel] object TestSupport { def start(actor: => Actor, name: String)(implicit system: ActorSystem, timeout: Timeout): ActorRef = - Await.result(CamelExtension(system).activationFutureFor(system.actorOf(Props(actor), name))(timeout, system.dispatcher), timeout.duration) + Await.result( + 
CamelExtension(system).activationFutureFor(system.actorOf(Props(actor), name))(timeout, system.dispatcher), + timeout.duration) def stop(actorRef: ActorRef)(implicit system: ActorSystem, timeout: Timeout): Unit = { system.stop(actorRef) @@ -29,6 +31,7 @@ private[camel] object TestSupport { private[camel] implicit def camelToTestWrapper(camel: Camel) = new CamelTestWrapper(camel) class CamelTestWrapper(camel: Camel) { + /** * Sends msg to the endpoint and returns response. * It only waits for the response until timeout passes. @@ -39,7 +42,9 @@ private[camel] object TestSupport { camel.template.asyncRequestBody(to, msg).get(timeout.toNanos, TimeUnit.NANOSECONDS) } catch { case e: ExecutionException => throw e.getCause - case e: TimeoutException => throw new AssertionError("Failed to get response to message [%s], send to endpoint [%s], within [%s]".format(msg, to, timeout)) + case e: TimeoutException => + throw new AssertionError( + "Failed to get response to message [%s], send to endpoint [%s], within [%s]".format(msg, to, timeout)) } } @@ -83,9 +88,8 @@ private[camel] object TestSupport { def anInstanceOf[T](implicit tag: ClassTag[T]) = { val clazz = tag.runtimeClass.asInstanceOf[Class[T]] new BePropertyMatcher[AnyRef] { - def apply(left: AnyRef) = BePropertyMatchResult( - clazz.isAssignableFrom(left.getClass), - "an instance of " + clazz.getName) + def apply(left: AnyRef) = + BePropertyMatchResult(clazz.isAssignableFrom(left.getClass), "an instance of " + clazz.getName) } } diff --git a/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala b/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala index 80daa17b53..1b89f5311a 100644 --- a/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala @@ -18,7 +18,13 @@ import scala.concurrent.duration._ import org.scalatest._ import akka.testkit._ -class UntypedProducerTest extends WordSpec with Matchers with BeforeAndAfterAll with 
BeforeAndAfterEach with SharedCamelSystem with GivenWhenThen { +class UntypedProducerTest + extends WordSpec + with Matchers + with BeforeAndAfterAll + with BeforeAndAfterEach + with SharedCamelSystem + with GivenWhenThen { import UntypedProducerTest._ val timeout = 1 second override protected def beforeAll = { @@ -46,7 +52,8 @@ class UntypedProducerTest extends WordSpec with Matchers with BeforeAndAfterAll } "produce a message and receive a failure response" in { - val producer = system.actorOf(Props[SampleUntypedReplyingProducer], name = "sample-untyped-replying-producer-failure") + val producer = + system.actorOf(Props[SampleUntypedReplyingProducer], name = "sample-untyped-replying-producer-failure") val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { @@ -85,7 +92,7 @@ object UntypedProducerTest { def process(exchange: Exchange) = { exchange.getIn.getBody match { case "fail" => throw new Exception("failure") - case body => exchange.getOut.setBody("received %s" format body) + case body => exchange.getOut.setBody("received %s".format(body)) } } }) diff --git a/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala b/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala index 3271d31102..84b13f3f67 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala @@ -6,12 +6,18 @@ package akka.camel.internal import org.scalatest.Matchers import scala.concurrent.duration._ -import org.scalatest.{ GivenWhenThen, BeforeAndAfterEach, BeforeAndAfterAll, WordSpecLike } -import akka.actor.{ Props, ActorSystem } -import akka.testkit.{ TimingTest, TestProbe, TestKit } +import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach, GivenWhenThen, WordSpecLike } +import akka.actor.{ ActorSystem, Props } +import akka.testkit.{ TestKit, TestProbe, 
TimingTest } import akka.camel.internal.ActivationProtocol._ -class ActivationTrackerTest extends TestKit(ActorSystem("ActivationTrackerTest")) with WordSpecLike with Matchers with BeforeAndAfterAll with BeforeAndAfterEach with GivenWhenThen { +class ActivationTrackerTest + extends TestKit(ActorSystem("ActivationTrackerTest")) + with WordSpecLike + with Matchers + with BeforeAndAfterAll + with BeforeAndAfterEach + with GivenWhenThen { override protected def afterAll(): Unit = { shutdown() } @@ -125,13 +131,20 @@ class ActivationTrackerTest extends TestKit(ActorSystem("ActivationTrackerTest") val probe = TestProbe() def awaitActivation() = at.tell(AwaitActivation(actor.ref), probe.ref) def awaitDeActivation() = at.tell(AwaitDeActivation(actor.ref), probe.ref) - def verifyActivated()(implicit timeout: FiniteDuration) = within(timeout) { probe.expectMsg(EndpointActivated(actor.ref)) } - def verifyDeActivated()(implicit timeout: FiniteDuration) = within(timeout) { probe.expectMsg(EndpointDeActivated(actor.ref)) } + def verifyActivated()(implicit timeout: FiniteDuration) = within(timeout) { + probe.expectMsg(EndpointActivated(actor.ref)) + } + def verifyDeActivated()(implicit timeout: FiniteDuration) = within(timeout) { + probe.expectMsg(EndpointDeActivated(actor.ref)) + } - def verifyFailedToActivate()(implicit timeout: FiniteDuration) = within(timeout) { probe.expectMsg(EndpointFailedToActivate(actor.ref, cause)) } - def verifyFailedToDeActivate()(implicit timeout: FiniteDuration) = within(timeout) { probe.expectMsg(EndpointFailedToDeActivate(actor.ref, cause)) } + def verifyFailedToActivate()(implicit timeout: FiniteDuration) = within(timeout) { + probe.expectMsg(EndpointFailedToActivate(actor.ref, cause)) + } + def verifyFailedToDeActivate()(implicit timeout: FiniteDuration) = within(timeout) { + probe.expectMsg(EndpointFailedToDeActivate(actor.ref, cause)) + } } } - diff --git 
a/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala index e3c5cf0e55..705186cefe 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala @@ -17,13 +17,14 @@ class ActorComponentConfigurationTest extends WordSpec with Matchers with Shared val component: Component = camel.context.getComponent("akka") "Endpoint url config should be correctly parsed" in { - val actorEndpointConfig = component.createEndpoint(s"akka://test/user/$$a?autoAck=false&replyTimeout=987000000+nanos").asInstanceOf[ActorEndpointConfig] + val actorEndpointConfig = component + .createEndpoint(s"akka://test/user/$$a?autoAck=false&replyTimeout=987000000+nanos") + .asInstanceOf[ActorEndpointConfig] - actorEndpointConfig should have( - 'endpointUri(s"akka://test/user/$$a?autoAck=false&replyTimeout=987000000+nanos"), - 'path(ActorEndpointPath.fromCamelPath(s"akka://test/user/$$a")), - 'autoAck(false), - 'replyTimeout(987000000 nanos)) + actorEndpointConfig should have('endpointUri (s"akka://test/user/$$a?autoAck=false&replyTimeout=987000000+nanos"), + 'path (ActorEndpointPath.fromCamelPath(s"akka://test/user/$$a")), + 'autoAck (false), + 'replyTimeout (987000000 nanos)) } } diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala index 124f156e91..3921e8459e 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala @@ -32,7 +32,11 @@ import akka.util.Timeout import akka.actor._ import akka.testkit._ -class ActorProducerTest extends TestKit(ActorSystem("ActorProducerTest")) with 
WordSpecLike with Matchers with ActorProducerFixture { +class ActorProducerTest + extends TestKit(ActorSystem("ActorProducerTest")) + with WordSpecLike + with Matchers + with ActorProducerFixture { implicit val timeout = Timeout(10 seconds) "ActorProducer" when { @@ -327,7 +331,8 @@ class ActorProducerTest extends TestKit(ActorSystem("ActorProducerTest")) with W } } -private[camel] trait ActorProducerFixture extends MockitoSugar with BeforeAndAfterAll with BeforeAndAfterEach { self: TestKit with Matchers with Suite => +private[camel] trait ActorProducerFixture extends MockitoSugar with BeforeAndAfterAll with BeforeAndAfterEach { + self: TestKit with Matchers with Suite => var camel: Camel = _ var exchange: CamelExchangeAdapter = _ var callback: AsyncCallback = _ @@ -346,17 +351,16 @@ private[camel] trait ActorProducerFixture extends MockitoSugar with BeforeAndAft val sys = mock[ExtendedActorSystem] val config = ConfigFactory.defaultReference() - when(sys.dispatcher) thenReturn system.dispatcher - when(sys.dynamicAccess) thenReturn system.asInstanceOf[ExtendedActorSystem].dynamicAccess - when(sys.settings) thenReturn (new Settings(this.getClass.getClassLoader, config, "mocksystem")) - when(sys.name) thenReturn ("mocksystem") + when(sys.dispatcher).thenReturn(system.dispatcher) + when(sys.dynamicAccess).thenReturn(system.asInstanceOf[ExtendedActorSystem].dynamicAccess) + when(sys.settings).thenReturn(new Settings(this.getClass.getClassLoader, config, "mocksystem")) + when(sys.name).thenReturn("mocksystem") def camelWithMocks = new DefaultCamel(sys) { override val log = mock[MarkerLoggingAdapter] override lazy val template = mock[ProducerTemplate] override lazy val context = mock[DefaultCamelContext] - override val settings = new CamelSettings(ConfigFactory.parseString( - """ + override val settings = new CamelSettings(ConfigFactory.parseString(""" akka { camel { jmx = off @@ -368,7 +372,8 @@ private[camel] trait ActorProducerFixture extends MockitoSugar with 
BeforeAndAft } } } - """).withFallback(config), sys.dynamicAccess) + """).withFallback(config), + sys.dynamicAccess) } camel = camelWithMocks @@ -386,7 +391,10 @@ private[camel] trait ActorProducerFixture extends MockitoSugar with BeforeAndAft def msg(s: String) = CamelMessage(s, Map.empty) - def given(actor: ActorRef = probe.ref, outCapable: Boolean = true, autoAck: Boolean = true, replyTimeout: FiniteDuration = 20 seconds) = { + def given(actor: ActorRef = probe.ref, + outCapable: Boolean = true, + autoAck: Boolean = true, + replyTimeout: FiniteDuration = 20 seconds) = { prepareMocks(actor, outCapable = outCapable) new ActorProducer(configure(isAutoAck = autoAck, _replyTimeout = replyTimeout), camel) } @@ -402,7 +410,7 @@ private[camel] trait ActorProducerFixture extends MockitoSugar with BeforeAndAft val callbackValue = new AtomicBoolean() def done(doneSync: Boolean): Unit = { - callbackValue set doneSync + callbackValue.set(doneSync) callbackReceived.countDown() } @@ -410,12 +418,16 @@ private[camel] trait ActorProducerFixture extends MockitoSugar with BeforeAndAft if (!callbackReceived.await(timeout.length, timeout.unit)) fail("Callback not received!") else callbackValue.get - def expectDoneSyncWithin(implicit timeout: FiniteDuration): Unit = if (!valueWithin(timeout)) fail("Expected to be done Synchronously") - def expectDoneAsyncWithin(implicit timeout: FiniteDuration): Unit = if (valueWithin(timeout)) fail("Expected to be done Asynchronously") + def expectDoneSyncWithin(implicit timeout: FiniteDuration): Unit = + if (!valueWithin(timeout)) fail("Expected to be done Synchronously") + def expectDoneAsyncWithin(implicit timeout: FiniteDuration): Unit = + if (valueWithin(timeout)) fail("Expected to be done Asynchronously") } - def configure(endpointUri: String = "test-uri", isAutoAck: Boolean = true, _replyTimeout: FiniteDuration = 20 seconds) = { + def configure(endpointUri: String = "test-uri", + isAutoAck: Boolean = true, + _replyTimeout: FiniteDuration = 20 
seconds) = { val endpoint = new ActorEndpoint(endpointUri, actorComponent, actorEndpointPath, camel) endpoint.autoAck = isAutoAck endpoint.replyTimeout = _replyTimeout @@ -423,13 +435,14 @@ private[camel] trait ActorProducerFixture extends MockitoSugar with BeforeAndAft } def prepareMocks(actor: ActorRef, message: CamelMessage = message, outCapable: Boolean): Unit = { - when(actorEndpointPath.findActorIn(any[ActorSystem])) thenReturn Option(actor) - when(exchange.toRequestMessage(any[Map[String, Any]])) thenReturn message - when(exchange.isOutCapable) thenReturn outCapable + when(actorEndpointPath.findActorIn(any[ActorSystem])).thenReturn(Option(actor)) + when(exchange.toRequestMessage(any[Map[String, Any]])).thenReturn(message) + when(exchange.isOutCapable).thenReturn(outCapable) } - def echoActor = system.actorOf(Props(new Actor { - def receive = { case msg => sender() ! "received " + msg } - }), name = "echoActor") + def echoActor = + system.actorOf(Props(new Actor { + def receive = { case msg => sender() ! 
"received " + msg } + }), name = "echoActor") } diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala index df097297c0..6ba051d425 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala @@ -39,4 +39,3 @@ class DurationConverterSpec extends WordSpec with Matchers { } } - diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala index c40faf079d..de92b5233c 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsCollector.scala @@ -26,6 +26,7 @@ sealed abstract class CollectionControlMessage extends Serializable */ @SerialVersionUID(1L) case object CollectionStartMessage extends CollectionControlMessage { + /** Java API */ def getInstance = CollectionStartMessage } @@ -35,6 +36,7 @@ case object CollectionStartMessage extends CollectionControlMessage { */ @SerialVersionUID(1L) case object CollectionStopMessage extends CollectionControlMessage { + /** Java API */ def getInstance = CollectionStopMessage } @@ -59,7 +61,8 @@ private[metrics] class ClusterMetricsSupervisor extends Actor with ActorLogging if (CollectorEnabled) { self ! CollectionStartMessage } else { - log.warning(s"Metrics collection is disabled in configuration. Use subtypes of ${classOf[CollectionControlMessage].getName} to manage collection at runtime.") + log.warning( + s"Metrics collection is disabled in configuration. 
Use subtypes of ${classOf[CollectionControlMessage].getName} to manage collection at runtime.") } } @@ -87,6 +90,7 @@ trait ClusterMetricsEvent * Current snapshot of cluster node metrics. */ final case class ClusterMetricsChanged(nodeMetrics: Set[NodeMetrics]) extends ClusterMetricsEvent { + /** Java API */ def getNodeMetrics: java.lang.Iterable[NodeMetrics] = scala.collection.JavaConverters.asJavaIterableConverter(nodeMetrics).asJava @@ -107,8 +111,9 @@ private[metrics] trait ClusterMetricsMessage extends Serializable * Envelope adding a sender address to the cluster metrics gossip. */ @SerialVersionUID(1L) -private[metrics] final case class MetricsGossipEnvelope(from: Address, gossip: MetricsGossip, reply: Boolean) extends ClusterMetricsMessage - with DeadLetterSuppression +private[metrics] final case class MetricsGossipEnvelope(from: Address, gossip: MetricsGossip, reply: Boolean) + extends ClusterMetricsMessage + with DeadLetterSuppression /** * INTERNAL API. @@ -129,7 +134,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { import Member.addressOrdering import context.dispatcher val cluster = Cluster(context.system) - import cluster.{ selfAddress, scheduler } + import cluster.{ scheduler, selfAddress } import cluster.ClusterLogger._ val metrics = ClusterMetricsExtension(context.system) import metrics.settings._ @@ -152,16 +157,16 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { /** * Start periodic gossip to random nodes in cluster */ - val gossipTask = scheduler.schedule( - PeriodicTasksInitialDelay max CollectorGossipInterval, - CollectorGossipInterval, self, GossipTick) + val gossipTask = + scheduler.schedule(PeriodicTasksInitialDelay max CollectorGossipInterval, CollectorGossipInterval, self, GossipTick) /** * Start periodic metrics collection */ - val sampleTask = scheduler.schedule( - PeriodicTasksInitialDelay max CollectorSampleInterval, - CollectorSampleInterval, self, MetricsTick) + 
val sampleTask = scheduler.schedule(PeriodicTasksInitialDelay max CollectorSampleInterval, + CollectorSampleInterval, + self, + MetricsTick) override def preStart(): Unit = { cluster.subscribe(self, classOf[MemberEvent], classOf[ReachabilityEvent]) @@ -186,7 +191,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { } override def postStop: Unit = { - cluster unsubscribe self + cluster.unsubscribe(self) gossipTask.cancel() sampleTask.cancel() collector.close() @@ -202,7 +207,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { */ def removeMember(member: Member): Unit = { nodes -= member.address - latestGossip = latestGossip remove member.address + latestGossip = latestGossip.remove(member.address) publish() } @@ -210,7 +215,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus]] `Up`. */ def receiveState(state: CurrentClusterState): Unit = - nodes = (state.members diff state.unreachable) collect { + nodes = (state.members.diff(state.unreachable)).collect { case m if m.status == MemberStatus.Up || m.status == MemberStatus.WeaklyUp => m.address } @@ -233,7 +238,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { // remote node might not have same view of member nodes, this side should only care // about nodes that are known here, otherwise removed nodes can come back val otherGossip = envelope.gossip.filter(nodes) - latestGossip = latestGossip merge otherGossip + latestGossip = latestGossip.merge(otherGossip) // changes will be published in the period collect task if (!envelope.reply) replyGossipTo(envelope.from) @@ -242,7 +247,7 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { /** * Gossip to peer nodes. 
*/ - def gossip(): Unit = selectRandomNode((nodes - selfAddress).toVector) foreach gossipTo + def gossip(): Unit = selectRandomNode((nodes - selfAddress).toVector).foreach(gossipTo) def gossipTo(address: Address): Unit = sendGossip(address, MetricsGossipEnvelope(selfAddress, latestGossip, reply = false)) @@ -254,11 +259,11 @@ private[metrics] class ClusterMetricsCollector extends Actor with ActorLogging { context.actorSelection(self.path.toStringWithAddress(address)) ! envelope def selectRandomNode(addresses: immutable.IndexedSeq[Address]): Option[Address] = - if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current nextInt addresses.size)) + if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current.nextInt(addresses.size))) /** * Publishes to the event stream. */ - def publish(): Unit = context.system.eventStream publish ClusterMetricsChanged(latestGossip.nodes) + def publish(): Unit = context.system.eventStream.publish(ClusterMetricsChanged(latestGossip.nodes)) } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala index acd4a71170..6c3ff2c791 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsExtension.scala @@ -44,11 +44,13 @@ class ClusterMetricsExtension(system: ExtendedActorSystem) extends Extension { * * Supervision strategy. 
*/ - private[metrics] val strategy = system.dynamicAccess.createInstanceFor[SupervisorStrategy]( - SupervisorStrategyProvider, immutable.Seq(classOf[Config] -> SupervisorStrategyConfiguration)) + private[metrics] val strategy = system.dynamicAccess + .createInstanceFor[SupervisorStrategy](SupervisorStrategyProvider, + immutable.Seq(classOf[Config] -> SupervisorStrategyConfiguration)) .getOrElse { val log: LoggingAdapter = Logging(system, getClass.getName) - log.error(s"Configured strategy provider ${SupervisorStrategyProvider} failed to load, using default ${classOf[ClusterMetricsStrategy].getName}.") + log.error(s"Configured strategy provider ${SupervisorStrategyProvider} failed to load, using default ${classOf[ + ClusterMetricsStrategy].getName}.") new ClusterMetricsStrategy(SupervisorStrategyConfiguration) } @@ -84,5 +86,6 @@ class ClusterMetricsExtension(system: ExtendedActorSystem) extends Extension { object ClusterMetricsExtension extends ExtensionId[ClusterMetricsExtension] with ExtensionIdProvider { override def lookup = ClusterMetricsExtension override def get(system: ActorSystem): ClusterMetricsExtension = super.get(system) - override def createExtension(system: ExtendedActorSystem): ClusterMetricsExtension = new ClusterMetricsExtension(system) + override def createExtension(system: ExtendedActorSystem): ClusterMetricsExtension = + new ClusterMetricsExtension(system) } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala index 89ec3edcfa..7a793739fa 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsRouting.scala @@ -34,8 +34,10 @@ import akka.cluster.routing.ClusterRouterSettingsBase * @param metricsSelector decides what probability to use for selecting a routee, based * on remaining capacity as 
indicated by the node metrics */ -final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, metricsSelector: MetricsSelector = MixMetricsSelector) - extends RoutingLogic with NoSerializationVerificationNeeded { +final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, + metricsSelector: MetricsSelector = MixMetricsSelector) + extends RoutingLogic + with NoSerializationVerificationNeeded { private val cluster = Cluster(system) @@ -49,8 +51,8 @@ final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, metricsS @tailrec final def metricsChanged(event: ClusterMetricsChanged): Unit = { val oldValue = weightedRouteesRef.get val (routees, _, _) = oldValue - val weightedRoutees = Some(new WeightedRoutees(routees, cluster.selfAddress, - metricsSelector.weights(event.nodeMetrics))) + val weightedRoutees = Some( + new WeightedRoutees(routees, cluster.selfAddress, metricsSelector.weights(event.nodeMetrics))) // retry when CAS failure if (!weightedRouteesRef.compareAndSet(oldValue, (routees, event.nodeMetrics, weightedRoutees))) metricsChanged(event) @@ -65,8 +67,8 @@ final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, metricsS val (oldRoutees, oldMetrics, oldWeightedRoutees) = oldValue if (routees ne oldRoutees) { - val weightedRoutees = Some(new WeightedRoutees(routees, cluster.selfAddress, - metricsSelector.weights(oldMetrics))) + val weightedRoutees = Some( + new WeightedRoutees(routees, cluster.selfAddress, metricsSelector.weights(oldMetrics))) // ignore, don't update, in case of CAS failure weightedRouteesRef.compareAndSet(oldValue, (routees, oldMetrics, weightedRoutees)) weightedRoutees @@ -120,19 +122,18 @@ final case class AdaptiveLoadBalancingRoutingLogic(system: ActorSystem, metricsS * supervision, death watch and router management messages */ @SerialVersionUID(1L) -final case class AdaptiveLoadBalancingPool( - metricsSelector: MetricsSelector = MixMetricsSelector, - val nrOfInstances: Int = 0, - 
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, - override val usePoolDispatcher: Boolean = false) - extends Pool { +final case class AdaptiveLoadBalancingPool(metricsSelector: MetricsSelector = MixMetricsSelector, + val nrOfInstances: Int = 0, + override val supervisorStrategy: SupervisorStrategy = + Pool.defaultSupervisorStrategy, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + override val usePoolDispatcher: Boolean = false) + extends Pool { def this(config: Config, dynamicAccess: DynamicAccess) = - this( - nrOfInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config), - metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess), - usePoolDispatcher = config.hasPath("pool-dispatcher")) + this(nrOfInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config), + metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess), + usePoolDispatcher = config.hasPath("pool-dispatcher")) /** * Java API @@ -150,14 +151,15 @@ final case class AdaptiveLoadBalancingPool( new Router(AdaptiveLoadBalancingRoutingLogic(system, metricsSelector)) override def routingLogicController(routingLogic: RoutingLogic): Option[Props] = - Some(Props( - classOf[AdaptiveLoadBalancingMetricsListener], - routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic])) + Some( + Props(classOf[AdaptiveLoadBalancingMetricsListener], + routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic])) /** * Setting the supervisor strategy to be used for the “head” Router actor. 
*/ - def withSupervisorStrategy(strategy: SupervisorStrategy): AdaptiveLoadBalancingPool = copy(supervisorStrategy = strategy) + def withSupervisorStrategy(strategy: SupervisorStrategy): AdaptiveLoadBalancingPool = + copy(supervisorStrategy = strategy) /** * Setting the dispatcher to be used for the router head actor, which handles @@ -171,13 +173,14 @@ final case class AdaptiveLoadBalancingPool( */ override def withFallback(other: RouterConfig): RouterConfig = if (this.supervisorStrategy ne Pool.defaultSupervisorStrategy) this - else other match { - case _: FromConfig | _: NoRouter => this // NoRouter is the default, hence “neutral” - case otherRouter: AdaptiveLoadBalancingPool => - if (otherRouter.supervisorStrategy eq Pool.defaultSupervisorStrategy) this - else this.withSupervisorStrategy(otherRouter.supervisorStrategy) - case _ => throw new IllegalArgumentException("Expected AdaptiveLoadBalancingPool, got [%s]".format(other)) - } + else + other match { + case _: FromConfig | _: NoRouter => this // NoRouter is the default, hence “neutral” + case otherRouter: AdaptiveLoadBalancingPool => + if (otherRouter.supervisorStrategy eq Pool.defaultSupervisorStrategy) this + else this.withSupervisorStrategy(otherRouter.supervisorStrategy) + case _ => throw new IllegalArgumentException("Expected AdaptiveLoadBalancingPool, got [%s]".format(other)) + } } @@ -202,16 +205,14 @@ final case class AdaptiveLoadBalancingPool( * router management messages */ @SerialVersionUID(1L) -final case class AdaptiveLoadBalancingGroup( - metricsSelector: MetricsSelector = MixMetricsSelector, - val paths: immutable.Iterable[String] = Nil, - override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) - extends Group { +final case class AdaptiveLoadBalancingGroup(metricsSelector: MetricsSelector = MixMetricsSelector, + val paths: immutable.Iterable[String] = Nil, + override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) + extends Group { def this(config: Config, 
dynamicAccess: DynamicAccess) = - this( - metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess), - paths = immutableSeq(config.getStringList("routees.paths"))) + this(metricsSelector = MetricsSelector.fromConfig(config, dynamicAccess), + paths = immutableSeq(config.getStringList("routees.paths"))) /** * Java API @@ -220,9 +221,8 @@ final case class AdaptiveLoadBalancingGroup( * @param routeesPaths string representation of the actor paths of the routees, messages are * sent with [[akka.actor.ActorSelection]] to these paths */ - def this( - metricsSelector: MetricsSelector, - routeesPaths: java.lang.Iterable[String]) = this(paths = immutableSeq(routeesPaths)) + def this(metricsSelector: MetricsSelector, routeesPaths: java.lang.Iterable[String]) = + this(paths = immutableSeq(routeesPaths)) override def paths(system: ActorSystem): immutable.Iterable[String] = this.paths @@ -230,9 +230,9 @@ final case class AdaptiveLoadBalancingGroup( new Router(AdaptiveLoadBalancingRoutingLogic(system, metricsSelector)) override def routingLogicController(routingLogic: RoutingLogic): Option[Props] = - Some(Props( - classOf[AdaptiveLoadBalancingMetricsListener], - routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic])) + Some( + Props(classOf[AdaptiveLoadBalancingMetricsListener], + routingLogic.asInstanceOf[AdaptiveLoadBalancingRoutingLogic])) /** * Setting the dispatcher to be used for the router head actor, which handles @@ -249,6 +249,7 @@ final case class AdaptiveLoadBalancingGroup( @SerialVersionUID(1L) case object HeapMetricsSelector extends CapacityMetricsSelector { import akka.cluster.metrics.StandardMetrics.HeapMemory + /** * Java API: get the singleton instance */ @@ -277,6 +278,7 @@ case object HeapMetricsSelector extends CapacityMetricsSelector { @SerialVersionUID(1L) case object CpuMetricsSelector extends CapacityMetricsSelector { import akka.cluster.metrics.StandardMetrics.Cpu + /** * Java API: get the singleton instance */ @@ -315,6 +317,7 @@ case 
object CpuMetricsSelector extends CapacityMetricsSelector { @SerialVersionUID(1L) case object SystemLoadAverageMetricsSelector extends CapacityMetricsSelector { import akka.cluster.metrics.StandardMetrics.Cpu + /** * Java API: get the singleton instance */ @@ -334,8 +337,8 @@ case object SystemLoadAverageMetricsSelector extends CapacityMetricsSelector { * [akka.cluster.routing.CpuMetricsSelector], and [akka.cluster.routing.SystemLoadAverageMetricsSelector] */ @SerialVersionUID(1L) -object MixMetricsSelector extends MixMetricsSelectorBase( - Vector(HeapMetricsSelector, CpuMetricsSelector, SystemLoadAverageMetricsSelector)) { +object MixMetricsSelector + extends MixMetricsSelectorBase(Vector(HeapMetricsSelector, CpuMetricsSelector, SystemLoadAverageMetricsSelector)) { /** * Java API: get the default singleton instance @@ -349,16 +352,15 @@ object MixMetricsSelector extends MixMetricsSelectorBase( * [akka.cluster.routing.CpuMetricsSelector], and [akka.cluster.routing.SystemLoadAverageMetricsSelector] */ @SerialVersionUID(1L) -final case class MixMetricsSelector( - selectors: immutable.IndexedSeq[CapacityMetricsSelector]) - extends MixMetricsSelectorBase(selectors) +final case class MixMetricsSelector(selectors: immutable.IndexedSeq[CapacityMetricsSelector]) + extends MixMetricsSelectorBase(selectors) /** * Base class for MetricsSelector that combines other selectors and aggregates their capacity. 
*/ @SerialVersionUID(1L) abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMetricsSelector]) - extends CapacityMetricsSelector { + extends CapacityMetricsSelector { /** * Java API: construct a mix-selector from a sequence of selectors @@ -369,13 +371,15 @@ abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMe val combined: immutable.IndexedSeq[(Address, Double)] = selectors.flatMap(_.capacity(nodeMetrics).toSeq) // aggregated average of the capacities by address val init: Map[Address, (Double, Int)] = Map.empty.withDefaultValue((0.0, 0)) - combined.foldLeft(init) { - case (acc, (address, capacity)) => - val (sum, count) = acc(address) - acc + (address -> ((sum + capacity, count + 1))) - }.map { - case (address, (sum, count)) => address -> (sum / count) - } + combined + .foldLeft(init) { + case (acc, (address, capacity)) => + val (sum, count) = acc(address) + acc + (address -> ((sum + capacity, count + 1))) + } + .map { + case (address, (sum, count)) => address -> (sum / count) + } } } @@ -389,12 +393,16 @@ object MetricsSelector { case "load" => SystemLoadAverageMetricsSelector case fqn => val args = List(classOf[Config] -> config) - dynamicAccess.createInstanceFor[MetricsSelector](fqn, args).recover({ - case exception => throw new IllegalArgumentException( - (s"Cannot instantiate metrics-selector [$fqn], " + - "make sure it extends [akka.cluster.routing.MetricsSelector] and " + - "has constructor with [com.typesafe.config.Config] parameter"), exception) - }).get + dynamicAccess + .createInstanceFor[MetricsSelector](fqn, args) + .recover({ + case exception => + throw new IllegalArgumentException((s"Cannot instantiate metrics-selector [$fqn], " + + "make sure it extends [akka.cluster.routing.MetricsSelector] and " + + "has constructor with [com.typesafe.config.Config] parameter"), + exception) + }) + .get } } @@ -403,6 +411,7 @@ object MetricsSelector { */ @SerialVersionUID(1L) trait MetricsSelector extends 
Serializable { + /** * The weights per address, based on the nodeMetrics. */ @@ -436,7 +445,7 @@ abstract class CapacityMetricsSelector extends MetricsSelector { val (_, min) = capacity.minBy { case (_, c) => c } // lowest usable capacity is 1% (>= 0.5% will be rounded to weight 1), also avoids div by zero val divisor = math.max(0.01, min) - capacity map { case (address, c) => (address -> math.round((c) / divisor).toInt) } + capacity.map { case (address, c) => (address -> math.round((c) / divisor).toInt) } } } @@ -454,7 +463,9 @@ abstract class CapacityMetricsSelector extends MetricsSelector { * * Pick routee based on its weight. Higher weight, higher probability. */ -private[metrics] class WeightedRoutees(routees: immutable.IndexedSeq[Routee], selfAddress: Address, weights: Map[Address, Int]) { +private[metrics] class WeightedRoutees(routees: immutable.IndexedSeq[Routee], + selfAddress: Address, + weights: Map[Address, Int]) { // fill an array of same size as the refs with accumulated weights, // binarySearch is used to pick the right bucket from a requested value @@ -475,7 +486,7 @@ private[metrics] class WeightedRoutees(routees: immutable.IndexedSeq[Routee], se val w = weights.withDefaultValue(meanWeight) // we don’t necessarily have metrics for all addresses var i = 0 var sum = 0 - routees foreach { r => + routees.foreach { r => sum += w(fullAddress(r)) buckets(i) = sum i += 1 @@ -494,7 +505,7 @@ private[metrics] class WeightedRoutees(routees: immutable.IndexedSeq[Routee], se * Pick the routee matching a value, from 1 to total. 
*/ def apply(value: Int): Routee = { - require(1 <= value && value <= total, "value must be between [1 - %s]" format total) + require(1 <= value && value <= total, "value must be between [1 - %s]".format(total)) routees(idx(Arrays.binarySearch(buckets, value))) } @@ -506,8 +517,8 @@ private[metrics] class WeightedRoutees(routees: immutable.IndexedSeq[Routee], se if (i >= 0) i // exact match else { val j = math.abs(i + 1) - if (j >= buckets.length) throw new IndexOutOfBoundsException( - "Requested index [%s] is > max index [%s]".format(i, buckets.length)) + if (j >= buckets.length) + throw new IndexOutOfBoundsException("Requested index [%s] is > max index [%s]".format(i, buckets.length)) else j } } @@ -518,7 +529,7 @@ private[metrics] class WeightedRoutees(routees: immutable.IndexedSeq[Routee], se * Subscribe to [[ClusterMetricsEvent]]s and update routing logic depending on the events. */ private[metrics] class AdaptiveLoadBalancingMetricsListener(routingLogic: AdaptiveLoadBalancingRoutingLogic) - extends Actor { + extends Actor { def extension = ClusterMetricsExtension(context.system) diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsSettings.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsSettings.scala index 332ca9bbda..ee3ac56719 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsSettings.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsSettings.scala @@ -37,12 +37,12 @@ case class ClusterMetricsSettings(config: Config) { val CollectorFallback: Boolean = cc.getBoolean("collector.fallback") val CollectorSampleInterval: FiniteDuration = { cc.getMillisDuration("collector.sample-interval") - } requiring (_ > Duration.Zero, "collector.sample-interval must be > 0") + }.requiring(_ > Duration.Zero, "collector.sample-interval must be > 0") val CollectorGossipInterval: FiniteDuration = { cc.getMillisDuration("collector.gossip-interval") - } 
requiring (_ > Duration.Zero, "collector.gossip-interval must be > 0") + }.requiring(_ > Duration.Zero, "collector.gossip-interval must be > 0") val CollectorMovingAverageHalfLife: FiniteDuration = { cc.getMillisDuration("collector.moving-average-half-life") - } requiring (_ > Duration.Zero, "collector.moving-average-half-life must be > 0") + }.requiring(_ > Duration.Zero, "collector.moving-average-half-life must be > 0") } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala index 7ce45c3cef..caace74020 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/ClusterMetricsStrategy.scala @@ -12,10 +12,11 @@ import akka.util.Helpers.ConfigOps * Default [[ClusterMetricsSupervisor]] strategy: * A configurable [[akka.actor.OneForOneStrategy]] with restart-on-throwable decider. */ -class ClusterMetricsStrategy(config: Config) extends OneForOneStrategy( - maxNrOfRetries = config.getInt("maxNrOfRetries"), - withinTimeRange = config.getMillisDuration("withinTimeRange"), - loggingEnabled = config.getBoolean("loggingEnabled"))(ClusterMetricsStrategy.metricsDecider) +class ClusterMetricsStrategy(config: Config) + extends OneForOneStrategy( + maxNrOfRetries = config.getInt("maxNrOfRetries"), + withinTimeRange = config.getMillisDuration("withinTimeRange"), + loggingEnabled = config.getBoolean("loggingEnabled"))(ClusterMetricsStrategy.metricsDecider) /** * Provide custom metrics strategy resources. 
diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala index adffed9b07..313ce794b3 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Metric.scala @@ -22,7 +22,7 @@ import scala.util.Try */ @SerialVersionUID(1L) final case class Metric private[metrics] (name: String, value: Number, average: Option[EWMA]) - extends MetricNumericConverter { + extends MetricNumericConverter { require(defined(value), s"Invalid Metric [$name] value [$value]") @@ -31,12 +31,11 @@ final case class Metric private[metrics] (name: String, value: Number, average: * Returns the updated metric. */ def :+(latest: Metric): Metric = - if (this sameAs latest) average match { + if (this.sameAs(latest)) average match { case Some(avg) => copy(value = latest.value, average = Some(avg :+ latest.value.doubleValue)) case None if latest.average.isDefined => copy(value = latest.value, average = latest.average) case _ => copy(value = latest.value) - } - else this + } else this /** * The numerical value of the average, if defined, otherwise the latest value @@ -112,8 +111,10 @@ object StandardMetrics { // In latest Linux kernels: CpuCombined + CpuStolen + CpuIdle = 1.0 or 100%. /** Sum of User + Sys + Nice + Wait. See `org.hyperic.sigar.CpuPerc` */ final val CpuCombined = "cpu-combined" + /** The amount of CPU 'stolen' from this virtual machine by the hypervisor for other tasks (such as running another virtual machine). */ final val CpuStolen = "cpu-stolen" + /** Amount of CPU time left after combined and stolen are removed. 
*/ final val CpuIdle = "cpu-idle" @@ -128,9 +129,12 @@ object StandardMetrics { for { used <- nodeMetrics.metric(HeapMemoryUsed) committed <- nodeMetrics.metric(HeapMemoryCommitted) - } yield (nodeMetrics.address, nodeMetrics.timestamp, - used.smoothValue.longValue, committed.smoothValue.longValue, - nodeMetrics.metric(HeapMemoryMax).map(_.smoothValue.longValue)) + } yield + (nodeMetrics.address, + nodeMetrics.timestamp, + used.smoothValue.longValue, + committed.smoothValue.longValue, + nodeMetrics.metric(HeapMemoryMax).map(_.smoothValue.longValue)) } } @@ -172,14 +176,17 @@ object StandardMetrics { * necessary cpu metrics. * @return if possible a tuple matching the Cpu constructor parameters */ - def unapply(nodeMetrics: NodeMetrics): Option[(Address, Long, Option[Double], Option[Double], Option[Double], Int)] = { + def unapply( + nodeMetrics: NodeMetrics): Option[(Address, Long, Option[Double], Option[Double], Option[Double], Int)] = { for { processors <- nodeMetrics.metric(Processors) - } yield (nodeMetrics.address, nodeMetrics.timestamp, - nodeMetrics.metric(SystemLoadAverage).map(_.smoothValue), - nodeMetrics.metric(CpuCombined).map(_.smoothValue), - nodeMetrics.metric(CpuStolen).map(_.smoothValue), - processors.value.intValue) + } yield + (nodeMetrics.address, + nodeMetrics.timestamp, + nodeMetrics.metric(SystemLoadAverage).map(_.smoothValue), + nodeMetrics.metric(CpuCombined).map(_.smoothValue), + nodeMetrics.metric(CpuStolen).map(_.smoothValue), + processors.value.intValue) } } @@ -207,13 +214,12 @@ object StandardMetrics { * @param processors the number of available processors */ @SerialVersionUID(1L) - final case class Cpu( - address: Address, - timestamp: Long, - systemLoadAverage: Option[Double], - cpuCombined: Option[Double], - cpuStolen: Option[Double], - processors: Int) { + final case class Cpu(address: Address, + timestamp: Long, + systemLoadAverage: Option[Double], + cpuCombined: Option[Double], + cpuStolen: Option[Double], + processors: Int) { 
cpuCombined match { case Some(x) => require(0.0 <= x && x <= 1.0, s"cpuCombined must be between [0.0 - 1.0], was [$x]") @@ -283,7 +289,7 @@ final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Met if (timestamp >= that.timestamp) this // that is older else { // equality is based on the name of the Metric - copy(metrics = that.metrics union (metrics diff that.metrics), timestamp = that.timestamp) + copy(metrics = that.metrics.union(metrics.diff(that.metrics)), timestamp = that.timestamp) } } @@ -298,13 +304,14 @@ final case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Met val updated = for { latest <- latestNode.metrics current <- currentNode.metrics - if (latest sameAs current) + if latest.sameAs(current) } yield { current :+ latest } // Append metrics missing from either latest or current. // Equality is based on the [[Metric.name]] - val merged = updated union (latestNode.metrics diff updated) union (currentNode.metrics diff updated diff latestNode.metrics) + val merged = + updated.union(latestNode.metrics.diff(updated)).union(currentNode.metrics.diff(updated).diff(latestNode.metrics)) copy(metrics = merged, timestamp = latestNode.timestamp) } @@ -347,32 +354,36 @@ private[metrics] final case class MetricsGossip(nodes: Set[NodeMetrics]) { /** * Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus]] `Up`. */ - def remove(node: Address): MetricsGossip = copy(nodes = nodes filterNot (_.address == node)) + def remove(node: Address): MetricsGossip = copy(nodes = nodes.filterNot(_.address == node)) /** * Only the nodes that are in the `includeNodes` Set. */ def filter(includeNodes: Set[Address]): MetricsGossip = - copy(nodes = nodes filter { includeNodes contains _.address }) + copy(nodes = nodes.filter { includeNodes contains _.address }) /** * Adds new remote [[NodeMetrics]] and merges existing from a remote gossip. 
*/ def merge(otherGossip: MetricsGossip): MetricsGossip = - otherGossip.nodes.foldLeft(this) { (gossip, nodeMetrics) => gossip :+ nodeMetrics } + otherGossip.nodes.foldLeft(this) { (gossip, nodeMetrics) => + gossip :+ nodeMetrics + } /** * Adds new local [[NodeMetrics]], or merges an existing. */ def :+(newNodeMetrics: NodeMetrics): MetricsGossip = nodeMetricsFor(newNodeMetrics.address) match { case Some(existingNodeMetrics) => - copy(nodes = nodes - existingNodeMetrics + (existingNodeMetrics update newNodeMetrics)) + copy(nodes = nodes - existingNodeMetrics + (existingNodeMetrics.update(newNodeMetrics))) case None => copy(nodes = nodes + newNodeMetrics) } /** * Returns [[NodeMetrics]] for a node if exists. */ - def nodeMetricsFor(address: Address): Option[NodeMetrics] = nodes find { n => n.address == address } + def nodeMetricsFor(address: Address): Option[NodeMetrics] = nodes.find { n => + n.address == address + } } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala index 834c0a4994..6e01a424b6 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/MetricsCollector.scala @@ -24,6 +24,7 @@ import org.hyperic.sigar.SigarProxy * Implementations of cluster system metrics collectors extend this trait. */ trait MetricsCollector extends Closeable { + /** * Samples and collects new data points. 
* This method is invoked periodically and should return @@ -59,16 +60,20 @@ private[metrics] object MetricsCollector { def create(provider: String) = TryNative { log.debug(s"Trying ${provider}.") - system.asInstanceOf[ExtendedActorSystem].dynamicAccess - .createInstanceFor[MetricsCollector](provider, List(classOf[ActorSystem] -> system)).get + system + .asInstanceOf[ExtendedActorSystem] + .dynamicAccess + .createInstanceFor[MetricsCollector](provider, List(classOf[ActorSystem] -> system)) + .get } - val collector = if (useCustom) - create(collectorCustom) - else if (useInternal) - create(collectorSigar) orElse create(collectorJMX) - else // Use complete fall back chain. - create(collectorCustom) orElse create(collectorSigar) orElse create(collectorJMX) + val collector = + if (useCustom) + create(collectorCustom) + else if (useInternal) + create(collectorSigar).orElse(create(collectorJMX)) + else // Use complete fall back chain. + create(collectorCustom).orElse(create(collectorSigar)).orElse(create(collectorJMX)) collector.recover { case e => throw new ConfigurationException(s"Could not create metrics collector: ${e}") @@ -86,9 +91,7 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics import StandardMetrics._ private def this(address: Address, settings: ClusterMetricsSettings) = - this( - address, - EWMA.alpha(settings.CollectorMovingAverageHalfLife, settings.CollectorSampleInterval)) + this(address, EWMA.alpha(settings.CollectorMovingAverageHalfLife, settings.CollectorSampleInterval)) /** * This constructor is used when creating an instance from configured FQCN @@ -122,19 +125,15 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics * returned from JMX, and None is returned from this method. * Creates a new instance each time. 
*/ - def systemLoadAverage: Option[Metric] = Metric.create( - name = SystemLoadAverage, - value = osMBean.getSystemLoadAverage, - decayFactor = None) + def systemLoadAverage: Option[Metric] = + Metric.create(name = SystemLoadAverage, value = osMBean.getSystemLoadAverage, decayFactor = None) /** * (JMX) Returns the number of available processors * Creates a new instance each time. */ - def processors: Option[Metric] = Metric.create( - name = Processors, - value = osMBean.getAvailableProcessors, - decayFactor = None) + def processors: Option[Metric] = + Metric.create(name = Processors, value = osMBean.getAvailableProcessors, decayFactor = None) /** * Current heap to be passed in to heapUsed, heapCommitted and heapMax @@ -145,20 +144,16 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics * (JMX) Returns the current sum of heap memory used from all heap memory pools (in bytes). * Creates a new instance each time. */ - def heapUsed(heap: MemoryUsage): Option[Metric] = Metric.create( - name = HeapMemoryUsed, - value = heap.getUsed, - decayFactor = decayFactorOption) + def heapUsed(heap: MemoryUsage): Option[Metric] = + Metric.create(name = HeapMemoryUsed, value = heap.getUsed, decayFactor = decayFactorOption) /** * (JMX) Returns the current sum of heap memory guaranteed to be available to the JVM * from all heap memory pools (in bytes). * Creates a new instance each time. */ - def heapCommitted(heap: MemoryUsage): Option[Metric] = Metric.create( - name = HeapMemoryCommitted, - value = heap.getCommitted, - decayFactor = decayFactorOption) + def heapCommitted(heap: MemoryUsage): Option[Metric] = + Metric.create(name = HeapMemoryCommitted, value = heap.getCommitted, decayFactor = decayFactorOption) /** * (JMX) Returns the maximum amount of memory (in bytes) that can be used @@ -166,10 +161,8 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics * never negative. * Creates a new instance each time. 
*/ - def heapMax(heap: MemoryUsage): Option[Metric] = Metric.create( - name = HeapMemoryMax, - value = heap.getMax, - decayFactor = None) + def heapMax(heap: MemoryUsage): Option[Metric] = + Metric.create(name = HeapMemoryMax, value = heap.getMax, decayFactor = None) override def close(): Unit = () @@ -188,16 +181,13 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics * @param sigar the org.hyperic.Sigar instance */ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarProxy) - extends JmxMetricsCollector(address, decayFactor) { + extends JmxMetricsCollector(address, decayFactor) { import StandardMetrics._ import org.hyperic.sigar.CpuPerc def this(address: Address, settings: ClusterMetricsSettings, sigar: SigarProxy) = - this( - address, - EWMA.alpha(settings.CollectorMovingAverageHalfLife, settings.CollectorSampleInterval), - sigar) + this(address, EWMA.alpha(settings.CollectorMovingAverageHalfLife, settings.CollectorSampleInterval), sigar) def this(address: Address, settings: ClusterMetricsSettings) = this(address, settings, DefaultSigarProvider(settings).createSigarInstance) @@ -219,7 +209,7 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP override def metrics(): Set[Metric] = { // Must obtain cpuPerc in one shot. See https://github.com/akka/akka/issues/16121 val cpuPerc = sigar.getCpuPerc - super.metrics union Set(cpuCombined(cpuPerc), cpuStolen(cpuPerc)).flatten + super.metrics.union(Set(cpuCombined(cpuPerc), cpuStolen(cpuPerc)).flatten) } /** @@ -227,10 +217,8 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP * * Creates a new instance each time. 
*/ - override def systemLoadAverage: Option[Metric] = Metric.create( - name = SystemLoadAverage, - value = sigar.getLoadAverage()(0).asInstanceOf[Number], - decayFactor = None) + override def systemLoadAverage: Option[Metric] = + Metric.create(name = SystemLoadAverage, value = sigar.getLoadAverage()(0).asInstanceOf[Number], decayFactor = None) /** * (SIGAR) Returns the combined CPU sum of User + Sys + Nice + Wait, in percentage. This metric can describe @@ -242,10 +230,8 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP * * Creates a new instance each time. */ - def cpuCombined(cpuPerc: CpuPerc): Option[Metric] = Metric.create( - name = CpuCombined, - value = cpuPerc.getCombined.asInstanceOf[Number], - decayFactor = decayFactorOption) + def cpuCombined(cpuPerc: CpuPerc): Option[Metric] = + Metric.create(name = CpuCombined, value = cpuPerc.getCombined.asInstanceOf[Number], decayFactor = decayFactorOption) /** * (SIGAR) Returns the stolen CPU time. Relevant to virtual hosting environments. @@ -254,10 +240,8 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP * * Creates a new instance each time. */ - def cpuStolen(cpuPerc: CpuPerc): Option[Metric] = Metric.create( - name = CpuStolen, - value = cpuPerc.getStolen.asInstanceOf[Number], - decayFactor = decayFactorOption) + def cpuStolen(cpuPerc: CpuPerc): Option[Metric] = + Metric.create(name = CpuStolen, value = cpuPerc.getStolen.asInstanceOf[Number], decayFactor = decayFactorOption) /** * (SIGAR) Returns the idle CPU time. @@ -265,10 +249,8 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP * * Creates a new instance each time. 
*/ - def cpuIdle(cpuPerc: CpuPerc): Option[Metric] = Metric.create( - name = CpuIdle, - value = cpuPerc.getIdle.asInstanceOf[Number], - decayFactor = decayFactorOption) + def cpuIdle(cpuPerc: CpuPerc): Option[Metric] = + Metric.create(name = CpuIdle, value = cpuPerc.getIdle.asInstanceOf[Number], decayFactor = decayFactorOption) /** * Releases any native resources associated with this instance. diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala index 8848e6b964..adac131c67 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/Provision.scala @@ -64,17 +64,19 @@ trait SigarProvider { def createSigarInstance: SigarProxy = { TryNative { verifiedSigarInstance - } orElse TryNative { - provisionSigarLibrary() - verifiedSigarInstance - } recover { - case e: Throwable => throw new RuntimeException("Failed to load sigar:", e) - } get + }.orElse(TryNative { + provisionSigarLibrary() + verifiedSigarInstance + }) + .recover { + case e: Throwable => throw new RuntimeException("Failed to load sigar:", e) + } get } } object SigarProvider { + /** * Release underlying sigar proxy resources. 
* @@ -97,7 +99,8 @@ case class DefaultSigarProvider(settings: ClusterMetricsSettings) extends SigarP */ private[metrics] object TryNative { def apply[T](r: => T): Try[T] = - try Success(r) catch { + try Success(r) + catch { // catching all, for example java.lang.LinkageError that are not caught by `NonFatal` in `Try` case e: Throwable => Failure(e) } diff --git a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala index 6d5f0966c9..300ed20321 100644 --- a/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala +++ b/akka-cluster-metrics/src/main/scala/akka/cluster/metrics/protobuf/MessageSerializer.scala @@ -11,7 +11,7 @@ import java.{ lang => jl } import akka.actor.{ Address, ExtendedActorSystem } import akka.cluster.metrics.protobuf.msg.{ ClusterMetricsMessages => cm } import akka.cluster.metrics._ -import akka.serialization.{ BaseSerializer, SerializationExtension, Serializers, SerializerWithStringManifest } +import akka.serialization.{ BaseSerializer, SerializationExtension, SerializerWithStringManifest, Serializers } import akka.util.ClassLoaderObjectInputStream import akka.protobuf.{ ByteString, MessageLite } import akka.util.ccompat._ @@ -93,8 +93,9 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS case CpuMetricsSelectorManifest => CpuMetricsSelector case HeapMetricsSelectorManifest => HeapMetricsSelector case SystemLoadAverageMetricsSelectorManifest => SystemLoadAverageMetricsSelector - case _ => throw new NotSerializableException( - s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}") + case _ => + throw new NotSerializableException( + s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}") } private def addressToProto(address: Address): cm.Address.Builder = address match { @@ 
-121,8 +122,7 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS val builder = cm.MetricsSelector.newBuilder() val serializer = serialization.findSerializerFor(selector) - builder.setData(ByteString.copyFrom(serializer.toBinary(selector))) - .setSerializerId(serializer.identifier) + builder.setData(ByteString.copyFrom(serializer.toBinary(selector))).setSerializerId(serializer.identifier) val manifest = Serializers.manifestFor(serializer, selector) builder.setManifest(manifest) @@ -175,13 +175,14 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS val allNodeMetrics = envelope.gossip.nodes val allAddresses: Vector[Address] = allNodeMetrics.iterator.map(_.address).to(immutable.Vector) val addressMapping = allAddresses.zipWithIndex.toMap - val allMetricNames: Vector[String] = allNodeMetrics.foldLeft(Set.empty[String])((s, n) => s ++ n.metrics.iterator.map(_.name)).toVector + val allMetricNames: Vector[String] = + allNodeMetrics.foldLeft(Set.empty[String])((s, n) => s ++ n.metrics.iterator.map(_.name)).toVector val metricNamesMapping = allMetricNames.zipWithIndex.toMap def mapAddress(address: Address) = mapWithErrorMessage(addressMapping, address, "address") def mapName(name: String) = mapWithErrorMessage(metricNamesMapping, name, "address") - def ewmaToProto(ewma: Option[EWMA]): Option[cm.NodeMetrics.EWMA.Builder] = ewma.map { - x => cm.NodeMetrics.EWMA.newBuilder().setValue(x.value).setAlpha(x.alpha) + def ewmaToProto(ewma: Option[EWMA]): Option[cm.NodeMetrics.EWMA.Builder] = ewma.map { x => + cm.NodeMetrics.EWMA.newBuilder().setValue(x.value).setAlpha(x.alpha) } def numberToProto(number: Number): cm.NodeMetrics.Number.Builder = { @@ -202,20 +203,31 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS } def metricToProto(metric: Metric): cm.NodeMetrics.Metric.Builder = { - val builder = 
cm.NodeMetrics.Metric.newBuilder().setNameIndex(mapName(metric.name)).setNumber(numberToProto(metric.value)) + val builder = + cm.NodeMetrics.Metric.newBuilder().setNameIndex(mapName(metric.name)).setNumber(numberToProto(metric.value)) ewmaToProto(metric.average).map(builder.setEwma).getOrElse(builder) } def nodeMetricsToProto(nodeMetrics: NodeMetrics): cm.NodeMetrics.Builder = - cm.NodeMetrics.newBuilder().setAddressIndex(mapAddress(nodeMetrics.address)).setTimestamp(nodeMetrics.timestamp). - addAllMetrics(nodeMetrics.metrics.map(metricToProto(_).build).asJava) + cm.NodeMetrics + .newBuilder() + .setAddressIndex(mapAddress(nodeMetrics.address)) + .setTimestamp(nodeMetrics.timestamp) + .addAllMetrics(nodeMetrics.metrics.map(metricToProto(_).build).asJava) val nodeMetrics: Iterable[cm.NodeMetrics] = allNodeMetrics.map(nodeMetricsToProto(_).build) - cm.MetricsGossipEnvelope.newBuilder().setFrom(addressToProto(envelope.from)).setGossip( - cm.MetricsGossip.newBuilder().addAllAllAddresses(allAddresses.map(addressToProto(_).build()).asJava). - addAllAllMetricNames(allMetricNames.asJava).addAllNodeMetrics(nodeMetrics.asJava)). 
- setReply(envelope.reply).build + cm.MetricsGossipEnvelope + .newBuilder() + .setFrom(addressToProto(envelope.from)) + .setGossip( + cm.MetricsGossip + .newBuilder() + .addAllAllAddresses(allAddresses.map(addressToProto(_).build()).asJava) + .addAllAllMetricNames(allMetricNames.asJava) + .addAllNodeMetrics(nodeMetrics.asJava)) + .setReply(envelope.reply) + .build } private def metricsGossipEnvelopeFromBinary(bytes: Array[Byte]): MetricsGossipEnvelope = @@ -237,9 +249,8 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS case NumberType.Float_VALUE => jl.Float.intBitsToFloat(number.getValue32) case NumberType.Integer_VALUE => number.getValue32 case NumberType.Serialized_VALUE => - val in = new ClassLoaderObjectInputStream( - system.dynamicAccess.classLoader, - new ByteArrayInputStream(number.getSerialized.toByteArray)) + val in = new ClassLoaderObjectInputStream(system.dynamicAccess.classLoader, + new ByteArrayInputStream(number.getSerialized.toByteArray)) val obj = in.readObject in.close() obj.asInstanceOf[jl.Number] @@ -247,14 +258,17 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS } def metricFromProto(metric: cm.NodeMetrics.Metric): Metric = - Metric(metricNameMapping(metric.getNameIndex), numberFromProto(metric.getNumber), - if (metric.hasEwma) ewmaFromProto(metric.getEwma) else None) + Metric(metricNameMapping(metric.getNameIndex), + numberFromProto(metric.getNumber), + if (metric.hasEwma) ewmaFromProto(metric.getEwma) else None) def nodeMetricsFromProto(nodeMetrics: cm.NodeMetrics): NodeMetrics = - NodeMetrics(addressMapping(nodeMetrics.getAddressIndex), nodeMetrics.getTimestamp, - nodeMetrics.getMetricsList.asScala.iterator.map(metricFromProto).to(immutable.Set)) + NodeMetrics(addressMapping(nodeMetrics.getAddressIndex), + nodeMetrics.getTimestamp, + nodeMetrics.getMetricsList.asScala.iterator.map(metricFromProto).to(immutable.Set)) - val nodeMetrics: Set[NodeMetrics] = 
mgossip.getNodeMetricsList.asScala.iterator.map(nodeMetricsFromProto).to(immutable.Set) + val nodeMetrics: Set[NodeMetrics] = + mgossip.getNodeMetricsList.asScala.iterator.map(nodeMetricsFromProto).to(immutable.Set) MetricsGossipEnvelope(addressFromProto(envelope.getFrom), MetricsGossip(nodeMetrics), envelope.getReply) } @@ -265,33 +279,33 @@ class MessageSerializer(val system: ExtendedActorSystem) extends SerializerWithS val selector = if (alb.hasMetricsSelector) { val ms = alb.getMetricsSelector - serialization.deserialize( - ms.getData.toByteArray, - ms.getSerializerId, - ms.getManifest - ).get.asInstanceOf[MetricsSelector] + serialization + .deserialize(ms.getData.toByteArray, ms.getSerializerId, ms.getManifest) + .get + .asInstanceOf[MetricsSelector] } else MixMetricsSelector - AdaptiveLoadBalancingPool( - metricsSelector = selector, - nrOfInstances = alb.getNrOfInstances, - routerDispatcher = if (alb.hasRouterDispatcher) alb.getRouterDispatcher else Dispatchers.DefaultDispatcherId, - usePoolDispatcher = alb.getUsePoolDispatcher - ) + AdaptiveLoadBalancingPool(metricsSelector = selector, + nrOfInstances = alb.getNrOfInstances, + routerDispatcher = + if (alb.hasRouterDispatcher) alb.getRouterDispatcher + else Dispatchers.DefaultDispatcherId, + usePoolDispatcher = alb.getUsePoolDispatcher) } def mixMetricSelectorFromBinary(bytes: Array[Byte]): MixMetricsSelector = { val mm = cm.MixMetricsSelector.parseFrom(bytes) - MixMetricsSelector(mm.getSelectorsList.asScala + MixMetricsSelector( + mm.getSelectorsList.asScala // should be safe because we serialized only the right subtypes of MetricsSelector - .map(s => metricSelectorFromProto(s).asInstanceOf[CapacityMetricsSelector]).toIndexedSeq) + .map(s => metricSelectorFromProto(s).asInstanceOf[CapacityMetricsSelector]) + .toIndexedSeq) } def metricSelectorFromProto(selector: cm.MetricsSelector): MetricsSelector = - serialization.deserialize( - selector.getData.toByteArray, - selector.getSerializerId, - 
selector.getManifest - ).get.asInstanceOf[MetricsSelector] + serialization + .deserialize(selector.getData.toByteArray, selector.getSerializerId, selector.getManifest) + .get + .asInstanceOf[MetricsSelector] } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala index 21a711f936..241f82b473 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala @@ -24,7 +24,7 @@ trait ClusterMetricsCommonConfig extends MultiNodeConfig { def nodeList = Seq(node1, node2, node3, node4, node5) // Extract individual sigar library for every node. - nodeList foreach { role => + nodeList.foreach { role => nodeConfig(role) { parseString(s"akka.cluster.metrics.native-library-extract-folder=$${user.dir}/target/native/" + role.name) } @@ -49,24 +49,20 @@ trait ClusterMetricsCommonConfig extends MultiNodeConfig { object ClusterMetricsDisabledConfig extends ClusterMetricsCommonConfig { commonConfig { - Seq( - customLogging, - disableMetricsExtension, - debugConfig(on = false), - MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet) - .reduceLeft(_ withFallback _) + Seq(customLogging, + disableMetricsExtension, + debugConfig(on = false), + MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet).reduceLeft(_.withFallback(_)) } } object ClusterMetricsEnabledConfig extends ClusterMetricsCommonConfig { commonConfig { - Seq( - customLogging, - enableMetricsExtension, - debugConfig(on = false), - MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet) - .reduceLeft(_ withFallback _) + Seq(customLogging, + enableMetricsExtension, + debugConfig(on = false), + MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet).reduceLeft(_.withFallback(_)) } } @@ -77,8 +73,10 @@ class 
ClusterMetricsEnabledMultiJvmNode3 extends ClusterMetricsEnabledSpec class ClusterMetricsEnabledMultiJvmNode4 extends ClusterMetricsEnabledSpec class ClusterMetricsEnabledMultiJvmNode5 extends ClusterMetricsEnabledSpec -abstract class ClusterMetricsEnabledSpec extends MultiNodeSpec(ClusterMetricsEnabledConfig) - with MultiNodeClusterSpec with RedirectLogging { +abstract class ClusterMetricsEnabledSpec + extends MultiNodeSpec(ClusterMetricsEnabledConfig) + with MultiNodeClusterSpec + with RedirectLogging { import ClusterMetricsEnabledConfig._ def isSigar(collector: MetricsCollector): Boolean = collector.isInstanceOf[SigarMetricsCollector] @@ -89,7 +87,9 @@ abstract class ClusterMetricsEnabledSpec extends MultiNodeSpec(ClusterMetricsEna val conf = cluster.system.settings.config val text = conf.root.render val file = new File(s"target/${myself.name}_application.conf") - Some(new PrintWriter(file)) map { p => p.write(text); p.close } + Some(new PrintWriter(file)).map { p => + p.write(text); p.close + } } saveApplicationConf() @@ -98,17 +98,17 @@ abstract class ClusterMetricsEnabledSpec extends MultiNodeSpec(ClusterMetricsEna "Cluster metrics" must { "periodically collect metrics on each node, publish to the event stream, " + - "and gossip metrics around the node ring" in within(60 seconds) { - awaitClusterUp(roles: _*) - enterBarrier("cluster-started") - awaitAssert(clusterView.members.count(_.status == MemberStatus.Up) should ===(roles.size)) - // TODO ensure same contract - //awaitAssert(clusterView.clusterMetrics.size should ===(roles.size)) - awaitAssert(metricsView.clusterMetrics.size should ===(roles.size)) - val collector = MetricsCollector(cluster.system) - collector.sample.metrics.size should be > (3) - enterBarrier("after") - } + "and gossip metrics around the node ring" in within(60 seconds) { + awaitClusterUp(roles: _*) + enterBarrier("cluster-started") + awaitAssert(clusterView.members.count(_.status == MemberStatus.Up) should ===(roles.size)) + // TODO 
ensure same contract + //awaitAssert(clusterView.clusterMetrics.size should ===(roles.size)) + awaitAssert(metricsView.clusterMetrics.size should ===(roles.size)) + val collector = MetricsCollector(cluster.system) + collector.sample.metrics.size should be > (3) + enterBarrier("after") + } "reflect the correct number of node metrics in cluster view" in within(30 seconds) { runOn(node2) { cluster.leave(node1) @@ -131,8 +131,10 @@ class ClusterMetricsDisabledMultiJvmNodv3 extends ClusterMetricsDisabledSpec class ClusterMetricsDisabledMultiJvmNode4 extends ClusterMetricsDisabledSpec class ClusterMetricsDisabledMultiJvmNode5 extends ClusterMetricsDisabledSpec -abstract class ClusterMetricsDisabledSpec extends MultiNodeSpec(ClusterMetricsDisabledConfig) - with MultiNodeClusterSpec with RedirectLogging { +abstract class ClusterMetricsDisabledSpec + extends MultiNodeSpec(ClusterMetricsDisabledConfig) + with MultiNodeClusterSpec + with RedirectLogging { val metricsView = new ClusterMetricsView(cluster.system) diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala index 3e4388dc34..e0b705e35e 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala @@ -14,10 +14,10 @@ import akka.actor._ import akka.cluster.Cluster import akka.cluster.MultiNodeClusterSpec import akka.pattern.ask -import akka.remote.testkit.{ MultiNodeSpec, MultiNodeConfig } +import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec } import akka.routing.GetRoutees import akka.routing.FromConfig -import akka.testkit.{ LongRunningTest, DefaultTimeout, ImplicitSender } +import akka.testkit.{ DefaultTimeout, ImplicitSender, LongRunningTest } import akka.routing.ActorRefRoutee import akka.routing.Routees import 
akka.cluster.routing.ClusterRouterPool @@ -59,13 +59,16 @@ object AdaptiveLoadBalancingRouterConfig extends MultiNodeConfig { def nodeList = Seq(node1, node2, node3) // Extract individual sigar library for every node. - nodeList foreach { role => + nodeList.foreach { role => nodeConfig(role) { - ConfigFactory.parseString(s"akka.cluster.metrics.native-library-extract-folder=$${user.dir}/target/native/" + role.name) + ConfigFactory.parseString( + s"akka.cluster.metrics.native-library-extract-folder=$${user.dir}/target/native/" + role.name) } } - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" # Enable metrics estension. akka.extensions=["akka.cluster.metrics.ClusterMetricsExtension"] @@ -97,7 +100,8 @@ object AdaptiveLoadBalancingRouterConfig extends MultiNodeConfig { } } } - """)).withFallback(MultiNodeClusterSpec.clusterConfig)) + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -109,9 +113,12 @@ class AdaptiveLoadBalancingRouterMultiJvmNode1 extends AdaptiveLoadBalancingRout class AdaptiveLoadBalancingRouterMultiJvmNode2 extends AdaptiveLoadBalancingRouterSpec class AdaptiveLoadBalancingRouterMultiJvmNode3 extends AdaptiveLoadBalancingRouterSpec -abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoadBalancingRouterConfig) - with MultiNodeClusterSpec with RedirectLogging - with ImplicitSender with DefaultTimeout { +abstract class AdaptiveLoadBalancingRouterSpec + extends MultiNodeSpec(AdaptiveLoadBalancingRouterConfig) + with MultiNodeClusterSpec + with RedirectLogging + with ImplicitSender + with DefaultTimeout { import AdaptiveLoadBalancingRouterConfig._ def currentRoutees(router: ActorRef) = @@ -136,10 +143,10 @@ abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoa def startRouter(name: String): ActorRef = { val router = system.actorOf( - ClusterRouterPool( - local = 
AdaptiveLoadBalancingPool(HeapMetricsSelector), - settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true)). - props(Props[Echo]), + ClusterRouterPool(local = AdaptiveLoadBalancingPool(HeapMetricsSelector), + settings = ClusterRouterPoolSettings(totalInstances = 10, + maxInstancesPerNode = 1, + allowLocalRoutees = true)).props(Props[Echo]), name) // it may take some time until router receives cluster member events awaitAssert { currentRoutees(router).size should ===(roles.size) } @@ -166,7 +173,7 @@ abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoa metricsAwait() val iterationCount = 100 - 1 to iterationCount foreach { _ => + (1 to iterationCount).foreach { _ => router1 ! "hit" // wait a while between each message, since metrics is collected periodically Thread.sleep(10) @@ -203,7 +210,7 @@ abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoa metricsAwait() val iterationCount = 3000 - 1 to iterationCount foreach { _ => + (1 to iterationCount).foreach { _ => router2 ! 
"hit" } @@ -234,8 +241,8 @@ abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoa // it may take some time until router receives cluster member events awaitAssert { currentRoutees(router4).size should ===(6) } val routees = currentRoutees(router4) - routees.map { case ActorRefRoutee(ref) => fullAddress(ref) }.toSet should ===(Set( - address(node1), address(node2), address(node3))) + routees.map { case ActorRefRoutee(ref) => fullAddress(ref) }.toSet should ===( + Set(address(node1), address(node2), address(node3))) } enterBarrier("after-5") } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala index a7011ead8e..6fe33761c1 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsSampleSpec.scala @@ -24,7 +24,7 @@ object StatsSampleSpecConfig extends MultiNodeConfig { def nodeList = Seq(first, second, third) // Extract individual sigar library for every node. - nodeList foreach { role => + nodeList.foreach { role => nodeConfig(role) { ConfigFactory.parseString(s""" # Enable metrics extension in akka-cluster-metrics. 
@@ -71,9 +71,12 @@ import akka.remote.testkit.MultiNodeSpec import akka.testkit.ImplicitSender import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } -abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig) - with WordSpecLike with Matchers with BeforeAndAfterAll - with ImplicitSender { +abstract class StatsSampleSpec + extends MultiNodeSpec(StatsSampleSpecConfig) + with WordSpecLike + with Matchers + with BeforeAndAfterAll + with ImplicitSender { import StatsSampleSpecConfig._ @@ -99,7 +102,7 @@ abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig) //#addresses //#join - Cluster(system) join firstAddress + Cluster(system).join(firstAddress) //#join system.actorOf(Props[StatsWorker], "statsWorker") @@ -129,8 +132,7 @@ abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig) // first attempts might fail because worker actors not started yet awaitAssert { service ! StatsJob("this is the text that will be analyzed") - expectMsgType[StatsResult](1.second).meanWordLength should be( - 3.875 +- 0.001) + expectMsgType[StatsResult](1.second).meanWordLength should be(3.875 +- 0.001) } } diff --git a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala index 17a1d9d179..64bd5b861a 100644 --- a/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala +++ b/akka-cluster-metrics/src/multi-jvm/scala/akka/cluster/metrics/sample/StatsService.scala @@ -15,20 +15,16 @@ class StatsService extends Actor { // This router is used both with lookup and deploy of routees. If you // have a router with only lookup of routees you can use Props.empty // instead of Props[StatsWorker.class]. 
- val workerRouter = context.actorOf( - FromConfig.props(Props[StatsWorker]), - name = "workerRouter") + val workerRouter = context.actorOf(FromConfig.props(Props[StatsWorker]), name = "workerRouter") def receive = { case StatsJob(text) if text != "" => val words = text.split(" ") val replyTo = sender() // important to not close over sender() // create actor that collects replies from workers - val aggregator = context.actorOf(Props( - classOf[StatsAggregator], words.size, replyTo)) - words foreach { word => - workerRouter.tell( - ConsistentHashableEnvelope(word, word), aggregator) + val aggregator = context.actorOf(Props(classOf[StatsAggregator], words.size, replyTo)) + words.foreach { word => + workerRouter.tell(ConsistentHashableEnvelope(word, word), aggregator) } } } @@ -59,9 +55,11 @@ abstract class StatsService2 extends Actor { import akka.routing.ConsistentHashingGroup val workerRouter = context.actorOf( - ClusterRouterGroup(ConsistentHashingGroup(Nil), ClusterRouterGroupSettings( - totalInstances = 100, routeesPaths = List("/user/statsWorker"), - allowLocalRoutees = true, useRoles = Set("compute"))).props(), + ClusterRouterGroup(ConsistentHashingGroup(Nil), + ClusterRouterGroupSettings(totalInstances = 100, + routeesPaths = List("/user/statsWorker"), + allowLocalRoutees = true, + useRoles = Set("compute"))).props(), name = "workerRouter2") //#router-lookup-in-code } @@ -73,9 +71,10 @@ abstract class StatsService3 extends Actor { import akka.routing.ConsistentHashingPool val workerRouter = context.actorOf( - ClusterRouterPool(ConsistentHashingPool(0), ClusterRouterPoolSettings( - totalInstances = 100, maxInstancesPerNode = 3, - allowLocalRoutees = false)).props(Props[StatsWorker]), + ClusterRouterPool(ConsistentHashingPool(0), + ClusterRouterPoolSettings(totalInstances = 100, + maxInstancesPerNode = 3, + allowLocalRoutees = false)).props(Props[StatsWorker]), name = "workerRouter3") //#router-deploy-in-code } diff --git 
a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala index d983b14dd4..cc387c11ad 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsExtensionSpec.scala @@ -11,8 +11,7 @@ import akka.testkit._ import akka.cluster.metrics.StandardMetrics._ import akka.cluster.Cluster -class MetricsExtensionSpec extends AkkaSpec(MetricsConfig.clusterSigarMock) - with ImplicitSender with RedirectLogging { +class MetricsExtensionSpec extends AkkaSpec(MetricsConfig.clusterSigarMock) with ImplicitSender with RedirectLogging { val cluster = Cluster(system) @@ -57,25 +56,24 @@ class MetricsExtensionSpec extends AkkaSpec(MetricsConfig.clusterSigarMock) val history = metricsView.metricsHistory.reverse.map { _.head } - val expected = List( - (0.700, 0.000, 0.000), - (0.700, 0.018, 0.007), - (0.700, 0.051, 0.020), - (0.700, 0.096, 0.038), - (0.700, 0.151, 0.060), - (0.700, 0.214, 0.085), - (0.700, 0.266, 0.106), - (0.700, 0.309, 0.123), - (0.700, 0.343, 0.137), - (0.700, 0.372, 0.148)) + val expected = List((0.700, 0.000, 0.000), + (0.700, 0.018, 0.007), + (0.700, 0.051, 0.020), + (0.700, 0.096, 0.038), + (0.700, 0.151, 0.060), + (0.700, 0.214, 0.085), + (0.700, 0.266, 0.106), + (0.700, 0.309, 0.123), + (0.700, 0.343, 0.137), + (0.700, 0.372, 0.148)) expected.size should ===(sampleCount) - history.zip(expected) foreach { + history.zip(expected).foreach { case (mockMetrics, expectedData) => (mockMetrics, expectedData) match { case (Cpu(_, _, loadAverageMock, cpuCombinedMock, cpuStolenMock, _), - (loadAverageEwma, cpuCombinedEwma, cpuStolenEwma)) => + (loadAverageEwma, cpuCombinedEwma, cpuStolenEwma)) => loadAverageMock.get should ===(loadAverageEwma +- epsilon) cpuCombinedMock.get should ===(cpuCombinedEwma +- epsilon) cpuStolenMock.get 
should ===(cpuStolenEwma +- epsilon) @@ -108,7 +106,9 @@ class MetricsExtensionSpec extends AkkaSpec(MetricsConfig.clusterSigarMock) } - (1 to 3) foreach { step => cycle() } + (1 to 3).foreach { step => + cycle() + } } diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala index 109a52334d..fd126af20f 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/ClusterMetricsRoutingSpec.scala @@ -25,38 +25,42 @@ class MetricsSelectorSpec extends WordSpec with Matchers { val decayFactor = Some(0.18) - val nodeMetricsA = NodeMetrics(a1, System.currentTimeMillis, Set( - Metric.create(HeapMemoryUsed, 128, decayFactor), - Metric.create(HeapMemoryCommitted, 256, decayFactor), - Metric.create(HeapMemoryMax, 512, None), - Metric.create(CpuCombined, 0.2, decayFactor), - Metric.create(CpuStolen, 0.1, decayFactor), - Metric.create(SystemLoadAverage, 0.5, None), - Metric.create(Processors, 8, None)).flatten) + val nodeMetricsA = NodeMetrics(a1, + System.currentTimeMillis, + Set(Metric.create(HeapMemoryUsed, 128, decayFactor), + Metric.create(HeapMemoryCommitted, 256, decayFactor), + Metric.create(HeapMemoryMax, 512, None), + Metric.create(CpuCombined, 0.2, decayFactor), + Metric.create(CpuStolen, 0.1, decayFactor), + Metric.create(SystemLoadAverage, 0.5, None), + Metric.create(Processors, 8, None)).flatten) - val nodeMetricsB = NodeMetrics(b1, System.currentTimeMillis, Set( - Metric.create(HeapMemoryUsed, 256, decayFactor), - Metric.create(HeapMemoryCommitted, 512, decayFactor), - Metric.create(HeapMemoryMax, 1024, None), - Metric.create(CpuCombined, 0.4, decayFactor), - Metric.create(CpuStolen, 0.2, decayFactor), - Metric.create(SystemLoadAverage, 1.0, None), - Metric.create(Processors, 16, None)).flatten) + val nodeMetricsB = 
NodeMetrics(b1, + System.currentTimeMillis, + Set(Metric.create(HeapMemoryUsed, 256, decayFactor), + Metric.create(HeapMemoryCommitted, 512, decayFactor), + Metric.create(HeapMemoryMax, 1024, None), + Metric.create(CpuCombined, 0.4, decayFactor), + Metric.create(CpuStolen, 0.2, decayFactor), + Metric.create(SystemLoadAverage, 1.0, None), + Metric.create(Processors, 16, None)).flatten) - val nodeMetricsC = NodeMetrics(c1, System.currentTimeMillis, Set( - Metric.create(HeapMemoryUsed, 1024, decayFactor), - Metric.create(HeapMemoryCommitted, 1024, decayFactor), - Metric.create(HeapMemoryMax, 1024, None), - Metric.create(CpuCombined, 0.6, decayFactor), - Metric.create(CpuStolen, 0.3, decayFactor), - Metric.create(SystemLoadAverage, 16.0, None), - Metric.create(Processors, 16, None)).flatten) + val nodeMetricsC = NodeMetrics(c1, + System.currentTimeMillis, + Set(Metric.create(HeapMemoryUsed, 1024, decayFactor), + Metric.create(HeapMemoryCommitted, 1024, decayFactor), + Metric.create(HeapMemoryMax, 1024, None), + Metric.create(CpuCombined, 0.6, decayFactor), + Metric.create(CpuStolen, 0.3, decayFactor), + Metric.create(SystemLoadAverage, 16.0, None), + Metric.create(Processors, 16, None)).flatten) - val nodeMetricsD = NodeMetrics(d1, System.currentTimeMillis, Set( - Metric.create(HeapMemoryUsed, 511, decayFactor), - Metric.create(HeapMemoryCommitted, 512, decayFactor), - Metric.create(HeapMemoryMax, 512, None), - Metric.create(Processors, 2, decayFactor)).flatten) + val nodeMetricsD = NodeMetrics(d1, + System.currentTimeMillis, + Set(Metric.create(HeapMemoryUsed, 511, decayFactor), + Metric.create(HeapMemoryCommitted, 512, decayFactor), + Metric.create(HeapMemoryMax, 512, None), + Metric.create(Processors, 2, decayFactor)).flatten) val nodeMetrics = Set(nodeMetricsA, nodeMetricsB, nodeMetricsC, nodeMetricsD) diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala index 
8695ff9c7c..fa1fbde12c 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/EWMASpec.scala @@ -5,7 +5,7 @@ package akka.cluster.metrics import scala.concurrent.duration._ -import akka.testkit.{ LongRunningTest, AkkaSpec } +import akka.testkit.{ AkkaSpec, LongRunningTest } import java.util.concurrent.ThreadLocalRandom class EWMASpec extends AkkaSpec(MetricsConfig.defaultEnabled) with MetricsCollectorFactory { @@ -75,7 +75,7 @@ class EWMASpec extends AkkaSpec(MetricsConfig.defaultEnabled) with MetricsCollec "calculate the ewma for multiple, variable, data streams" taggedAs LongRunningTest in { var streamingDataSet = Map.empty[String, Metric] var usedMemory = Array.empty[Byte] - (1 to 50) foreach { _ => + (1 to 50).foreach { _ => // wait a while between each message to give the metrics a chance to change Thread.sleep(100) usedMemory = usedMemory ++ Array.fill(1024)(ThreadLocalRandom.current.nextInt(127).toByte) diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricSpec.scala index fe7aa7d5c4..7afa515b7b 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricSpec.scala @@ -59,18 +59,18 @@ class NodeMetricsSpec extends WordSpec with Matchers { "NodeMetrics must" must { "return correct result for 2 'same' nodes" in { - (NodeMetrics(node1, 0) sameAs NodeMetrics(node1, 0)) should ===(true) + (NodeMetrics(node1, 0).sameAs(NodeMetrics(node1, 0))) should ===(true) } "return correct result for 2 not 'same' nodes" in { - (NodeMetrics(node1, 0) sameAs NodeMetrics(node2, 0)) should ===(false) + (NodeMetrics(node1, 0).sameAs(NodeMetrics(node2, 0))) should ===(false) } "merge 2 NodeMetrics by most recent" in { val sample1 = NodeMetrics(node1, 1, Set(Metric.create("a", 10, None), 
Metric.create("b", 20, None)).flatten) val sample2 = NodeMetrics(node1, 2, Set(Metric.create("a", 11, None), Metric.create("c", 30, None)).flatten) - val merged = sample1 merge sample2 + val merged = sample1.merge(sample2) merged.timestamp should ===(sample2.timestamp) merged.metric("a").map(_.value) should ===(Some(11)) merged.metric("b").map(_.value) should ===(Some(20)) @@ -81,7 +81,7 @@ class NodeMetricsSpec extends WordSpec with Matchers { val sample1 = NodeMetrics(node1, 1, Set(Metric.create("a", 10, None), Metric.create("b", 20, None)).flatten) val sample2 = NodeMetrics(node1, 0, Set(Metric.create("a", 11, None), Metric.create("c", 30, None)).flatten) - val merged = sample1 merge sample2 // older and not same + val merged = sample1.merge(sample2) // older and not same merged.timestamp should ===(sample1.timestamp) merged.metrics should ===(sample1.metrics) } @@ -90,7 +90,7 @@ class NodeMetricsSpec extends WordSpec with Matchers { val sample1 = NodeMetrics(node1, 1, Set(Metric.create("a", 10, None), Metric.create("b", 20, None)).flatten) val sample2 = NodeMetrics(node1, 2, Set(Metric.create("a", 11, None), Metric.create("c", 30, None)).flatten) - val updated = sample1 update sample2 + val updated = sample1.update(sample2) updated.metrics.size should ===(3) updated.timestamp should ===(sample2.timestamp) @@ -109,7 +109,7 @@ class NodeMetricsSpec extends WordSpec with Matchers { val sample2 = NodeMetrics(node1, 2, Set(Metric.create("a", 2, decay), Metric.create("c", 5, decay)).flatten) val sample3 = NodeMetrics(node1, 3, Set(Metric.create("a", 3, decay), Metric.create("d", 6, decay)).flatten) - val updated = sample1 update sample2 update sample3 + val updated = sample1.update(sample2).update(sample3) updated.metrics.size should ===(4) updated.timestamp should ===(sample3.timestamp) @@ -128,7 +128,10 @@ class NodeMetricsSpec extends WordSpec with Matchers { } } -class MetricsGossipSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with ImplicitSender with 
MetricsCollectorFactory { +class MetricsGossipSpec + extends AkkaSpec(MetricsConfig.defaultEnabled) + with ImplicitSender + with MetricsCollectorFactory { val collector = createMetricsCollector @@ -167,19 +170,19 @@ class MetricsGossipSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with Impl g1.nodes.size should ===(2) val beforeMergeNodes = g1.nodes - val m2Updated = m2 copy (metrics = newSample(m2.metrics), timestamp = m2.timestamp + 1000) + val m2Updated = m2.copy(metrics = newSample(m2.metrics), timestamp = m2.timestamp + 1000) val g2 = g1 :+ m2Updated // merge peers g2.nodes.size should ===(2) g2.nodeMetricsFor(m1.address).map(_.metrics) should ===(Some(m1.metrics)) g2.nodeMetricsFor(m2.address).map(_.metrics) should ===(Some(m2Updated.metrics)) - g2.nodes collect { case peer if peer.address == m2.address => peer.timestamp should ===(m2Updated.timestamp) } + g2.nodes.collect { case peer if peer.address == m2.address => peer.timestamp should ===(m2Updated.timestamp) } } "merge an existing metric set for a node and update node ring" in { val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics) val m2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), newTimestamp, collector.sample.metrics) val m3 = NodeMetrics(Address("akka.tcp", "sys", "a", 2556), newTimestamp, collector.sample.metrics) - val m2Updated = m2 copy (metrics = newSample(m2.metrics), timestamp = m2.timestamp + 1000) + val m2Updated = m2.copy(metrics = newSample(m2.metrics), timestamp = m2.timestamp + 1000) val g1 = MetricsGossip.empty :+ m1 :+ m2 val g2 = MetricsGossip.empty :+ m3 :+ m2Updated @@ -187,7 +190,7 @@ class MetricsGossipSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with Impl g1.nodes.map(_.address) should ===(Set(m1.address, m2.address)) // should contain nodes 1,3, and the most recent version of 2 - val mergedGossip = g1 merge g2 + val mergedGossip = g1.merge(g2) mergedGossip.nodes.map(_.address) should ===(Set(m1.address, 
m2.address, m3.address)) mergedGossip.nodeMetricsFor(m1.address).map(_.metrics) should ===(Some(m1.metrics)) mergedGossip.nodeMetricsFor(m2.address).map(_.metrics) should ===(Some(m2Updated.metrics)) @@ -208,7 +211,7 @@ class MetricsGossipSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with Impl val g1 = MetricsGossip.empty :+ m1 :+ m2 g1.nodes.size should ===(2) - val g2 = g1 remove m1.address + val g2 = g1.remove(m1.address) g2.nodes.size should ===(1) g2.nodes.exists(_.address == m1.address) should ===(false) g2.nodeMetricsFor(m1.address) should ===(None) @@ -221,7 +224,7 @@ class MetricsGossipSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with Impl val g1 = MetricsGossip.empty :+ m1 :+ m2 g1.nodes.size should ===(2) - val g2 = g1 filter Set(m2.address) + val g2 = g1.filter(Set(m2.address)) g2.nodes.size should ===(1) g2.nodes.exists(_.address == m1.address) should ===(false) g2.nodeMetricsFor(m1.address) should ===(None) @@ -240,10 +243,11 @@ class MetricValuesSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with Metri val nodes: Seq[NodeMetrics] = { (1 to 100).foldLeft(List(node1, node2)) { (nodes, _) => - nodes map { n => - n.copy(metrics = collector.sample.metrics.flatMap(latest => n.metrics.collect { - case streaming if latest sameAs streaming => streaming :+ latest - })) + nodes.map { n => + n.copy(metrics = collector.sample.metrics.flatMap(latest => + n.metrics.collect { + case streaming if latest.sameAs(streaming) => streaming :+ latest + })) } } } @@ -256,7 +260,7 @@ class MetricValuesSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with Metri } "extract expected MetricValue types for load balancing" in { - nodes foreach { node => + nodes.foreach { node => node match { case HeapMemory(address, _, used, committed, _) => used should be > (0L) diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala index 
cf7686caa5..e3df7c4b19 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/MetricsCollectorSpec.scala @@ -12,7 +12,10 @@ import scala.util.{ Try } import akka.testkit._ import akka.cluster.metrics.StandardMetrics._ -class MetricsCollectorSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with ImplicitSender with MetricsCollectorFactory { +class MetricsCollectorSpec + extends AkkaSpec(MetricsConfig.defaultEnabled) + with ImplicitSender + with MetricsCollectorFactory { val collector = createMetricsCollector @@ -22,23 +25,25 @@ class MetricsCollectorSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with I for (i <- 1 to 20) { val sample1 = collector.sample.metrics val sample2 = collector.sample.metrics - val merged12 = sample2 flatMap (latest => sample1 collect { - case peer if latest sameAs peer => - val m = peer :+ latest - m.value should ===(latest.value) - m.isSmooth should ===(peer.isSmooth || latest.isSmooth) - m - }) + val merged12 = sample2.flatMap(latest => + sample1.collect { + case peer if latest.sameAs(peer) => + val m = peer :+ latest + m.value should ===(latest.value) + m.isSmooth should ===(peer.isSmooth || latest.isSmooth) + m + }) val sample3 = collector.sample.metrics val sample4 = collector.sample.metrics - val merged34 = sample4 flatMap (latest => sample3 collect { - case peer if latest sameAs peer => - val m = peer :+ latest - m.value should ===(latest.value) - m.isSmooth should ===(peer.isSmooth || latest.isSmooth) - m - }) + val merged34 = sample4.flatMap(latest => + sample3.collect { + case peer if latest.sameAs(peer) => + val m = peer :+ latest + m.value should ===(latest.value) + m.isSmooth should ===(peer.isSmooth || latest.isSmooth) + m + }) } } } @@ -51,10 +56,10 @@ class MetricsCollectorSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with I "collect accurate metrics for a node" in { val sample = collector.sample - val metrics 
= sample.metrics.collect { case m => (m.name, m.value) } - val used = metrics collectFirst { case (HeapMemoryUsed, b) => b } - val committed = metrics collectFirst { case (HeapMemoryCommitted, b) => b } - metrics foreach { + val metrics = sample.metrics.collect { case m => (m.name, m.value) } + val used = metrics.collectFirst { case (HeapMemoryUsed, b) => b } + val committed = metrics.collectFirst { case (HeapMemoryCommitted, b) => b } + metrics.foreach { case (SystemLoadAverage, b) => b.doubleValue should be >= (0.0) case (Processors, b) => b.intValue should be >= (0) case (HeapMemoryUsed, b) => b.longValue should be >= (0L) @@ -85,7 +90,7 @@ class MetricsCollectorSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with I } "collect 50 node metrics samples in an acceptable duration" taggedAs LongRunningTest in within(10 seconds) { - (1 to 50) foreach { _ => + (1 to 50).foreach { _ => val sample = collector.sample sample.metrics.size should be >= (3) Thread.sleep(100) diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala index 4f019da715..04c9dc8609 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/TestUtil.scala @@ -51,12 +51,13 @@ case class SimpleSigarProvider(location: String = "native") extends SigarProvide /** * Provide sigar library as static mock. 
*/ -case class MockitoSigarProvider( - pid: Long = 123, - loadAverage: Array[Double] = Array(0.7, 0.3, 0.1), - cpuCombined: Double = 0.5, - cpuStolen: Double = 0.2, - steps: Int = 5) extends SigarProvider with MockitoSugar { +case class MockitoSigarProvider(pid: Long = 123, + loadAverage: Array[Double] = Array(0.7, 0.3, 0.1), + cpuCombined: Double = 0.5, + cpuStolen: Double = 0.2, + steps: Int = 5) + extends SigarProvider + with MockitoSugar { import org.hyperic.sigar._ import org.mockito.Mockito._ @@ -67,7 +68,7 @@ case class MockitoSigarProvider( /** Generate monotonic array from 0 to value. */ def increase(value: Double): Array[Double] = { val delta = value / steps - (0 to steps) map { _ * delta } toArray + (0 to steps).map { _ * delta } toArray } /** Sigar mock instance. */ @@ -76,13 +77,13 @@ case class MockitoSigarProvider( // Note "thenReturn(0)" invocation is consumed in collector construction. val cpuPerc = mock[CpuPerc] - when(cpuPerc.getCombined) thenReturn (0, increase(cpuCombined): _*) - when(cpuPerc.getStolen) thenReturn (0, increase(cpuStolen): _*) + when(cpuPerc.getCombined).thenReturn(0, increase(cpuCombined): _*) + when(cpuPerc.getStolen).thenReturn(0, increase(cpuStolen): _*) val sigar = mock[SigarProxy] - when(sigar.getPid) thenReturn pid - when(sigar.getLoadAverage) thenReturn loadAverage // Constant. - when(sigar.getCpuPerc) thenReturn cpuPerc // Increasing. + when(sigar.getPid).thenReturn(pid) + when(sigar.getLoadAverage).thenReturn(loadAverage) // Constant. + when(sigar.getCpuPerc).thenReturn(cpuPerc) // Increasing. 
sigar } @@ -134,11 +135,10 @@ trait MetricsCollectorFactory { this: AkkaSpec => * */ class MockitoSigarMetricsCollector(system: ActorSystem) - extends SigarMetricsCollector( - Address(if (RARP(system).provider.remoteSettings.Artery.Enabled) "akka" else "akka.tcp", system.name), - MetricsConfig.defaultDecayFactor, - MockitoSigarProvider().createSigarInstance) { -} + extends SigarMetricsCollector( + Address(if (RARP(system).provider.remoteSettings.Artery.Enabled) "akka" else "akka.tcp", system.name), + MetricsConfig.defaultDecayFactor, + MockitoSigarProvider().createSigarInstance) {} /** * Metrics test configurations. @@ -202,17 +202,19 @@ class ClusterMetricsView(system: ExtendedActorSystem) extends Closeable { /** Create actor that subscribes to the cluster eventBus to update current read view state. */ private val eventBusListener: ActorRef = { - system.systemActorOf(Props(new Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] { - override def preStart(): Unit = extension.subscribe(self) - override def postStop(): Unit = extension.unsubscribe(self) - def receive = { - case ClusterMetricsChanged(nodes) => - currentMetricsSet = nodes - collectedMetricsList = nodes :: collectedMetricsList - case _ => - // Ignore. - } - }).withDispatcher(Dispatchers.DefaultDispatcherId).withDeploy(Deploy.local), name = "metrics-event-bus-listener") + system.systemActorOf( + Props(new Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] { + override def preStart(): Unit = extension.subscribe(self) + override def postStop(): Unit = extension.unsubscribe(self) + def receive = { + case ClusterMetricsChanged(nodes) => + currentMetricsSet = nodes + collectedMetricsList = nodes :: collectedMetricsList + case _ => + // Ignore. + } + }).withDispatcher(Dispatchers.DefaultDispatcherId).withDeploy(Deploy.local), + name = "metrics-event-bus-listener") } /** Current cluster metrics. 
*/ diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala index 0e865da070..68ea64a4a8 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/WeightedRouteesSpec.scala @@ -41,8 +41,8 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" val weighted = new WeightedRoutees(routees, a1, weights) weighted(1) should ===(routeeA) - 2 to 4 foreach { weighted(_) should ===(routeeB) } - 5 to 14 foreach { weighted(_) should ===(routeeC) } + (2 to 4).foreach { weighted(_) should ===(routeeB) } + (5 to 14).foreach { weighted(_) should ===(routeeC) } weighted.total should ===(14) } @@ -77,9 +77,9 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" val weighted = new WeightedRoutees(routees, a1, weights) weighted(1) should ===(routeeA) - 2 to 8 foreach { weighted(_) should ===(routeeB) } + (2 to 8).foreach { weighted(_) should ===(routeeB) } // undefined, uses the mean of the weights, i.e. 
4 - 9 to 12 foreach { weighted(_) should ===(routeeC) } + (9 to 12).foreach { weighted(_) should ===(routeeC) } weighted.total should ===(12) } @@ -88,15 +88,15 @@ class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" val routees2 = Vector(testActorRoutee, routeeB, routeeC) val weighted = new WeightedRoutees(routees2, a1, weights) - 1 to 2 foreach { weighted(_) should ===(testActorRoutee) } - 3 to weighted.total foreach { weighted(_) should not be (testActorRoutee) } + (1 to 2).foreach { weighted(_) should ===(testActorRoutee) } + (3 to weighted.total).foreach { weighted(_) should not be (testActorRoutee) } } "not allocate ref with weight zero" in { val weights = Map(a1 -> 0, b1 -> 2, c1 -> 10) val weighted = new WeightedRoutees(routees, a1, weights) - 1 to weighted.total foreach { weighted(_) should not be (routeeA) } + (1 to weighted.total).foreach { weighted(_) should not be (routeeA) } } } diff --git a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala index a0e0d7a62c..fdb474e464 100644 --- a/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala +++ b/akka-cluster-metrics/src/test/scala/akka/cluster/metrics/protobuf/MessageSerializerSpec.scala @@ -10,8 +10,7 @@ import akka.cluster.MemberStatus import akka.cluster.metrics._ import akka.cluster.TestMember -class MessageSerializerSpec extends AkkaSpec( - """ +class MessageSerializerSpec extends AkkaSpec(""" akka.actor.provider = cluster akka.actor.serialize-messages = off akka.actor.allow-java-serialization = off @@ -43,15 +42,16 @@ class MessageSerializerSpec extends AkkaSpec( "be serializable" in { - val metricsGossip = MetricsGossip(Set( - NodeMetrics(a1.address, 4711, Set(Metric("foo", 1.2, None))), - NodeMetrics(b1.address, 4712, Set( - Metric("foo", 2.1, Some(EWMA(value = 100.0, alpha = 0.18))), - Metric("bar1", 
Double.MinPositiveValue, None), - Metric("bar2", Float.MaxValue, None), - Metric("bar3", Int.MaxValue, None), - Metric("bar4", Long.MaxValue, None), - Metric("bar5", BigInt(Long.MaxValue), None))))) + val metricsGossip = MetricsGossip( + Set(NodeMetrics(a1.address, 4711, Set(Metric("foo", 1.2, None))), + NodeMetrics(b1.address, + 4712, + Set(Metric("foo", 2.1, Some(EWMA(value = 100.0, alpha = 0.18))), + Metric("bar1", Double.MinPositiveValue, None), + Metric("bar2", Float.MaxValue, None), + Metric("bar3", Int.MaxValue, None), + Metric("bar4", Long.MaxValue, None), + Metric("bar5", BigInt(Long.MaxValue), None))))) checkSerialization(MetricsGossipEnvelope(a1.address, metricsGossip, true)) @@ -64,7 +64,8 @@ class MessageSerializerSpec extends AkkaSpec( checkSerialization(simplePool) val complicatedPool = AdaptiveLoadBalancingPool( - metricsSelector = MixMetricsSelector(Vector(CpuMetricsSelector, HeapMetricsSelector, SystemLoadAverageMetricsSelector)), + metricsSelector = + MixMetricsSelector(Vector(CpuMetricsSelector, HeapMetricsSelector, SystemLoadAverageMetricsSelector)), nrOfInstances = 7, routerDispatcher = "my-dispatcher", usePoolDispatcher = true) diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingQuery.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingQuery.scala index 18e652810a..09d0afd7ba 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingQuery.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingQuery.scala @@ -17,15 +17,17 @@ sealed trait ClusterShardingQuery * Query the ShardRegion state for the given entity type key. This will get the state of the * local ShardRegion's state. 
*/ -final case class GetShardRegionState(entityTypeKey: EntityTypeKey[_], replyTo: ActorRef[CurrentShardRegionState]) extends ClusterShardingQuery { +final case class GetShardRegionState(entityTypeKey: EntityTypeKey[_], replyTo: ActorRef[CurrentShardRegionState]) + extends ClusterShardingQuery { + /** * Java API * * Query the ShardRegion state for the given entity type key. This will get the state of the * local ShardRegion's state. */ - def this(entityTypeKey: javadsl.EntityTypeKey[_], replyTo: ActorRef[CurrentShardRegionState]) = this(entityTypeKey.asScala, replyTo) + def this(entityTypeKey: javadsl.EntityTypeKey[_], replyTo: ActorRef[CurrentShardRegionState]) = + this(entityTypeKey.asScala, replyTo) } // TODO - GetClusterShardingStats - diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala index 6881a6f5ab..27b7f327e0 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ClusterShardingSettings.scala @@ -33,60 +33,59 @@ object ClusterShardingSettings { apply(system) /** INTERNAL API: Indended only for internal use, it is not recommended to keep converting between the setting types */ - private[akka] def fromUntypedSettings(numberOfShards: Int, untypedSettings: UntypedShardingSettings): ClusterShardingSettings = { - new ClusterShardingSettings( - numberOfShards, - role = untypedSettings.role, - dataCenter = None, - rememberEntities = untypedSettings.rememberEntities, - journalPluginId = untypedSettings.journalPluginId, - snapshotPluginId = untypedSettings.snapshotPluginId, - passivateIdleEntityAfter = untypedSettings.passivateIdleEntityAfter, - stateStoreMode = StateStoreMode.byName(untypedSettings.stateStoreMode), - new 
TuningParameters(untypedSettings.tuningParameters), - new ClusterSingletonManagerSettings( - untypedSettings.coordinatorSingletonSettings.singletonName, - untypedSettings.coordinatorSingletonSettings.role, - untypedSettings.coordinatorSingletonSettings.removalMargin, - untypedSettings.coordinatorSingletonSettings.handOverRetryInterval - ) - ) + private[akka] def fromUntypedSettings(numberOfShards: Int, + untypedSettings: UntypedShardingSettings): ClusterShardingSettings = { + new ClusterShardingSettings(numberOfShards, + role = untypedSettings.role, + dataCenter = None, + rememberEntities = untypedSettings.rememberEntities, + journalPluginId = untypedSettings.journalPluginId, + snapshotPluginId = untypedSettings.snapshotPluginId, + passivateIdleEntityAfter = untypedSettings.passivateIdleEntityAfter, + stateStoreMode = StateStoreMode.byName(untypedSettings.stateStoreMode), + new TuningParameters(untypedSettings.tuningParameters), + new ClusterSingletonManagerSettings( + untypedSettings.coordinatorSingletonSettings.singletonName, + untypedSettings.coordinatorSingletonSettings.role, + untypedSettings.coordinatorSingletonSettings.removalMargin, + untypedSettings.coordinatorSingletonSettings.handOverRetryInterval)) } /** INTERNAL API: Indended only for internal use, it is not recommended to keep converting between the setting types */ private[akka] def toUntypedSettings(settings: ClusterShardingSettings): UntypedShardingSettings = { - new UntypedShardingSettings( - role = settings.role, - rememberEntities = settings.rememberEntities, - journalPluginId = settings.journalPluginId, - snapshotPluginId = settings.snapshotPluginId, - stateStoreMode = settings.stateStoreMode.name, - passivateIdleEntityAfter = settings.passivateIdleEntityAfter, - new UntypedShardingSettings.TuningParameters( - bufferSize = settings.tuningParameters.bufferSize, - coordinatorFailureBackoff = settings.tuningParameters.coordinatorFailureBackoff, - retryInterval = 
settings.tuningParameters.retryInterval, - handOffTimeout = settings.tuningParameters.handOffTimeout, - shardStartTimeout = settings.tuningParameters.shardStartTimeout, - shardFailureBackoff = settings.tuningParameters.shardFailureBackoff, - entityRestartBackoff = settings.tuningParameters.entityRestartBackoff, - rebalanceInterval = settings.tuningParameters.rebalanceInterval, - snapshotAfter = settings.tuningParameters.snapshotAfter, - keepNrOfBatches = settings.tuningParameters.keepNrOfBatches, - leastShardAllocationRebalanceThreshold = settings.tuningParameters.leastShardAllocationRebalanceThreshold, // TODO extract it a bit - leastShardAllocationMaxSimultaneousRebalance = settings.tuningParameters.leastShardAllocationMaxSimultaneousRebalance, - waitingForStateTimeout = settings.tuningParameters.waitingForStateTimeout, - updatingStateTimeout = settings.tuningParameters.updatingStateTimeout, - entityRecoveryStrategy = settings.tuningParameters.entityRecoveryStrategy, - entityRecoveryConstantRateStrategyFrequency = settings.tuningParameters.entityRecoveryConstantRateStrategyFrequency, - entityRecoveryConstantRateStrategyNumberOfEntities = settings.tuningParameters.entityRecoveryConstantRateStrategyNumberOfEntities - ), - new UntypedClusterSingletonManagerSettings( - settings.coordinatorSingletonSettings.singletonName, - settings.coordinatorSingletonSettings.role, - settings.coordinatorSingletonSettings.removalMargin, - settings.coordinatorSingletonSettings.handOverRetryInterval - )) + new UntypedShardingSettings(role = settings.role, + rememberEntities = settings.rememberEntities, + journalPluginId = settings.journalPluginId, + snapshotPluginId = settings.snapshotPluginId, + stateStoreMode = settings.stateStoreMode.name, + passivateIdleEntityAfter = settings.passivateIdleEntityAfter, + new UntypedShardingSettings.TuningParameters( + bufferSize = settings.tuningParameters.bufferSize, + coordinatorFailureBackoff = settings.tuningParameters.coordinatorFailureBackoff, 
+ retryInterval = settings.tuningParameters.retryInterval, + handOffTimeout = settings.tuningParameters.handOffTimeout, + shardStartTimeout = settings.tuningParameters.shardStartTimeout, + shardFailureBackoff = settings.tuningParameters.shardFailureBackoff, + entityRestartBackoff = settings.tuningParameters.entityRestartBackoff, + rebalanceInterval = settings.tuningParameters.rebalanceInterval, + snapshotAfter = settings.tuningParameters.snapshotAfter, + keepNrOfBatches = settings.tuningParameters.keepNrOfBatches, + leastShardAllocationRebalanceThreshold = + settings.tuningParameters.leastShardAllocationRebalanceThreshold, // TODO extract it a bit + leastShardAllocationMaxSimultaneousRebalance = + settings.tuningParameters.leastShardAllocationMaxSimultaneousRebalance, + waitingForStateTimeout = settings.tuningParameters.waitingForStateTimeout, + updatingStateTimeout = settings.tuningParameters.updatingStateTimeout, + entityRecoveryStrategy = settings.tuningParameters.entityRecoveryStrategy, + entityRecoveryConstantRateStrategyFrequency = + settings.tuningParameters.entityRecoveryConstantRateStrategyFrequency, + entityRecoveryConstantRateStrategyNumberOfEntities = + settings.tuningParameters.entityRecoveryConstantRateStrategyNumberOfEntities), + new UntypedClusterSingletonManagerSettings( + settings.coordinatorSingletonSettings.singletonName, + settings.coordinatorSingletonSettings.role, + settings.coordinatorSingletonSettings.removalMargin, + settings.coordinatorSingletonSettings.handOverRetryInterval)) } @@ -98,30 +97,31 @@ object ClusterShardingSettings { def byName(name: String): StateStoreMode = if (name == StateStoreModePersistence.name) StateStoreModePersistence else if (name == StateStoreModeDData.name) StateStoreModeDData - else throw new IllegalArgumentException("Not recognized StateStoreMode, only 'persistence' and 'ddata' are supported.") + else + throw new IllegalArgumentException( + "Not recognized StateStoreMode, only 'persistence' and 'ddata' are 
supported.") } final case object StateStoreModePersistence extends StateStoreMode { override def name = "persistence" } final case object StateStoreModeDData extends StateStoreMode { override def name = "ddata" } // generated using kaze-class - final class TuningParameters private ( - val bufferSize: Int, - val coordinatorFailureBackoff: FiniteDuration, - val entityRecoveryConstantRateStrategyFrequency: FiniteDuration, - val entityRecoveryConstantRateStrategyNumberOfEntities: Int, - val entityRecoveryStrategy: String, - val entityRestartBackoff: FiniteDuration, - val handOffTimeout: FiniteDuration, - val keepNrOfBatches: Int, - val leastShardAllocationMaxSimultaneousRebalance: Int, - val leastShardAllocationRebalanceThreshold: Int, - val rebalanceInterval: FiniteDuration, - val retryInterval: FiniteDuration, - val shardFailureBackoff: FiniteDuration, - val shardStartTimeout: FiniteDuration, - val snapshotAfter: Int, - val updatingStateTimeout: FiniteDuration, - val waitingForStateTimeout: FiniteDuration) { + final class TuningParameters private (val bufferSize: Int, + val coordinatorFailureBackoff: FiniteDuration, + val entityRecoveryConstantRateStrategyFrequency: FiniteDuration, + val entityRecoveryConstantRateStrategyNumberOfEntities: Int, + val entityRecoveryStrategy: String, + val entityRestartBackoff: FiniteDuration, + val handOffTimeout: FiniteDuration, + val keepNrOfBatches: Int, + val leastShardAllocationMaxSimultaneousRebalance: Int, + val leastShardAllocationRebalanceThreshold: Int, + val rebalanceInterval: FiniteDuration, + val retryInterval: FiniteDuration, + val shardFailureBackoff: FiniteDuration, + val shardStartTimeout: FiniteDuration, + val snapshotAfter: Int, + val updatingStateTimeout: FiniteDuration, + val waitingForStateTimeout: FiniteDuration) { def this(untyped: UntypedShardingSettings.TuningParameters) { this( @@ -141,29 +141,33 @@ object ClusterShardingSettings { updatingStateTimeout = untyped.updatingStateTimeout, entityRecoveryStrategy = 
untyped.entityRecoveryStrategy, entityRecoveryConstantRateStrategyFrequency = untyped.entityRecoveryConstantRateStrategyFrequency, - entityRecoveryConstantRateStrategyNumberOfEntities = untyped.entityRecoveryConstantRateStrategyNumberOfEntities - ) + entityRecoveryConstantRateStrategyNumberOfEntities = untyped.entityRecoveryConstantRateStrategyNumberOfEntities) } - require( - entityRecoveryStrategy == "all" || entityRecoveryStrategy == "constant", - s"Unknown 'entity-recovery-strategy' [$entityRecoveryStrategy], valid values are 'all' or 'constant'") + require(entityRecoveryStrategy == "all" || entityRecoveryStrategy == "constant", + s"Unknown 'entity-recovery-strategy' [$entityRecoveryStrategy], valid values are 'all' or 'constant'") def withBufferSize(value: Int): TuningParameters = copy(bufferSize = value) def withCoordinatorFailureBackoff(value: FiniteDuration): TuningParameters = copy(coordinatorFailureBackoff = value) - def withCoordinatorFailureBackoff(value: java.time.Duration): TuningParameters = withCoordinatorFailureBackoff(value.asScala) - def withEntityRecoveryConstantRateStrategyFrequency(value: FiniteDuration): TuningParameters = copy(entityRecoveryConstantRateStrategyFrequency = value) - def withEntityRecoveryConstantRateStrategyFrequency(value: java.time.Duration): TuningParameters = withEntityRecoveryConstantRateStrategyFrequency(value.asScala) - def withEntityRecoveryConstantRateStrategyNumberOfEntities(value: Int): TuningParameters = copy(entityRecoveryConstantRateStrategyNumberOfEntities = value) + def withCoordinatorFailureBackoff(value: java.time.Duration): TuningParameters = + withCoordinatorFailureBackoff(value.asScala) + def withEntityRecoveryConstantRateStrategyFrequency(value: FiniteDuration): TuningParameters = + copy(entityRecoveryConstantRateStrategyFrequency = value) + def withEntityRecoveryConstantRateStrategyFrequency(value: java.time.Duration): TuningParameters = + withEntityRecoveryConstantRateStrategyFrequency(value.asScala) + 
def withEntityRecoveryConstantRateStrategyNumberOfEntities(value: Int): TuningParameters = + copy(entityRecoveryConstantRateStrategyNumberOfEntities = value) def withEntityRecoveryStrategy(value: java.lang.String): TuningParameters = copy(entityRecoveryStrategy = value) def withEntityRestartBackoff(value: FiniteDuration): TuningParameters = copy(entityRestartBackoff = value) def withEntityRestartBackoff(value: java.time.Duration): TuningParameters = withEntityRestartBackoff(value.asScala) def withHandOffTimeout(value: FiniteDuration): TuningParameters = copy(handOffTimeout = value) def withHandOffTimeout(value: java.time.Duration): TuningParameters = withHandOffTimeout(value.asScala) def withKeepNrOfBatches(value: Int): TuningParameters = copy(keepNrOfBatches = value) - def withLeastShardAllocationMaxSimultaneousRebalance(value: Int): TuningParameters = copy(leastShardAllocationMaxSimultaneousRebalance = value) - def withLeastShardAllocationRebalanceThreshold(value: Int): TuningParameters = copy(leastShardAllocationRebalanceThreshold = value) + def withLeastShardAllocationMaxSimultaneousRebalance(value: Int): TuningParameters = + copy(leastShardAllocationMaxSimultaneousRebalance = value) + def withLeastShardAllocationRebalanceThreshold(value: Int): TuningParameters = + copy(leastShardAllocationRebalanceThreshold = value) def withRebalanceInterval(value: FiniteDuration): TuningParameters = copy(rebalanceInterval = value) def withRebalanceInterval(value: java.time.Duration): TuningParameters = withRebalanceInterval(value.asScala) def withRetryInterval(value: FiniteDuration): TuningParameters = copy(retryInterval = value) @@ -176,43 +180,45 @@ object ClusterShardingSettings { def withUpdatingStateTimeout(value: FiniteDuration): TuningParameters = copy(updatingStateTimeout = value) def withUpdatingStateTimeout(value: java.time.Duration): TuningParameters = withUpdatingStateTimeout(value.asScala) def withWaitingForStateTimeout(value: FiniteDuration): TuningParameters = 
copy(waitingForStateTimeout = value) - def withWaitingForStateTimeout(value: java.time.Duration): TuningParameters = withWaitingForStateTimeout(value.asScala) + def withWaitingForStateTimeout(value: java.time.Duration): TuningParameters = + withWaitingForStateTimeout(value.asScala) private def copy( - bufferSize: Int = bufferSize, - coordinatorFailureBackoff: FiniteDuration = coordinatorFailureBackoff, - entityRecoveryConstantRateStrategyFrequency: FiniteDuration = entityRecoveryConstantRateStrategyFrequency, - entityRecoveryConstantRateStrategyNumberOfEntities: Int = entityRecoveryConstantRateStrategyNumberOfEntities, - entityRecoveryStrategy: java.lang.String = entityRecoveryStrategy, - entityRestartBackoff: FiniteDuration = entityRestartBackoff, - handOffTimeout: FiniteDuration = handOffTimeout, - keepNrOfBatches: Int = keepNrOfBatches, - leastShardAllocationMaxSimultaneousRebalance: Int = leastShardAllocationMaxSimultaneousRebalance, - leastShardAllocationRebalanceThreshold: Int = leastShardAllocationRebalanceThreshold, - rebalanceInterval: FiniteDuration = rebalanceInterval, - retryInterval: FiniteDuration = retryInterval, - shardFailureBackoff: FiniteDuration = shardFailureBackoff, - shardStartTimeout: FiniteDuration = shardStartTimeout, - snapshotAfter: Int = snapshotAfter, - updatingStateTimeout: FiniteDuration = updatingStateTimeout, - waitingForStateTimeout: FiniteDuration = waitingForStateTimeout): TuningParameters = new TuningParameters( - bufferSize = bufferSize, - coordinatorFailureBackoff = coordinatorFailureBackoff, - entityRecoveryConstantRateStrategyFrequency = entityRecoveryConstantRateStrategyFrequency, - entityRecoveryConstantRateStrategyNumberOfEntities = entityRecoveryConstantRateStrategyNumberOfEntities, - entityRecoveryStrategy = entityRecoveryStrategy, - entityRestartBackoff = entityRestartBackoff, - handOffTimeout = handOffTimeout, - keepNrOfBatches = keepNrOfBatches, - leastShardAllocationMaxSimultaneousRebalance = 
leastShardAllocationMaxSimultaneousRebalance, - leastShardAllocationRebalanceThreshold = leastShardAllocationRebalanceThreshold, - rebalanceInterval = rebalanceInterval, - retryInterval = retryInterval, - shardFailureBackoff = shardFailureBackoff, - shardStartTimeout = shardStartTimeout, - snapshotAfter = snapshotAfter, - updatingStateTimeout = updatingStateTimeout, - waitingForStateTimeout = waitingForStateTimeout) + bufferSize: Int = bufferSize, + coordinatorFailureBackoff: FiniteDuration = coordinatorFailureBackoff, + entityRecoveryConstantRateStrategyFrequency: FiniteDuration = entityRecoveryConstantRateStrategyFrequency, + entityRecoveryConstantRateStrategyNumberOfEntities: Int = entityRecoveryConstantRateStrategyNumberOfEntities, + entityRecoveryStrategy: java.lang.String = entityRecoveryStrategy, + entityRestartBackoff: FiniteDuration = entityRestartBackoff, + handOffTimeout: FiniteDuration = handOffTimeout, + keepNrOfBatches: Int = keepNrOfBatches, + leastShardAllocationMaxSimultaneousRebalance: Int = leastShardAllocationMaxSimultaneousRebalance, + leastShardAllocationRebalanceThreshold: Int = leastShardAllocationRebalanceThreshold, + rebalanceInterval: FiniteDuration = rebalanceInterval, + retryInterval: FiniteDuration = retryInterval, + shardFailureBackoff: FiniteDuration = shardFailureBackoff, + shardStartTimeout: FiniteDuration = shardStartTimeout, + snapshotAfter: Int = snapshotAfter, + updatingStateTimeout: FiniteDuration = updatingStateTimeout, + waitingForStateTimeout: FiniteDuration = waitingForStateTimeout): TuningParameters = + new TuningParameters(bufferSize = bufferSize, + coordinatorFailureBackoff = coordinatorFailureBackoff, + entityRecoveryConstantRateStrategyFrequency = entityRecoveryConstantRateStrategyFrequency, + entityRecoveryConstantRateStrategyNumberOfEntities = + entityRecoveryConstantRateStrategyNumberOfEntities, + entityRecoveryStrategy = entityRecoveryStrategy, + entityRestartBackoff = entityRestartBackoff, + handOffTimeout = 
handOffTimeout, + keepNrOfBatches = keepNrOfBatches, + leastShardAllocationMaxSimultaneousRebalance = leastShardAllocationMaxSimultaneousRebalance, + leastShardAllocationRebalanceThreshold = leastShardAllocationRebalanceThreshold, + rebalanceInterval = rebalanceInterval, + retryInterval = retryInterval, + shardFailureBackoff = shardFailureBackoff, + shardStartTimeout = shardStartTimeout, + snapshotAfter = snapshotAfter, + updatingStateTimeout = updatingStateTimeout, + waitingForStateTimeout = waitingForStateTimeout) override def toString = s"""TuningParameters($bufferSize,$coordinatorFailureBackoff,$entityRecoveryConstantRateStrategyFrequency,$entityRecoveryConstantRateStrategyNumberOfEntities,$entityRecoveryStrategy,$entityRestartBackoff,$handOffTimeout,$keepNrOfBatches,$leastShardAllocationMaxSimultaneousRebalance,$leastShardAllocationRebalanceThreshold,$rebalanceInterval,$retryInterval,$shardFailureBackoff,$shardStartTimeout,$snapshotAfter,$updatingStateTimeout,$waitingForStateTimeout)""" @@ -244,24 +250,23 @@ object ClusterShardingSettings { * actors. 
* @param tuningParameters additional tuning parameters, see descriptions in reference.conf */ -final class ClusterShardingSettings( - val numberOfShards: Int, - val role: Option[String], - val dataCenter: Option[DataCenter], - val rememberEntities: Boolean, - val journalPluginId: String, - val snapshotPluginId: String, - val passivateIdleEntityAfter: FiniteDuration, - val stateStoreMode: ClusterShardingSettings.StateStoreMode, - val tuningParameters: ClusterShardingSettings.TuningParameters, - val coordinatorSingletonSettings: ClusterSingletonManagerSettings) extends NoSerializationVerificationNeeded { +final class ClusterShardingSettings(val numberOfShards: Int, + val role: Option[String], + val dataCenter: Option[DataCenter], + val rememberEntities: Boolean, + val journalPluginId: String, + val snapshotPluginId: String, + val passivateIdleEntityAfter: FiniteDuration, + val stateStoreMode: ClusterShardingSettings.StateStoreMode, + val tuningParameters: ClusterShardingSettings.TuningParameters, + val coordinatorSingletonSettings: ClusterSingletonManagerSettings) + extends NoSerializationVerificationNeeded { import akka.cluster.sharding.typed.ClusterShardingSettings.StateStoreModeDData import akka.cluster.sharding.typed.ClusterShardingSettings.StateStoreModePersistence - require( - stateStoreMode == StateStoreModePersistence || stateStoreMode == StateStoreModeDData, - s"Unknown 'state-store-mode' [$stateStoreMode], " + - s"valid values are '${StateStoreModeDData.name}' or '${StateStoreModePersistence.name}'") + require(stateStoreMode == StateStoreModePersistence || stateStoreMode == StateStoreModeDData, + s"Unknown 'state-store-mode' [$stateStoreMode], " + + s"valid values are '${StateStoreModeDData.name}' or '${StateStoreModePersistence.name}'") /** * INTERNAL API @@ -271,7 +276,7 @@ final class ClusterShardingSettings( @InternalApi private[akka] def shouldHostShard(cluster: Cluster): Boolean = role.forall(cluster.selfMember.roles.contains) && - 
dataCenter.forall(cluster.selfMember.dataCenter.contains) + dataCenter.forall(cluster.selfMember.dataCenter.contains) // no withNumberOfShards because it should be defined in configuration to be able to verify same // value on all nodes with `JoinConfigCompatChecker` @@ -306,28 +311,27 @@ final class ClusterShardingSettings( * The `role` of the `ClusterSingletonManagerSettings` is not used. The `role` of the * coordinator singleton will be the same as the `role` of `ClusterShardingSettings`. */ - def withCoordinatorSingletonSettings(coordinatorSingletonSettings: ClusterSingletonManagerSettings): ClusterShardingSettings = + def withCoordinatorSingletonSettings( + coordinatorSingletonSettings: ClusterSingletonManagerSettings): ClusterShardingSettings = copy(coordinatorSingletonSettings = coordinatorSingletonSettings) - private def copy( - role: Option[String] = role, - dataCenter: Option[DataCenter] = dataCenter, - rememberEntities: Boolean = rememberEntities, - journalPluginId: String = journalPluginId, - snapshotPluginId: String = snapshotPluginId, - stateStoreMode: ClusterShardingSettings.StateStoreMode = stateStoreMode, - tuningParameters: ClusterShardingSettings.TuningParameters = tuningParameters, - coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings, - passivateIdleEntityAfter: FiniteDuration = passivateIdleEntityAfter): ClusterShardingSettings = - new ClusterShardingSettings( - numberOfShards, - role, - dataCenter, - rememberEntities, - journalPluginId, - snapshotPluginId, - passivateIdleEntityAfter, - stateStoreMode, - tuningParameters, - coordinatorSingletonSettings) + private def copy(role: Option[String] = role, + dataCenter: Option[DataCenter] = dataCenter, + rememberEntities: Boolean = rememberEntities, + journalPluginId: String = journalPluginId, + snapshotPluginId: String = snapshotPluginId, + stateStoreMode: ClusterShardingSettings.StateStoreMode = stateStoreMode, + tuningParameters: 
ClusterShardingSettings.TuningParameters = tuningParameters, + coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings, + passivateIdleEntityAfter: FiniteDuration = passivateIdleEntityAfter): ClusterShardingSettings = + new ClusterShardingSettings(numberOfShards, + role, + dataCenter, + rememberEntities, + journalPluginId, + snapshotPluginId, + passivateIdleEntityAfter, + stateStoreMode, + tuningParameters, + coordinatorSingletonSettings) } diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingMessageExtractor.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingMessageExtractor.scala index 395d5bc16b..98555ccd71 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingMessageExtractor.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/ShardingMessageExtractor.scala @@ -23,10 +23,7 @@ object ShardingMessageExtractor { /** * Scala API: Create a message extractor for a protocol where the entity id is available in each message. 
*/ - def noEnvelope[M]( - numberOfShards: Int, - stopMessage: M)( - extractEntityId: M => String): ShardingMessageExtractor[M, M] = + def noEnvelope[M](numberOfShards: Int, stopMessage: M)(extractEntityId: M => String): ShardingMessageExtractor[M, M] = new HashCodeNoEnvelopeMessageExtractor[M](numberOfShards) { def entityId(message: M) = extractEntityId(message) } @@ -72,9 +69,8 @@ abstract class ShardingMessageExtractor[E, M] { * * @tparam M The type of message accepted by the entity actor */ -final class HashCodeMessageExtractor[M]( - val numberOfShards: Int) - extends ShardingMessageExtractor[ShardingEnvelope[M], M] { +final class HashCodeMessageExtractor[M](val numberOfShards: Int) + extends ShardingMessageExtractor[ShardingEnvelope[M], M] { override def entityId(envelope: ShardingEnvelope[M]): String = envelope.entityId override def shardId(entityId: String): String = HashCodeMessageExtractor.shardId(entityId, numberOfShards) @@ -89,9 +85,7 @@ final class HashCodeMessageExtractor[M]( * * @tparam M The type of message accepted by the entity actor */ -abstract class HashCodeNoEnvelopeMessageExtractor[M]( - val numberOfShards: Int) - extends ShardingMessageExtractor[M, M] { +abstract class HashCodeNoEnvelopeMessageExtractor[M](val numberOfShards: Int) extends ShardingMessageExtractor[M, M] { override def shardId(entityId: String): String = HashCodeMessageExtractor.shardId(entityId, numberOfShards) override final def unwrapMessage(message: M): M = message @@ -110,4 +104,3 @@ abstract class HashCodeNoEnvelopeMessageExtractor[M]( * and have the message types themselves carry identifiers. 
*/ final case class ShardingEnvelope[M](entityId: String, message: M) // TODO think if should remain a case class - diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala index 53f22e9c09..325d854d3b 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ClusterShardingImpl.scala @@ -49,7 +49,7 @@ import akka.util.Timeout * Other messages are delegated to the given `ShardingMessageExtractor`. */ @InternalApi private[akka] class ExtractorAdapter[E, M](delegate: ShardingMessageExtractor[E, M]) - extends ShardingMessageExtractor[Any, M] { + extends ShardingMessageExtractor[Any, M] { override def entityId(message: Any): String = { message match { case ShardingEnvelope(entityId, _) => entityId //also covers UntypedStartEntity in ShardingEnvelope @@ -80,6 +80,7 @@ import akka.util.Timeout * INTERNAL API */ @InternalApi private[akka] object EntityTypeKeyImpl { + /** * Default separator character used for concatenating EntityTypeKey with entityId to construct unique persistenceId. * This must be same as in Lagom's `scaladsl.PersistentEntity`, for compatibility. 
No separator is used @@ -92,17 +93,22 @@ import akka.util.Timeout /** * INTERNAL API */ -@InternalApi private[akka] final case class EntityTypeKeyImpl[T](name: String, messageClassName: String, - entityIdSeparator: String = EntityTypeKeyImpl.EntityIdSeparator) - extends javadsl.EntityTypeKey[T] with scaladsl.EntityTypeKey[T] { +@InternalApi private[akka] final case class EntityTypeKeyImpl[T](name: String, + messageClassName: String, + entityIdSeparator: String = + EntityTypeKeyImpl.EntityIdSeparator) + extends javadsl.EntityTypeKey[T] + with scaladsl.EntityTypeKey[T] { if (!entityIdSeparator.isEmpty && name.contains(entityIdSeparator)) - throw new IllegalArgumentException(s"EntityTypeKey.name [$name] contains [$entityIdSeparator] which is " + + throw new IllegalArgumentException( + s"EntityTypeKey.name [$name] contains [$entityIdSeparator] which is " + "a reserved character") override def persistenceIdFrom(entityId: String): PersistenceId = { if (!entityIdSeparator.isEmpty && entityId.contains(entityIdSeparator)) - throw new IllegalArgumentException(s"entityId [$entityId] contains [$entityIdSeparator] which is " + + throw new IllegalArgumentException( + s"entityId [$entityId] contains [$entityIdSeparator] which is " + "a reserved character") PersistenceId(name + entityIdSeparator + entityId) @@ -116,11 +122,13 @@ import akka.util.Timeout /** INTERNAL API */ @InternalApi private[akka] final class ClusterShardingImpl(system: ActorSystem[_]) - extends javadsl.ClusterSharding with scaladsl.ClusterSharding { + extends javadsl.ClusterSharding + with scaladsl.ClusterSharding { import akka.actor.typed.scaladsl.adapter._ - require(system.isInstanceOf[ActorSystemAdapter[_]], "only adapted untyped actor systems can be used for cluster features") + require(system.isInstanceOf[ActorSystemAdapter[_]], + "only adapted untyped actor systems can be used for cluster features") private val cluster = Cluster(system) private val untypedSystem: ExtendedActorSystem = 
system.toUntyped.asInstanceOf[ExtendedActorSystem] @@ -130,7 +138,8 @@ import akka.util.Timeout // typeKey.name to messageClassName private val regions: ConcurrentHashMap[String, String] = new ConcurrentHashMap private val proxies: ConcurrentHashMap[String, String] = new ConcurrentHashMap - private val shardCommandActors: ConcurrentHashMap[String, ActorRef[scaladsl.ClusterSharding.ShardCommand]] = new ConcurrentHashMap + private val shardCommandActors: ConcurrentHashMap[String, ActorRef[scaladsl.ClusterSharding.ShardCommand]] = + new ConcurrentHashMap // scaladsl impl override def init[M, E](entity: scaladsl.Entity[M, E]): ActorRef[E] = { @@ -144,35 +153,39 @@ import akka.util.Timeout case Some(e) => e }).asInstanceOf[ShardingMessageExtractor[E, M]] - internalInit(entity.createBehavior, entity.entityProps, entity.typeKey, - entity.stopMessage, settings, extractor, entity.allocationStrategy) + internalInit(entity.createBehavior, + entity.entityProps, + entity.typeKey, + entity.stopMessage, + settings, + extractor, + entity.allocationStrategy) } // javadsl impl override def init[M, E](entity: javadsl.Entity[M, E]): ActorRef[E] = { import scala.compat.java8.OptionConverters._ - init(new scaladsl.Entity( - createBehavior = (ctx: EntityContext) => Behaviors.setup[M] { actorContext => - entity.createBehavior( - new javadsl.EntityContext[M](ctx.entityId, ctx.shard, actorContext.asJava)) - }, - typeKey = entity.typeKey.asScala, - stopMessage = entity.stopMessage.asScala, - entityProps = entity.entityProps, - settings = entity.settings.asScala, - messageExtractor = entity.messageExtractor.asScala, - allocationStrategy = entity.allocationStrategy.asScala - )) + init( + new scaladsl.Entity(createBehavior = (ctx: EntityContext) => + Behaviors.setup[M] { actorContext => + entity.createBehavior( + new javadsl.EntityContext[M](ctx.entityId, ctx.shard, actorContext.asJava)) + }, + typeKey = entity.typeKey.asScala, + stopMessage = entity.stopMessage.asScala, + entityProps = 
entity.entityProps, + settings = entity.settings.asScala, + messageExtractor = entity.messageExtractor.asScala, + allocationStrategy = entity.allocationStrategy.asScala)) } - private def internalInit[M, E]( - behavior: EntityContext => Behavior[M], - entityProps: Props, - typeKey: scaladsl.EntityTypeKey[M], - stopMessage: Option[M], - settings: ClusterShardingSettings, - extractor: ShardingMessageExtractor[E, M], - allocationStrategy: Option[ShardAllocationStrategy]): ActorRef[E] = { + private def internalInit[M, E](behavior: EntityContext => Behavior[M], + entityProps: Props, + typeKey: scaladsl.EntityTypeKey[M], + stopMessage: Option[M], + settings: ClusterShardingSettings, + extractor: ShardingMessageExtractor[E, M], + allocationStrategy: Option[ShardAllocationStrategy]): ActorRef[E] = { val extractorAdapter = new ExtractorAdapter(extractor) val extractEntityId: ShardRegion.ExtractEntityId = { @@ -192,16 +205,22 @@ import akka.util.Timeout log.info("Starting Shard Region [{}]...", typeKey.name) val shardCommandDelegator: ActorRef[scaladsl.ClusterSharding.ShardCommand] = - shardCommandActors.computeIfAbsent( - typeKey.name, - new java.util.function.Function[String, ActorRef[scaladsl.ClusterSharding.ShardCommand]] { - override def apply(t: String): ActorRef[scaladsl.ClusterSharding.ShardCommand] = { - // using untyped.systemActorOf to avoid the Future[ActorRef] - system.toUntyped.asInstanceOf[ExtendedActorSystem].systemActorOf( - PropsAdapter(ShardCommandActor.behavior(stopMessage.getOrElse(PoisonPill))), - URLEncoder.encode(typeKey.name, ByteString.UTF_8) + "ShardCommandDelegator") - } - }) + shardCommandActors.computeIfAbsent(typeKey.name, + new java.util.function.Function[ + String, + ActorRef[scaladsl.ClusterSharding.ShardCommand]] { + override def apply( + t: String): ActorRef[scaladsl.ClusterSharding.ShardCommand] = { + // using untyped.systemActorOf to avoid the Future[ActorRef] + system.toUntyped + .asInstanceOf[ExtendedActorSystem] + .systemActorOf( + 
PropsAdapter( + ShardCommandActor.behavior(stopMessage.getOrElse(PoisonPill))), + URLEncoder + .encode(typeKey.name, ByteString.UTF_8) + "ShardCommandDelegator") + } + }) def poisonPillInterceptor(behv: Behavior[M]): Behavior[M] = { stopMessage match { @@ -214,24 +233,25 @@ import akka.util.Timeout val behv = behavior(new EntityContext(entityId, shardCommandDelegator)) PropsAdapter(poisonPillInterceptor(behv), entityProps) } - untypedSharding.internalStart( - typeKey.name, - untypedEntityPropsFactory, - ClusterShardingSettings.toUntypedSettings(settings), - extractEntityId, - extractShardId, - allocationStrategy.getOrElse(defaultShardAllocationStrategy(settings)), - stopMessage.getOrElse(PoisonPill)) + untypedSharding.internalStart(typeKey.name, + untypedEntityPropsFactory, + ClusterShardingSettings.toUntypedSettings(settings), + extractEntityId, + extractShardId, + allocationStrategy.getOrElse(defaultShardAllocationStrategy(settings)), + stopMessage.getOrElse(PoisonPill)) } else { log.info("Starting Shard Region Proxy [{}] (no actors will be hosted on this node) " + - "for role [{}] and dataCenter [{}] ...", typeKey.name, settings.role, settings.dataCenter) + "for role [{}] and dataCenter [{}] ...", + typeKey.name, + settings.role, + settings.dataCenter) - untypedSharding.startProxy( - typeKey.name, - settings.role, - dataCenter = settings.dataCenter, - extractEntityId, - extractShardId) + untypedSharding.startProxy(typeKey.name, + settings.role, + dataCenter = settings.dataCenter, + extractEntityId, + extractShardId) } val messageClassName = typeKey.asInstanceOf[EntityTypeKeyImpl[M]].messageClassName @@ -248,13 +268,17 @@ import akka.util.Timeout } override def entityRefFor[M](typeKey: scaladsl.EntityTypeKey[M], entityId: String): scaladsl.EntityRef[M] = { - new EntityRefImpl[M](untypedSharding.shardRegion(typeKey.name), entityId, - typeKey.asInstanceOf[EntityTypeKeyImpl[M]], system.scheduler) + new EntityRefImpl[M](untypedSharding.shardRegion(typeKey.name), + 
entityId, + typeKey.asInstanceOf[EntityTypeKeyImpl[M]], + system.scheduler) } override def entityRefFor[M](typeKey: javadsl.EntityTypeKey[M], entityId: String): javadsl.EntityRef[M] = { - new EntityRefImpl[M](untypedSharding.shardRegion(typeKey.name), entityId, - typeKey.asInstanceOf[EntityTypeKeyImpl[M]], system.scheduler) + new EntityRefImpl[M](untypedSharding.shardRegion(typeKey.name), + entityId, + typeKey.asInstanceOf[EntityTypeKeyImpl[M]], + system.scheduler) } override def defaultShardAllocationStrategy(settings: ClusterShardingSettings): ShardAllocationStrategy = { @@ -274,9 +298,13 @@ import akka.util.Timeout /** * INTERNAL API */ -@InternalApi private[akka] final class EntityRefImpl[M](shardRegion: akka.actor.ActorRef, entityId: String, - typeKey: EntityTypeKeyImpl[M], scheduler: Scheduler) - extends javadsl.EntityRef[M] with scaladsl.EntityRef[M] with InternalRecipientRef[M] { +@InternalApi private[akka] final class EntityRefImpl[M](shardRegion: akka.actor.ActorRef, + entityId: String, + typeKey: EntityTypeKeyImpl[M], + scheduler: Scheduler) + extends javadsl.EntityRef[M] + with scaladsl.EntityRef[M] + with InternalRecipientRef[M] { override def tell(msg: M): Unit = shardRegion ! 
ShardingEnvelope(entityId, msg) @@ -300,21 +328,20 @@ import akka.util.Timeout // Note: _promiseRef mustn't have a type pattern, since it can be null private[this] val (_ref: ActorRef[U], _future: Future[U], _promiseRef) = if (untyped.isTerminated) - ( - adapt.ActorRefAdapter[U](untyped.provider.deadLetters), - Future.failed[U](new AskTimeoutException( - s"Recipient shard region of [${EntityRefImpl.this}] had already been terminated.")), - null) + (adapt.ActorRefAdapter[U](untyped.provider.deadLetters), + Future.failed[U]( + new AskTimeoutException(s"Recipient shard region of [${EntityRefImpl.this}] had already been terminated.")), + null) else if (timeout.duration.length <= 0) - ( - adapt.ActorRefAdapter[U](untyped.provider.deadLetters), - Future.failed[U](new IllegalArgumentException( - s"Timeout length must be positive, question not sent to [${EntityRefImpl.this}]")), - null - ) + (adapt.ActorRefAdapter[U](untyped.provider.deadLetters), + Future.failed[U]( + new IllegalArgumentException( + s"Timeout length must be positive, question not sent to [${EntityRefImpl.this}]")), + null) else { // note that the real messageClassName will be set afterwards, replyTo pattern - val a = PromiseActorRef(untyped.provider, timeout, targetName = EntityRefImpl.this, messageClassName = "unknown") + val a = + PromiseActorRef(untyped.provider, timeout, targetName = EntityRefImpl.this, messageClassName = "unknown") val b = adapt.ActorRefAdapter[U](a) (b, a.result.future.asInstanceOf[Future[U]], a) } diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingSerializer.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingSerializer.scala index 653ddb4fcd..3edc61e9e9 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingSerializer.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/internal/ShardingSerializer.scala @@ -17,7 
+17,8 @@ import akka.serialization.SerializerWithStringManifest * INTERNAL API */ @InternalApi private[akka] class ShardingSerializer(val system: akka.actor.ExtendedActorSystem) - extends SerializerWithStringManifest with BaseSerializer { + extends SerializerWithStringManifest + with BaseSerializer { private val payloadSupport = new WrappedPayloadSupport(system) diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala index 948cb89969..f7ffa0c651 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/ClusterSharding.scala @@ -213,10 +213,15 @@ object Entity { * @param createBehavior Create the behavior for an entity given a [[EntityContext]] (includes entityId) * @tparam M The type of message the entity accepts */ - def of[M]( - typeKey: EntityTypeKey[M], - createBehavior: JFunction[EntityContext[M], Behavior[M]]): Entity[M, ShardingEnvelope[M]] = { - new Entity(createBehavior, typeKey, Optional.empty(), Props.empty, Optional.empty(), Optional.empty(), Optional.empty()) + def of[M](typeKey: EntityTypeKey[M], + createBehavior: JFunction[EntityContext[M], Behavior[M]]): Entity[M, ShardingEnvelope[M]] = { + new Entity(createBehavior, + typeKey, + Optional.empty(), + Props.empty, + Optional.empty(), + Optional.empty(), + Optional.empty()) } /** @@ -231,18 +236,21 @@ object Entity { * @tparam Command The type of message the entity accepts */ def ofPersistentEntity[Command, Event, State >: Null]( - typeKey: EntityTypeKey[Command], - createPersistentEntity: JFunction[EntityContext[Command], EventSourcedEntity[Command, Event, State]]): Entity[Command, ShardingEnvelope[Command]] = { + typeKey: EntityTypeKey[Command], + createPersistentEntity: JFunction[EntityContext[Command], 
EventSourcedEntity[Command, Event, State]]) + : Entity[Command, ShardingEnvelope[Command]] = { - of(typeKey, new JFunction[EntityContext[Command], Behavior[Command]] { - override def apply(ctx: EntityContext[Command]): Behavior[Command] = { - val persistentEntity = createPersistentEntity(ctx) - if (persistentEntity.entityTypeKey != typeKey) - throw new IllegalArgumentException(s"The [${persistentEntity.entityTypeKey}] of the PersistentEntity " + - s" [${persistentEntity.getClass.getName}] doesn't match expected $typeKey.") - persistentEntity - } - }) + of(typeKey, + new JFunction[EntityContext[Command], Behavior[Command]] { + override def apply(ctx: EntityContext[Command]): Behavior[Command] = { + val persistentEntity = createPersistentEntity(ctx) + if (persistentEntity.entityTypeKey != typeKey) + throw new IllegalArgumentException( + s"The [${persistentEntity.entityTypeKey}] of the PersistentEntity " + + s" [${persistentEntity.getClass.getName}] doesn't match expected $typeKey.") + persistentEntity + } + }) } } @@ -250,14 +258,13 @@ object Entity { /** * Defines how the entity should be created. Used in [[ClusterSharding#init]]. */ -final class Entity[M, E] private ( - val createBehavior: JFunction[EntityContext[M], Behavior[M]], - val typeKey: EntityTypeKey[M], - val stopMessage: Optional[M], - val entityProps: Props, - val settings: Optional[ClusterShardingSettings], - val messageExtractor: Optional[ShardingMessageExtractor[E, M]], - val allocationStrategy: Optional[ShardAllocationStrategy]) { +final class Entity[M, E] private (val createBehavior: JFunction[EntityContext[M], Behavior[M]], + val typeKey: EntityTypeKey[M], + val stopMessage: Optional[M], + val entityProps: Props, + val settings: Optional[ClusterShardingSettings], + val messageExtractor: Optional[ShardingMessageExtractor[E, M]], + val allocationStrategy: Optional[ShardAllocationStrategy]) { /** * [[akka.actor.typed.Props]] of the entity actors, such as dispatcher settings. 
@@ -289,7 +296,13 @@ final class Entity[M, E] private ( * is configured with `akka.cluster.sharding.number-of-shards`. */ def withMessageExtractor[Envelope](newExtractor: ShardingMessageExtractor[Envelope, M]): Entity[M, Envelope] = - new Entity(createBehavior, typeKey, stopMessage, entityProps, settings, Optional.ofNullable(newExtractor), allocationStrategy) + new Entity(createBehavior, + typeKey, + stopMessage, + entityProps, + settings, + Optional.ofNullable(newExtractor), + allocationStrategy) /** * Allocation strategy which decides on which nodes to allocate new shards, @@ -298,14 +311,12 @@ final class Entity[M, E] private ( def withAllocationStrategy(newAllocationStrategy: ShardAllocationStrategy): Entity[M, E] = copy(allocationStrategy = Optional.ofNullable(newAllocationStrategy)) - private def copy( - createBehavior: JFunction[EntityContext[M], Behavior[M]] = createBehavior, - typeKey: EntityTypeKey[M] = typeKey, - stopMessage: Optional[M] = stopMessage, - entityProps: Props = entityProps, - settings: Optional[ClusterShardingSettings] = settings, - allocationStrategy: Optional[ShardAllocationStrategy] = allocationStrategy - ): Entity[M, E] = { + private def copy(createBehavior: JFunction[EntityContext[M], Behavior[M]] = createBehavior, + typeKey: EntityTypeKey[M] = typeKey, + stopMessage: Optional[M] = stopMessage, + entityProps: Props = entityProps, + settings: Optional[ClusterShardingSettings] = settings, + allocationStrategy: Optional[ShardAllocationStrategy] = allocationStrategy): Entity[M, E] = { new Entity(createBehavior, typeKey, stopMessage, entityProps, settings, messageExtractor, allocationStrategy) } @@ -314,10 +325,9 @@ final class Entity[M, E] private ( /** * Parameter to [[Entity.of]] */ -final class EntityContext[M]( - entityId: String, - shard: ActorRef[ClusterSharding.ShardCommand], - actorContext: ActorContext[M]) { +final class EntityContext[M](entityId: String, + shard: ActorRef[ClusterSharding.ShardCommand], + actorContext: 
ActorContext[M]) { def getEntityId: String = entityId @@ -399,7 +409,8 @@ object EntityTypeKey { * * Not for user extension. */ -@DoNotInherit abstract class EntityRef[M] extends RecipientRef[M] { scaladslSelf: scaladsl.EntityRef[M] with InternalRecipientRef[M] => +@DoNotInherit abstract class EntityRef[M] extends RecipientRef[M] { + scaladslSelf: scaladsl.EntityRef[M] with InternalRecipientRef[M] => /** * Send a message to the entity referenced by this EntityRef using *at-most-once* diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/EventSourcedEntity.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/EventSourcedEntity.scala index a9bd0a2d7e..9f7a6deac1 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/EventSourcedEntity.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/javadsl/EventSourcedEntity.scala @@ -20,19 +20,24 @@ import akka.persistence.typed.javadsl.EventSourcedBehavior * [[EntityTypeKey.persistenceIdFrom]]. 
*/ abstract class EventSourcedEntity[Command, Event, State >: Null] private ( - val entityTypeKey: EntityTypeKey[Command], - val entityId: String, - persistenceId: PersistenceId, onPersistFailure: Optional[BackoffSupervisorStrategy]) - extends EventSourcedBehavior[Command, Event, State](persistenceId, onPersistFailure) { + val entityTypeKey: EntityTypeKey[Command], + val entityId: String, + persistenceId: PersistenceId, + onPersistFailure: Optional[BackoffSupervisorStrategy]) + extends EventSourcedBehavior[Command, Event, State](persistenceId, onPersistFailure) { def this(entityTypeKey: EntityTypeKey[Command], entityId: String) = { - this(entityTypeKey, entityId, - persistenceId = entityTypeKey.persistenceIdFrom(entityId), Optional.empty[BackoffSupervisorStrategy]) + this(entityTypeKey, + entityId, + persistenceId = entityTypeKey.persistenceIdFrom(entityId), + Optional.empty[BackoffSupervisorStrategy]) } def this(entityTypeKey: EntityTypeKey[Command], entityId: String, onPersistFailure: BackoffSupervisorStrategy) = { - this(entityTypeKey, entityId, - persistenceId = entityTypeKey.persistenceIdFrom(entityId), Optional.ofNullable(onPersistFailure)) + this(entityTypeKey, + entityId, + persistenceId = entityTypeKey.persistenceIdFrom(entityId), + Optional.ofNullable(onPersistFailure)) } } diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala index fccbd00196..28eaf80d63 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/ClusterSharding.scala @@ -220,23 +220,21 @@ object Entity { * @param createBehavior Create the behavior for an entity given a [[EntityContext]] (includes entityId) * @tparam M The type of message the entity accepts */ - def apply[M]( - typeKey: 
EntityTypeKey[M], - createBehavior: EntityContext => Behavior[M]): Entity[M, ShardingEnvelope[M]] = + def apply[M](typeKey: EntityTypeKey[M], + createBehavior: EntityContext => Behavior[M]): Entity[M, ShardingEnvelope[M]] = new Entity(createBehavior, typeKey, None, Props.empty, None, None, None) } /** * Defines how the entity should be created. Used in [[ClusterSharding#init]]. */ -final class Entity[M, E] private[akka] ( - val createBehavior: EntityContext => Behavior[M], - val typeKey: EntityTypeKey[M], - val stopMessage: Option[M], - val entityProps: Props, - val settings: Option[ClusterShardingSettings], - val messageExtractor: Option[ShardingMessageExtractor[E, M]], - val allocationStrategy: Option[ShardAllocationStrategy]) { +final class Entity[M, E] private[akka] (val createBehavior: EntityContext => Behavior[M], + val typeKey: EntityTypeKey[M], + val stopMessage: Option[M], + val entityProps: Props, + val settings: Option[ClusterShardingSettings], + val messageExtractor: Option[ShardingMessageExtractor[E, M]], + val allocationStrategy: Option[ShardAllocationStrategy]) { /** * [[akka.actor.typed.Props]] of the entity actors, such as dispatcher settings. 
@@ -277,14 +275,12 @@ final class Entity[M, E] private[akka] ( def withAllocationStrategy(newAllocationStrategy: ShardAllocationStrategy): Entity[M, E] = copy(allocationStrategy = Option(newAllocationStrategy)) - private def copy( - createBehavior: EntityContext => Behavior[M] = createBehavior, - typeKey: EntityTypeKey[M] = typeKey, - stopMessage: Option[M] = stopMessage, - entityProps: Props = entityProps, - settings: Option[ClusterShardingSettings] = settings, - allocationStrategy: Option[ShardAllocationStrategy] = allocationStrategy - ): Entity[M, E] = { + private def copy(createBehavior: EntityContext => Behavior[M] = createBehavior, + typeKey: EntityTypeKey[M] = typeKey, + stopMessage: Option[M] = stopMessage, + entityProps: Props = entityProps, + settings: Option[ClusterShardingSettings] = settings, + allocationStrategy: Option[ShardAllocationStrategy] = allocationStrategy): Entity[M, E] = { new Entity(createBehavior, typeKey, stopMessage, entityProps, settings, messageExtractor, allocationStrategy) } @@ -293,9 +289,7 @@ final class Entity[M, E] private[akka] ( /** * Parameter to [[Entity.apply]] */ -final class EntityContext( - val entityId: String, - val shard: ActorRef[ClusterSharding.ShardCommand]) +final class EntityContext(val entityId: String, val shard: ActorRef[ClusterSharding.ShardCommand]) /** Allows starting a specific Sharded Entity by its entity identifier */ object StartEntity { @@ -344,6 +338,7 @@ object StartEntity { } object EntityTypeKey { + /** * Creates an `EntityTypeKey`. The `name` must be unique. */ @@ -449,4 +444,4 @@ object ClusterShardingSetup { * for tests that need to replace extension with stub/mock implementations. 
*/ final class ClusterShardingSetup(createExtension: java.util.function.Function[ActorSystem[_], ClusterSharding]) - extends ExtensionSetup[ClusterSharding](ClusterSharding, createExtension) + extends ExtensionSetup[ClusterSharding](ClusterSharding, createExtension) diff --git a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/EventSourcedEntity.scala b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/EventSourcedEntity.scala index 933aa1f57f..012c342db7 100644 --- a/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/EventSourcedEntity.scala +++ b/akka-cluster-sharding-typed/src/main/scala/akka/cluster/sharding/typed/scaladsl/EventSourcedEntity.scala @@ -19,11 +19,10 @@ object EventSourcedEntity { * automatically from the [[EntityTypeKey]] and `entityId` constructor parameters by using * [[EntityTypeKey.persistenceIdFrom]]. */ - def apply[Command, Event, State]( - entityTypeKey: EntityTypeKey[Command], - entityId: String, - emptyState: State, - commandHandler: (State, Command) => Effect[Event, State], - eventHandler: (State, Event) => State): EventSourcedBehavior[Command, Event, State] = + def apply[Command, Event, State](entityTypeKey: EntityTypeKey[Command], + entityId: String, + emptyState: State, + commandHandler: (State, Command) => Effect[Event, State], + eventHandler: (State, Event) => State): EventSourcedBehavior[Command, Event, State] = EventSourcedBehavior(entityTypeKey.persistenceIdFrom(entityId), emptyState, commandHandler, eventHandler) } diff --git a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala index 9791c91436..982014a5ab 100644 --- a/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala +++ 
b/akka-cluster-sharding-typed/src/multi-jvm/scala/akka/cluster/sharding/typed/MultiDcClusterShardingSpec.scala @@ -23,25 +23,20 @@ object MultiDcClusterShardingSpecConfig extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig( - ConfigFactory.parseString( - """ + commonConfig(ConfigFactory.parseString(""" akka.loglevel = DEBUG akka.cluster.sharding { number-of-shards = 10 # First is likely to be ignored as shard coordinator not ready retry-interval = 0.2s } - """).withFallback( - MultiNodeClusterSpec.clusterConfig)) + """).withFallback(MultiNodeClusterSpec.clusterConfig)) - nodeConfig(first, second)(ConfigFactory.parseString( - """ + nodeConfig(first, second)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc1" """)) - nodeConfig(third, fourth)(ConfigFactory.parseString( - """ + nodeConfig(third, fourth)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc2" """)) @@ -53,8 +48,10 @@ class MultiDcClusterShardingMultiJvmNode2 extends MultiDcClusterShardingSpec class MultiDcClusterShardingMultiJvmNode3 extends MultiDcClusterShardingSpec class MultiDcClusterShardingMultiJvmNode4 extends MultiDcClusterShardingSpec -abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterShardingSpecConfig) - with MultiNodeTypedClusterSpec with ScalaFutures { +abstract class MultiDcClusterShardingSpec + extends MultiNodeSpec(MultiDcClusterShardingSpecConfig) + with MultiNodeTypedClusterSpec + with ScalaFutures { import MultiDcClusterShardingSpecConfig._ import MultiDcClusterActors._ @@ -69,8 +66,7 @@ abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterSh "init sharding" in { val sharding = ClusterSharding(typedSystem) - val shardRegion: ActorRef[ShardingEnvelope[PingProtocol]] = sharding.init( - Entity(typeKey, _ => multiDcPinger)) + val shardRegion: ActorRef[ShardingEnvelope[PingProtocol]] = sharding.init(Entity(typeKey, _ => 
multiDcPinger)) val probe = TestProbe[Pong] shardRegion ! ShardingEnvelope(entityId, Ping(probe.ref)) probe.expectMessage(max = 10.seconds, Pong(cluster.selfMember.dataCenter)) @@ -97,10 +93,7 @@ abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterSh "be able to message cross dc via proxy" in { runOn(first, second) { val proxy: ActorRef[ShardingEnvelope[PingProtocol]] = ClusterSharding(typedSystem).init( - Entity( - typeKey, - _ => multiDcPinger) - .withSettings(ClusterShardingSettings(typedSystem).withDataCenter("dc2"))) + Entity(typeKey, _ => multiDcPinger).withSettings(ClusterShardingSettings(typedSystem).withDataCenter("dc2"))) val probe = TestProbe[Pong] proxy ! ShardingEnvelope(entityId, Ping(probe.ref)) probe.expectMessage(remainingOrDefault, Pong("dc2")) diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/JoinConfigCompatCheckerClusterShardingSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/JoinConfigCompatCheckerClusterShardingSpec.scala index 40259cd27b..8ebe1441b8 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/JoinConfigCompatCheckerClusterShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/JoinConfigCompatCheckerClusterShardingSpec.scala @@ -31,12 +31,10 @@ object JoinConfig { """).withFallback(AkkaSpec.testConf) def joinConfig(configured: Int): Config = - ConfigFactory.parseString(s"$Key = $configured") - .withFallback(baseConfig) + ConfigFactory.parseString(s"$Key = $configured").withFallback(baseConfig) } -abstract class JoinConfigCompatCheckerClusterShardingSpec extends AkkaSpec( - JoinConfig.joinConfig(JoinConfig.Shards)) { +abstract class JoinConfigCompatCheckerClusterShardingSpec extends AkkaSpec(JoinConfig.joinConfig(JoinConfig.Shards)) { protected val duration = 5.seconds @@ -61,8 +59,7 @@ abstract class JoinConfigCompatCheckerClusterShardingSpec extends AkkaSpec( } 
protected def configured(system: ActorSystem): Int = - Try(system.settings.config.getInt(JoinConfig.Key)) - .getOrElse(0) + Try(system.settings.config.getInt(JoinConfig.Key)).getOrElse(0) } diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala index 2c014ea961..33e394764d 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingPersistenceSpec.scala @@ -34,8 +34,7 @@ import com.typesafe.config.ConfigFactory import org.scalatest.WordSpecLike object ClusterShardingPersistenceSpec { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.loglevel = INFO #akka.persistence.typed.log-stashing = on @@ -50,8 +49,12 @@ object ClusterShardingPersistenceSpec { sealed trait Command final case class Add(s: String) extends Command - final case class AddWithConfirmation(s: String)(override val replyTo: ActorRef[Done]) extends Command with ExpectingReply[Done] - final case class PassivateAndPersist(s: String)(override val replyTo: ActorRef[Done]) extends Command with ExpectingReply[Done] + final case class AddWithConfirmation(s: String)(override val replyTo: ActorRef[Done]) + extends Command + with ExpectingReply[Done] + final case class PassivateAndPersist(s: String)(override val replyTo: ActorRef[Done]) + extends Command + with ExpectingReply[Done] final case class Get(replyTo: ActorRef[String]) extends Command final case class Echo(msg: String, replyTo: ActorRef[String]) extends Command final case class Block(latch: CountDownLatch) extends Command @@ -69,7 +72,6 @@ object ClusterShardingPersistenceSpec { def persistentEntity(entityId: String, shard: ActorRef[ShardCommand]): 
Behavior[Command] = { Behaviors.setup { ctx => - entityActorRefs.get(entityId) match { case null => case promise => promise.trySuccess(ctx.self.unsafeUpcast) @@ -78,54 +80,53 @@ object ClusterShardingPersistenceSpec { // transient state (testing purpose) var stashing = false - EventSourcedEntity[Command, String, String]( - entityTypeKey = typeKey, - entityId = entityId, - emptyState = "", - commandHandler = (state, cmd) => cmd match { - case Add(s) => - if (stashing) - Effect.stash() - else - Effect.persist(s) + EventSourcedEntity[Command, String, String](entityTypeKey = typeKey, + entityId = entityId, + emptyState = "", + commandHandler = (state, cmd) => + cmd match { + case Add(s) => + if (stashing) + Effect.stash() + else + Effect.persist(s) - case cmd @ AddWithConfirmation(s) => - if (stashing) - Effect.stash() - else - Effect.persist(s) - .thenReply(cmd)(_ => Done) + case cmd @ AddWithConfirmation(s) => + if (stashing) + Effect.stash() + else + Effect.persist(s).thenReply(cmd)(_ => Done) - case Get(replyTo) => - replyTo ! s"$entityId:$state" - Effect.none + case Get(replyTo) => + replyTo ! s"$entityId:$state" + Effect.none - case cmd @ PassivateAndPersist(s) => - shard ! Passivate(ctx.self) - Effect.persist(s) - .thenReply(cmd)(_ => Done) + case cmd @ PassivateAndPersist(s) => + shard ! Passivate(ctx.self) + Effect.persist(s).thenReply(cmd)(_ => Done) - case Echo(msg, replyTo) => - Effect.none.thenRun(_ => replyTo ! msg) + case Echo(msg, replyTo) => + Effect.none.thenRun(_ => replyTo ! msg) - case Block(latch) => - latch.await(5, TimeUnit.SECONDS) - Effect.none + case Block(latch) => + latch.await(5, TimeUnit.SECONDS) + Effect.none - case BeginStashingAddCommands => - stashing = true - Effect.none + case BeginStashingAddCommands => + stashing = true + Effect.none - case UnstashAll => - stashing = false - Effect.unstashAll() + case UnstashAll => + stashing = false + Effect.unstashAll() - case UnstashAllAndPassivate => - stashing = false - shard ! 
Passivate(ctx.self) - Effect.unstashAll() - }, - eventHandler = (state, evt) => if (state.isEmpty) evt else state + "|" + evt) + case UnstashAllAndPassivate => + stashing = false + shard ! Passivate(ctx.self) + Effect.unstashAll() + }, + eventHandler = + (state, evt) => if (state.isEmpty) evt else state + "|" + evt) .onRecoveryCompleted { state => ctx.log.debug("onRecoveryCompleted: [{}]", state) lifecycleProbes.get(entityId) match { @@ -137,14 +138,15 @@ object ClusterShardingPersistenceSpec { lifecycleProbes.get(entityId) match { case null => ctx.log.debug("no lifecycleProbe (postStop) for [{}]", entityId) case p => p ! "stopped" - } - ) + }) } } } -class ClusterShardingPersistenceSpec extends ScalaTestWithActorTestKit(ClusterShardingPersistenceSpec.config) with WordSpecLike { +class ClusterShardingPersistenceSpec + extends ScalaTestWithActorTestKit(ClusterShardingPersistenceSpec.config) + with WordSpecLike { import ClusterShardingPersistenceSpec._ private var _entityId = 0 @@ -172,9 +174,7 @@ class ClusterShardingPersistenceSpec extends ScalaTestWithActorTestKit(ClusterSh "Typed cluster sharding with persistent actor" must { - ClusterSharding(system).init(Entity( - typeKey, - ctx => persistentEntity(ctx.entityId, ctx.shard))) + ClusterSharding(system).init(Entity(typeKey, ctx => persistentEntity(ctx.entityId, ctx.shard))) Cluster(system).manager ! 
Join(Cluster(system).selfMember.address) diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala index ce595b875f..b8efc45c12 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingSpec.scala @@ -34,8 +34,7 @@ import org.scalatest.WordSpecLike import akka.util.ccompat.imm._ object ClusterShardingSpec { - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka.actor.provider = cluster // akka.loglevel = debug @@ -135,28 +134,30 @@ object ClusterShardingSpec { val typeKey2 = EntityTypeKey[IdTestProtocol]("no-envelope-shard") def behavior(shard: ActorRef[ClusterSharding.ShardCommand], stopProbe: Option[ActorRef[String]] = None) = - Behaviors.receive[TestProtocol] { - case (ctx, PassivatePlz()) => - shard ! ClusterSharding.Passivate(ctx.self) - Behaviors.same + Behaviors + .receive[TestProtocol] { + case (ctx, PassivatePlz()) => + shard ! ClusterSharding.Passivate(ctx.self) + Behaviors.same - case (_, StopPlz()) => - stopProbe.foreach(_ ! "StopPlz") - Behaviors.stopped + case (_, StopPlz()) => + stopProbe.foreach(_ ! "StopPlz") + Behaviors.stopped - case (ctx, WhoAreYou(replyTo)) => - val address = Cluster(ctx.system).selfMember.address - replyTo ! s"I'm ${ctx.self.path.name} at ${address.host.get}:${address.port.get}" - Behaviors.same + case (ctx, WhoAreYou(replyTo)) => + val address = Cluster(ctx.system).selfMember.address + replyTo ! s"I'm ${ctx.self.path.name} at ${address.host.get}:${address.port.get}" + Behaviors.same - case (_, ReplyPlz(toMe)) => - toMe ! "Hello!" - Behaviors.same - }.receiveSignal { - case (_, PostStop) => - stopProbe.foreach(_ ! 
"PostStop") - Behaviors.same - } + case (_, ReplyPlz(toMe)) => + toMe ! "Hello!" + Behaviors.same + } + .receiveSignal { + case (_, PostStop) => + stopProbe.foreach(_ ! "PostStop") + Behaviors.same + } def behaviorWithId(shard: ActorRef[ClusterSharding.ShardCommand]) = Behaviors.receive[IdTestProtocol] { case (_, IdStopPlz()) => @@ -193,33 +194,24 @@ class ClusterShardingSpec extends ScalaTestWithActorTestKit(ClusterShardingSpec. super.afterAll() } - private val shardingRef1: ActorRef[ShardingEnvelope[TestProtocol]] = sharding.init(Entity( - typeKey, - ctx => behavior(ctx.shard)) - .withStopMessage(StopPlz())) + private val shardingRef1: ActorRef[ShardingEnvelope[TestProtocol]] = + sharding.init(Entity(typeKey, ctx => behavior(ctx.shard)).withStopMessage(StopPlz())) - private val shardingRef2 = sharding2.init(Entity( - typeKey, - ctx => behavior(ctx.shard)) - .withStopMessage(StopPlz())) + private val shardingRef2 = sharding2.init(Entity(typeKey, ctx => behavior(ctx.shard)).withStopMessage(StopPlz())) - private val shardingRef3: ActorRef[IdTestProtocol] = sharding.init(Entity( - typeKey2, - ctx => behaviorWithId(ctx.shard)) - .withMessageExtractor(ShardingMessageExtractor.noEnvelope[IdTestProtocol](10, IdStopPlz()) { - case IdReplyPlz(id, _) => id - case IdWhoAreYou(id, _) => id - case other => throw new IllegalArgumentException(s"Unexpected message $other") - }) - .withStopMessage(IdStopPlz()) - ) + private val shardingRef3: ActorRef[IdTestProtocol] = sharding.init( + Entity(typeKey2, ctx => behaviorWithId(ctx.shard)) + .withMessageExtractor(ShardingMessageExtractor.noEnvelope[IdTestProtocol](10, IdStopPlz()) { + case IdReplyPlz(id, _) => id + case IdWhoAreYou(id, _) => id + case other => throw new IllegalArgumentException(s"Unexpected message $other") + }) + .withStopMessage(IdStopPlz())) - private val shardingRef4 = sharding2.init(Entity( - typeKey2, - ctx => behaviorWithId(ctx.shard)) - .withMessageExtractor(idTestProtocolMessageExtractor) - 
.withStopMessage(IdStopPlz()) - ) + private val shardingRef4 = sharding2.init( + Entity(typeKey2, ctx => behaviorWithId(ctx.shard)) + .withMessageExtractor(idTestProtocolMessageExtractor) + .withStopMessage(IdStopPlz())) def totalEntityCount1(): Int = { import akka.pattern.ask @@ -268,10 +260,8 @@ class ClusterShardingSpec extends ScalaTestWithActorTestKit(ClusterShardingSpec. val p = TestProbe[String]() val typeKey3 = EntityTypeKey[TestProtocol]("passivate-test") - val shardingRef3: ActorRef[ShardingEnvelope[TestProtocol]] = sharding.init(Entity( - typeKey3, - ctx => behavior(ctx.shard, Some(stopProbe.ref))) - .withStopMessage(StopPlz())) + val shardingRef3: ActorRef[ShardingEnvelope[TestProtocol]] = + sharding.init(Entity(typeKey3, ctx => behavior(ctx.shard, Some(stopProbe.ref))).withStopMessage(StopPlz())) shardingRef3 ! ShardingEnvelope(s"test1", ReplyPlz(p.ref)) p.expectMessage("Hello!") @@ -289,9 +279,8 @@ class ClusterShardingSpec extends ScalaTestWithActorTestKit(ClusterShardingSpec. val p = TestProbe[String]() val typeKey4 = EntityTypeKey[TestProtocol]("passivate-test-poison") - val shardingRef4: ActorRef[ShardingEnvelope[TestProtocol]] = sharding.init(Entity( - typeKey4, - ctx => behavior(ctx.shard, Some(stopProbe.ref)))) + val shardingRef4: ActorRef[ShardingEnvelope[TestProtocol]] = + sharding.init(Entity(typeKey4, ctx => behavior(ctx.shard, Some(stopProbe.ref)))) // no StopPlz stopMessage shardingRef4 ! ShardingEnvelope(s"test4", ReplyPlz(p.ref)) @@ -308,10 +297,9 @@ class ClusterShardingSpec extends ScalaTestWithActorTestKit(ClusterShardingSpec. 
"fail if init sharding for already used typeName, but with a different type" in { // sharding has been already initialized with EntityTypeKey[TestProtocol]("envelope-shard") val ex = intercept[Exception] { - sharding.init(Entity( - EntityTypeKey[IdTestProtocol]("envelope-shard"), - ctx => behaviorWithId(ctx.shard)) - .withStopMessage(IdStopPlz())) + sharding.init( + Entity(EntityTypeKey[IdTestProtocol]("envelope-shard"), ctx => behaviorWithId(ctx.shard)) + .withStopMessage(IdStopPlz())) } ex.getMessage should include("already initialized") @@ -325,7 +313,7 @@ class ClusterShardingSpec extends ScalaTestWithActorTestKit(ClusterShardingSpec. charlieRef ! WhoAreYou(p.ref) p.receiveMessage() should startWith("I'm charlie") - charlieRef tell WhoAreYou(p.ref) + charlieRef.tell(WhoAreYou(p.ref)) p.receiveMessage() should startWith("I'm charlie") charlieRef ! StopPlz() @@ -338,7 +326,7 @@ class ClusterShardingSpec extends ScalaTestWithActorTestKit(ClusterShardingSpec. val reply1 = bobRef ? WhoAreYou // TODO document that WhoAreYou(_) would not work reply1.futureValue should startWith("I'm bob") - val reply2 = charlieRef ask WhoAreYou + val reply2 = charlieRef.ask(WhoAreYou) reply2.futureValue should startWith("I'm charlie") bobRef ! StopPlz() @@ -349,22 +337,21 @@ class ClusterShardingSpec extends ScalaTestWithActorTestKit(ClusterShardingSpec. val p = TestProbe[TheReply]() - spawn( - Behaviors.setup[TheReply] { ctx => - // FIXME is the implicit ClassTag difficult to use? - // it works fine when there is a single parameter apply, - // but trouble when more parameters and this doesn't compile - //ctx.ask(aliceRef)(x => WhoAreYou(x)) { - ctx.ask(aliceRef)(WhoAreYou) { - case Success(name) => TheReply(name) - case Failure(ex) => TheReply(ex.getMessage) - } + spawn(Behaviors.setup[TheReply] { ctx => + // FIXME is the implicit ClassTag difficult to use? 
+ // it works fine when there is a single parameter apply, + // but trouble when more parameters and this doesn't compile + //ctx.ask(aliceRef)(x => WhoAreYou(x)) { + ctx.ask(aliceRef)(WhoAreYou) { + case Success(name) => TheReply(name) + case Failure(ex) => TheReply(ex.getMessage) + } - Behaviors.receiveMessage[TheReply] { reply => - p.ref ! reply - Behaviors.same - } - }) + Behaviors.receiveMessage[TheReply] { reply => + p.ref ! reply + Behaviors.same + } + }) p.receiveMessage().s should startWith("I'm alice") @@ -374,10 +361,7 @@ class ClusterShardingSpec extends ScalaTestWithActorTestKit(ClusterShardingSpec. "EntityRef - AskTimeoutException" in { val ignorantKey = EntityTypeKey[TestProtocol]("ignorant") - sharding.init(Entity( - ignorantKey, - _ => Behaviors.ignore[TestProtocol]) - .withStopMessage(StopPlz())) + sharding.init(Entity(ignorantKey, _ => Behaviors.ignore[TestProtocol]).withStopMessage(StopPlz())) val ref = sharding.entityRefFor(ignorantKey, "sloppy") diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala index 43dab1cc02..3de079b938 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/ClusterShardingStateSpec.scala @@ -36,12 +36,9 @@ class ClusterShardingStateSpec extends ScalaTestWithActorTestKit(ClusterSharding probe.expectMessage(CurrentShardRegionState(Set())) val shardingRef: ActorRef[IdTestProtocol] = sharding.init( - Entity( - typeKey, - ctx => ClusterShardingSpec.behaviorWithId(ctx.shard)) + Entity(typeKey, ctx => ClusterShardingSpec.behaviorWithId(ctx.shard)) .withStopMessage(IdStopPlz()) - .withMessageExtractor(idTestProtocolMessageExtractor) - ) + .withMessageExtractor(idTestProtocolMessageExtractor)) 
sharding.shardState ! GetShardRegionState(typeKey, probe.ref) probe.expectMessage(CurrentShardRegionState(Set())) diff --git a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/EntityTypeKeySpec.scala b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/EntityTypeKeySpec.scala index 9a69390812..9bb42ad569 100644 --- a/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/EntityTypeKeySpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/akka/cluster/sharding/typed/scaladsl/EntityTypeKeySpec.scala @@ -16,13 +16,13 @@ class EntityTypeKeySpec extends WordSpec with Matchers { } "support custom entityIdSeparator for compatibility with Lagom's javadsl" in { - EntityTypeKey[String]("MyType").withEntityIdSeparator("") - .persistenceIdFrom("abc") should ===(PersistenceId("MyTypeabc")) + EntityTypeKey[String]("MyType").withEntityIdSeparator("").persistenceIdFrom("abc") should ===( + PersistenceId("MyTypeabc")) } "support custom entityIdSeparator for compatibility with other naming" in { - EntityTypeKey[String]("MyType").withEntityIdSeparator("#/#") - .persistenceIdFrom("abc") should ===(PersistenceId("MyType#/#abc")) + EntityTypeKey[String]("MyType").withEntityIdSeparator("#/#").persistenceIdFrom("abc") should ===( + PersistenceId("MyType#/#abc")) } "not allow | in name because it's the default entityIdSeparator" in { diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleSpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleSpec.scala index 660f970d1e..77bea06316 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleSpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldEventSourcedEntityExampleSpec.scala @@ -13,8 +13,7 @@ import 
com.typesafe.config.ConfigFactory import org.scalatest.WordSpecLike object HelloWorldEventSourcedEntityExampleSpec { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.actor.provider = cluster akka.remote.netty.tcp.port = 0 @@ -25,7 +24,9 @@ object HelloWorldEventSourcedEntityExampleSpec { """) } -class HelloWorldEventSourcedEntityExampleSpec extends ScalaTestWithActorTestKit(HelloWorldEventSourcedEntityExampleSpec.config) with WordSpecLike { +class HelloWorldEventSourcedEntityExampleSpec + extends ScalaTestWithActorTestKit(HelloWorldEventSourcedEntityExampleSpec.config) + with WordSpecLike { import HelloWorldPersistentEntityExample.HelloWorld import HelloWorldPersistentEntityExample.HelloWorld._ @@ -35,9 +36,7 @@ class HelloWorldEventSourcedEntityExampleSpec extends ScalaTestWithActorTestKit( super.beforeAll() Cluster(system).manager ! Join(Cluster(system).selfMember.address) - sharding.init(Entity( - HelloWorld.entityTypeKey, - ctx => HelloWorld.persistentEntity(ctx.entityId))) + sharding.init(Entity(HelloWorld.entityTypeKey, ctx => HelloWorld.persistentEntity(ctx.entityId))) } "HelloWorld example" must { diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala index 664e596e98..cf3b1a66ca 100644 --- a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/HelloWorldPersistentEntityExample.scala @@ -23,9 +23,9 @@ object HelloWorldPersistentEntityExample { // registration at startup private val sharding = ClusterSharding(system) - sharding.init(Entity( - typeKey = HelloWorld.entityTypeKey, - createBehavior = entityContext => HelloWorld.persistentEntity(entityContext.entityId))) + 
sharding.init( + Entity(typeKey = HelloWorld.entityTypeKey, + createBehavior = entityContext => HelloWorld.persistentEntity(entityContext.entityId))) private implicit val askTimeout: Timeout = Timeout(5.seconds) @@ -62,31 +62,28 @@ object HelloWorldPersistentEntityExample { def numberOfPeople: Int = names.size } - private val commandHandler: (KnownPeople, Command) => Effect[Greeted, KnownPeople] = { - (_, cmd) => - cmd match { - case cmd: Greet => greet(cmd) - } + private val commandHandler: (KnownPeople, Command) => Effect[Greeted, KnownPeople] = { (_, cmd) => + cmd match { + case cmd: Greet => greet(cmd) + } } private def greet(cmd: Greet): Effect[Greeted, KnownPeople] = - Effect.persist(Greeted(cmd.whom)) - .thenRun(state => cmd.replyTo ! Greeting(cmd.whom, state.numberOfPeople)) + Effect.persist(Greeted(cmd.whom)).thenRun(state => cmd.replyTo ! Greeting(cmd.whom, state.numberOfPeople)) - private val eventHandler: (KnownPeople, Greeted) => KnownPeople = { - (state, evt) => state.add(evt.whom) + private val eventHandler: (KnownPeople, Greeted) => KnownPeople = { (state, evt) => + state.add(evt.whom) } val entityTypeKey: EntityTypeKey[Command] = EntityTypeKey[Command]("HelloWorld") - def persistentEntity(entityId: String): Behavior[Command] = EventSourcedEntity( - entityTypeKey = entityTypeKey, - entityId = entityId, - emptyState = KnownPeople(Set.empty), - commandHandler, - eventHandler - ) + def persistentEntity(entityId: String): Behavior[Command] = + EventSourcedEntity(entityTypeKey = entityTypeKey, + entityId = entityId, + emptyState = KnownPeople(Set.empty), + commandHandler, + eventHandler) } //#persistent-entity diff --git a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala index f0e4c8e02c..d20959032e 100644 --- 
a/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala +++ b/akka-cluster-sharding-typed/src/test/scala/docs/akka/cluster/sharding/typed/ShardingCompileOnlySpec.scala @@ -46,9 +46,8 @@ object ShardingCompileOnlySpec { //#init val TypeKey = EntityTypeKey[CounterCommand]("Counter") - val shardRegion: ActorRef[ShardingEnvelope[CounterCommand]] = sharding.init(Entity( - typeKey = TypeKey, - createBehavior = ctx => counter(ctx.entityId, 0))) + val shardRegion: ActorRef[ShardingEnvelope[CounterCommand]] = + sharding.init(Entity(typeKey = TypeKey, createBehavior = ctx => counter(ctx.entityId, 0))) //#init //#send @@ -64,9 +63,7 @@ object ShardingCompileOnlySpec { //#persistence val BlogTypeKey = EntityTypeKey[BlogCommand]("BlogPost") - ClusterSharding(system).init(Entity( - typeKey = BlogTypeKey, - createBehavior = ctx => behavior(ctx.entityId))) + ClusterSharding(system).init(Entity(typeKey = BlogTypeKey, createBehavior = ctx => behavior(ctx.entityId))) //#persistence //#counter-passivate @@ -76,7 +73,6 @@ object ShardingCompileOnlySpec { def counter2(shard: ActorRef[ClusterSharding.ShardCommand], entityId: String): Behavior[CounterCommand] = { Behaviors.setup { ctx => - def become(value: Int): Behavior[CounterCommand] = Behaviors.receiveMessage[CounterCommand] { case Increment => @@ -98,10 +94,9 @@ object ShardingCompileOnlySpec { } } - sharding.init(Entity( - typeKey = TypeKey, - createBehavior = ctx => counter2(ctx.shard, ctx.entityId)) - .withStopMessage(GoodByeCounter)) + sharding.init( + Entity(typeKey = TypeKey, createBehavior = ctx => counter2(ctx.shard, ctx.entityId)) + .withStopMessage(GoodByeCounter)) //#counter-passivate } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala index d047a784ce..02e9ac25b0 100755 --- 
a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterSharding.scala @@ -171,11 +171,10 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { private lazy val guardian: ActorRef = { val guardianName: String = system.settings.config.getString("akka.cluster.sharding.guardian-name") - val dispatcher = system.settings.config - .getString("akka.cluster.sharding.use-dispatcher") match { - case "" => Dispatchers.DefaultDispatcherId - case id => id - } + val dispatcher = system.settings.config.getString("akka.cluster.sharding.use-dispatcher") match { + case "" => Dispatchers.DefaultDispatcherId + case id => id + } system.systemActorOf(Props[ClusterShardingGuardian].withDispatcher(dispatcher), guardianName) } @@ -204,16 +203,21 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * for a rebalance or graceful shutdown of a `ShardRegion`, e.g. `PoisonPill`. 
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ - def start( - typeName: String, - entityProps: Props, - settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, - allocationStrategy: ShardAllocationStrategy, - handOffStopMessage: Any): ActorRef = { + def start(typeName: String, + entityProps: Props, + settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, + allocationStrategy: ShardAllocationStrategy, + handOffStopMessage: Any): ActorRef = { - internalStart(typeName, _ => entityProps, settings, extractEntityId, extractShardId, allocationStrategy, handOffStopMessage) + internalStart(typeName, + _ => entityProps, + settings, + extractEntityId, + extractShardId, + allocationStrategy, + handOffStopMessage) } /** @@ -240,36 +244,45 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * for a rebalance or graceful shutdown of a `ShardRegion`, e.g. `PoisonPill`. 
* @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ - def start( - typeName: String, - entityProps: Props, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, - allocationStrategy: ShardAllocationStrategy, - handOffStopMessage: Any): ActorRef = { + def start(typeName: String, + entityProps: Props, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, + allocationStrategy: ShardAllocationStrategy, + handOffStopMessage: Any): ActorRef = { - start(typeName, entityProps, ClusterShardingSettings(system), extractEntityId, extractShardId, allocationStrategy, handOffStopMessage) + start(typeName, + entityProps, + ClusterShardingSettings(system), + extractEntityId, + extractShardId, + allocationStrategy, + handOffStopMessage) } /** * INTERNAL API */ - @InternalApi private[akka] def internalStart( - typeName: String, - entityProps: String => Props, - settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, - allocationStrategy: ShardAllocationStrategy, - handOffStopMessage: Any): ActorRef = { + @InternalApi private[akka] def internalStart(typeName: String, + entityProps: String => Props, + settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, + allocationStrategy: ShardAllocationStrategy, + handOffStopMessage: Any): ActorRef = { if (settings.shouldHostShard(cluster)) { regions.get(typeName) match { case null => // it's ok to Start several time, the guardian will deduplicate concurrent requests implicit val timeout = system.settings.CreationTimeout - val startMsg = Start(typeName, entityProps, settings, - extractEntityId, extractShardId, allocationStrategy, handOffStopMessage) + val startMsg = Start(typeName, + entityProps, + settings, + extractEntityId, + extractShardId, + allocationStrategy, + 
handOffStopMessage) val Started(shardRegion) = Await.result(guardian ? startMsg, timeout.duration) regions.put(typeName, shardRegion) shardRegion @@ -278,12 +291,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { } else { log.debug("Starting Shard Region Proxy [{}] (no actors will be hosted on this node)...", typeName) - startProxy( - typeName, - settings.role, - dataCenter = None, // startProxy method must be used directly to start a proxy for another DC - extractEntityId, - extractShardId) + startProxy(typeName, + settings.role, + dataCenter = None, // startProxy method must be used directly to start a proxy for another DC + extractEntityId, + extractShardId) } } @@ -311,12 +323,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * that passed the `extractEntityId` will be used * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ - def start( - typeName: String, - entityProps: Props, - settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId): ActorRef = { + def start(typeName: String, + entityProps: Props, + settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId): ActorRef = { val allocationStrategy = defaultShardAllocationStrategy(settings) @@ -346,11 +357,10 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * that passed the `extractEntityId` will be used * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ - def start( - typeName: String, - entityProps: Props, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId): ActorRef = { + def start(typeName: String, + entityProps: Props, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId): ActorRef = { start(typeName, entityProps, 
ClusterShardingSettings(system), extractEntityId, extractShardId) } @@ -377,26 +387,23 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * for a rebalance or graceful shutdown of a `ShardRegion`, e.g. `PoisonPill`. * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ - def start( - typeName: String, - entityProps: Props, - settings: ClusterShardingSettings, - messageExtractor: ShardRegion.MessageExtractor, - allocationStrategy: ShardAllocationStrategy, - handOffStopMessage: Any): ActorRef = { + def start(typeName: String, + entityProps: Props, + settings: ClusterShardingSettings, + messageExtractor: ShardRegion.MessageExtractor, + allocationStrategy: ShardAllocationStrategy, + handOffStopMessage: Any): ActorRef = { - internalStart( - typeName, - _ => entityProps, - settings, - extractEntityId = { - case msg if messageExtractor.entityId(msg) ne null => - (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg)) - }, - extractShardId = msg => messageExtractor.shardId(msg), - allocationStrategy = allocationStrategy, - handOffStopMessage = handOffStopMessage - ) + internalStart(typeName, + _ => entityProps, + settings, + extractEntityId = { + case msg if messageExtractor.entityId(msg) ne null => + (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg)) + }, + extractShardId = msg => messageExtractor.shardId(msg), + allocationStrategy = allocationStrategy, + handOffStopMessage = handOffStopMessage) } /** @@ -420,11 +427,10 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * entity from the incoming message * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ - def start( - typeName: String, - entityProps: Props, - settings: ClusterShardingSettings, - messageExtractor: ShardRegion.MessageExtractor): ActorRef = { + def start(typeName: String, + entityProps: Props, + settings: ClusterShardingSettings, + messageExtractor: 
ShardRegion.MessageExtractor): ActorRef = { val allocationStrategy = defaultShardAllocationStrategy(settings) @@ -451,10 +457,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * entity from the incoming message * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ - def start( - typeName: String, - entityProps: Props, - messageExtractor: ShardRegion.MessageExtractor): ActorRef = { + def start(typeName: String, entityProps: Props, messageExtractor: ShardRegion.MessageExtractor): ActorRef = { start(typeName, entityProps, ClusterShardingSettings(system), messageExtractor) } @@ -477,11 +480,10 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * that passed the `extractEntityId` will be used * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ - def startProxy( - typeName: String, - role: Option[String], - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId): ActorRef = + def startProxy(typeName: String, + role: Option[String], + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId): ActorRef = startProxy(typeName, role, dataCenter = None, extractEntityId, extractShardId) /** @@ -505,12 +507,11 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * that passed the `extractEntityId` will be used * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ - def startProxy( - typeName: String, - role: Option[String], - dataCenter: Option[DataCenter], - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId): ActorRef = { + def startProxy(typeName: String, + role: Option[String], + dataCenter: Option[DataCenter], + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId): ActorRef = { proxies.get(proxyName(typeName, dataCenter)) match { case null => @@ 
-549,10 +550,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * entity from the incoming message * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ - def startProxy( - typeName: String, - role: Optional[String], - messageExtractor: ShardRegion.MessageExtractor): ActorRef = + def startProxy(typeName: String, role: Optional[String], messageExtractor: ShardRegion.MessageExtractor): ActorRef = startProxy(typeName, role, dataCenter = Optional.empty(), messageExtractor) /** @@ -573,22 +571,15 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { * entity from the incoming message * @return the actor ref of the [[ShardRegion]] that is to be responsible for the shard */ - def startProxy( - typeName: String, - role: Optional[String], - dataCenter: Optional[String], - messageExtractor: ShardRegion.MessageExtractor): ActorRef = { + def startProxy(typeName: String, + role: Optional[String], + dataCenter: Optional[String], + messageExtractor: ShardRegion.MessageExtractor): ActorRef = { - startProxy( - typeName, - Option(role.orElse(null)), - Option(dataCenter.orElse(null)), - extractEntityId = { - case msg if messageExtractor.entityId(msg) ne null => - (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg)) - }, - extractShardId = msg => messageExtractor.shardId(msg) - ) + startProxy(typeName, Option(role.orElse(null)), Option(dataCenter.orElse(null)), extractEntityId = { + case msg if messageExtractor.entityId(msg) ne null => + (messageExtractor.entityId(msg), messageExtractor.entityMessage(msg)) + }, extractShardId = msg => messageExtractor.shardId(msg)) } @@ -612,8 +603,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { case null => proxies.get(proxyName(typeName, None)) match { case null => - throw new IllegalArgumentException( - s"Shard type [$typeName] must be started first") + throw new IllegalArgumentException(s"Shard type [$typeName] must be 
started first") case ref => ref } case ref => ref @@ -630,8 +620,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { def shardRegionProxy(typeName: String, dataCenter: DataCenter): ActorRef = { proxies.get(proxyName(typeName, Some(dataCenter))) match { case null => - throw new IllegalArgumentException( - s"Shard type [$typeName] must be started first") + throw new IllegalArgumentException(s"Shard type [$typeName] must be started first") case ref => ref } } @@ -652,24 +641,21 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension { */ private[akka] object ClusterShardingGuardian { import ShardCoordinator.ShardAllocationStrategy - final case class Start( - typeName: String, - entityProps: String => Props, - settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, - allocationStrategy: ShardAllocationStrategy, - handOffStopMessage: Any) - extends NoSerializationVerificationNeeded - final case class StartProxy( - typeName: String, - dataCenter: Option[DataCenter], - settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId) - extends NoSerializationVerificationNeeded - final case class Started(shardRegion: ActorRef) - extends NoSerializationVerificationNeeded + final case class Start(typeName: String, + entityProps: String => Props, + settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, + allocationStrategy: ShardAllocationStrategy, + handOffStopMessage: Any) + extends NoSerializationVerificationNeeded + final case class StartProxy(typeName: String, + dataCenter: Option[DataCenter], + settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId) + extends NoSerializationVerificationNeeded + final case class Started(shardRegion: ActorRef) extends 
NoSerializationVerificationNeeded } /** @@ -682,8 +668,7 @@ private[akka] class ClusterShardingGuardian extends Actor { val cluster = Cluster(context.system) val sharding = ClusterSharding(context.system) - val majorityMinCap = context.system.settings.config - .getInt("akka.cluster.sharding.distributed-data.majority-min-cap") + val majorityMinCap = context.system.settings.config.getInt("akka.cluster.sharding.distributed-data.majority-min-cap") private lazy val replicatorSettings = ReplicatorSettings(context.system.settings.config.getConfig("akka.cluster.sharding.distributed-data")) private var replicatorByRole = Map.empty[Option[String], ActorRef] @@ -705,11 +690,8 @@ private[akka] class ClusterShardingGuardian extends Actor { case None => "replicator" } // Use members within the data center and with the given role (if any) - val replicatorRoles = Set( - ClusterSettings.DcRolePrefix + cluster.settings.SelfDataCenter) ++ settings.role - val ref = context.actorOf( - Replicator.props(replicatorSettings.withRoles(replicatorRoles)), - name) + val replicatorRoles = Set(ClusterSettings.DcRolePrefix + cluster.settings.SelfDataCenter) ++ settings.role + val ref = context.actorOf(Replicator.props(replicatorSettings.withRoles(replicatorRoles)), name) replicatorByRole = replicatorByRole.updated(settings.role, ref) ref } @@ -719,12 +701,12 @@ private[akka] class ClusterShardingGuardian extends Actor { def receive: Receive = { case Start(typeName, - entityProps, - settings, - extractEntityId, - extractShardId, - allocationStrategy, - handOffStopMessage) => + entityProps, + settings, + extractEntityId, + extractShardId, + allocationStrategy, + handOffStopMessage) => try { import settings.role import settings.tuningParameters.coordinatorFailureBackoff @@ -740,38 +722,34 @@ private[akka] class ClusterShardingGuardian extends Actor { ShardCoordinator.props(typeName, settings, allocationStrategy) else ShardCoordinator.props(typeName, settings, allocationStrategy, rep, majorityMinCap) 
- val singletonProps = BackoffSupervisor.props( - childProps = coordinatorProps, - childName = "coordinator", - minBackoff = coordinatorFailureBackoff, - maxBackoff = coordinatorFailureBackoff * 5, - randomFactor = 0.2, - maxNrOfRetries = -1) + val singletonProps = BackoffSupervisor + .props(childProps = coordinatorProps, + childName = "coordinator", + minBackoff = coordinatorFailureBackoff, + maxBackoff = coordinatorFailureBackoff * 5, + randomFactor = 0.2, + maxNrOfRetries = -1) .withDeploy(Deploy.local) - val singletonSettings = settings.coordinatorSingletonSettings - .withSingletonName("singleton") - .withRole(role) - context.actorOf( - ClusterSingletonManager.props(singletonProps, terminationMessage = PoisonPill, singletonSettings) - .withDispatcher(context.props.dispatcher), - name = cName) + val singletonSettings = settings.coordinatorSingletonSettings.withSingletonName("singleton").withRole(role) + context.actorOf(ClusterSingletonManager + .props(singletonProps, terminationMessage = PoisonPill, singletonSettings) + .withDispatcher(context.props.dispatcher), + name = cName) } context.actorOf( - ShardRegion.props( - typeName = typeName, - entityProps = entityProps, - settings = settings, - coordinatorPath = cPath, - extractEntityId = extractEntityId, - extractShardId = extractShardId, - handOffStopMessage = handOffStopMessage, - replicator = rep, - majorityMinCap - ) + ShardRegion + .props(typeName = typeName, + entityProps = entityProps, + settings = settings, + coordinatorPath = cPath, + extractEntityId = extractEntityId, + extractShardId = extractShardId, + handOffStopMessage = handOffStopMessage, + replicator = rep, + majorityMinCap) .withDispatcher(context.props.dispatcher), - name = encName - ) + name = encName) } sender() ! Started(shardRegion) } catch { @@ -782,11 +760,7 @@ private[akka] class ClusterShardingGuardian extends Actor { sender() ! 
Status.Failure(e) } - case StartProxy(typeName, - dataCenter, - settings, - extractEntityId, - extractShardId) => + case StartProxy(typeName, dataCenter, settings, extractEntityId, extractShardId) => try { val encName = URLEncoder.encode(s"${typeName}Proxy", ByteString.UTF_8) val cPath = coordinatorPath(URLEncoder.encode(typeName, ByteString.UTF_8)) @@ -797,19 +771,17 @@ private[akka] class ClusterShardingGuardian extends Actor { } val shardRegion = context.child(actorName).getOrElse { context.actorOf( - ShardRegion.proxyProps( - typeName = typeName, - dataCenter = dataCenter, - settings = settings, - coordinatorPath = cPath, - extractEntityId = extractEntityId, - extractShardId = extractShardId, - replicator = context.system.deadLetters, - majorityMinCap - ) + ShardRegion + .proxyProps(typeName = typeName, + dataCenter = dataCenter, + settings = settings, + coordinatorPath = cPath, + extractEntityId = extractEntityId, + extractShardId = extractShardId, + replicator = context.system.deadLetters, + majorityMinCap) .withDispatcher(context.props.dispatcher), - name = actorName - ) + name = actorName) } sender() ! 
Started(shardRegion) } catch { diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala index 206bddccb2..5f03cc55e7 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ClusterShardingSettings.scala @@ -42,15 +42,16 @@ object ClusterShardingSettings { rebalanceInterval = config.getDuration("rebalance-interval", MILLISECONDS).millis, snapshotAfter = config.getInt("snapshot-after"), keepNrOfBatches = config.getInt("keep-nr-of-batches"), - leastShardAllocationRebalanceThreshold = - config.getInt("least-shard-allocation-strategy.rebalance-threshold"), + leastShardAllocationRebalanceThreshold = config.getInt("least-shard-allocation-strategy.rebalance-threshold"), leastShardAllocationMaxSimultaneousRebalance = config.getInt("least-shard-allocation-strategy.max-simultaneous-rebalance"), waitingForStateTimeout = config.getDuration("waiting-for-state-timeout", MILLISECONDS).millis, updatingStateTimeout = config.getDuration("updating-state-timeout", MILLISECONDS).millis, entityRecoveryStrategy = config.getString("entity-recovery-strategy"), - entityRecoveryConstantRateStrategyFrequency = config.getDuration("entity-recovery-constant-rate-strategy.frequency", MILLISECONDS).millis, - entityRecoveryConstantRateStrategyNumberOfEntities = config.getInt("entity-recovery-constant-rate-strategy.number-of-entities")) + entityRecoveryConstantRateStrategyFrequency = + config.getDuration("entity-recovery-constant-rate-strategy.frequency", MILLISECONDS).millis, + entityRecoveryConstantRateStrategyNumberOfEntities = + config.getInt("entity-recovery-constant-rate-strategy.number-of-entities")) val coordinatorSingletonSettings = ClusterSingletonManagerSettings(config.getConfig("coordinator-singleton")) @@ -58,15 +59,14 @@ object 
ClusterShardingSettings { if (config.getString("passivate-idle-entity-after").toLowerCase == "off") Duration.Zero else config.getDuration("passivate-idle-entity-after", MILLISECONDS).millis - new ClusterShardingSettings( - role = roleOption(config.getString("role")), - rememberEntities = config.getBoolean("remember-entities"), - journalPluginId = config.getString("journal-plugin-id"), - snapshotPluginId = config.getString("snapshot-plugin-id"), - stateStoreMode = config.getString("state-store-mode"), - passivateIdleEntityAfter = passivateIdleAfter, - tuningParameters, - coordinatorSingletonSettings) + new ClusterShardingSettings(role = roleOption(config.getString("role")), + rememberEntities = config.getBoolean("remember-entities"), + journalPluginId = config.getString("journal-plugin-id"), + snapshotPluginId = config.getString("snapshot-plugin-id"), + stateStoreMode = config.getString("state-store-mode"), + passivateIdleEntityAfter = passivateIdleAfter, + tuningParameters, + coordinatorSingletonSettings) } /** @@ -87,99 +87,93 @@ object ClusterShardingSettings { private[akka] def roleOption(role: String): Option[String] = if (role == "") None else Option(role) - class TuningParameters( - val coordinatorFailureBackoff: FiniteDuration, - val retryInterval: FiniteDuration, - val bufferSize: Int, - val handOffTimeout: FiniteDuration, - val shardStartTimeout: FiniteDuration, - val shardFailureBackoff: FiniteDuration, - val entityRestartBackoff: FiniteDuration, - val rebalanceInterval: FiniteDuration, - val snapshotAfter: Int, - val keepNrOfBatches: Int, - val leastShardAllocationRebalanceThreshold: Int, - val leastShardAllocationMaxSimultaneousRebalance: Int, - val waitingForStateTimeout: FiniteDuration, - val updatingStateTimeout: FiniteDuration, - val entityRecoveryStrategy: String, - val entityRecoveryConstantRateStrategyFrequency: FiniteDuration, - val entityRecoveryConstantRateStrategyNumberOfEntities: Int) { + class TuningParameters(val coordinatorFailureBackoff: 
FiniteDuration, + val retryInterval: FiniteDuration, + val bufferSize: Int, + val handOffTimeout: FiniteDuration, + val shardStartTimeout: FiniteDuration, + val shardFailureBackoff: FiniteDuration, + val entityRestartBackoff: FiniteDuration, + val rebalanceInterval: FiniteDuration, + val snapshotAfter: Int, + val keepNrOfBatches: Int, + val leastShardAllocationRebalanceThreshold: Int, + val leastShardAllocationMaxSimultaneousRebalance: Int, + val waitingForStateTimeout: FiniteDuration, + val updatingStateTimeout: FiniteDuration, + val entityRecoveryStrategy: String, + val entityRecoveryConstantRateStrategyFrequency: FiniteDuration, + val entityRecoveryConstantRateStrategyNumberOfEntities: Int) { - require( - entityRecoveryStrategy == "all" || entityRecoveryStrategy == "constant", - s"Unknown 'entity-recovery-strategy' [$entityRecoveryStrategy], valid values are 'all' or 'constant'") + require(entityRecoveryStrategy == "all" || entityRecoveryStrategy == "constant", + s"Unknown 'entity-recovery-strategy' [$entityRecoveryStrategy], valid values are 'all' or 'constant'") // included for binary compatibility - def this( - coordinatorFailureBackoff: FiniteDuration, - retryInterval: FiniteDuration, - bufferSize: Int, - handOffTimeout: FiniteDuration, - shardStartTimeout: FiniteDuration, - shardFailureBackoff: FiniteDuration, - entityRestartBackoff: FiniteDuration, - rebalanceInterval: FiniteDuration, - snapshotAfter: Int, - leastShardAllocationRebalanceThreshold: Int, - leastShardAllocationMaxSimultaneousRebalance: Int, - waitingForStateTimeout: FiniteDuration, - updatingStateTimeout: FiniteDuration, - entityRecoveryStrategy: String, - entityRecoveryConstantRateStrategyFrequency: FiniteDuration, - entityRecoveryConstantRateStrategyNumberOfEntities: Int) = { - this( - coordinatorFailureBackoff, - retryInterval, - bufferSize, - handOffTimeout, - shardStartTimeout, - shardFailureBackoff, - entityRestartBackoff, - rebalanceInterval, - snapshotAfter, - 2, - 
leastShardAllocationRebalanceThreshold, - leastShardAllocationMaxSimultaneousRebalance, - waitingForStateTimeout, - updatingStateTimeout, - entityRecoveryStrategy, - entityRecoveryConstantRateStrategyFrequency, - entityRecoveryConstantRateStrategyNumberOfEntities) + def this(coordinatorFailureBackoff: FiniteDuration, + retryInterval: FiniteDuration, + bufferSize: Int, + handOffTimeout: FiniteDuration, + shardStartTimeout: FiniteDuration, + shardFailureBackoff: FiniteDuration, + entityRestartBackoff: FiniteDuration, + rebalanceInterval: FiniteDuration, + snapshotAfter: Int, + leastShardAllocationRebalanceThreshold: Int, + leastShardAllocationMaxSimultaneousRebalance: Int, + waitingForStateTimeout: FiniteDuration, + updatingStateTimeout: FiniteDuration, + entityRecoveryStrategy: String, + entityRecoveryConstantRateStrategyFrequency: FiniteDuration, + entityRecoveryConstantRateStrategyNumberOfEntities: Int) = { + this(coordinatorFailureBackoff, + retryInterval, + bufferSize, + handOffTimeout, + shardStartTimeout, + shardFailureBackoff, + entityRestartBackoff, + rebalanceInterval, + snapshotAfter, + 2, + leastShardAllocationRebalanceThreshold, + leastShardAllocationMaxSimultaneousRebalance, + waitingForStateTimeout, + updatingStateTimeout, + entityRecoveryStrategy, + entityRecoveryConstantRateStrategyFrequency, + entityRecoveryConstantRateStrategyNumberOfEntities) } // included for binary compatibility - def this( - coordinatorFailureBackoff: FiniteDuration, - retryInterval: FiniteDuration, - bufferSize: Int, - handOffTimeout: FiniteDuration, - shardStartTimeout: FiniteDuration, - shardFailureBackoff: FiniteDuration, - entityRestartBackoff: FiniteDuration, - rebalanceInterval: FiniteDuration, - snapshotAfter: Int, - leastShardAllocationRebalanceThreshold: Int, - leastShardAllocationMaxSimultaneousRebalance: Int, - waitingForStateTimeout: FiniteDuration, - updatingStateTimeout: FiniteDuration) = { - this( - coordinatorFailureBackoff, - retryInterval, - bufferSize, - 
handOffTimeout, - shardStartTimeout, - shardFailureBackoff, - entityRestartBackoff, - rebalanceInterval, - snapshotAfter, - leastShardAllocationRebalanceThreshold, - leastShardAllocationMaxSimultaneousRebalance, - waitingForStateTimeout, - updatingStateTimeout, - "all", - 100.milliseconds, - 5) + def this(coordinatorFailureBackoff: FiniteDuration, + retryInterval: FiniteDuration, + bufferSize: Int, + handOffTimeout: FiniteDuration, + shardStartTimeout: FiniteDuration, + shardFailureBackoff: FiniteDuration, + entityRestartBackoff: FiniteDuration, + rebalanceInterval: FiniteDuration, + snapshotAfter: Int, + leastShardAllocationRebalanceThreshold: Int, + leastShardAllocationMaxSimultaneousRebalance: Int, + waitingForStateTimeout: FiniteDuration, + updatingStateTimeout: FiniteDuration) = { + this(coordinatorFailureBackoff, + retryInterval, + bufferSize, + handOffTimeout, + shardStartTimeout, + shardFailureBackoff, + entityRestartBackoff, + rebalanceInterval, + snapshotAfter, + leastShardAllocationRebalanceThreshold, + leastShardAllocationMaxSimultaneousRebalance, + waitingForStateTimeout, + updatingStateTimeout, + "all", + 100.milliseconds, + 5) } } @@ -204,32 +198,39 @@ object ClusterShardingSettings { * Use 0 to disable automatic passivation. 
* @param tuningParameters additional tuning parameters, see descriptions in reference.conf */ -final class ClusterShardingSettings( - val role: Option[String], - val rememberEntities: Boolean, - val journalPluginId: String, - val snapshotPluginId: String, - val stateStoreMode: String, - val passivateIdleEntityAfter: FiniteDuration, - val tuningParameters: ClusterShardingSettings.TuningParameters, - val coordinatorSingletonSettings: ClusterSingletonManagerSettings) extends NoSerializationVerificationNeeded { +final class ClusterShardingSettings(val role: Option[String], + val rememberEntities: Boolean, + val journalPluginId: String, + val snapshotPluginId: String, + val stateStoreMode: String, + val passivateIdleEntityAfter: FiniteDuration, + val tuningParameters: ClusterShardingSettings.TuningParameters, + val coordinatorSingletonSettings: ClusterSingletonManagerSettings) + extends NoSerializationVerificationNeeded { // included for binary compatibility reasons - @deprecated("Use the ClusterShardingSettings factory methods or the constructor including passivateIdleEntityAfter instead", "2.5.18") - def this( - role: Option[String], - rememberEntities: Boolean, - journalPluginId: String, - snapshotPluginId: String, - stateStoreMode: String, - tuningParameters: ClusterShardingSettings.TuningParameters, - coordinatorSingletonSettings: ClusterSingletonManagerSettings) = - this(role, rememberEntities, journalPluginId, snapshotPluginId, stateStoreMode, Duration.Zero, tuningParameters, coordinatorSingletonSettings) + @deprecated( + "Use the ClusterShardingSettings factory methods or the constructor including passivateIdleEntityAfter instead", + "2.5.18") + def this(role: Option[String], + rememberEntities: Boolean, + journalPluginId: String, + snapshotPluginId: String, + stateStoreMode: String, + tuningParameters: ClusterShardingSettings.TuningParameters, + coordinatorSingletonSettings: ClusterSingletonManagerSettings) = + this(role, + rememberEntities, + journalPluginId, 
+ snapshotPluginId, + stateStoreMode, + Duration.Zero, + tuningParameters, + coordinatorSingletonSettings) - import ClusterShardingSettings.{ StateStoreModePersistence, StateStoreModeDData } - require( - stateStoreMode == StateStoreModePersistence || stateStoreMode == StateStoreModeDData, - s"Unknown 'state-store-mode' [$stateStoreMode], valid values are '$StateStoreModeDData' or '$StateStoreModePersistence'") + import ClusterShardingSettings.{ StateStoreModeDData, StateStoreModePersistence } + require(stateStoreMode == StateStoreModePersistence || stateStoreMode == StateStoreModeDData, + s"Unknown 'state-store-mode' [$stateStoreMode], valid values are '$StateStoreModeDData' or '$StateStoreModePersistence'") /** If true, this node should run the shard region, otherwise just a shard proxy should started on this node. */ @InternalApi @@ -265,26 +266,25 @@ final class ClusterShardingSettings( * The `role` of the `ClusterSingletonManagerSettings` is not used. The `role` of the * coordinator singleton will be the same as the `role` of `ClusterShardingSettings`. 
*/ - def withCoordinatorSingletonSettings(coordinatorSingletonSettings: ClusterSingletonManagerSettings): ClusterShardingSettings = + def withCoordinatorSingletonSettings( + coordinatorSingletonSettings: ClusterSingletonManagerSettings): ClusterShardingSettings = copy(coordinatorSingletonSettings = coordinatorSingletonSettings) - private def copy( - role: Option[String] = role, - rememberEntities: Boolean = rememberEntities, - journalPluginId: String = journalPluginId, - snapshotPluginId: String = snapshotPluginId, - stateStoreMode: String = stateStoreMode, - passivateIdleAfter: FiniteDuration = passivateIdleEntityAfter, - tuningParameters: ClusterShardingSettings.TuningParameters = tuningParameters, - coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings): ClusterShardingSettings = - - new ClusterShardingSettings( - role, - rememberEntities, - journalPluginId, - snapshotPluginId, - stateStoreMode, - passivateIdleAfter, - tuningParameters, - coordinatorSingletonSettings) + private def copy(role: Option[String] = role, + rememberEntities: Boolean = rememberEntities, + journalPluginId: String = journalPluginId, + snapshotPluginId: String = snapshotPluginId, + stateStoreMode: String = stateStoreMode, + passivateIdleAfter: FiniteDuration = passivateIdleEntityAfter, + tuningParameters: ClusterShardingSettings.TuningParameters = tuningParameters, + coordinatorSingletonSettings: ClusterSingletonManagerSettings = coordinatorSingletonSettings) + : ClusterShardingSettings = + new ClusterShardingSettings(role, + rememberEntities, + journalPluginId, + snapshotPluginId, + stateStoreMode, + passivateIdleAfter, + tuningParameters, + coordinatorSingletonSettings) } diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala index e7c9628dd4..6caf52312e 100644 --- 
a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/RemoveInternalClusterShardingData.scala @@ -81,8 +81,11 @@ object RemoveInternalClusterShardingData { * API corresponding to the [[#main]] method as described in the * [[RemoveInternalClusterShardingData$ RemoveInternalClusterShardingData companion object]] */ - def remove(system: ActorSystem, journalPluginId: String, typeNames: Set[String], - terminateSystem: Boolean, remove2dot3Data: Boolean): Future[Unit] = { + def remove(system: ActorSystem, + journalPluginId: String, + typeNames: Set[String], + terminateSystem: Boolean, + remove2dot3Data: Boolean): Future[Unit] = { val resolvedJournalPluginId = if (journalPluginId == "") system.settings.config.getString("akka.persistence.journal.plugin") @@ -93,16 +96,18 @@ object RemoveInternalClusterShardingData { } val completion = Promise[Unit]() - system.actorOf( - props(journalPluginId, typeNames, completion, remove2dot3Data), - name = "removeInternalClusterShardingData") + system.actorOf(props(journalPluginId, typeNames, completion, remove2dot3Data), + name = "removeInternalClusterShardingData") completion.future } /** * INTERNAL API: `Props` for [[RemoveInternalClusterShardingData]] actor. */ - private[akka] def props(journalPluginId: String, typeNames: Set[String], completion: Promise[Unit], remove2dot3Data: Boolean): Props = + private[akka] def props(journalPluginId: String, + typeNames: Set[String], + completion: Promise[Unit], + remove2dot3Data: Boolean): Props = Props(new RemoveInternalClusterShardingData(journalPluginId, typeNames, completion, remove2dot3Data)) .withDeploy(Deploy.local) @@ -122,9 +127,10 @@ object RemoveInternalClusterShardingData { * `persistenceId`. It will reply with `RemoveOnePersistenceId.Result` * when done. 
*/ - private[akka] class RemoveOnePersistenceId( - override val journalPluginId: String, override val persistenceId: String, replyTo: ActorRef) - extends PersistentActor { + private[akka] class RemoveOnePersistenceId(override val journalPluginId: String, + override val persistenceId: String, + replyTo: ActorRef) + extends PersistentActor { import RemoveInternalClusterShardingData.RemoveOnePersistenceId._ @@ -132,7 +138,6 @@ object RemoveInternalClusterShardingData { override def receiveRecover: Receive = { case event: ShardCoordinator.Internal.DomainEvent => - case SnapshotOffer(_, _) => hasSnapshots = true @@ -144,20 +149,23 @@ object RemoveInternalClusterShardingData { context.become(waitDeleteMessagesSuccess) } - override def receiveCommand: Receive = ({ - case DeleteSnapshotsSuccess(_) => - context.become(waitDeleteMessagesSuccess) - case DeleteMessagesSuccess(_) => - context.become(waitDeleteSnapshotsSuccess) - }: Receive).orElse(handleFailure) + override def receiveCommand: Receive = + ({ + case DeleteSnapshotsSuccess(_) => + context.become(waitDeleteMessagesSuccess) + case DeleteMessagesSuccess(_) => + context.become(waitDeleteSnapshotsSuccess) + }: Receive).orElse(handleFailure) - def waitDeleteSnapshotsSuccess: Receive = ({ - case DeleteSnapshotsSuccess(_) => done() - }: Receive).orElse(handleFailure) + def waitDeleteSnapshotsSuccess: Receive = + ({ + case DeleteSnapshotsSuccess(_) => done() + }: Receive).orElse(handleFailure) - def waitDeleteMessagesSuccess: Receive = ({ - case DeleteMessagesSuccess(_) => done() - }: Receive).orElse(handleFailure) + def waitDeleteMessagesSuccess: Receive = + ({ + case DeleteMessagesSuccess(_) => done() + }: Receive).orElse(handleFailure) def handleFailure: Receive = { case DeleteMessagesFailure(cause, _) => failure(cause) @@ -181,9 +189,12 @@ object RemoveInternalClusterShardingData { /** * @see [[RemoveInternalClusterShardingData$ RemoveInternalClusterShardingData companion object]] */ -class 
RemoveInternalClusterShardingData(journalPluginId: String, typeNames: Set[String], completion: Promise[Unit], - remove2dot3Data: Boolean) extends Actor - with ActorLogging { +class RemoveInternalClusterShardingData(journalPluginId: String, + typeNames: Set[String], + completion: Promise[Unit], + remove2dot3Data: Boolean) + extends Actor + with ActorLogging { import RemoveInternalClusterShardingData._ import RemoveOnePersistenceId.Result diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala index 3cb4ffb256..f972b414b0 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/Shard.scala @@ -87,30 +87,43 @@ private[akka] object Shard { /** * Persistent state of the Shard. */ - @SerialVersionUID(1L) final case class State private[akka] ( - entities: Set[EntityId] = Set.empty) extends ClusterShardingSerializable + @SerialVersionUID(1L) final case class State private[akka] (entities: Set[EntityId] = Set.empty) + extends ClusterShardingSerializable /** * Factory method for the [[akka.actor.Props]] of the [[Shard]] actor. * If `settings.rememberEntities` is enabled the `PersistentShard` * subclass is used, otherwise `Shard`. 
*/ - def props( - typeName: String, - shardId: ShardRegion.ShardId, - entityProps: String => Props, - settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, - handOffStopMessage: Any, - replicator: ActorRef, - majorityMinCap: Int): Props = { + def props(typeName: String, + shardId: ShardRegion.ShardId, + entityProps: String => Props, + settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, + handOffStopMessage: Any, + replicator: ActorRef, + majorityMinCap: Int): Props = { if (settings.rememberEntities && settings.stateStoreMode == ClusterShardingSettings.StateStoreModeDData) { - Props(new DDataShard(typeName, shardId, entityProps, settings, extractEntityId, extractShardId, - handOffStopMessage, replicator, majorityMinCap)).withDeploy(Deploy.local) + Props( + new DDataShard(typeName, + shardId, + entityProps, + settings, + extractEntityId, + extractShardId, + handOffStopMessage, + replicator, + majorityMinCap)).withDeploy(Deploy.local) } else if (settings.rememberEntities && settings.stateStoreMode == ClusterShardingSettings.StateStoreModePersistence) - Props(new PersistentShard(typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage)) - .withDeploy(Deploy.local) + Props( + new PersistentShard(typeName, + shardId, + entityProps, + settings, + extractEntityId, + extractShardId, + handOffStopMessage)).withDeploy(Deploy.local) else Props(new Shard(typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage)) .withDeploy(Deploy.local) @@ -128,14 +141,15 @@ private[akka] object Shard { * * @see [[ClusterSharding$ ClusterSharding extension]] */ -private[akka] class Shard( - typeName: String, - shardId: ShardRegion.ShardId, - entityProps: String => Props, - settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: 
ShardRegion.ExtractShardId, - handOffStopMessage: Any) extends Actor with ActorLogging { +private[akka] class Shard(typeName: String, + shardId: ShardRegion.ShardId, + entityProps: String => Props, + settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, + handOffStopMessage: Any) + extends Actor + with ActorLogging { import ShardRegion.{ handOffStopperProps, EntityId, Msg, Passivate, ShardInitialized } import ShardCoordinator.Internal.{ HandOff, ShardStopped } @@ -193,7 +207,10 @@ private[akka] class Shard( if (passivateIdleTask.isDefined) { lastMessageTimestamp = lastMessageTimestamp.updated(start.entityId, System.nanoTime()) } - getOrCreateEntity(start.entityId, _ => processChange(EntityStarted(start.entityId))(_ => requester ! ShardRegion.StartEntityAck(start.entityId, shardId))) + getOrCreateEntity(start.entityId, + _ => + processChange(EntityStarted(start.entityId))(_ => + requester ! ShardRegion.StartEntityAck(start.entityId, shardId))) } def receiveStartEntityAck(ack: ShardRegion.StartEntityAck): Unit = { @@ -233,22 +250,23 @@ private[akka] class Shard( if (state.entities.nonEmpty) { val entityHandOffTimeout = (settings.tuningParameters.handOffTimeout - 5.seconds).max(1.seconds) - handOffStopper = Some(context.watch(context.actorOf( - handOffStopperProps(shardId, replyTo, idByRef.keySet, handOffStopMessage, entityHandOffTimeout)))) + handOffStopper = Some( + context.watch(context.actorOf( + handOffStopperProps(shardId, replyTo, idByRef.keySet, handOffStopMessage, entityHandOffTimeout)))) //During hand off we only care about watching for termination of the hand off stopper - context become { + context.become { case Terminated(ref) => receiveTerminated(ref) } } else { replyTo ! 
ShardStopped(shardId) - context stop self + context.stop(self) } } def receiveTerminated(ref: ActorRef): Unit = { if (handOffStopper.contains(ref)) - context stop self + context.stop(self) else if (idByRef.contains(ref) && handOffStopper.isEmpty) entityTerminated(ref) } @@ -272,13 +290,14 @@ private[akka] class Shard( def passivate(entity: ActorRef, stopMessage: Any): Unit = { idByRef.get(entity) match { - case Some(id) => if (!messageBuffers.contains(id)) { - passivating = passivating + entity - messageBuffers.add(id) - entity ! stopMessage - } else { - log.debug("Passivation already in progress for {}. Not sending stopMessage back to entity.", entity) - } + case Some(id) => + if (!messageBuffers.contains(id)) { + passivating = passivating + entity + messageBuffers.add(id) + entity ! stopMessage + } else { + log.debug("Passivation already in progress for {}. Not sending stopMessage back to entity.", entity) + } case None => log.debug("Unknown entity {}. Not sending stopMessage back to entity.", entity) } } @@ -375,13 +394,12 @@ private[akka] class Shard( } private[akka] object RememberEntityStarter { - def props( - region: ActorRef, - typeName: String, - shardId: ShardRegion.ShardId, - ids: Set[ShardRegion.EntityId], - settings: ClusterShardingSettings, - requestor: ActorRef) = + def props(region: ActorRef, + typeName: String, + shardId: ShardRegion.ShardId, + ids: Set[ShardRegion.EntityId], + settings: ClusterShardingSettings, + requestor: ActorRef) = Props(new RememberEntityStarter(region, typeName, shardId, ids, settings, requestor)) private case object Tick extends NoSerializationVerificationNeeded @@ -390,13 +408,14 @@ private[akka] object RememberEntityStarter { /** * INTERNAL API: Actor responsible for starting entities when rememberEntities is enabled */ -private[akka] class RememberEntityStarter( - region: ActorRef, - typeName: String, - shardId: ShardRegion.ShardId, - ids: Set[ShardRegion.EntityId], - settings: ClusterShardingSettings, - requestor: 
ActorRef) extends Actor with ActorLogging { +private[akka] class RememberEntityStarter(region: ActorRef, + typeName: String, + shardId: ShardRegion.ShardId, + ids: Set[ShardRegion.EntityId], + settings: ClusterShardingSettings, + requestor: ActorRef) + extends Actor + with ActorLogging { import context.dispatcher import RememberEntityStarter.Tick @@ -449,10 +468,10 @@ private[akka] trait RememberingShard { import settings.tuningParameters._ entityRecoveryStrategy match { case "all" => EntityRecoveryStrategy.allStrategy() - case "constant" => EntityRecoveryStrategy.constantStrategy( - context.system, - entityRecoveryConstantRateStrategyFrequency, - entityRecoveryConstantRateStrategyNumberOfEntities) + case "constant" => + EntityRecoveryStrategy.constantStrategy(context.system, + entityRecoveryConstantRateStrategyFrequency, + entityRecoveryConstantRateStrategyNumberOfEntities) } } @@ -514,16 +533,17 @@ private[akka] trait RememberingShard { * * @see [[ClusterSharding$ ClusterSharding extension]] */ -private[akka] class PersistentShard( - typeName: String, - shardId: ShardRegion.ShardId, - entityProps: String => Props, - override val settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, - handOffStopMessage: Any) extends Shard( - typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage) - with RememberingShard with PersistentActor with ActorLogging { +private[akka] class PersistentShard(typeName: String, + shardId: ShardRegion.ShardId, + entityProps: String => Props, + override val settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, + handOffStopMessage: Any) + extends Shard(typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage) + with RememberingShard + with PersistentActor + with ActorLogging { import Shard._ import 
settings.tuningParameters._ @@ -561,33 +581,34 @@ private[akka] class PersistentShard( log.debug("PersistentShard recovery completed shard [{}] with [{}] entities", shardId, state.entities.size) } - override def receiveCommand: Receive = ({ - case e: SaveSnapshotSuccess => - log.debug("PersistentShard snapshot saved successfully") - internalDeleteMessagesBeforeSnapshot(e, keepNrOfBatches, snapshotAfter) + override def receiveCommand: Receive = + ({ + case e: SaveSnapshotSuccess => + log.debug("PersistentShard snapshot saved successfully") + internalDeleteMessagesBeforeSnapshot(e, keepNrOfBatches, snapshotAfter) - case SaveSnapshotFailure(_, reason) => - log.warning("PersistentShard snapshot failure: [{}]", reason.getMessage) + case SaveSnapshotFailure(_, reason) => + log.warning("PersistentShard snapshot failure: [{}]", reason.getMessage) - case DeleteMessagesSuccess(toSequenceNr) => - val deleteTo = toSequenceNr - 1 - val deleteFrom = math.max(0, deleteTo - (keepNrOfBatches * snapshotAfter)) - log.debug("PersistentShard messages to [{}] deleted successfully. Deleting snapshots from [{}] to [{}]", toSequenceNr, deleteFrom, deleteTo) - deleteSnapshots(SnapshotSelectionCriteria( - minSequenceNr = deleteFrom, - maxSequenceNr = deleteTo - )) + case DeleteMessagesSuccess(toSequenceNr) => + val deleteTo = toSequenceNr - 1 + val deleteFrom = math.max(0, deleteTo - (keepNrOfBatches * snapshotAfter)) + log.debug("PersistentShard messages to [{}] deleted successfully. 
Deleting snapshots from [{}] to [{}]", + toSequenceNr, + deleteFrom, + deleteTo) + deleteSnapshots(SnapshotSelectionCriteria(minSequenceNr = deleteFrom, maxSequenceNr = deleteTo)) - case DeleteMessagesFailure(reason, toSequenceNr) => - log.warning("PersistentShard messages to [{}] deletion failure: [{}]", toSequenceNr, reason.getMessage) + case DeleteMessagesFailure(reason, toSequenceNr) => + log.warning("PersistentShard messages to [{}] deletion failure: [{}]", toSequenceNr, reason.getMessage) - case DeleteSnapshotsSuccess(m) => - log.debug("PersistentShard snapshots matching [{}] deleted successfully", m) + case DeleteSnapshotsSuccess(m) => + log.debug("PersistentShard snapshots matching [{}] deleted successfully", m) - case DeleteSnapshotsFailure(m, reason) => - log.warning("PersistentShard snapshots matching [{}] deletion failure: [{}]", m, reason.getMessage) + case DeleteSnapshotsFailure(m, reason) => + log.warning("PersistentShard snapshots matching [{}] deletion failure: [{}]", m, reason.getMessage) - }: Receive).orElse(super.receiveCommand) + }: Receive).orElse(super.receiveCommand) } @@ -600,26 +621,25 @@ private[akka] class PersistentShard( * * @see [[ClusterSharding$ ClusterSharding extension]] */ -private[akka] class DDataShard( - typeName: String, - shardId: ShardRegion.ShardId, - entityProps: String => Props, - override val settings: ClusterShardingSettings, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, - handOffStopMessage: Any, - replicator: ActorRef, - majorityMinCap: Int) extends Shard( - typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage) - with RememberingShard with Stash with ActorLogging { +private[akka] class DDataShard(typeName: String, + shardId: ShardRegion.ShardId, + entityProps: String => Props, + override val settings: ClusterShardingSettings, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, + 
handOffStopMessage: Any, + replicator: ActorRef, + majorityMinCap: Int) + extends Shard(typeName, shardId, entityProps, settings, extractEntityId, extractShardId, handOffStopMessage) + with RememberingShard + with Stash + with ActorLogging { import ShardRegion.EntityId import Shard._ import settings.tuningParameters._ - private val readMajority = ReadMajority( - settings.tuningParameters.waitingForStateTimeout, - majorityMinCap) + private val readMajority = ReadMajority(settings.tuningParameters.waitingForStateTimeout, majorityMinCap) private val writeMajority = WriteMajority(settings.tuningParameters.updatingStateTimeout, majorityMinCap) private val maxUpdateAttempts = 3 @@ -667,13 +687,12 @@ private[akka] class DDataShard( { case g @ GetSuccess(_, Some(i: Int)) => val key = stateKeys(i) - state = state.copy(entities = state.entities union (g.get(key).elements)) + state = state.copy(entities = state.entities.union(g.get(key).elements)) receiveOne(i) case GetFailure(_, _) => - log.error( - "The DDataShard was unable to get an initial state within 'waiting-for-state-timeout': {} millis", - waitingForStateTimeout.toMillis) + log.error("The DDataShard was unable to get an initial state within 'waiting-for-state-timeout': {} millis", + waitingForStateTimeout.toMillis) // parent ShardRegion supervisor will notice that it terminated and will start it again, after backoff context.stop(self) @@ -699,13 +718,12 @@ private[akka] class DDataShard( } private def sendUpdate(evt: StateChange, retryCount: Int) = { - replicator ! Update(key(evt.entityId), ORSet.empty[EntityId], writeMajority, - Some((evt, retryCount))) { existing => - evt match { - case EntityStarted(id) => existing + id - case EntityStopped(id) => existing - id - } + replicator ! 
Update(key(evt.entityId), ORSet.empty[EntityId], writeMajority, Some((evt, retryCount))) { existing => + evt match { + case EntityStarted(id) => existing + id + case EntityStopped(id) => existing - id } + } } // this state will stash all messages until it receives UpdateSuccess @@ -721,22 +739,26 @@ private[akka] class DDataShard( // parent ShardRegion supervisor will notice that it terminated and will start it again, after backoff log.error( "The DDataShard was unable to update state after {} attempts, within 'updating-state-timeout'={} millis, event={}. " + - "Shard will be restarted after backoff.", - maxUpdateAttempts, updatingStateTimeout.toMillis, evt) + "Shard will be restarted after backoff.", + maxUpdateAttempts, + updatingStateTimeout.toMillis, + evt) context.stop(self) } else { log.warning( "The DDataShard was unable to update state, attempt {} of {}, within 'updating-state-timeout'={} millis, event={}", - retryCount, maxUpdateAttempts, updatingStateTimeout.toMillis, evt) + retryCount, + maxUpdateAttempts, + updatingStateTimeout.toMillis, + evt) sendUpdate(evt, retryCount + 1) } case ModifyFailure(_, error, cause, Some((`evt`, _))) => - log.error( - cause, - "The DDataShard was unable to update state with error {} and event {}. Shard will be restarted", - error, - evt) + log.error(cause, + "The DDataShard was unable to update state with error {} and event {}. 
Shard will be restarted", + error, + evt) throw cause case _ => stash() @@ -747,7 +769,9 @@ private[akka] class DDataShard( object EntityRecoveryStrategy { def allStrategy(): EntityRecoveryStrategy = new AllAtOnceEntityRecoveryStrategy() - def constantStrategy(actorSystem: ActorSystem, frequency: FiniteDuration, numberOfEntities: Int): EntityRecoveryStrategy = + def constantStrategy(actorSystem: ActorSystem, + frequency: FiniteDuration, + numberOfEntities: Int): EntityRecoveryStrategy = new ConstantRateEntityRecoveryStrategy(actorSystem, frequency, numberOfEntities) } @@ -767,17 +791,23 @@ final class AllAtOnceEntityRecoveryStrategy extends EntityRecoveryStrategy { if (entities.isEmpty) Set.empty else Set(Future.successful(entities)) } -final class ConstantRateEntityRecoveryStrategy(actorSystem: ActorSystem, frequency: FiniteDuration, numberOfEntities: Int) extends EntityRecoveryStrategy { +final class ConstantRateEntityRecoveryStrategy(actorSystem: ActorSystem, + frequency: FiniteDuration, + numberOfEntities: Int) + extends EntityRecoveryStrategy { import ShardRegion.EntityId import actorSystem.dispatcher import akka.pattern.after override def recoverEntities(entities: Set[EntityId]): Set[Future[Set[EntityId]]] = - entities.grouped(numberOfEntities).foldLeft((frequency, Set[Future[Set[EntityId]]]())) { - case ((interval, scheduledEntityIds), entityIds) => - (interval + frequency, scheduledEntityIds + scheduleEntities(interval, entityIds)) - }._2 + entities + .grouped(numberOfEntities) + .foldLeft((frequency, Set[Future[Set[EntityId]]]())) { + case ((interval, scheduledEntityIds), entityIds) => + (interval + frequency, scheduledEntityIds + scheduleEntities(interval, entityIds)) + } + ._2 private def scheduleEntities(interval: FiniteDuration, entityIds: Set[EntityId]) = after(interval, actorSystem.scheduler)(Future.successful[Set[EntityId]](entityIds)) diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala 
b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala index 1c27e400a9..878c5adfc3 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardCoordinator.scala @@ -19,7 +19,7 @@ import akka.cluster.ddata.LWWRegister import akka.cluster.ddata.LWWRegisterKey import akka.cluster.ddata.Replicator._ import akka.dispatch.ExecutionContexts -import akka.pattern.{ AskTimeoutException, pipe } +import akka.pattern.{ pipe, AskTimeoutException } import akka.persistence._ import akka.cluster.ClusterEvent import akka.cluster.ddata.GSet @@ -38,7 +38,8 @@ object ShardCoordinator { * INTERNAL API * Factory method for the [[akka.actor.Props]] of the [[ShardCoordinator]] actor. */ - private[akka] def props(typeName: String, settings: ClusterShardingSettings, + private[akka] def props(typeName: String, + settings: ClusterShardingSettings, allocationStrategy: ShardAllocationStrategy): Props = Props(new PersistentShardCoordinator(typeName: String, settings, allocationStrategy)).withDeploy(Deploy.local) @@ -46,11 +47,18 @@ object ShardCoordinator { * INTERNAL API * Factory method for the [[akka.actor.Props]] of the [[ShardCoordinator]] actor with state based on ddata. 
*/ - private[akka] def props(typeName: String, settings: ClusterShardingSettings, + private[akka] def props(typeName: String, + settings: ClusterShardingSettings, allocationStrategy: ShardAllocationStrategy, - replicator: ActorRef, majorityMinCap: Int): Props = - Props(new DDataShardCoordinator(typeName: String, settings, allocationStrategy, replicator, - majorityMinCap, settings.rememberEntities)).withDeploy(Deploy.local) + replicator: ActorRef, + majorityMinCap: Int): Props = + Props( + new DDataShardCoordinator(typeName: String, + settings, + allocationStrategy, + replicator, + majorityMinCap, + settings.rememberEntities)).withDeploy(Deploy.local) /** * Interface of the pluggable shard allocation and rebalancing logic used by the [[ShardCoordinator]]. @@ -58,6 +66,7 @@ object ShardCoordinator { * Java implementations should extend [[AbstractShardAllocationStrategy]]. */ trait ShardAllocationStrategy extends NoSerializationVerificationNeeded { + /** * Invoked when the location of a new shard is to be decided. 
* @param requester actor reference to the [[ShardRegion]] that requested the location of the @@ -68,7 +77,8 @@ object ShardCoordinator { * @return a `Future` of the actor ref of the [[ShardRegion]] that is to be responsible for the shard, must be one of * the references included in the `currentShardAllocations` parameter */ - def allocateShard(requester: ActorRef, shardId: ShardId, + def allocateShard(requester: ActorRef, + shardId: ShardId, currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]]): Future[ActorRef] /** @@ -79,9 +89,8 @@ object ShardCoordinator { * you should not include these in the returned set * @return a `Future` of the shards to be migrated, may be empty to skip rebalance in this round */ - def rebalance( - currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], - rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] + def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], + rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] } /** @@ -89,16 +98,17 @@ object ShardCoordinator { * should extend this abstract class and implement the two methods. 
*/ abstract class AbstractShardAllocationStrategy extends ShardAllocationStrategy { - override final def allocateShard(requester: ActorRef, shardId: ShardId, - currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]]): Future[ActorRef] = { + override final def allocateShard( + requester: ActorRef, + shardId: ShardId, + currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]]): Future[ActorRef] = { import scala.collection.JavaConverters._ allocateShard(requester, shardId, currentShardAllocations.asJava) } - override final def rebalance( - currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], - rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = { + override final def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], + rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = { import scala.collection.JavaConverters._ implicit val ec = ExecutionContexts.sameThreadExecutionContext rebalance(currentShardAllocations.asJava, rebalanceInProgress.asJava).map(_.asScala.toSet) @@ -114,7 +124,8 @@ object ShardCoordinator { * @return a `Future` of the actor ref of the [[ShardRegion]] that is to be responsible for the shard, must be one of * the references included in the `currentShardAllocations` parameter */ - def allocateShard(requester: ActorRef, shardId: String, + def allocateShard(requester: ActorRef, + shardId: String, currentShardAllocations: java.util.Map[ActorRef, immutable.IndexedSeq[String]]): Future[ActorRef] /** @@ -125,9 +136,8 @@ object ShardCoordinator { * you should not include these in the returned set * @return a `Future` of the shards to be migrated, may be empty to skip rebalance in this round */ - def rebalance( - currentShardAllocations: java.util.Map[ActorRef, immutable.IndexedSeq[String]], - rebalanceInProgress: java.util.Set[String]): Future[java.util.Set[String]] + def rebalance(currentShardAllocations: java.util.Map[ActorRef, immutable.IndexedSeq[String]], + 
rebalanceInProgress: java.util.Set[String]): Future[java.util.Set[String]] } private val emptyRebalanceResult = Future.successful(Set.empty[ShardId]) @@ -158,27 +168,30 @@ object ShardCoordinator { */ @SerialVersionUID(1L) class LeastShardAllocationStrategy(rebalanceThreshold: Int, maxSimultaneousRebalance: Int) - extends ShardAllocationStrategy with Serializable { + extends ShardAllocationStrategy + with Serializable { - override def allocateShard(requester: ActorRef, shardId: ShardId, - currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]]): Future[ActorRef] = { + override def allocateShard( + requester: ActorRef, + shardId: ShardId, + currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]]): Future[ActorRef] = { val (regionWithLeastShards, _) = currentShardAllocations.minBy { case (_, v) => v.size } Future.successful(regionWithLeastShards) } - override def rebalance( - currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], - rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = { + override def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardId]], + rebalanceInProgress: Set[ShardId]): Future[Set[ShardId]] = { if (rebalanceInProgress.size < maxSimultaneousRebalance) { val (regionWithLeastShards, leastShards) = currentShardAllocations.minBy { case (_, v) => v.size } - val mostShards = currentShardAllocations.collect { - case (_, v) => v.filterNot(s => rebalanceInProgress(s)) - }.maxBy(_.size) + val mostShards = currentShardAllocations + .collect { + case (_, v) => v.filterNot(s => rebalanceInProgress(s)) + } + .maxBy(_.size) val difference = mostShards.size - leastShards.size if (difference > rebalanceThreshold) { - val n = math.min( - math.min(difference - rebalanceThreshold, rebalanceThreshold), - maxSimultaneousRebalance - rebalanceInProgress.size) + val n = math.min(math.min(difference - rebalanceThreshold, rebalanceThreshold), + maxSimultaneousRebalance - rebalanceInProgress.size) 
Future.successful(mostShards.sorted.take(n).toSet) } else emptyRebalanceResult @@ -190,49 +203,59 @@ object ShardCoordinator { * INTERNAL API */ private[akka] object Internal { + /** * Messages sent to the coordinator */ sealed trait CoordinatorCommand extends ClusterShardingSerializable + /** * Messages sent from the coordinator */ sealed trait CoordinatorMessage extends ClusterShardingSerializable + /** * `ShardRegion` registers to `ShardCoordinator`, until it receives [[RegisterAck]]. */ - @SerialVersionUID(1L) final case class Register(shardRegion: ActorRef) extends CoordinatorCommand - with DeadLetterSuppression + @SerialVersionUID(1L) final case class Register(shardRegion: ActorRef) + extends CoordinatorCommand + with DeadLetterSuppression /** * `ShardRegion` in proxy only mode registers to `ShardCoordinator`, until it receives [[RegisterAck]]. */ - @SerialVersionUID(1L) final case class RegisterProxy(shardRegionProxy: ActorRef) extends CoordinatorCommand - with DeadLetterSuppression + @SerialVersionUID(1L) final case class RegisterProxy(shardRegionProxy: ActorRef) + extends CoordinatorCommand + with DeadLetterSuppression /** * Acknowledgement from `ShardCoordinator` that [[Register]] or [[RegisterProxy]] was successful. */ @SerialVersionUID(1L) final case class RegisterAck(coordinator: ActorRef) extends CoordinatorMessage + /** * `ShardRegion` requests the location of a shard by sending this message * to the `ShardCoordinator`. */ - @SerialVersionUID(1L) final case class GetShardHome(shard: ShardId) extends CoordinatorCommand - with DeadLetterSuppression + @SerialVersionUID(1L) final case class GetShardHome(shard: ShardId) + extends CoordinatorCommand + with DeadLetterSuppression /** * `ShardCoordinator` replies with this message for [[GetShardHome]] requests. 
*/ @SerialVersionUID(1L) final case class ShardHome(shard: ShardId, ref: ActorRef) extends CoordinatorMessage + /** * `ShardCoordinator` informs a `ShardRegion` that it is hosting this shard */ @SerialVersionUID(1L) final case class HostShard(shard: ShardId) extends CoordinatorMessage + /** * `ShardRegion` replies with this message for [[HostShard]] requests which lead to it hosting the shard */ - @SerialVersionUID(1l) final case class ShardStarted(shard: ShardId) extends CoordinatorMessage + @SerialVersionUID(1L) final case class ShardStarted(shard: ShardId) extends CoordinatorMessage + /** * `ShardCoordinator` initiates rebalancing process by sending this message * to all registered `ShardRegion` actors (including proxy only). They are @@ -242,10 +265,12 @@ object ShardCoordinator { * `HandOff` to the `ShardRegion` responsible for the shard. */ @SerialVersionUID(1L) final case class BeginHandOff(shard: ShardId) extends CoordinatorMessage + /** * Acknowledgement of [[BeginHandOff]] */ @SerialVersionUID(1L) final case class BeginHandOffAck(shard: ShardId) extends CoordinatorCommand + /** * When all `ShardRegion` actors have acknowledged the `BeginHandOff` the * `ShardCoordinator` sends this message to the `ShardRegion` responsible for the @@ -253,6 +278,7 @@ object ShardCoordinator { * all entities have terminated reply with `ShardStopped` to the `ShardCoordinator`. */ @SerialVersionUID(1L) final case class HandOff(shard: ShardId) extends CoordinatorMessage + /** * Reply to `HandOff` when all entities in the shard have been terminated. */ @@ -261,8 +287,9 @@ object ShardCoordinator { /** * `ShardRegion` requests full handoff to be able to shutdown gracefully. 
*/ - @SerialVersionUID(1L) final case class GracefulShutdownReq(shardRegion: ActorRef) extends CoordinatorCommand - with DeadLetterSuppression + @SerialVersionUID(1L) final case class GracefulShutdownReq(shardRegion: ActorRef) + extends CoordinatorCommand + with DeadLetterSuppression // DomainEvents for the persistent state of the event sourced ShardCoordinator sealed trait DomainEvent extends ClusterShardingSerializable @@ -283,13 +310,14 @@ object ShardCoordinator { * Persistent state of the event sourced ShardCoordinator. */ @SerialVersionUID(1L) final case class State private[akka] ( - // region for each shard - shards: Map[ShardId, ActorRef] = Map.empty, - // shards for each region - regions: Map[ActorRef, Vector[ShardId]] = Map.empty, - regionProxies: Set[ActorRef] = Set.empty, - unallocatedShards: Set[ShardId] = Set.empty, - rememberEntities: Boolean = false) extends ClusterShardingSerializable { + // region for each shard + shards: Map[ShardId, ActorRef] = Map.empty, + // shards for each region + regions: Map[ActorRef, Vector[ShardId]] = Map.empty, + regionProxies: Set[ActorRef] = Set.empty, + unallocatedShards: Set[ShardId] = Set.empty, + rememberEntities: Boolean = false) + extends ClusterShardingSerializable { def withRememberEntities(enabled: Boolean): State = { if (enabled) @@ -301,7 +329,7 @@ object ShardCoordinator { def isEmpty: Boolean = shards.isEmpty && regions.isEmpty && regionProxies.isEmpty - def allShards: Set[ShardId] = shards.keySet union unallocatedShards + def allShards: Set[ShardId] = shards.keySet.union(unallocatedShards) def updated(event: DomainEvent): State = event match { case ShardRegionRegistered(region) => @@ -314,10 +342,7 @@ object ShardCoordinator { require(regions.contains(region), s"Terminated region $region not registered: $this") val newUnallocatedShards = if (rememberEntities) (unallocatedShards ++ regions(region)) else unallocatedShards - copy( - regions = regions - region, - shards = shards -- regions(region), - 
unallocatedShards = newUnallocatedShards) + copy(regions = regions - region, shards = shards -- regions(region), unallocatedShards = newUnallocatedShards) case ShardRegionProxyTerminated(proxy) => require(regionProxies.contains(proxy), s"Terminated region proxy $proxy not registered: $this") copy(regionProxies = regionProxies - proxy) @@ -326,20 +351,18 @@ object ShardCoordinator { require(!shards.contains(shard), s"Shard [$shard] already allocated: $this") val newUnallocatedShards = if (rememberEntities) (unallocatedShards - shard) else unallocatedShards - copy( - shards = shards.updated(shard, region), - regions = regions.updated(region, regions(region) :+ shard), - unallocatedShards = newUnallocatedShards) + copy(shards = shards.updated(shard, region), + regions = regions.updated(region, regions(region) :+ shard), + unallocatedShards = newUnallocatedShards) case ShardHomeDeallocated(shard) => require(shards.contains(shard), s"Shard [$shard] not allocated: $this") val region = shards(shard) require(regions.contains(region), s"Region $region for shard [$shard] not registered: $this") val newUnallocatedShards = if (rememberEntities) (unallocatedShards + shard) else unallocatedShards - copy( - shards = shards - shard, - regions = regions.updated(region, regions(region).filterNot(_ == shard)), - unallocatedShards = newUnallocatedShards) + copy(shards = shards - shard, + regions = regions.updated(region, regions(region).filterNot(_ == shard)), + unallocatedShards = newUnallocatedShards) } } @@ -349,10 +372,12 @@ object ShardCoordinator { * Periodic message to trigger rebalance */ private case object RebalanceTick + /** * End of rebalance process performed by [[RebalanceWorker]] */ private final case class RebalanceDone(shard: ShardId, ok: Boolean) + /** * Check if we've received a shard start request */ @@ -363,8 +388,9 @@ object ShardCoordinator { /** * Result of `allocateShard` is piped to self with this message. 
*/ - private final case class AllocateShardResult( - shard: ShardId, shardRegion: Option[ActorRef], getShardHomeSender: ActorRef) + private final case class AllocateShardResult(shard: ShardId, + shardRegion: Option[ActorRef], + getShardHomeSender: ActorRef) /** * Result of `rebalance` is piped to self with this message. @@ -379,8 +405,11 @@ object ShardCoordinator { * parent `ShardCoordinator`. If the process takes longer than the * `handOffTimeout` it also sends [[akka.cluster.sharding.RebalanceDone]]. */ - private[akka] class RebalanceWorker(shard: String, from: ActorRef, handOffTimeout: FiniteDuration, - regions: Set[ActorRef]) extends Actor { + private[akka] class RebalanceWorker(shard: String, + from: ActorRef, + handOffTimeout: FiniteDuration, + regions: Set[ActorRef]) + extends Actor { import Internal._ regions.foreach(_ ! BeginHandOff(shard)) var remaining = regions @@ -409,7 +438,9 @@ object ShardCoordinator { } } - private[akka] def rebalanceWorkerProps(shard: String, from: ActorRef, handOffTimeout: FiniteDuration, + private[akka] def rebalanceWorkerProps(shard: String, + from: ActorRef, + handOffTimeout: FiniteDuration, regions: Set[ActorRef]): Props = Props(new RebalanceWorker(shard, from, handOffTimeout, regions)) @@ -420,9 +451,11 @@ object ShardCoordinator { * * @see [[ClusterSharding$ ClusterSharding extension]] */ -abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSettings, +abstract class ShardCoordinator(typeName: String, + settings: ClusterShardingSettings, allocationStrategy: ShardCoordinator.ShardAllocationStrategy) - extends Actor with ActorLogging { + extends Actor + with ActorLogging { import ShardCoordinator._ import ShardCoordinator.Internal._ import ShardRegion.ShardId @@ -461,169 +494,185 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti def isMember(region: ActorRef): Boolean = { val regionAddress = region.path.address (region.path.address == self.path.address || - 
cluster.state.members.exists(m => m.address == regionAddress && m.status == MemberStatus.Up)) + cluster.state.members.exists(m => m.address == regionAddress && m.status == MemberStatus.Up)) } - def active: Receive = ({ - case Register(region) => - if (isMember(region)) { - log.debug("ShardRegion registered: [{}]", region) - aliveRegions += region - if (state.regions.contains(region)) { - region ! RegisterAck(self) - allocateShardHomesForRememberEntities() - } else { - gracefulShutdownInProgress -= region - update(ShardRegionRegistered(region)) { evt => - state = state.updated(evt) - context.watch(region) + def active: Receive = + ({ + case Register(region) => + if (isMember(region)) { + log.debug("ShardRegion registered: [{}]", region) + aliveRegions += region + if (state.regions.contains(region)) { region ! RegisterAck(self) allocateShardHomesForRememberEntities() - } - } - } else { - log.debug("ShardRegion {} was not registered since the coordinator currently does not know about a node of that region", region) - } - - case RegisterProxy(proxy) => - log.debug("ShardRegion proxy registered: [{}]", proxy) - if (state.regionProxies.contains(proxy)) - proxy ! RegisterAck(self) - else { - update(ShardRegionProxyRegistered(proxy)) { evt => - state = state.updated(evt) - context.watch(proxy) - proxy ! 
RegisterAck(self) - } - } - - case GetShardHome(shard) => - if (!handleGetShardHome(shard)) { - // location not know, yet - val activeRegions = state.regions -- gracefulShutdownInProgress - if (activeRegions.nonEmpty) { - val getShardHomeSender = sender() - val regionFuture = allocationStrategy.allocateShard(getShardHomeSender, shard, activeRegions) - regionFuture.value match { - case Some(Success(region)) => - continueGetShardHome(shard, region, getShardHomeSender) - case _ => - // continue when future is completed - regionFuture.map { region => - AllocateShardResult(shard, Some(region), getShardHomeSender) - }.recover { - case _ => AllocateShardResult(shard, None, getShardHomeSender) - }.pipeTo(self) - } - } - } - - case AllocateShardResult(shard, None, getShardHomeSender) => - log.debug("Shard [{}] allocation failed. It will be retried.", shard) - - case AllocateShardResult(shard, Some(region), getShardHomeSender) => - continueGetShardHome(shard, region, getShardHomeSender) - - case ShardStarted(shard) => - unAckedHostShards.get(shard) match { - case Some(cancel) => - cancel.cancel() - unAckedHostShards = unAckedHostShards - shard - case _ => - } - - case ResendShardHost(shard, region) => - state.shards.get(shard) match { - case Some(`region`) => sendHostShardMsg(shard, region) - case _ => //Reallocated to another region - } - - case RebalanceTick => - if (state.regions.nonEmpty) { - val shardsFuture = allocationStrategy.rebalance(state.regions, rebalanceInProgress.keySet) - shardsFuture.value match { - case Some(Success(shards)) => - continueRebalance(shards) - case _ => - // continue when future is completed - shardsFuture.map { shards => RebalanceResult(shards) - }.recover { - case _ => RebalanceResult(Set.empty) - }.pipeTo(self) - } - } - - case RebalanceResult(shards) => - continueRebalance(shards) - - case RebalanceDone(shard, ok) => - log.debug("Rebalance shard [{}] done [{}]", shard, ok) - // The shard could have been removed by ShardRegionTerminated - 
if (state.shards.contains(shard)) { - if (ok) { - update(ShardHomeDeallocated(shard)) { evt => - log.debug("Shard [{}] deallocated after rebalance", shard) - state = state.updated(evt) - clearRebalanceInProgress(shard) - allocateShardHomesForRememberEntities() + } else { + gracefulShutdownInProgress -= region + update(ShardRegionRegistered(region)) { evt => + state = state.updated(evt) + context.watch(region) + region ! RegisterAck(self) + allocateShardHomesForRememberEntities() + } + } + } else { + log.debug( + "ShardRegion {} was not registered since the coordinator currently does not know about a node of that region", + region) + } + + case RegisterProxy(proxy) => + log.debug("ShardRegion proxy registered: [{}]", proxy) + if (state.regionProxies.contains(proxy)) + proxy ! RegisterAck(self) + else { + update(ShardRegionProxyRegistered(proxy)) { evt => + state = state.updated(evt) + context.watch(proxy) + proxy ! RegisterAck(self) + } + } + + case GetShardHome(shard) => + if (!handleGetShardHome(shard)) { + // location not know, yet + val activeRegions = state.regions -- gracefulShutdownInProgress + if (activeRegions.nonEmpty) { + val getShardHomeSender = sender() + val regionFuture = allocationStrategy.allocateShard(getShardHomeSender, shard, activeRegions) + regionFuture.value match { + case Some(Success(region)) => + continueGetShardHome(shard, region, getShardHomeSender) + case _ => + // continue when future is completed + regionFuture + .map { region => + AllocateShardResult(shard, Some(region), getShardHomeSender) + } + .recover { + case _ => AllocateShardResult(shard, None, getShardHomeSender) + } + .pipeTo(self) + } + } + } + + case AllocateShardResult(shard, None, getShardHomeSender) => + log.debug("Shard [{}] allocation failed. 
It will be retried.", shard) + + case AllocateShardResult(shard, Some(region), getShardHomeSender) => + continueGetShardHome(shard, region, getShardHomeSender) + + case ShardStarted(shard) => + unAckedHostShards.get(shard) match { + case Some(cancel) => + cancel.cancel() + unAckedHostShards = unAckedHostShards - shard + case _ => + } + + case ResendShardHost(shard, region) => + state.shards.get(shard) match { + case Some(`region`) => sendHostShardMsg(shard, region) + case _ => //Reallocated to another region + } + + case RebalanceTick => + if (state.regions.nonEmpty) { + val shardsFuture = allocationStrategy.rebalance(state.regions, rebalanceInProgress.keySet) + shardsFuture.value match { + case Some(Success(shards)) => + continueRebalance(shards) + case _ => + // continue when future is completed + shardsFuture + .map { shards => + RebalanceResult(shards) + } + .recover { + case _ => RebalanceResult(Set.empty) + } + .pipeTo(self) + } + } + + case RebalanceResult(shards) => + continueRebalance(shards) + + case RebalanceDone(shard, ok) => + log.debug("Rebalance shard [{}] done [{}]", shard, ok) + // The shard could have been removed by ShardRegionTerminated + if (state.shards.contains(shard)) { + if (ok) { + update(ShardHomeDeallocated(shard)) { evt => + log.debug("Shard [{}] deallocated after rebalance", shard) + state = state.updated(evt) + clearRebalanceInProgress(shard) + allocateShardHomesForRememberEntities() + } + } else { + // rebalance not completed, graceful shutdown will be retried + gracefulShutdownInProgress -= state.shards(shard) + clearRebalanceInProgress(shard) } } else { - // rebalance not completed, graceful shutdown will be retried - gracefulShutdownInProgress -= state.shards(shard) clearRebalanceInProgress(shard) } - } else { - clearRebalanceInProgress(shard) - } - case GracefulShutdownReq(region) => - if (!gracefulShutdownInProgress(region)) - state.regions.get(region) match { - case Some(shards) => - log.debug("Graceful shutdown of region [{}] 
with shards [{}]", region, shards) - gracefulShutdownInProgress += region - continueRebalance(shards.toSet) - case None => - } + case GracefulShutdownReq(region) => + if (!gracefulShutdownInProgress(region)) + state.regions.get(region) match { + case Some(shards) => + log.debug("Graceful shutdown of region [{}] with shards [{}]", region, shards) + gracefulShutdownInProgress += region + continueRebalance(shards.toSet) + case None => + } - case ShardRegion.GetClusterShardingStats(waitMax) => - import akka.pattern.ask - implicit val timeout: Timeout = waitMax - Future.sequence(aliveRegions.map { regionActor => - (regionActor ? ShardRegion.GetShardRegionStats).mapTo[ShardRegion.ShardRegionStats] - .map(stats => regionActor -> stats) - }).map { allRegionStats => - ShardRegion.ClusterShardingStats(allRegionStats.map { - case (region, stats) => - val regionAddress = region.path.address - val address: Address = - if (regionAddress.hasLocalScope && regionAddress.system == cluster.selfAddress.system) cluster.selfAddress - else regionAddress + case ShardRegion.GetClusterShardingStats(waitMax) => + import akka.pattern.ask + implicit val timeout: Timeout = waitMax + Future + .sequence(aliveRegions.map { regionActor => + (regionActor ? 
ShardRegion.GetShardRegionStats) + .mapTo[ShardRegion.ShardRegionStats] + .map(stats => regionActor -> stats) + }) + .map { allRegionStats => + ShardRegion.ClusterShardingStats(allRegionStats.map { + case (region, stats) => + val regionAddress = region.path.address + val address: Address = + if (regionAddress.hasLocalScope && regionAddress.system == cluster.selfAddress.system) + cluster.selfAddress + else regionAddress - address -> stats - }.toMap) - }.recover { - case x: AskTimeoutException => ShardRegion.ClusterShardingStats(Map.empty) - }.pipeTo(sender()) + address -> stats + }.toMap) + } + .recover { + case x: AskTimeoutException => ShardRegion.ClusterShardingStats(Map.empty) + } + .pipeTo(sender()) - case ShardHome(_, _) => - //On rebalance, we send ourselves a GetShardHome message to reallocate a - // shard. This receive handles the "response" from that message. i.e. ignores it. + case ShardHome(_, _) => + //On rebalance, we send ourselves a GetShardHome message to reallocate a + // shard. This receive handles the "response" from that message. i.e. ignores it. - case ClusterShuttingDown => - log.debug("Shutting down ShardCoordinator") - // can't stop because supervisor will start it again, - // it will soon be stopped when singleton is stopped - context.become(shuttingDown) + case ClusterShuttingDown => + log.debug("Shutting down ShardCoordinator") + // can't stop because supervisor will start it again, + // it will soon be stopped when singleton is stopped + context.become(shuttingDown) - case ShardRegion.GetCurrentRegions => - val reply = ShardRegion.CurrentRegions(state.regions.keySet.map { ref => - if (ref.path.address.host.isEmpty) cluster.selfAddress - else ref.path.address - }) - sender() ! reply + case ShardRegion.GetCurrentRegions => + val reply = ShardRegion.CurrentRegions(state.regions.keySet.map { ref => + if (ref.path.address.host.isEmpty) cluster.selfAddress + else ref.path.address + }) + sender() ! 
reply - }: Receive).orElse[Any, Unit](receiveTerminated) + }: Receive).orElse[Any, Unit](receiveTerminated) private def clearRebalanceInProgress(shard: String): Unit = { rebalanceInProgress.get(shard) match { @@ -639,7 +688,9 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti private def deferGetShardHomeRequest(shard: ShardId, from: ActorRef): Unit = { log.debug("GetShardHome [{}] request from [{}] deferred, because rebalance is in progress for this shard. " + - "It will be handled when rebalance is done.", shard, from) + "It will be handled when rebalance is done.", + shard, + from) rebalanceInProgress = rebalanceInProgress.updated(shard, rebalanceInProgress(shard) + from) } @@ -734,7 +785,9 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti if (state.regions.contains(ref)) { log.debug("ShardRegion terminated: [{}]", ref) regionTerminationInProgress += ref - state.regions(ref).foreach { s => self ! GetShardHome(s) } + state.regions(ref).foreach { s => + self ! GetShardHome(s) + } update(ShardRegionTerminated(ref)) { evt => state = state.updated(evt) @@ -784,9 +837,10 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti getShardHomeSender ! 
ShardHome(evt.shard, evt.region) } } else - log.debug( - "Allocated region {} for shard [{}] is not (any longer) one of the registered regions: {}", - region, shard, state) + log.debug("Allocated region {} for shard [{}] is not (any longer) one of the registered regions: {}", + region, + shard, + state) } } @@ -797,9 +851,12 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti case Some(rebalanceFromRegion) => rebalanceInProgress = rebalanceInProgress.updated(shard, Set.empty) log.debug("Rebalance shard [{}] from [{}]", shard, rebalanceFromRegion) - context.actorOf(rebalanceWorkerProps(shard, rebalanceFromRegion, handOffTimeout, - state.regions.keySet union state.regionProxies) - .withDispatcher(context.props.dispatcher)) + context.actorOf( + rebalanceWorkerProps( + shard, + rebalanceFromRegion, + handOffTimeout, + state.regions.keySet.union(state.regionProxies)).withDispatcher(context.props.dispatcher)) case None => log.debug("Rebalance of non-existing shard [{}] is ignored", shard) } @@ -814,9 +871,11 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti * * @see [[ClusterSharding$ ClusterSharding extension]] */ -class PersistentShardCoordinator(typeName: String, settings: ClusterShardingSettings, +class PersistentShardCoordinator(typeName: String, + settings: ClusterShardingSettings, allocationStrategy: ShardCoordinator.ShardAllocationStrategy) - extends ShardCoordinator(typeName, settings, allocationStrategy) with PersistentActor { + extends ShardCoordinator(typeName, settings, allocationStrategy) + with PersistentActor { import ShardCoordinator.Internal._ import settings.tuningParameters._ @@ -838,9 +897,11 @@ class PersistentShardCoordinator(typeName: String, settings: ClusterShardingSett if (state.regions.contains(region)) state = state.updated(evt) else { - log.debug("ShardRegionTerminated, but region {} was not registered. 
This inconsistency is due to that " + + log.debug( + "ShardRegionTerminated, but region {} was not registered. This inconsistency is due to that " + " some stored ActorRef in Akka v2.3.0 and v2.3.1 did not contain full address information. It will be " + - "removed by later watch.", region) + "removed by later watch.", + region) } case ShardRegionProxyTerminated(proxy) => if (state.regionProxies.contains(proxy)) @@ -866,12 +927,13 @@ class PersistentShardCoordinator(typeName: String, settings: ClusterShardingSett override def receiveCommand: Receive = waitingForStateInitialized - def waitingForStateInitialized: Receive = ({ - case StateInitialized => - stateInitialized() - context.become(active.orElse[Any, Unit](receiveSnapshotResult)) + def waitingForStateInitialized: Receive = + ({ + case StateInitialized => + stateInitialized() + context.become(active.orElse[Any, Unit](receiveSnapshotResult)) - }: Receive).orElse[Any, Unit](receiveTerminated).orElse[Any, Unit](receiveSnapshotResult) + }: Receive).orElse[Any, Unit](receiveTerminated).orElse[Any, Unit](receiveSnapshotResult) def receiveSnapshotResult: Receive = { case e: SaveSnapshotSuccess => @@ -913,18 +975,18 @@ class PersistentShardCoordinator(typeName: String, settings: ClusterShardingSett * * @see [[ClusterSharding$ ClusterSharding extension]] */ -class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings, +class DDataShardCoordinator(typeName: String, + settings: ClusterShardingSettings, allocationStrategy: ShardCoordinator.ShardAllocationStrategy, - replicator: ActorRef, - majorityMinCap: Int, - rememberEntities: Boolean) - extends ShardCoordinator(typeName, settings, allocationStrategy) with Stash { + replicator: ActorRef, + majorityMinCap: Int, + rememberEntities: Boolean) + extends ShardCoordinator(typeName, settings, allocationStrategy) + with Stash { import ShardCoordinator.Internal._ import akka.cluster.ddata.Replicator.Update - private val readMajority = ReadMajority( - 
settings.tuningParameters.waitingForStateTimeout, - majorityMinCap) + private val readMajority = ReadMajority(settings.tuningParameters.waitingForStateTimeout, majorityMinCap) private val writeMajority = WriteMajority(settings.tuningParameters.updatingStateTimeout, majorityMinCap) implicit val node = Cluster(context.system) @@ -948,54 +1010,55 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings, override def receive: Receive = waitingForState(allKeys) // This state will drop all other messages since they will be retried - def waitingForState(remainingKeys: Set[Key[ReplicatedData]]): Receive = ({ - case g @ GetSuccess(CoordinatorStateKey, _) => - state = g.get(CoordinatorStateKey).value.withRememberEntities(settings.rememberEntities) - val newRemainingKeys = remainingKeys - CoordinatorStateKey - if (newRemainingKeys.isEmpty) - becomeWaitingForStateInitialized() - else - context.become(waitingForState(newRemainingKeys)) + def waitingForState(remainingKeys: Set[Key[ReplicatedData]]): Receive = + ({ + case g @ GetSuccess(CoordinatorStateKey, _) => + state = g.get(CoordinatorStateKey).value.withRememberEntities(settings.rememberEntities) + val newRemainingKeys = remainingKeys - CoordinatorStateKey + if (newRemainingKeys.isEmpty) + becomeWaitingForStateInitialized() + else + context.become(waitingForState(newRemainingKeys)) - case GetFailure(CoordinatorStateKey, _) => - log.error( - "The ShardCoordinator was unable to get an initial state within 'waiting-for-state-timeout': {} millis (retrying). Has ClusterSharding been started on all nodes?", - readMajority.timeout.toMillis) - // repeat until GetSuccess - getCoordinatorState() + case GetFailure(CoordinatorStateKey, _) => + log.error( + "The ShardCoordinator was unable to get an initial state within 'waiting-for-state-timeout': {} millis (retrying). 
Has ClusterSharding been started on all nodes?", + readMajority.timeout.toMillis) + // repeat until GetSuccess + getCoordinatorState() - case NotFound(CoordinatorStateKey, _) => - val newRemainingKeys = remainingKeys - CoordinatorStateKey - if (newRemainingKeys.isEmpty) - becomeWaitingForStateInitialized() - else - context.become(waitingForState(newRemainingKeys)) + case NotFound(CoordinatorStateKey, _) => + val newRemainingKeys = remainingKeys - CoordinatorStateKey + if (newRemainingKeys.isEmpty) + becomeWaitingForStateInitialized() + else + context.become(waitingForState(newRemainingKeys)) - case g @ GetSuccess(AllShardsKey, _) => - shards = g.get(AllShardsKey).elements - val newUnallocatedShards = state.unallocatedShards union (shards diff state.shards.keySet) - state = state.copy(unallocatedShards = newUnallocatedShards) - val newRemainingKeys = remainingKeys - AllShardsKey - if (newRemainingKeys.isEmpty) - becomeWaitingForStateInitialized() - else - context.become(waitingForState(newRemainingKeys)) + case g @ GetSuccess(AllShardsKey, _) => + shards = g.get(AllShardsKey).elements + val newUnallocatedShards = state.unallocatedShards.union(shards.diff(state.shards.keySet)) + state = state.copy(unallocatedShards = newUnallocatedShards) + val newRemainingKeys = remainingKeys - AllShardsKey + if (newRemainingKeys.isEmpty) + becomeWaitingForStateInitialized() + else + context.become(waitingForState(newRemainingKeys)) - case GetFailure(AllShardsKey, _) => - log.error( - "The ShardCoordinator was unable to get all shards state within 'waiting-for-state-timeout': {} millis (retrying)", - readMajority.timeout.toMillis) - // repeat until GetSuccess - getAllShards() + case GetFailure(AllShardsKey, _) => + log.error( + "The ShardCoordinator was unable to get all shards state within 'waiting-for-state-timeout': {} millis (retrying)", + readMajority.timeout.toMillis) + // repeat until GetSuccess + getAllShards() - case NotFound(AllShardsKey, _) => - val newRemainingKeys = 
remainingKeys - AllShardsKey - if (newRemainingKeys.isEmpty) - becomeWaitingForStateInitialized() - else - context.become(waitingForState(newRemainingKeys)) + case NotFound(AllShardsKey, _) => + val newRemainingKeys = remainingKeys - AllShardsKey + if (newRemainingKeys.isEmpty) + becomeWaitingForStateInitialized() + else + context.become(waitingForState(newRemainingKeys)) - }: Receive).orElse[Any, Unit](receiveTerminated) + }: Receive).orElse[Any, Unit](receiveTerminated) private def becomeWaitingForStateInitialized(): Unit = { if (state.isEmpty) { @@ -1020,7 +1083,8 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings, } // this state will stash all messages until it receives UpdateSuccess - def waitingForUpdate[E <: DomainEvent](evt: E, afterUpdateCallback: E => Unit, + def waitingForUpdate[E <: DomainEvent](evt: E, + afterUpdateCallback: E => Unit, remainingKeys: Set[Key[ReplicatedData]]): Receive = { case UpdateSuccess(CoordinatorStateKey, Some(`evt`)) => log.debug("The coordinator state was successfully updated with {}", evt) @@ -1033,8 +1097,9 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings, case UpdateTimeout(CoordinatorStateKey, Some(`evt`)) => log.error( "The ShardCoordinator was unable to update a distributed state within 'updating-state-timeout': {} millis (retrying). " + - "Perhaps the ShardRegion has not started on all active nodes yet? event={}", - writeMajority.timeout.toMillis, evt) + "Perhaps the ShardRegion has not started on all active nodes yet? 
event={}", + writeMajority.timeout.toMillis, + evt) // repeat until UpdateSuccess sendCoordinatorStateUpdate(evt) @@ -1049,7 +1114,8 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings, case UpdateTimeout(AllShardsKey, Some(newShard: String)) => log.error( "The ShardCoordinator was unable to update shards distributed state within 'updating-state-timeout': {} millis (retrying), event={}", - writeMajority.timeout.toMillis, evt) + writeMajority.timeout.toMillis, + evt) // repeat until UpdateSuccess sendAllShardsUpdate(newShard) @@ -1057,7 +1123,9 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings, log.error( cause, "The ShardCoordinator was unable to update a distributed state {} with error {} and event {}.Coordinator will be restarted", - key, error, evt) + key, + error, + evt) throw cause case GetShardHome(shard) => diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala index b19da256de..a7e5674f0d 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/ShardRegion.scala @@ -29,52 +29,72 @@ import akka.cluster.ClusterSettings.DataCenter * @see [[ClusterSharding$ ClusterSharding extension]] */ object ShardRegion { + /** * INTERNAL API * Factory method for the [[akka.actor.Props]] of the [[ShardRegion]] actor. 
*/ - private[akka] def props( - typeName: String, - entityProps: String => Props, - settings: ClusterShardingSettings, - coordinatorPath: String, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, - handOffStopMessage: Any, - replicator: ActorRef, - majorityMinCap: Int): Props = - Props(new ShardRegion(typeName, Some(entityProps), dataCenter = None, settings, coordinatorPath, extractEntityId, - extractShardId, handOffStopMessage, replicator, majorityMinCap)).withDeploy(Deploy.local) + private[akka] def props(typeName: String, + entityProps: String => Props, + settings: ClusterShardingSettings, + coordinatorPath: String, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, + handOffStopMessage: Any, + replicator: ActorRef, + majorityMinCap: Int): Props = + Props( + new ShardRegion(typeName, + Some(entityProps), + dataCenter = None, + settings, + coordinatorPath, + extractEntityId, + extractShardId, + handOffStopMessage, + replicator, + majorityMinCap)).withDeploy(Deploy.local) /** * INTERNAL API * Factory method for the [[akka.actor.Props]] of the [[ShardRegion]] actor * when using it in proxy only mode. 
*/ - private[akka] def proxyProps( - typeName: String, - dataCenter: Option[DataCenter], - settings: ClusterShardingSettings, - coordinatorPath: String, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, - replicator: ActorRef, - majorityMinCap: Int): Props = - Props(new ShardRegion(typeName, None, dataCenter, settings, coordinatorPath, extractEntityId, extractShardId, - PoisonPill, replicator, majorityMinCap)).withDeploy(Deploy.local) + private[akka] def proxyProps(typeName: String, + dataCenter: Option[DataCenter], + settings: ClusterShardingSettings, + coordinatorPath: String, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, + replicator: ActorRef, + majorityMinCap: Int): Props = + Props( + new ShardRegion(typeName, + None, + dataCenter, + settings, + coordinatorPath, + extractEntityId, + extractShardId, + PoisonPill, + replicator, + majorityMinCap)).withDeploy(Deploy.local) /** * Marker type of entity identifier (`String`). */ type EntityId = String + /** * Marker type of shard identifier (`String`). */ type ShardId = String + /** * Marker type of application messages (`Any`). */ type Msg = Any + /** * Interface of the partial function used by the [[ShardRegion]] to * extract the entity id and the message to send to the entity from an @@ -86,6 +106,7 @@ object ShardRegion { * sending to the entity actor. */ type ExtractEntityId = PartialFunction[Msg, (EntityId, Msg)] + /** * Interface of the function used by the [[ShardRegion]] to * extract the shard id from an incoming message. @@ -100,11 +121,13 @@ object ShardRegion { * incoming message. */ trait MessageExtractor { + /** * Extract the entity id from an incoming `message`. If `null` is returned * the message will be `unhandled`, i.e. posted as `Unhandled` messages on the event stream */ def entityId(message: Any): String + /** * Extract the message to send to the entity from an incoming `message`. 
* Note that the extracted message does not have to be the same as the incoming @@ -112,6 +135,7 @@ object ShardRegion { * sending to the entity actor. */ def entityMessage(message: Any): Any + /** * Extract the shard id from an incoming `message`. Only messages that passed the [[#entityId]] * function will be used as input to this function. @@ -137,6 +161,7 @@ object ShardRegion { * of unique shards is limited by the given `maxNumberOfShards`. */ abstract class HashCodeMessageExtractor(maxNumberOfShards: Int) extends MessageExtractor { + /** * Default implementation pass on the message as is. */ @@ -341,14 +366,19 @@ object ShardRegion { * them have terminated it replies with `ShardStopped`. * If the entities don't terminate after `handoffTimeout` it will try stopping them forcefully. */ - private[akka] class HandOffStopper(shard: String, replyTo: ActorRef, entities: Set[ActorRef], stopMessage: Any, handoffTimeout: FiniteDuration) - extends Actor with ActorLogging { + private[akka] class HandOffStopper(shard: String, + replyTo: ActorRef, + entities: Set[ActorRef], + stopMessage: Any, + handoffTimeout: FiniteDuration) + extends Actor + with ActorLogging { import ShardCoordinator.Internal.ShardStopped context.setReceiveTimeout(handoffTimeout) entities.foreach { a => - context watch a + context.watch(a) a ! stopMessage } @@ -357,24 +387,28 @@ object ShardRegion { def receive = { case ReceiveTimeout => log.warning("HandOffStopMessage[{}] is not handled by some of the entities of the `{}` shard, " + - "stopping the remaining entities.", stopMessage.getClass.getName, shard) + "stopping the remaining entities.", + stopMessage.getClass.getName, + shard) - remaining.foreach { - ref => - context stop ref + remaining.foreach { ref => + context.stop(ref) } case Terminated(ref) => remaining -= ref if (remaining.isEmpty) { replyTo ! 
ShardStopped(shard) - context stop self + context.stop(self) } } } - private[akka] def handOffStopperProps( - shard: String, replyTo: ActorRef, entities: Set[ActorRef], stopMessage: Any, handoffTimeout: FiniteDuration): Props = + private[akka] def handOffStopperProps(shard: String, + replyTo: ActorRef, + entities: Set[ActorRef], + stopMessage: Any, + handoffTimeout: FiniteDuration): Props = Props(new HandOffStopper(shard, replyTo, entities, stopMessage, handoffTimeout)).withDeploy(Deploy.local) } @@ -387,17 +421,18 @@ object ShardRegion { * * @see [[ClusterSharding$ ClusterSharding extension]] */ -private[akka] class ShardRegion( - typeName: String, - entityProps: Option[String => Props], - dataCenter: Option[DataCenter], - settings: ClusterShardingSettings, - coordinatorPath: String, - extractEntityId: ShardRegion.ExtractEntityId, - extractShardId: ShardRegion.ExtractShardId, - handOffStopMessage: Any, - replicator: ActorRef, - majorityMinCap: Int) extends Actor with ActorLogging { +private[akka] class ShardRegion(typeName: String, + entityProps: Option[String => Props], + dataCenter: Option[DataCenter], + settings: ClusterShardingSettings, + coordinatorPath: String, + extractEntityId: ShardRegion.ExtractEntityId, + extractShardId: ShardRegion.ExtractShardId, + handOffStopMessage: Any, + replicator: ActorRef, + majorityMinCap: Int) + extends Actor + with ActorLogging { import ShardCoordinator.Internal._ import ShardRegion._ @@ -426,9 +461,8 @@ private[akka] class ShardRegion( // for CoordinatedShutdown val gracefulShutdownProgress = Promise[Done]() - CoordinatedShutdown(context.system).addTask( - CoordinatedShutdown.PhaseClusterShardingShutdownRegion, - "region-shutdown") { () => + CoordinatedShutdown(context.system) + .addTask(CoordinatedShutdown.PhaseClusterShardingShutdownRegion, "region-shutdown") { () => if (cluster.isTerminated || cluster.selfMember.status == MemberStatus.Down) { Future.successful(Done) } else { @@ -479,7 +513,9 @@ private[akka] class 
ShardRegion( membersByAge = newMembers if (before != after) { if (log.isDebugEnabled) - log.debug("Coordinator moved from [{}] to [{}]", before.map(_.address).getOrElse(""), after.map(_.address).getOrElse("")) + log.debug("Coordinator moved from [{}] to [{}]", + before.map(_.address).getOrElse(""), + after.map(_.address).getOrElse("")) coordinator = None register() } @@ -496,12 +532,17 @@ private[akka] class ShardRegion( case msg: RestartShard => deliverMessage(msg, sender()) case msg: StartEntity => deliverStartEntity(msg, sender()) case msg if extractEntityId.isDefinedAt(msg) => deliverMessage(msg, sender()) - case unknownMsg => log.warning("Message does not have an extractor defined in shard [{}] so it was ignored: {}", typeName, unknownMsg) + case unknownMsg => + log.warning("Message does not have an extractor defined in shard [{}] so it was ignored: {}", + typeName, + unknownMsg) } def receiveClusterState(state: CurrentClusterState): Unit = { - changeMembers(immutable.SortedSet.empty(ageOrdering) union state.members.filter(m => - m.status == MemberStatus.Up && matchingRole(m))) + changeMembers( + immutable.SortedSet + .empty(ageOrdering) + .union(state.members.filter(m => m.status == MemberStatus.Up && matchingRole(m)))) } def receiveClusterEvent(evt: ClusterDomainEvent): Unit = evt match { @@ -524,7 +565,7 @@ private[akka] class ShardRegion( case _: MemberEvent => // these are expected, no need to warn about them - case _ => unhandled(evt) + case _ => unhandled(evt) } def receiveCoordinatorMessage(msg: CoordinatorMessage): Unit = msg match { @@ -586,7 +627,7 @@ private[akka] class ShardRegion( if (shards.contains(shard)) { handingOff += shards(shard) - shards(shard) forward msg + shards(shard).forward(msg) } else sender() ! ShardStopped(shard) @@ -631,7 +672,7 @@ private[akka] class ShardRegion( replyToRegionStatsQuery(sender()) case msg: GetClusterShardingStats => - coordinator.fold(sender ! 
ClusterShardingStats(Map.empty))(_ forward msg) + coordinator.fold(sender ! ClusterShardingStats(Map.empty))(_.forward(msg)) case _ => unhandled(query) } @@ -667,23 +708,29 @@ private[akka] class ShardRegion( } def replyToRegionStateQuery(ref: ActorRef): Unit = { - askAllShards[Shard.CurrentShardState](Shard.GetCurrentShardState).map { shardStates => - CurrentShardRegionState(shardStates.map { - case (shardId, state) => ShardRegion.ShardState(shardId, state.entityIds) - }.toSet) - }.recover { - case _: AskTimeoutException => CurrentShardRegionState(Set.empty) - }.pipeTo(ref) + askAllShards[Shard.CurrentShardState](Shard.GetCurrentShardState) + .map { shardStates => + CurrentShardRegionState(shardStates.map { + case (shardId, state) => ShardRegion.ShardState(shardId, state.entityIds) + }.toSet) + } + .recover { + case _: AskTimeoutException => CurrentShardRegionState(Set.empty) + } + .pipeTo(ref) } def replyToRegionStatsQuery(ref: ActorRef): Unit = { - askAllShards[Shard.ShardStats](Shard.GetShardStats).map { shardStats => - ShardRegionStats(shardStats.map { - case (shardId, stats) => (shardId, stats.entityCount) - }.toMap) - }.recover { - case x: AskTimeoutException => ShardRegionStats(Map.empty) - }.pipeTo(ref) + askAllShards[Shard.ShardStats](Shard.GetShardStats) + .map { shardStats => + ShardRegionStats(shardStats.map { + case (shardId, stats) => (shardId, stats.entityCount) + }.toMap) + } + .recover { + case x: AskTimeoutException => ShardRegionStats(Map.empty) + } + .pipeTo(ref) } def askAllShards[T: ClassTag](msg: Any): Future[Seq[(ShardId, T)]] = { @@ -707,11 +754,13 @@ private[akka] class ShardRegion( else s"Coordinator [${membersByAge.head}] is reachable." log.warning( "Trying to register to coordinator at [{}], but no acknowledgement. Total [{}] buffered messages. [{}]", - actorSelection, shardBuffers.totalSize, coordinatorMessage - ) - case None => log.warning( - "No coordinator found to register. 
Probably, no seed-nodes configured and manual cluster join not performed? Total [{}] buffered messages.", - shardBuffers.totalSize) + actorSelection, + shardBuffers.totalSize, + coordinatorMessage) + case None => + log.warning( + "No coordinator found to register. Probably, no seed-nodes configured and manual cluster join not performed? Total [{}] buffered messages.", + shardBuffers.totalSize) } } @@ -720,15 +769,16 @@ private[akka] class ShardRegion( def requestShardBufferHomes(): Unit = { shardBuffers.foreach { - case (shard, buf) => coordinator.foreach { c => - val logMsg = "Retry request for shard [{}] homes from coordinator at [{}]. [{}] buffered messages." - if (retryCount >= 5) - log.warning(logMsg, shard, c, buf.size) - else - log.debug(logMsg, shard, c, buf.size) + case (shard, buf) => + coordinator.foreach { c => + val logMsg = "Retry request for shard [{}] homes from coordinator at [{}]. [{}] buffered messages." + if (retryCount >= 5) + log.warning(logMsg, shard, c, buf.size) + else + log.debug(logMsg, shard, c, buf.size) - c ! GetShardHome(shard) - } + c ! GetShardHome(shard) + } } } @@ -758,7 +808,8 @@ private[akka] class ShardRegion( if (tot <= bufferSize / 2) log.info(logMsg) else - log.warning(logMsg + " The coordinator might not be available. You might want to check cluster membership status.") + log.warning( + logMsg + " The coordinator might not be available. 
You might want to check cluster membership status.") } } } @@ -832,24 +883,27 @@ private[akka] class ShardRegion( if (startingShards.contains(id)) None else { - shards.get(id).orElse( - entityProps match { + shards + .get(id) + .orElse(entityProps match { case Some(props) if !shardsByRef.values.exists(_ == id) => log.debug("Starting shard [{}] in region", id) val name = URLEncoder.encode(id, "utf-8") - val shard = context.watch(context.actorOf( - Shard.props( - typeName, - id, - props, - settings, - extractEntityId, - extractShardId, - handOffStopMessage, - replicator, - majorityMinCap).withDispatcher(context.props.dispatcher), - name)) + val shard = context.watch( + context.actorOf( + Shard + .props(typeName, + id, + props, + settings, + extractEntityId, + extractShardId, + handOffStopMessage, + replicator, + majorityMinCap) + .withDispatcher(context.props.dispatcher), + name)) shardsByRef = shardsByRef.updated(shard, id) shards = shards.updated(id, shard) startingShards += id diff --git a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala index 5eee5fde82..14492d2237 100644 --- a/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala +++ b/akka-cluster-sharding/src/main/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializer.scala @@ -29,7 +29,8 @@ import akka.cluster.sharding.ShardRegion._ * INTERNAL API: Protobuf serializer of ClusterSharding messages. 
*/ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSystem) - extends SerializerWithStringManifest with BaseSerializer { + extends SerializerWithStringManifest + with BaseSerializer { import ShardCoordinator.Internal._ import Shard.{ GetShardStats, ShardStats } import Shard.{ State => EntityState, EntityStarted, EntityStopped } @@ -73,41 +74,76 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy EntityStateManifest -> entityStateFromBinary, EntityStartedManifest -> entityStartedFromBinary, EntityStoppedManifest -> entityStoppedFromBinary, - CoordinatorStateManifest -> coordinatorStateFromBinary, - ShardRegionRegisteredManifest -> { bytes => ShardRegionRegistered(actorRefMessageFromBinary(bytes)) }, - ShardRegionProxyRegisteredManifest -> { bytes => ShardRegionProxyRegistered(actorRefMessageFromBinary(bytes)) }, - ShardRegionTerminatedManifest -> { bytes => ShardRegionTerminated(actorRefMessageFromBinary(bytes)) }, - ShardRegionProxyTerminatedManifest -> { bytes => ShardRegionProxyTerminated(actorRefMessageFromBinary(bytes)) }, + ShardRegionRegisteredManifest -> { bytes => + ShardRegionRegistered(actorRefMessageFromBinary(bytes)) + }, + ShardRegionProxyRegisteredManifest -> { bytes => + ShardRegionProxyRegistered(actorRefMessageFromBinary(bytes)) + }, + ShardRegionTerminatedManifest -> { bytes => + ShardRegionTerminated(actorRefMessageFromBinary(bytes)) + }, + ShardRegionProxyTerminatedManifest -> { bytes => + ShardRegionProxyTerminated(actorRefMessageFromBinary(bytes)) + }, ShardHomeAllocatedManifest -> shardHomeAllocatedFromBinary, - ShardHomeDeallocatedManifest -> { bytes => ShardHomeDeallocated(shardIdMessageFromBinary(bytes)) }, - - RegisterManifest -> { bytes => Register(actorRefMessageFromBinary(bytes)) }, - RegisterProxyManifest -> { bytes => RegisterProxy(actorRefMessageFromBinary(bytes)) }, - RegisterAckManifest -> { bytes => RegisterAck(actorRefMessageFromBinary(bytes)) }, - GetShardHomeManifest 
-> { bytes => GetShardHome(shardIdMessageFromBinary(bytes)) }, + ShardHomeDeallocatedManifest -> { bytes => + ShardHomeDeallocated(shardIdMessageFromBinary(bytes)) + }, + RegisterManifest -> { bytes => + Register(actorRefMessageFromBinary(bytes)) + }, + RegisterProxyManifest -> { bytes => + RegisterProxy(actorRefMessageFromBinary(bytes)) + }, + RegisterAckManifest -> { bytes => + RegisterAck(actorRefMessageFromBinary(bytes)) + }, + GetShardHomeManifest -> { bytes => + GetShardHome(shardIdMessageFromBinary(bytes)) + }, ShardHomeManifest -> shardHomeFromBinary, - HostShardManifest -> { bytes => HostShard(shardIdMessageFromBinary(bytes)) }, - ShardStartedManifest -> { bytes => ShardStarted(shardIdMessageFromBinary(bytes)) }, - BeginHandOffManifest -> { bytes => BeginHandOff(shardIdMessageFromBinary(bytes)) }, - BeginHandOffAckManifest -> { bytes => BeginHandOffAck(shardIdMessageFromBinary(bytes)) }, - HandOffManifest -> { bytes => HandOff(shardIdMessageFromBinary(bytes)) }, - ShardStoppedManifest -> { bytes => ShardStopped(shardIdMessageFromBinary(bytes)) }, - GracefulShutdownReqManifest -> { bytes => GracefulShutdownReq(actorRefMessageFromBinary(bytes)) }, - - GetShardStatsManifest -> { bytes => GetShardStats }, - ShardStatsManifest -> { bytes => shardStatsFromBinary(bytes) }, - GetShardRegionStatsManifest -> { bytes => GetShardRegionStats }, - ShardRegionStatsManifest -> { bytes => shardRegionStatsFromBinary(bytes) }, - + HostShardManifest -> { bytes => + HostShard(shardIdMessageFromBinary(bytes)) + }, + ShardStartedManifest -> { bytes => + ShardStarted(shardIdMessageFromBinary(bytes)) + }, + BeginHandOffManifest -> { bytes => + BeginHandOff(shardIdMessageFromBinary(bytes)) + }, + BeginHandOffAckManifest -> { bytes => + BeginHandOffAck(shardIdMessageFromBinary(bytes)) + }, + HandOffManifest -> { bytes => + HandOff(shardIdMessageFromBinary(bytes)) + }, + ShardStoppedManifest -> { bytes => + ShardStopped(shardIdMessageFromBinary(bytes)) + }, + 
GracefulShutdownReqManifest -> { bytes => + GracefulShutdownReq(actorRefMessageFromBinary(bytes)) + }, + GetShardStatsManifest -> { bytes => + GetShardStats + }, + ShardStatsManifest -> { bytes => + shardStatsFromBinary(bytes) + }, + GetShardRegionStatsManifest -> { bytes => + GetShardRegionStats + }, + ShardRegionStatsManifest -> { bytes => + shardRegionStatsFromBinary(bytes) + }, StartEntityManifest -> { startEntityFromBinary(_) }, - StartEntityAckManifest -> { startEntityAckFromBinary(_) } - ) + StartEntityAckManifest -> { startEntityAckFromBinary(_) }) override def manifest(obj: AnyRef): String = obj match { - case _: EntityState => EntityStateManifest - case _: EntityStarted => EntityStartedManifest - case _: EntityStopped => EntityStoppedManifest + case _: EntityState => EntityStateManifest + case _: EntityStarted => EntityStartedManifest + case _: EntityStopped => EntityStoppedManifest case _: State => CoordinatorStateManifest case _: ShardRegionRegistered => ShardRegionRegisteredManifest @@ -117,26 +153,26 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy case _: ShardHomeAllocated => ShardHomeAllocatedManifest case _: ShardHomeDeallocated => ShardHomeDeallocatedManifest - case _: Register => RegisterManifest - case _: RegisterProxy => RegisterProxyManifest - case _: RegisterAck => RegisterAckManifest - case _: GetShardHome => GetShardHomeManifest - case _: ShardHome => ShardHomeManifest - case _: HostShard => HostShardManifest - case _: ShardStarted => ShardStartedManifest - case _: BeginHandOff => BeginHandOffManifest - case _: BeginHandOffAck => BeginHandOffAckManifest - case _: HandOff => HandOffManifest - case _: ShardStopped => ShardStoppedManifest - case _: GracefulShutdownReq => GracefulShutdownReqManifest + case _: Register => RegisterManifest + case _: RegisterProxy => RegisterProxyManifest + case _: RegisterAck => RegisterAckManifest + case _: GetShardHome => GetShardHomeManifest + case _: ShardHome => 
ShardHomeManifest + case _: HostShard => HostShardManifest + case _: ShardStarted => ShardStartedManifest + case _: BeginHandOff => BeginHandOffManifest + case _: BeginHandOffAck => BeginHandOffAckManifest + case _: HandOff => HandOffManifest + case _: ShardStopped => ShardStoppedManifest + case _: GracefulShutdownReq => GracefulShutdownReqManifest - case _: StartEntity => StartEntityManifest - case _: StartEntityAck => StartEntityAckManifest + case _: StartEntity => StartEntityManifest + case _: StartEntityAck => StartEntityAckManifest - case GetShardStats => GetShardStatsManifest - case _: ShardStats => ShardStatsManifest - case GetShardRegionStats => GetShardRegionStatsManifest - case _: ShardRegionStats => ShardRegionStatsManifest + case GetShardStats => GetShardStatsManifest + case _: ShardStats => ShardStatsManifest + case GetShardRegionStats => GetShardRegionStatsManifest + case _: ShardRegionStats => ShardRegionStatsManifest case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]") } @@ -150,26 +186,26 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy case m: ShardHomeAllocated => shardHomeAllocatedToProto(m).toByteArray case ShardHomeDeallocated(shardId) => shardIdMessageToProto(shardId).toByteArray - case Register(ref) => actorRefMessageToProto(ref).toByteArray - case RegisterProxy(ref) => actorRefMessageToProto(ref).toByteArray - case RegisterAck(ref) => actorRefMessageToProto(ref).toByteArray - case GetShardHome(shardId) => shardIdMessageToProto(shardId).toByteArray - case m: ShardHome => shardHomeToProto(m).toByteArray - case HostShard(shardId) => shardIdMessageToProto(shardId).toByteArray - case ShardStarted(shardId) => shardIdMessageToProto(shardId).toByteArray - case BeginHandOff(shardId) => shardIdMessageToProto(shardId).toByteArray - case BeginHandOffAck(shardId) => shardIdMessageToProto(shardId).toByteArray - case HandOff(shardId) => 
shardIdMessageToProto(shardId).toByteArray - case ShardStopped(shardId) => shardIdMessageToProto(shardId).toByteArray + case Register(ref) => actorRefMessageToProto(ref).toByteArray + case RegisterProxy(ref) => actorRefMessageToProto(ref).toByteArray + case RegisterAck(ref) => actorRefMessageToProto(ref).toByteArray + case GetShardHome(shardId) => shardIdMessageToProto(shardId).toByteArray + case m: ShardHome => shardHomeToProto(m).toByteArray + case HostShard(shardId) => shardIdMessageToProto(shardId).toByteArray + case ShardStarted(shardId) => shardIdMessageToProto(shardId).toByteArray + case BeginHandOff(shardId) => shardIdMessageToProto(shardId).toByteArray + case BeginHandOffAck(shardId) => shardIdMessageToProto(shardId).toByteArray + case HandOff(shardId) => shardIdMessageToProto(shardId).toByteArray + case ShardStopped(shardId) => shardIdMessageToProto(shardId).toByteArray case GracefulShutdownReq(ref) => actorRefMessageToProto(ref).toByteArray - case m: EntityState => entityStateToProto(m).toByteArray - case m: EntityStarted => entityStartedToProto(m).toByteArray - case m: EntityStopped => entityStoppedToProto(m).toByteArray + case m: EntityState => entityStateToProto(m).toByteArray + case m: EntityStarted => entityStartedToProto(m).toByteArray + case m: EntityStopped => entityStoppedToProto(m).toByteArray - case s: StartEntity => startEntityToByteArray(s) - case s: StartEntityAck => startEntityAckToByteArray(s) + case s: StartEntity => startEntityToByteArray(s) + case s: StartEntityAck => startEntityAckToByteArray(s) case GetShardStats => Array.emptyByteArray case m: ShardStats => shardStatsToProto(m).toByteArray @@ -183,20 +219,25 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = fromBinaryMap.get(manifest) match { case Some(f) => f(bytes) - case None => throw new NotSerializableException( - s"Unimplemented deserialization of message with manifest 
[$manifest] in [${getClass.getName}]") + case None => + throw new NotSerializableException( + s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") } private def coordinatorStateToProto(state: State): sm.CoordinatorState = { - val regions = state.regions.map { - case (regionRef, _) => Serialization.serializedActorPath(regionRef) - }.toVector.asJava + val regions = state.regions + .map { + case (regionRef, _) => Serialization.serializedActorPath(regionRef) + } + .toVector + .asJava val builder = sm.CoordinatorState.newBuilder() state.shards.foreach { case (shardId, regionRef) => - val b = sm.CoordinatorState.ShardEntry.newBuilder() + val b = sm.CoordinatorState.ShardEntry + .newBuilder() .setShardId(shardId) .setRegionRef(Serialization.serializedActorPath(regionRef)) builder.addShards(b) @@ -204,7 +245,9 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy state.regions.foreach { case (regionRef, _) => builder.addRegions(Serialization.serializedActorPath(regionRef)) } - state.regionProxies.foreach { ref => builder.addRegionProxies(Serialization.serializedActorPath(ref)) } + state.regionProxies.foreach { ref => + builder.addRegionProxies(Serialization.serializedActorPath(ref)) + } state.unallocatedShards.foreach { builder.addUnallocatedShards } builder.build() @@ -222,7 +265,9 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy val regionsZero: Map[ActorRef, Vector[String]] = state.getRegionsList.asScala.toVector.iterator.map(resolveActorRef(_) -> Vector.empty[String]).toMap val regions: Map[ActorRef, Vector[String]] = - shards.foldLeft(regionsZero) { case (acc, (shardId, regionRef)) => acc.updated(regionRef, acc(regionRef) :+ shardId) } + shards.foldLeft(regionsZero) { + case (acc, (shardId, regionRef)) => acc.updated(regionRef, acc(regionRef) :+ shardId) + } val proxies: Set[ActorRef] = state.getRegionProxiesList.asScala.iterator.map { resolveActorRef 
}.to(immutable.Set) val unallocatedShards: Set[String] = state.getUnallocatedShardsList.asScala.toSet @@ -243,8 +288,11 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy sm.ShardIdMessage.parseFrom(bytes).getShard private def shardHomeAllocatedToProto(evt: ShardHomeAllocated): sm.ShardHomeAllocated = - sm.ShardHomeAllocated.newBuilder().setShard(evt.shard) - .setRegion(Serialization.serializedActorPath(evt.region)).build() + sm.ShardHomeAllocated + .newBuilder() + .setShard(evt.shard) + .setRegion(Serialization.serializedActorPath(evt.region)) + .build() private def shardHomeAllocatedFromBinary(bytes: Array[Byte]): ShardHomeAllocated = { val m = sm.ShardHomeAllocated.parseFrom(bytes) @@ -252,8 +300,7 @@ private[akka] class ClusterShardingMessageSerializer(val system: ExtendedActorSy } private def shardHomeToProto(m: ShardHome): sm.ShardHome = - sm.ShardHome.newBuilder().setShard(m.shard) - .setRegion(Serialization.serializedActorPath(m.ref)).build() + sm.ShardHome.newBuilder().setShard(m.shard).setRegion(Serialization.serializedActorPath(m.ref)).build() private def shardHomeFromBinary(bytes: Array[Byte]): ShardHome = { val m = sm.ShardHome.parseFrom(bytes) diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala index b82d6674ce..a95c17c21a 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingCustomShardAllocationSpec.scala @@ -65,11 +65,15 @@ object ClusterShardingCustomShardAllocationSpec { case class TestAllocationStrategy(ref: ActorRef) extends ShardAllocationStrategy { implicit val timeout = Timeout(3.seconds) - override def allocateShard(requester: ActorRef, shardId: ShardRegion.ShardId, 
currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardRegion.ShardId]]): Future[ActorRef] = { + override def allocateShard( + requester: ActorRef, + shardId: ShardRegion.ShardId, + currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardRegion.ShardId]]): Future[ActorRef] = { (ref ? AllocateReq).mapTo[ActorRef] } - override def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardRegion.ShardId]], rebalanceInProgress: Set[ShardRegion.ShardId]): Future[Set[ShardRegion.ShardId]] = { + override def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardRegion.ShardId]], + rebalanceInProgress: Set[ShardRegion.ShardId]): Future[Set[ShardRegion.ShardId]] = { (ref ? RebalanceReq).mapTo[Set[String]] } } @@ -99,19 +103,28 @@ abstract class ClusterShardingCustomShardAllocationSpecConfig(val mode: String) """).withFallback(MultiNodeClusterSpec.clusterConfig)) } -object PersistentClusterShardingCustomShardAllocationSpecConfig extends ClusterShardingCustomShardAllocationSpecConfig("persistence") -object DDataClusterShardingCustomShardAllocationSpecConfig extends ClusterShardingCustomShardAllocationSpecConfig("ddata") +object PersistentClusterShardingCustomShardAllocationSpecConfig + extends ClusterShardingCustomShardAllocationSpecConfig("persistence") +object DDataClusterShardingCustomShardAllocationSpecConfig + extends ClusterShardingCustomShardAllocationSpecConfig("ddata") -class PersistentClusterShardingCustomShardAllocationSpec extends ClusterShardingCustomShardAllocationSpec(PersistentClusterShardingCustomShardAllocationSpecConfig) -class DDataClusterShardingCustomShardAllocationSpec extends ClusterShardingCustomShardAllocationSpec(DDataClusterShardingCustomShardAllocationSpecConfig) +class PersistentClusterShardingCustomShardAllocationSpec + extends ClusterShardingCustomShardAllocationSpec(PersistentClusterShardingCustomShardAllocationSpecConfig) +class DDataClusterShardingCustomShardAllocationSpec + extends 
ClusterShardingCustomShardAllocationSpec(DDataClusterShardingCustomShardAllocationSpecConfig) -class PersistentClusterShardingCustomShardAllocationMultiJvmNode1 extends PersistentClusterShardingCustomShardAllocationSpec -class PersistentClusterShardingCustomShardAllocationMultiJvmNode2 extends PersistentClusterShardingCustomShardAllocationSpec +class PersistentClusterShardingCustomShardAllocationMultiJvmNode1 + extends PersistentClusterShardingCustomShardAllocationSpec +class PersistentClusterShardingCustomShardAllocationMultiJvmNode2 + extends PersistentClusterShardingCustomShardAllocationSpec class DDataClusterShardingCustomShardAllocationMultiJvmNode1 extends DDataClusterShardingCustomShardAllocationSpec class DDataClusterShardingCustomShardAllocationMultiJvmNode2 extends DDataClusterShardingCustomShardAllocationSpec -abstract class ClusterShardingCustomShardAllocationSpec(config: ClusterShardingCustomShardAllocationSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender { +abstract class ClusterShardingCustomShardAllocationSpec(config: ClusterShardingCustomShardAllocationSpecConfig) + extends MultiNodeSpec(config) + with STMultiNodeSpec + with ImplicitSender { import ClusterShardingCustomShardAllocationSpec._ import config._ @@ -119,21 +132,20 @@ abstract class ClusterShardingCustomShardAllocationSpec(config: ClusterShardingC def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) startSharding() } enterBarrier(from.name + "-joined") } def startSharding(): Unit = { - ClusterSharding(system).start( - typeName = "Entity", - entityProps = Props[Entity], - settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId, - allocationStrategy = TestAllocationStrategy(allocator), - handOffStopMessage = PoisonPill) + ClusterSharding(system).start(typeName = "Entity", + entityProps = Props[Entity], + 
settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId, + allocationStrategy = TestAllocationStrategy(allocator), + handOffStopMessage = PoisonPill) } lazy val region = ClusterSharding(system).shardRegion("Entity") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala index 6109790887..55767ef649 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingFailureSpec.scala @@ -88,7 +88,8 @@ abstract class ClusterShardingFailureSpecConfig(val mode: String) extends MultiN object PersistentClusterShardingFailureSpecConfig extends ClusterShardingFailureSpecConfig("persistence") object DDataClusterShardingFailureSpecConfig extends ClusterShardingFailureSpecConfig("ddata") -class PersistentClusterShardingFailureSpec extends ClusterShardingFailureSpec(PersistentClusterShardingFailureSpecConfig) +class PersistentClusterShardingFailureSpec + extends ClusterShardingFailureSpec(PersistentClusterShardingFailureSpecConfig) class DDataClusterShardingFailureSpec extends ClusterShardingFailureSpec(DDataClusterShardingFailureSpecConfig) class PersistentClusterShardingFailureMultiJvmNode1 extends PersistentClusterShardingFailureSpec @@ -99,14 +100,17 @@ class DDataClusterShardingFailureMultiJvmNode1 extends DDataClusterShardingFailu class DDataClusterShardingFailureMultiJvmNode2 extends DDataClusterShardingFailureSpec class DDataClusterShardingFailureMultiJvmNode3 extends DDataClusterShardingFailureSpec -abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender { +abstract class ClusterShardingFailureSpec(config: 
ClusterShardingFailureSpecConfig) + extends MultiNodeSpec(config) + with STMultiNodeSpec + with ImplicitSender { import ClusterShardingFailureSpec._ import config._ override def initialParticipants = roles.size - val storageLocations = List(new File(system.settings.config.getString( - "akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) + val storageLocations = List( + new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) override protected def atStartup(): Unit = { storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir)) @@ -121,7 +125,7 @@ abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConf def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) startSharding() within(remaining) { @@ -135,12 +139,11 @@ abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConf } def startSharding(): Unit = { - ClusterSharding(system).start( - typeName = "Entity", - entityProps = Props[Entity], - settings = ClusterShardingSettings(system).withRememberEntities(true), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + ClusterSharding(system).start(typeName = "Entity", + entityProps = Props[Entity], + settings = ClusterShardingSettings(system).withRememberEntities(true), + extractEntityId = extractEntityId, + extractShardId = extractShardId) } lazy val region = ClusterSharding(system).shardRegion("Entity") @@ -276,4 +279,3 @@ abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConf } } - diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala index 79e3786c16..9abe5ab4f3 100644 --- 
a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStateSpec.scala @@ -61,8 +61,7 @@ object ClusterShardingGetStateSpecConfig extends MultiNodeConfig { } """).withFallback(MultiNodeClusterSpec.clusterConfig)) - nodeConfig(first, second)(ConfigFactory.parseString( - """akka.cluster.roles=["shard"]""")) + nodeConfig(first, second)(ConfigFactory.parseString("""akka.cluster.roles=["shard"]""")) } @@ -70,7 +69,9 @@ class ClusterShardingGetStateSpecMultiJvmNode1 extends ClusterShardingGetStateSp class ClusterShardingGetStateSpecMultiJvmNode2 extends ClusterShardingGetStateSpec class ClusterShardingGetStateSpecMultiJvmNode3 extends ClusterShardingGetStateSpec -abstract class ClusterShardingGetStateSpec extends MultiNodeSpec(ClusterShardingGetStateSpecConfig) with STMultiNodeSpec { +abstract class ClusterShardingGetStateSpec + extends MultiNodeSpec(ClusterShardingGetStateSpecConfig) + with STMultiNodeSpec { import ClusterShardingGetStateSpec._ import ClusterShardingGetStateSpecConfig._ @@ -78,20 +79,18 @@ abstract class ClusterShardingGetStateSpec extends MultiNodeSpec(ClusterSharding def initialParticipants = roles.size def startShard(): ActorRef = { - ClusterSharding(system).start( - typeName = shardTypeName, - entityProps = Props(new ShardedActor), - settings = ClusterShardingSettings(system).withRole("shard"), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + ClusterSharding(system).start(typeName = shardTypeName, + entityProps = Props(new ShardedActor), + settings = ClusterShardingSettings(system).withRole("shard"), + extractEntityId = extractEntityId, + extractShardId = extractShardId) } def startProxy(): ActorRef = { - ClusterSharding(system).startProxy( - typeName = shardTypeName, - role = Some("shard"), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + 
ClusterSharding(system).startProxy(typeName = shardTypeName, + role = Some("shard"), + extractEntityId = extractEntityId, + extractShardId = extractShardId) } def join(from: RoleName): Unit = { diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala index 4136a77ee5..04ff39bbf9 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGetStatsSpec.scala @@ -63,8 +63,7 @@ object ClusterShardingGetStatsSpecConfig extends MultiNodeConfig { akka.actor.warn-about-java-serializer-usage=false """).withFallback(MultiNodeClusterSpec.clusterConfig)) - nodeConfig(first, second, third)(ConfigFactory.parseString( - """akka.cluster.roles=["shard"]""")) + nodeConfig(first, second, third)(ConfigFactory.parseString("""akka.cluster.roles=["shard"]""")) } @@ -73,7 +72,9 @@ class ClusterShardingGetStatsSpecMultiJvmNode2 extends ClusterShardingGetStatsSp class ClusterShardingGetStatsSpecMultiJvmNode3 extends ClusterShardingGetStatsSpec class ClusterShardingGetStatsSpecMultiJvmNode4 extends ClusterShardingGetStatsSpec -abstract class ClusterShardingGetStatsSpec extends MultiNodeSpec(ClusterShardingGetStatsSpecConfig) with STMultiNodeSpec { +abstract class ClusterShardingGetStatsSpec + extends MultiNodeSpec(ClusterShardingGetStatsSpecConfig) + with STMultiNodeSpec { import ClusterShardingGetStatsSpec._ import ClusterShardingGetStatsSpecConfig._ @@ -81,20 +82,18 @@ abstract class ClusterShardingGetStatsSpec extends MultiNodeSpec(ClusterSharding def initialParticipants = roles.size def startShard(): ActorRef = { - ClusterSharding(system).start( - typeName = shardTypeName, - entityProps = Props(new ShardedActor), - settings = ClusterShardingSettings(system).withRole("shard"), - extractEntityId = 
extractEntityId, - extractShardId = extractShardId) + ClusterSharding(system).start(typeName = shardTypeName, + entityProps = Props(new ShardedActor), + settings = ClusterShardingSettings(system).withRole("shard"), + extractEntityId = extractEntityId, + extractShardId = extractShardId) } def startProxy(): ActorRef = { - ClusterSharding(system).startProxy( - typeName = shardTypeName, - role = Some("shard"), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + ClusterSharding(system).startProxy(typeName = shardTypeName, + role = Some("shard"), + extractEntityId = extractEntityId, + extractShardId = extractShardId) } def join(from: RoleName): Unit = { diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala index f1c1335fff..6634d4b7d3 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingGracefulShutdownSpec.scala @@ -35,9 +35,10 @@ object ClusterShardingGracefulShutdownSpec { case id: Int => (id.toString, id) } - val extractShardId: ShardRegion.ExtractShardId = msg => msg match { - case id: Int => id.toString - } + val extractShardId: ShardRegion.ExtractShardId = msg => + msg match { + case id: Int => id.toString + } } @@ -67,11 +68,14 @@ abstract class ClusterShardingGracefulShutdownSpecConfig(val mode: String) exten """).withFallback(MultiNodeClusterSpec.clusterConfig)) } -object PersistentClusterShardingGracefulShutdownSpecConfig extends ClusterShardingGracefulShutdownSpecConfig("persistence") +object PersistentClusterShardingGracefulShutdownSpecConfig + extends ClusterShardingGracefulShutdownSpecConfig("persistence") object DDataClusterShardingGracefulShutdownSpecConfig extends ClusterShardingGracefulShutdownSpecConfig("ddata") 
-class PersistentClusterShardingGracefulShutdownSpec extends ClusterShardingGracefulShutdownSpec(PersistentClusterShardingGracefulShutdownSpecConfig) -class DDataClusterShardingGracefulShutdownSpec extends ClusterShardingGracefulShutdownSpec(DDataClusterShardingGracefulShutdownSpecConfig) +class PersistentClusterShardingGracefulShutdownSpec + extends ClusterShardingGracefulShutdownSpec(PersistentClusterShardingGracefulShutdownSpecConfig) +class DDataClusterShardingGracefulShutdownSpec + extends ClusterShardingGracefulShutdownSpec(DDataClusterShardingGracefulShutdownSpecConfig) class PersistentClusterShardingGracefulShutdownMultiJvmNode1 extends PersistentClusterShardingGracefulShutdownSpec class PersistentClusterShardingGracefulShutdownMultiJvmNode2 extends PersistentClusterShardingGracefulShutdownSpec @@ -79,14 +83,17 @@ class PersistentClusterShardingGracefulShutdownMultiJvmNode2 extends PersistentC class DDataClusterShardingGracefulShutdownMultiJvmNode1 extends DDataClusterShardingGracefulShutdownSpec class DDataClusterShardingGracefulShutdownMultiJvmNode2 extends DDataClusterShardingGracefulShutdownSpec -abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracefulShutdownSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender { +abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracefulShutdownSpecConfig) + extends MultiNodeSpec(config) + with STMultiNodeSpec + with ImplicitSender { import ClusterShardingGracefulShutdownSpec._ import config._ override def initialParticipants = roles.size - val storageLocations = List(new File(system.settings.config.getString( - "akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) + val storageLocations = List( + new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) override protected def atStartup(): Unit = { storageLocations.foreach(dir => if (dir.exists) 
FileUtils.deleteQuietly(dir)) @@ -99,22 +106,22 @@ abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracef def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) startSharding() } enterBarrier(from.name + "-joined") } def startSharding(): Unit = { - val allocationStrategy = new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1) - ClusterSharding(system).start( - typeName = "Entity", - entityProps = Props[Entity], - settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId, - allocationStrategy, - handOffStopMessage = StopEntity) + val allocationStrategy = + new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1) + ClusterSharding(system).start(typeName = "Entity", + entityProps = Props[Entity], + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId, + allocationStrategy, + handOffStopMessage = StopEntity) } lazy val region = ClusterSharding(system).shardRegion("Entity") @@ -185,15 +192,15 @@ abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracef "gracefully shutdown empty region" in within(30.seconds) { runOn(first) { - val allocationStrategy = new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1) - val regionEmpty = ClusterSharding(system).start( - typeName = "EntityEmpty", - entityProps = Props[Entity], - settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId, - allocationStrategy, - handOffStopMessage = StopEntity) + val allocationStrategy = + new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1) + val regionEmpty = ClusterSharding(system).start(typeName = 
"EntityEmpty", + entityProps = Props[Entity], + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId, + allocationStrategy, + handOffStopMessage = StopEntity) watch(regionEmpty) regionEmpty ! GracefulShutdown @@ -203,4 +210,3 @@ abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracef } } - diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala index 3b94c9a2dd..490bca8a2f 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingIncorrectSetupSpec.scala @@ -13,8 +13,7 @@ object ClusterShardingIncorrectSetupSpecConfig extends MultiNodeConfig { val first = role("first") val second = role("second") - val commonConfig = ConfigFactory.parseString( - """ + val commonConfig = ConfigFactory.parseString(""" akka.loglevel = INFO akka.cluster.sharding { waiting-for-state-timeout = 100ms @@ -37,7 +36,10 @@ object ClusterShardingIncorrectSetupSpec { } } -abstract class ClusterShardingIncorrectSetupSpec extends MultiNodeSpec(ClusterShardingIncorrectSetupSpecConfig) with MultiNodeClusterSpec with ImplicitSender { +abstract class ClusterShardingIncorrectSetupSpec + extends MultiNodeSpec(ClusterShardingIncorrectSetupSpecConfig) + with MultiNodeClusterSpec + with ImplicitSender { import ClusterShardingIncorrectSetupSpec._ import ClusterShardingIncorrectSetupSpecConfig._ @@ -48,16 +50,14 @@ abstract class ClusterShardingIncorrectSetupSpec extends MultiNodeSpec(ClusterSh enterBarrier("cluster-up") runOn(first) { EventFilter.error(pattern = """Has ClusterSharding been started on all nodes?""").intercept { - ClusterSharding(system).start( - typeName = "Entity", - entityProps = 
TestActors.echoActorProps, - settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + ClusterSharding(system).start(typeName = "Entity", + entityProps = TestActors.echoActorProps, + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId) } } enterBarrier("helpful error message logged") } } } - diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala index 3363fdf36f..6928b914ba 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingLeavingSpec.scala @@ -85,7 +85,8 @@ abstract class ClusterShardingLeavingSpecConfig(val mode: String) extends MultiN object PersistentClusterShardingLeavingSpecConfig extends ClusterShardingLeavingSpecConfig("persistence") object DDataClusterShardingLeavingSpecConfig extends ClusterShardingLeavingSpecConfig("ddata") -class PersistentClusterShardingLeavingSpec extends ClusterShardingLeavingSpec(PersistentClusterShardingLeavingSpecConfig) +class PersistentClusterShardingLeavingSpec + extends ClusterShardingLeavingSpec(PersistentClusterShardingLeavingSpecConfig) class DDataClusterShardingLeavingSpec extends ClusterShardingLeavingSpec(DDataClusterShardingLeavingSpecConfig) class PersistentClusterShardingLeavingMultiJvmNode1 extends PersistentClusterShardingLeavingSpec @@ -98,14 +99,17 @@ class DDataClusterShardingLeavingMultiJvmNode2 extends DDataClusterShardingLeavi class DDataClusterShardingLeavingMultiJvmNode3 extends DDataClusterShardingLeavingSpec class DDataClusterShardingLeavingMultiJvmNode4 extends DDataClusterShardingLeavingSpec -abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConfig) extends 
MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender { +abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConfig) + extends MultiNodeSpec(config) + with STMultiNodeSpec + with ImplicitSender { import ClusterShardingLeavingSpec._ import config._ override def initialParticipants = roles.size - val storageLocations = List(new File(system.settings.config.getString( - "akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) + val storageLocations = List( + new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) override protected def atStartup(): Unit = { storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir)) @@ -120,7 +124,7 @@ abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConf def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) startSharding() within(15.seconds) { awaitAssert(cluster.state.members.exists { m => @@ -132,12 +136,11 @@ abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConf } def startSharding(): Unit = { - ClusterSharding(system).start( - typeName = "Entity", - entityProps = Props[Entity], - settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + ClusterSharding(system).start(typeName = "Entity", + entityProps = Props[Entity], + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId) } lazy val region = ClusterSharding(system).shardRegion("Entity") @@ -221,4 +224,3 @@ abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConf } } - diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingMinMembersSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingMinMembersSpec.scala index 
5ef7953fa3..34adf56387 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingMinMembersSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingMinMembersSpec.scala @@ -28,9 +28,10 @@ object ClusterShardingMinMembersSpec { case id: Int => (id.toString, id) } - val extractShardId: ShardRegion.ExtractShardId = msg => msg match { - case id: Int => id.toString - } + val extractShardId: ShardRegion.ExtractShardId = msg => + msg match { + case id: Int => id.toString + } } @@ -66,7 +67,8 @@ abstract class ClusterShardingMinMembersSpecConfig(val mode: String) extends Mul object PersistentClusterShardingMinMembersSpecConfig extends ClusterShardingMinMembersSpecConfig("persistence") object DDataClusterShardingMinMembersSpecConfig extends ClusterShardingMinMembersSpecConfig("ddata") -class PersistentClusterShardingMinMembersSpec extends ClusterShardingMinMembersSpec(PersistentClusterShardingMinMembersSpecConfig) +class PersistentClusterShardingMinMembersSpec + extends ClusterShardingMinMembersSpec(PersistentClusterShardingMinMembersSpecConfig) class DDataClusterShardingMinMembersSpec extends ClusterShardingMinMembersSpec(DDataClusterShardingMinMembersSpecConfig) class PersistentClusterShardingMinMembersMultiJvmNode1 extends PersistentClusterShardingMinMembersSpec @@ -77,14 +79,17 @@ class DDataClusterShardingMinMembersMultiJvmNode1 extends DDataClusterShardingMi class DDataClusterShardingMinMembersMultiJvmNode2 extends DDataClusterShardingMinMembersSpec class DDataClusterShardingMinMembersMultiJvmNode3 extends DDataClusterShardingMinMembersSpec -abstract class ClusterShardingMinMembersSpec(config: ClusterShardingMinMembersSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender { +abstract class ClusterShardingMinMembersSpec(config: ClusterShardingMinMembersSpecConfig) + extends MultiNodeSpec(config) + with STMultiNodeSpec + with ImplicitSender { import 
ClusterShardingMinMembersSpec._ import config._ override def initialParticipants = roles.size - val storageLocations = List(new File(system.settings.config.getString( - "akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) + val storageLocations = List( + new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) override protected def atStartup(): Unit = { storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir)) @@ -97,7 +102,7 @@ abstract class ClusterShardingMinMembersSpec(config: ClusterShardingMinMembersSp def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) } enterBarrier(from.name + "-joined") } @@ -105,15 +110,15 @@ abstract class ClusterShardingMinMembersSpec(config: ClusterShardingMinMembersSp val cluster = Cluster(system) def startSharding(): Unit = { - val allocationStrategy = new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1) - ClusterSharding(system).start( - typeName = "Entity", - entityProps = TestActors.echoActorProps, - settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId, - allocationStrategy, - handOffStopMessage = StopEntity) + val allocationStrategy = + new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1) + ClusterSharding(system).start(typeName = "Entity", + entityProps = TestActors.echoActorProps, + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId, + allocationStrategy, + handOffStopMessage = StopEntity) } lazy val region = ClusterSharding(system).shardRegion("Entity") @@ -195,4 +200,3 @@ abstract class ClusterShardingMinMembersSpec(config: ClusterShardingMinMembersSp } } - diff --git 
a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala index d6dffd9dfb..4d7c8cdec9 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesNewExtractorSpec.scala @@ -78,21 +78,18 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpecConfig(val mode: S } """).withFallback(MultiNodeClusterSpec.clusterConfig)) - val roleConfig = ConfigFactory.parseString( - """ + val roleConfig = ConfigFactory.parseString(""" akka.cluster.roles = [sharding] """) // we pretend node 4 and 5 are new incarnations of node 2 and 3 as they never run in parallel // so we can use the same lmdb store for them and have node 4 pick up the persisted data of node 2 - val ddataNodeAConfig = ConfigFactory.parseString( - """ + val ddataNodeAConfig = ConfigFactory.parseString(""" akka.cluster.sharding.distributed-data.durable.lmdb { dir = target/ShardingRememberEntitiesNewExtractorSpec/sharding-node-a } """) - val ddataNodeBConfig = ConfigFactory.parseString( - """ + val ddataNodeBConfig = ConfigFactory.parseString(""" akka.cluster.sharding.distributed-data.durable.lmdb { dir = target/ShardingRememberEntitiesNewExtractorSpec/sharding-node-b } @@ -103,26 +100,37 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpecConfig(val mode: S } -object PersistentClusterShardingRememberEntitiesSpecNewExtractorConfig extends ClusterShardingRememberEntitiesNewExtractorSpecConfig( - ClusterShardingSettings.StateStoreModePersistence) -object DDataClusterShardingRememberEntitiesNewExtractorSpecConfig extends ClusterShardingRememberEntitiesNewExtractorSpecConfig( - ClusterShardingSettings.StateStoreModeDData) +object 
PersistentClusterShardingRememberEntitiesSpecNewExtractorConfig + extends ClusterShardingRememberEntitiesNewExtractorSpecConfig(ClusterShardingSettings.StateStoreModePersistence) +object DDataClusterShardingRememberEntitiesNewExtractorSpecConfig + extends ClusterShardingRememberEntitiesNewExtractorSpecConfig(ClusterShardingSettings.StateStoreModeDData) -class PersistentClusterShardingRememberEntitiesNewExtractorSpec extends ClusterShardingRememberEntitiesNewExtractorSpec( - PersistentClusterShardingRememberEntitiesSpecNewExtractorConfig) +class PersistentClusterShardingRememberEntitiesNewExtractorSpec + extends ClusterShardingRememberEntitiesNewExtractorSpec( + PersistentClusterShardingRememberEntitiesSpecNewExtractorConfig) -class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode1 extends PersistentClusterShardingRememberEntitiesNewExtractorSpec -class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode2 extends PersistentClusterShardingRememberEntitiesNewExtractorSpec -class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode3 extends PersistentClusterShardingRememberEntitiesNewExtractorSpec +class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode1 + extends PersistentClusterShardingRememberEntitiesNewExtractorSpec +class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode2 + extends PersistentClusterShardingRememberEntitiesNewExtractorSpec +class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode3 + extends PersistentClusterShardingRememberEntitiesNewExtractorSpec -class DDataClusterShardingRememberEntitiesNewExtractorSpec extends ClusterShardingRememberEntitiesNewExtractorSpec( - DDataClusterShardingRememberEntitiesNewExtractorSpecConfig) +class DDataClusterShardingRememberEntitiesNewExtractorSpec + extends ClusterShardingRememberEntitiesNewExtractorSpec(DDataClusterShardingRememberEntitiesNewExtractorSpecConfig) -class 
DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode1 extends DDataClusterShardingRememberEntitiesNewExtractorSpec -class DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode2 extends DDataClusterShardingRememberEntitiesNewExtractorSpec -class DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode3 extends DDataClusterShardingRememberEntitiesNewExtractorSpec +class DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode1 + extends DDataClusterShardingRememberEntitiesNewExtractorSpec +class DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode2 + extends DDataClusterShardingRememberEntitiesNewExtractorSpec +class DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode3 + extends DDataClusterShardingRememberEntitiesNewExtractorSpec -abstract class ClusterShardingRememberEntitiesNewExtractorSpec(config: ClusterShardingRememberEntitiesNewExtractorSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender { +abstract class ClusterShardingRememberEntitiesNewExtractorSpec( + config: ClusterShardingRememberEntitiesNewExtractorSpecConfig) + extends MultiNodeSpec(config) + with STMultiNodeSpec + with ImplicitSender { import ClusterShardingRememberEntitiesNewExtractorSpec._ import config._ @@ -130,8 +138,8 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpec(config: ClusterSh override def initialParticipants = roles.size - val storageLocations = List(new File(system.settings.config.getString( - "akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) + val storageLocations = List( + new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) override protected def atStartup(): Unit = { storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir)) @@ -144,7 +152,7 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpec(config: ClusterSh def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - 
Cluster(system) join node(to).address + Cluster(system).join(node(to).address) } enterBarrier(from.name + "-joined") } @@ -152,21 +160,21 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpec(config: ClusterSh val cluster = Cluster(system) def startShardingWithExtractor1(): Unit = { - ClusterSharding(system).start( - typeName = typeName, - entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(None), - settings = ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"), - extractEntityId = extractEntityId, - extractShardId = extractShardId1) + ClusterSharding(system).start(typeName = typeName, + entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(None), + settings = + ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"), + extractEntityId = extractEntityId, + extractShardId = extractShardId1) } def startShardingWithExtractor2(sys: ActorSystem, probe: ActorRef): Unit = { - ClusterSharding(sys).start( - typeName = typeName, - entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(Some(probe)), - settings = ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"), - extractEntityId = extractEntityId, - extractShardId = extractShardId2) + ClusterSharding(sys).start(typeName = typeName, + entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(Some(probe)), + settings = + ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"), + extractEntityId = extractEntityId, + extractShardId = extractShardId2) } def region(sys: ActorSystem = system) = ClusterSharding(sys).shardRegion(typeName) @@ -297,4 +305,3 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpec(config: ClusterSh } } - diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala 
index a5d3fc01b9..2be28b7248 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingRememberEntitiesSpec.scala @@ -37,10 +37,11 @@ object ClusterShardingRememberEntitiesSpec { case id: Int => (id.toString, id) } - val extractShardId: ShardRegion.ExtractShardId = msg => msg match { - case id: Int => id.toString - case ShardRegion.StartEntity(id) => id - } + val extractShardId: ShardRegion.ExtractShardId = msg => + msg match { + case id: Int => id.toString + case ShardRegion.StartEntity(id) => id + } } @@ -79,33 +80,36 @@ abstract class ClusterShardingRememberEntitiesSpecConfig(val mode: String) exten """)) } -object PersistentClusterShardingRememberEntitiesSpecConfig extends ClusterShardingRememberEntitiesSpecConfig( - ClusterShardingSettings.StateStoreModePersistence) -object DDataClusterShardingRememberEntitiesSpecConfig extends ClusterShardingRememberEntitiesSpecConfig( - ClusterShardingSettings.StateStoreModeDData) +object PersistentClusterShardingRememberEntitiesSpecConfig + extends ClusterShardingRememberEntitiesSpecConfig(ClusterShardingSettings.StateStoreModePersistence) +object DDataClusterShardingRememberEntitiesSpecConfig + extends ClusterShardingRememberEntitiesSpecConfig(ClusterShardingSettings.StateStoreModeDData) -class PersistentClusterShardingRememberEntitiesSpec extends ClusterShardingRememberEntitiesSpec( - PersistentClusterShardingRememberEntitiesSpecConfig) +class PersistentClusterShardingRememberEntitiesSpec + extends ClusterShardingRememberEntitiesSpec(PersistentClusterShardingRememberEntitiesSpecConfig) class PersistentClusterShardingRememberEntitiesMultiJvmNode1 extends PersistentClusterShardingRememberEntitiesSpec class PersistentClusterShardingRememberEntitiesMultiJvmNode2 extends PersistentClusterShardingRememberEntitiesSpec class PersistentClusterShardingRememberEntitiesMultiJvmNode3 extends 
PersistentClusterShardingRememberEntitiesSpec -class DDataClusterShardingRememberEntitiesSpec extends ClusterShardingRememberEntitiesSpec( - DDataClusterShardingRememberEntitiesSpecConfig) +class DDataClusterShardingRememberEntitiesSpec + extends ClusterShardingRememberEntitiesSpec(DDataClusterShardingRememberEntitiesSpecConfig) class DDataClusterShardingRememberEntitiesMultiJvmNode1 extends DDataClusterShardingRememberEntitiesSpec class DDataClusterShardingRememberEntitiesMultiJvmNode2 extends DDataClusterShardingRememberEntitiesSpec class DDataClusterShardingRememberEntitiesMultiJvmNode3 extends DDataClusterShardingRememberEntitiesSpec -abstract class ClusterShardingRememberEntitiesSpec(config: ClusterShardingRememberEntitiesSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender { +abstract class ClusterShardingRememberEntitiesSpec(config: ClusterShardingRememberEntitiesSpecConfig) + extends MultiNodeSpec(config) + with STMultiNodeSpec + with ImplicitSender { import ClusterShardingRememberEntitiesSpec._ import config._ override def initialParticipants = roles.size - val storageLocations = List(new File(system.settings.config.getString( - "akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) + val storageLocations = List( + new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) override protected def atStartup(): Unit = { storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir)) @@ -118,7 +122,7 @@ abstract class ClusterShardingRememberEntitiesSpec(config: ClusterShardingRememb def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) } enterBarrier(from.name + "-joined") } @@ -126,12 +130,11 @@ abstract class ClusterShardingRememberEntitiesSpec(config: ClusterShardingRememb val cluster = Cluster(system) def startSharding(sys: ActorSystem = system, 
probe: ActorRef = testActor): Unit = { - ClusterSharding(sys).start( - typeName = "Entity", - entityProps = ClusterShardingRememberEntitiesSpec.props(probe), - settings = ClusterShardingSettings(system).withRememberEntities(true), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + ClusterSharding(sys).start(typeName = "Entity", + entityProps = ClusterShardingRememberEntitiesSpec.props(probe), + settings = ClusterShardingSettings(system).withRememberEntities(true), + extractEntityId = extractEntityId, + extractShardId = extractShardId) } lazy val region = ClusterSharding(system).shardRegion("Entity") @@ -229,4 +232,3 @@ abstract class ClusterShardingRememberEntitiesSpec(config: ClusterShardingRememb } } } - diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala index cf1871611e..32f8818197 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSingleShardPerEntitySpec.scala @@ -60,8 +60,10 @@ class ClusterShardingSingleShardPerEntitySpecMultiJvmNode3 extends ClusterShardi class ClusterShardingSingleShardPerEntitySpecMultiJvmNode4 extends ClusterShardingSingleShardPerEntitySpec class ClusterShardingSingleShardPerEntitySpecMultiJvmNode5 extends ClusterShardingSingleShardPerEntitySpec -abstract class ClusterShardingSingleShardPerEntitySpec extends MultiNodeSpec(ClusterShardingSingleShardPerEntitySpecConfig) - with STMultiNodeSpec with ImplicitSender { +abstract class ClusterShardingSingleShardPerEntitySpec + extends MultiNodeSpec(ClusterShardingSingleShardPerEntitySpecConfig) + with STMultiNodeSpec + with ImplicitSender { import ClusterShardingSingleShardPerEntitySpec._ import 
ClusterShardingSingleShardPerEntitySpecConfig._ @@ -69,19 +71,18 @@ abstract class ClusterShardingSingleShardPerEntitySpec extends MultiNodeSpec(Clu def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) startSharding() } enterBarrier(from.name + "-joined") } def startSharding(): Unit = { - ClusterSharding(system).start( - typeName = "Entity", - entityProps = Props[Entity], - settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + ClusterSharding(system).start(typeName = "Entity", + entityProps = Props[Entity], + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId) } lazy val region = ClusterSharding(system).shardRegion("Entity") diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala index 3ff8022d21..d41145564c 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/ClusterShardingSpec.scala @@ -111,17 +111,15 @@ object ClusterShardingSpec { } def receive = { - case msg => counter forward msg + case msg => counter.forward(msg) } } //#supervisor } -abstract class ClusterShardingSpecConfig( - val mode: String, - val entityRecoveryStrategy: String = "all") - extends MultiNodeConfig { +abstract class ClusterShardingSpecConfig(val mode: String, val entityRecoveryStrategy: String = "all") + extends MultiNodeConfig { val controller = role("controller") val first = role("first") @@ -186,8 +184,8 @@ object ClusterShardingDocCode { val numberOfShards = 100 val extractShardId: ShardRegion.ExtractShardId = { - case EntityEnvelope(id, _) => (id % numberOfShards).toString - case Get(id) => (id % 
numberOfShards).toString + case EntityEnvelope(id, _) => (id % numberOfShards).toString + case Get(id) => (id % numberOfShards).toString case ShardRegion.StartEntity(id) => // StartEntity is used by remembering entities feature (id.toLong % numberOfShards).toString @@ -197,8 +195,8 @@ object ClusterShardingDocCode { { //#extractShardId-StartEntity val extractShardId: ShardRegion.ExtractShardId = { - case EntityEnvelope(id, _) => (id % numberOfShards).toString - case Get(id) => (id % numberOfShards).toString + case EntityEnvelope(id, _) => (id % numberOfShards).toString + case Get(id) => (id % numberOfShards).toString case ShardRegion.StartEntity(id) => // StartEntity is used by remembering entities feature (id.toLong % numberOfShards).toString @@ -210,17 +208,15 @@ object ClusterShardingDocCode { object PersistentClusterShardingSpecConfig extends ClusterShardingSpecConfig("persistence") object DDataClusterShardingSpecConfig extends ClusterShardingSpecConfig("ddata") -object PersistentClusterShardingWithEntityRecoverySpecConfig extends ClusterShardingSpecConfig( - "persistence", - "all") -object DDataClusterShardingWithEntityRecoverySpecConfig extends ClusterShardingSpecConfig( - "ddata", - "constant") +object PersistentClusterShardingWithEntityRecoverySpecConfig extends ClusterShardingSpecConfig("persistence", "all") +object DDataClusterShardingWithEntityRecoverySpecConfig extends ClusterShardingSpecConfig("ddata", "constant") class PersistentClusterShardingSpec extends ClusterShardingSpec(PersistentClusterShardingSpecConfig) class DDataClusterShardingSpec extends ClusterShardingSpec(DDataClusterShardingSpecConfig) -class PersistentClusterShardingWithEntityRecoverySpec extends ClusterShardingSpec(PersistentClusterShardingWithEntityRecoverySpecConfig) -class DDataClusterShardingWithEntityRecoverySpec extends ClusterShardingSpec(DDataClusterShardingWithEntityRecoverySpecConfig) +class PersistentClusterShardingWithEntityRecoverySpec + extends 
ClusterShardingSpec(PersistentClusterShardingWithEntityRecoverySpecConfig) +class DDataClusterShardingWithEntityRecoverySpec + extends ClusterShardingSpec(DDataClusterShardingWithEntityRecoverySpecConfig) class PersistentClusterShardingMultiJvmNode1 extends PersistentClusterShardingSpec class PersistentClusterShardingMultiJvmNode2 extends PersistentClusterShardingSpec @@ -254,13 +250,16 @@ class DDataClusterShardingWithEntityRecoveryMultiJvmNode5 extends DDataClusterSh class DDataClusterShardingWithEntityRecoveryMultiJvmNode6 extends DDataClusterShardingSpec class DDataClusterShardingWithEntityRecoveryMultiJvmNode7 extends DDataClusterShardingSpec -abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends MultiNodeSpec(config) with MultiNodeClusterSpec - with STMultiNodeSpec with ImplicitSender { +abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) + extends MultiNodeSpec(config) + with MultiNodeClusterSpec + with STMultiNodeSpec + with ImplicitSender { import ClusterShardingSpec._ import config._ - val storageLocations = List(new File(system.settings.config.getString( - "akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) + val storageLocations = List( + new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile) override protected def atStartup(): Unit = { storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir)) @@ -273,51 +272,57 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) createCoordinator() } enterBarrier(from.name + "-joined") } - lazy val replicator = system.actorOf(Replicator.props( - ReplicatorSettings(system).withGossipInterval(1.second).withMaxDeltaElements(10)), "replicator") + lazy val replicator = system.actorOf( + 
Replicator.props(ReplicatorSettings(system).withGossipInterval(1.second).withMaxDeltaElements(10)), + "replicator") def createCoordinator(): Unit = { def coordinatorProps(typeName: String, rebalanceEnabled: Boolean, rememberEntities: Boolean) = { - val allocationStrategy = new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1) + val allocationStrategy = + new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1) val cfg = ConfigFactory.parseString(s""" handoff-timeout = 10s shard-start-timeout = 10s rebalance-interval = ${if (rebalanceEnabled) "2s" else "3600s"} """).withFallback(system.settings.config.getConfig("akka.cluster.sharding")) val settings = ClusterShardingSettings(cfg).withRememberEntities(rememberEntities) - val majorityMinCap = system.settings.config.getInt( - "akka.cluster.sharding.distributed-data.majority-min-cap") + val majorityMinCap = system.settings.config.getInt("akka.cluster.sharding.distributed-data.majority-min-cap") if (settings.stateStoreMode == "persistence") ShardCoordinator.props(typeName, settings, allocationStrategy) else ShardCoordinator.props(typeName, settings, allocationStrategy, replicator, majorityMinCap) } - List("counter", "rebalancingCounter", "RememberCounterEntities", "AnotherRememberCounter", - "RememberCounter", "RebalancingRememberCounter", "AutoMigrateRememberRegionTest").foreach { typeName => - val rebalanceEnabled = typeName.toLowerCase.startsWith("rebalancing") - val rememberEnabled = typeName.toLowerCase.contains("remember") - val singletonProps = BackoffSupervisor.props( - childProps = coordinatorProps(typeName, rebalanceEnabled, rememberEnabled), - childName = "coordinator", - minBackoff = 5.seconds, - maxBackoff = 5.seconds, - randomFactor = 0.1, - maxNrOfRetries = -1).withDeploy(Deploy.local) - system.actorOf( - ClusterSingletonManager.props( - singletonProps, - terminationMessage = PoisonPill, - settings = 
ClusterSingletonManagerSettings(system)), - name = typeName + "Coordinator") - } + List("counter", + "rebalancingCounter", + "RememberCounterEntities", + "AnotherRememberCounter", + "RememberCounter", + "RebalancingRememberCounter", + "AutoMigrateRememberRegionTest").foreach { typeName => + val rebalanceEnabled = typeName.toLowerCase.startsWith("rebalancing") + val rememberEnabled = typeName.toLowerCase.contains("remember") + val singletonProps = BackoffSupervisor + .props(childProps = coordinatorProps(typeName, rebalanceEnabled, rememberEnabled), + childName = "coordinator", + minBackoff = 5.seconds, + maxBackoff = 5.seconds, + randomFactor = 0.1, + maxNrOfRetries = -1) + .withDeploy(Deploy.local) + system.actorOf( + ClusterSingletonManager.props(singletonProps, + terminationMessage = PoisonPill, + settings = ClusterSingletonManagerSettings(system)), + name = typeName + "Coordinator") + } } def createRegion(typeName: String, rememberEntities: Boolean): ActorRef = { @@ -327,19 +332,17 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu entity-restart-backoff = 1s buffer-size = 1000 """).withFallback(system.settings.config.getConfig("akka.cluster.sharding")) - val settings = ClusterShardingSettings(cfg) - .withRememberEntities(rememberEntities) + val settings = ClusterShardingSettings(cfg).withRememberEntities(rememberEntities) system.actorOf( - ShardRegion.props( - typeName = typeName, - entityProps = _ => qualifiedCounterProps(typeName), - settings = settings, - coordinatorPath = "/user/" + typeName + "Coordinator/singleton/coordinator", - extractEntityId = extractEntityId, - extractShardId = extractShardId, - handOffStopMessage = PoisonPill, - replicator, - majorityMinCap = 3), + ShardRegion.props(typeName = typeName, + entityProps = _ => qualifiedCounterProps(typeName), + settings = settings, + coordinatorPath = "/user/" + typeName + "Coordinator/singleton/coordinator", + extractEntityId = extractEntityId, + extractShardId = 
extractShardId, + handOffStopMessage = PoisonPill, + replicator, + majorityMinCap = 3), name = typeName + "Region") } @@ -460,17 +463,16 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu buffer-size = 1000 """).withFallback(system.settings.config.getConfig("akka.cluster.sharding")) val settings = ClusterShardingSettings(cfg) - val proxy = system.actorOf( - ShardRegion.proxyProps( - typeName = "counter", - dataCenter = None, - settings, - coordinatorPath = "/user/counterCoordinator/singleton/coordinator", - extractEntityId = extractEntityId, - extractShardId = extractShardId, - system.deadLetters, - majorityMinCap = 0), - name = "regionProxy") + val proxy = system.actorOf(ShardRegion.proxyProps(typeName = "counter", + dataCenter = None, + settings, + coordinatorPath = + "/user/counterCoordinator/singleton/coordinator", + extractEntityId = extractEntityId, + extractShardId = extractShardId, + system.deadLetters, + majorityMinCap = 0), + name = "regionProxy") proxy ! 
Get(1) expectMsg(2) @@ -630,27 +632,24 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu "easy to use with extensions" in within(50.seconds) { runOn(third, fourth, fifth, sixth) { //#counter-start - val counterRegion: ActorRef = ClusterSharding(system).start( - typeName = "Counter", - entityProps = Props[Counter], - settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + val counterRegion: ActorRef = ClusterSharding(system).start(typeName = "Counter", + entityProps = Props[Counter], + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId) //#counter-start - ClusterSharding(system).start( - typeName = "AnotherCounter", - entityProps = Props[AnotherCounter], - settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + ClusterSharding(system).start(typeName = "AnotherCounter", + entityProps = Props[AnotherCounter], + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId) //#counter-supervisor-start - ClusterSharding(system).start( - typeName = "SupervisedCounter", - entityProps = Props[CounterSupervisor], - settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + ClusterSharding(system).start(typeName = "SupervisedCounter", + entityProps = Props[CounterSupervisor], + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId) //#counter-supervisor-start } enterBarrier("extension-started") @@ -687,12 +686,11 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu } "easy API for starting" in within(50.seconds) { runOn(first) { - val counterRegionViaStart: ActorRef = ClusterSharding(system).start( - typeName = "ApiTest", - entityProps = Props[Counter], - 
settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + val counterRegionViaStart: ActorRef = ClusterSharding(system).start(typeName = "ApiTest", + entityProps = Props[Counter], + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId) val counterRegionViaGet: ActorRef = ClusterSharding(system).shardRegion("ApiTest") @@ -705,12 +703,11 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu "demonstrate API for DC proxy" in within(50.seconds) { runOn(sixth) { // #proxy-dc - val counterProxyDcB: ActorRef = ClusterSharding(system).startProxy( - typeName = "Counter", - role = None, - dataCenter = Some("B"), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + val counterProxyDcB: ActorRef = ClusterSharding(system).startProxy(typeName = "Counter", + role = None, + dataCenter = Some("B"), + extractEntityId = extractEntityId, + extractShardId = extractShardId) // #proxy-dc } enterBarrier("after-dc-proxy") @@ -960,4 +957,3 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu } } } - diff --git a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala index 75e6f7c7fd..56867aa794 100644 --- a/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala +++ b/akka-cluster-sharding/src/multi-jvm/scala/akka/cluster/sharding/MultiDcClusterShardingSpec.scala @@ -80,16 +80,20 @@ class MultiDcClusterShardingSpecMultiJvmNode2 extends MultiDcClusterShardingSpec class MultiDcClusterShardingSpecMultiJvmNode3 extends MultiDcClusterShardingSpec class MultiDcClusterShardingSpecMultiJvmNode4 extends MultiDcClusterShardingSpec -abstract class MultiDcClusterShardingSpec extends 
MultiNodeSpec(MultiDcClusterShardingSpecConfig) with MultiNodeClusterSpec - with STMultiNodeSpec with ImplicitSender { +abstract class MultiDcClusterShardingSpec + extends MultiNodeSpec(MultiDcClusterShardingSpecConfig) + with MultiNodeClusterSpec + with STMultiNodeSpec + with ImplicitSender { import MultiDcClusterShardingSpec._ import MultiDcClusterShardingSpecConfig._ def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) startSharding() - withClue(s"Failed waiting for ${cluster.selfUniqueAddress} to be up. Current state: ${cluster.state}" + cluster.state) { + withClue( + s"Failed waiting for ${cluster.selfUniqueAddress} to be up. Current state: ${cluster.state}" + cluster.state) { within(15.seconds) { awaitAssert(cluster.state.members.exists { m => m.uniqueAddress == cluster.selfUniqueAddress && m.status == MemberStatus.Up @@ -101,12 +105,11 @@ abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterSh } def startSharding(): Unit = { - ClusterSharding(system).start( - typeName = "Entity", - entityProps = Props[Entity](), - settings = ClusterShardingSettings(system), - extractEntityId = extractEntityId, - extractShardId = extractShardId) + ClusterSharding(system).start(typeName = "Entity", + entityProps = Props[Entity](), + settings = ClusterShardingSettings(system), + extractEntityId = extractEntityId, + extractShardId = extractShardId) } lazy val region = ClusterSharding(system).shardRegion("Entity") @@ -190,12 +193,11 @@ abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterSh "allow proxy within same data center" in { runOn(second) { - val proxy = ClusterSharding(system).startProxy( - typeName = "Entity", - role = None, - dataCenter = None, // by default use own DC - extractEntityId = extractEntityId, - extractShardId = extractShardId) + val proxy = ClusterSharding(system).startProxy(typeName = "Entity", + role = None, + dataCenter = None, // 
by default use own DC + extractEntityId = extractEntityId, + extractShardId = extractShardId) proxy ! GetCount("5") expectMsg(1) } @@ -204,12 +206,11 @@ abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterSh "allow proxy across different data centers" in { runOn(second) { - val proxy = ClusterSharding(system).startProxy( - typeName = "Entity", - role = None, - dataCenter = Some("DC2"), // proxy to other DC - extractEntityId = extractEntityId, - extractShardId = extractShardId) + val proxy = ClusterSharding(system).startProxy(typeName = "Entity", + role = None, + dataCenter = Some("DC2"), // proxy to other DC + extractEntityId = extractEntityId, + extractShardId = extractShardId) proxy ! GetCount("5") expectMsg(2) @@ -219,4 +220,3 @@ abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterSh } } - diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala index c459ddc8ef..86a92fa7e8 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ClusterShardingInternalsSpec.scala @@ -28,8 +28,7 @@ object ClusterShardingInternalsSpec { } } -class ClusterShardingInternalsSpec extends AkkaSpec( - """ +class ClusterShardingInternalsSpec extends AkkaSpec(""" |akka.actor.provider = cluster |akka.remote.netty.tcp.port = 0 |akka.remote.artery.canonical.port = 0 @@ -46,21 +45,19 @@ class ClusterShardingInternalsSpec extends AkkaSpec( val extractEntityId = mock[ShardRegion.ExtractEntityId] val extractShardId = mock[ShardRegion.ExtractShardId] - clusterSharding.start( - typeName = typeName, - entityProps = Props.empty, - settings = settingsWithRole, - extractEntityId = extractEntityId, - extractShardId = extractShardId, - allocationStrategy = mock[ShardAllocationStrategy], - 
handOffStopMessage = PoisonPill) + clusterSharding.start(typeName = typeName, + entityProps = Props.empty, + settings = settingsWithRole, + extractEntityId = extractEntityId, + extractShardId = extractShardId, + allocationStrategy = mock[ShardAllocationStrategy], + handOffStopMessage = PoisonPill) - verify(clusterSharding).startProxy( - ArgumentMatchers.eq(typeName), - ArgumentMatchers.eq(settingsWithRole.role), - ArgumentMatchers.eq(None), - ArgumentMatchers.eq(extractEntityId), - ArgumentMatchers.eq(extractShardId)) + verify(clusterSharding).startProxy(ArgumentMatchers.eq(typeName), + ArgumentMatchers.eq(settingsWithRole.role), + ArgumentMatchers.eq(None), + ArgumentMatchers.eq(extractEntityId), + ArgumentMatchers.eq(extractShardId)) } "HandOffStopper must stop the entity even if the entity doesn't handle handOffStopMessage" in { @@ -68,8 +65,7 @@ class ClusterShardingInternalsSpec extends AkkaSpec( val shardName = "test" val emptyHandlerActor = system.actorOf(Props(new EmptyHandlerActor)) val handOffStopper = system.actorOf( - Props(new HandOffStopper(shardName, probe.ref, Set(emptyHandlerActor), HandOffStopMessage, 10.millis)) - ) + Props(new HandOffStopper(shardName, probe.ref, Set(emptyHandlerActor), HandOffStopMessage, 10.millis))) watch(emptyHandlerActor) expectTerminated(emptyHandlerActor, 1.seconds) diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala index c0c6125e53..1f6ca0c62f 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConcurrentStartupShardingSpec.scala @@ -44,9 +44,10 @@ object ConcurrentStartupShardingSpec { class Starter(n: Int, probe: ActorRef) extends Actor { override def preStart(): Unit = { - val region = ClusterSharding(context.system).start(s"type-$n", Props.empty, 
ClusterShardingSettings(context.system), - { case msg => (msg.toString, msg) }, - _ => "1") + val region = + ClusterSharding(context.system).start(s"type-$n", Props.empty, ClusterShardingSettings(context.system), { + case msg => (msg.toString, msg) + }, _ => "1") probe ! region } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConstantRateEntityRecoveryStrategySpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConstantRateEntityRecoveryStrategySpec.scala index 0c9863ab3e..28bf9ca64b 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConstantRateEntityRecoveryStrategySpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ConstantRateEntityRecoveryStrategySpec.scala @@ -18,11 +18,11 @@ class ConstantRateEntityRecoveryStrategySpec extends AkkaSpec { import system.dispatcher val entities = Set[EntityId]("1", "2", "3", "4", "5") val startTime = System.nanoTime() - val resultWithTimes = strategy.recoverEntities(entities).map( - _.map(entityIds => entityIds -> (System.nanoTime() - startTime).nanos)) + val resultWithTimes = + strategy.recoverEntities(entities).map(_.map(entityIds => entityIds -> (System.nanoTime() - startTime).nanos)) - val result = Await.result(Future.sequence(resultWithTimes), 6.seconds) - .toVector.sortBy { case (_, duration) => duration } + val result = + Await.result(Future.sequence(resultWithTimes), 6.seconds).toVector.sortBy { case (_, duration) => duration } result.size should ===(3) val scheduledEntities = result.map(_._1) diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala index a2eb6d3b37..de7a52735e 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala +++ 
b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/CoordinatedShutdownShardingSpec.scala @@ -43,12 +43,21 @@ class CoordinatedShutdownShardingSpec extends AkkaSpec(CoordinatedShutdownShardi val sys2 = ActorSystem(system.name, system.settings.config) val sys3 = system - val region1 = ClusterSharding(sys1).start("type1", Props[EchoActor](), ClusterShardingSettings(sys1), - extractEntityId, extractShardId) - val region2 = ClusterSharding(sys2).start("type1", Props[EchoActor](), ClusterShardingSettings(sys2), - extractEntityId, extractShardId) - val region3 = ClusterSharding(sys3).start("type1", Props[EchoActor](), ClusterShardingSettings(sys3), - extractEntityId, extractShardId) + val region1 = ClusterSharding(sys1).start("type1", + Props[EchoActor](), + ClusterShardingSettings(sys1), + extractEntityId, + extractShardId) + val region2 = ClusterSharding(sys2).start("type1", + Props[EchoActor](), + ClusterShardingSettings(sys2), + extractEntityId, + extractShardId) + val region3 = ClusterSharding(sys3).start("type1", + Props[EchoActor](), + ClusterShardingSettings(sys3), + extractEntityId, + extractShardId) val probe1 = TestProbe()(sys1) val probe2 = TestProbe()(sys2) diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/InactiveEntityPassivationSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/InactiveEntityPassivationSpec.scala index 530bdfdcb1..daeda42b9b 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/InactiveEntityPassivationSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/InactiveEntityPassivationSpec.scala @@ -60,21 +60,17 @@ class InactiveEntityPassivationSpec extends AkkaSpec(InactiveEntityPassivationSp Cluster(system).join(Cluster(system).selfAddress) val probe = TestProbe() val settings = ClusterShardingSettings(system) - val region = ClusterSharding(system).start( - "myType", - InactiveEntityPassivationSpec.Entity.props(probe.ref), - settings, - 
extractEntityId, - extractShardId, - ClusterSharding(system).defaultShardAllocationStrategy(settings), - Passivate - ) + val region = ClusterSharding(system).start("myType", + InactiveEntityPassivationSpec.Entity.props(probe.ref), + settings, + extractEntityId, + extractShardId, + ClusterSharding(system).defaultShardAllocationStrategy(settings), + Passivate) region ! 1 region ! 2 - val responses = Set( - probe.expectMsgType[GotIt], - probe.expectMsgType[GotIt]) + val responses = Set(probe.expectMsgType[GotIt], probe.expectMsgType[GotIt]) responses.map(_.id) should ===(Set("1", "2")) val timeOneSawMessage = responses.find(_.id == "1").get.when Thread.sleep(1000) @@ -92,9 +88,7 @@ class InactiveEntityPassivationSpec extends AkkaSpec(InactiveEntityPassivationSp // but it can be re activated just fine: region ! 1 region ! 2 - Set( - probe.expectMsgType[GotIt], - probe.expectMsgType[GotIt]).map(_.id) should ===(Set("1", "2")) + Set(probe.expectMsgType[GotIt], probe.expectMsgType[GotIt]).map(_.id) should ===(Set("1", "2")) } } diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/JoinConfigCompatCheckShardingSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/JoinConfigCompatCheckShardingSpec.scala index 730df9b19e..464aed9b7d 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/JoinConfigCompatCheckShardingSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/JoinConfigCompatCheckShardingSpec.scala @@ -22,14 +22,12 @@ class JoinConfigCompatCheckShardingSpec extends AkkaSpec() { } val baseConfig: Config = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.actor.provider = "cluster" akka.coordinated-shutdown.terminate-actor-system = on akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 - """ - ) + """) "A Joining Node" must { @@ -37,8 +35,7 @@ class JoinConfigCompatCheckShardingSpec extends AkkaSpec() { "NOT be allowed to join a cluster using a different 
value for akka.cluster.sharding.state-store-mode" taggedAs LongRunningTest in { val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { # use 'persistence' for state store @@ -48,8 +45,7 @@ class JoinConfigCompatCheckShardingSpec extends AkkaSpec() { enforce-on-join = on } } - """ - ) + """) val seedNode = ActorSystem(system.name, baseConfig) val joiningNode = ActorSystem(system.name, joinNodeConfig.withFallback(baseConfig)) diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala index 5a83040310..721d5023e1 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/LeastShardAllocationStrategySpec.scala @@ -17,10 +17,9 @@ class LeastShardAllocationStrategySpec extends AkkaSpec { def createAllocations(aCount: Int, bCount: Int = 0, cCount: Int = 0): Map[ActorRef, Vector[String]] = { val shards = (1 to (aCount + bCount + cCount)).map(n => ("00" + n.toString).takeRight(3)) - Map( - regionA -> shards.take(aCount).toVector, - regionB -> shards.slice(aCount, aCount + bCount).toVector, - regionC -> shards.takeRight(cCount).toVector) + Map(regionA -> shards.take(aCount).toVector, + regionB -> shards.slice(aCount, aCount + bCount).toVector, + regionC -> shards.takeRight(cCount).toVector) } "LeastShardAllocationStrategy" must { @@ -125,8 +124,8 @@ class LeastShardAllocationStrategySpec extends AkkaSpec { val allocations = createAllocations(aCount = 50, cCount = 50) allocationStrategy.rebalance(allocations, Set.empty).futureValue should ===(Set("001", "002")) allocationStrategy.rebalance(allocations, Set("001", "002")).futureValue should ===(Set("051", "052")) - allocationStrategy.rebalance(allocations, Set("001", "002", "051", "052")) - .futureValue should 
===(Set("003", "004")) + allocationStrategy.rebalance(allocations, Set("001", "002", "051", "052")).futureValue should ===( + Set("003", "004")) } "limit number of simultaneous rebalance" in { diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardSpec.scala index f915c26b71..1bca0ad1f3 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/PersistentShardSpec.scala @@ -19,8 +19,7 @@ object PersistentShardSpec { } } - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.persistence.journal.plugin = "akka.persistence.journal.inmem" """.stripMargin) } @@ -30,17 +29,10 @@ class PersistentShardSpec extends AkkaSpec(PersistentShardSpec.config) with Word "Persistent Shard" must { "remember entities started with StartEntity" in { - val props = Props(new PersistentShard( - "cats", - "shard-1", - id => Props(new EntityActor(id)), - ClusterShardingSettings(system), - { + val props = Props( + new PersistentShard("cats", "shard-1", id => Props(new EntityActor(id)), ClusterShardingSettings(system), { case _ => ("entity-1", "msg") - }, - _ => "shard-1", - PoisonPill - )) + }, _ => "shard-1", PoisonPill)) val persistentShard = system.actorOf(props) watch(persistentShard) diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala index 26b97bff5a..157667facc 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/ProxyShardingSpec.scala @@ -41,34 +41,29 @@ class ProxyShardingSpec extends AkkaSpec(ProxyShardingSpec.config) { clusterSharding.startProxy("myType", Some(role), idExtractor, 
shardResolver) "Proxy should be found" in { - val proxyActor: ActorRef = Await.result( - system - .actorSelection("akka://ProxyShardingSpec/system/sharding/myTypeProxy") - .resolveOne(FiniteDuration(5, SECONDS)), - 3.seconds) + val proxyActor: ActorRef = Await.result(system + .actorSelection("akka://ProxyShardingSpec/system/sharding/myTypeProxy") + .resolveOne(FiniteDuration(5, SECONDS)), + 3.seconds) proxyActor.path should not be null proxyActor.path.toString should endWith("Proxy") } "Shard region should be found" in { - val shardRegion: ActorRef = clusterSharding.start( - "myType", - TestActors.echoActorProps, - shardingSettings, - messageExtractor) + val shardRegion: ActorRef = + clusterSharding.start("myType", TestActors.echoActorProps, shardingSettings, messageExtractor) shardRegion.path should not be null shardRegion.path.toString should endWith("myType") } "Shard coordinator should be found" in { - val shardCoordinator: ActorRef = Await.result( - system - .actorSelection( - "akka://ProxyShardingSpec/system/sharding/myTypeCoordinator") - .resolveOne(FiniteDuration(5, SECONDS)), - 3.seconds) + val shardCoordinator: ActorRef = + Await.result(system + .actorSelection("akka://ProxyShardingSpec/system/sharding/myTypeCoordinator") + .resolveOne(FiniteDuration(5, SECONDS)), + 3.seconds) shardCoordinator.path should not be null shardCoordinator.path.toString should endWith("Coordinator") diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala index 3e6882ae63..3e816989b8 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/RemoveInternalClusterShardingDataSpec.scala @@ -91,13 +91,14 @@ object RemoveInternalClusterShardingDataSpec { } -class RemoveInternalClusterShardingDataSpec 
extends AkkaSpec(RemoveInternalClusterShardingDataSpec.config) - with ImplicitSender { +class RemoveInternalClusterShardingDataSpec + extends AkkaSpec(RemoveInternalClusterShardingDataSpec.config) + with ImplicitSender { import RemoveInternalClusterShardingDataSpec._ - val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) + val storageLocations = + List("akka.persistence.journal.leveldb.dir", "akka.persistence.snapshot-store.local.dir").map(s => + new File(system.settings.config.getString(s))) override protected def atStartup(): Unit = { storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteDirectory(dir)) @@ -131,8 +132,9 @@ class RemoveInternalClusterShardingDataSpec extends AkkaSpec(RemoveInternalClust "work when no data" in within(10.seconds) { hasSnapshots("type1") should ===(false) hasEvents("type1") should ===(false) - val rm = system.actorOf(RemoveInternalClusterShardingData.RemoveOnePersistenceId.props( - journalPluginId = "", persistenceId("type1"), testActor)) + val rm = system.actorOf( + RemoveInternalClusterShardingData.RemoveOnePersistenceId + .props(journalPluginId = "", persistenceId("type1"), testActor)) watch(rm) expectMsg(Result(Success(Removals(events = false, snapshots = false)))) expectTerminated(rm) @@ -145,8 +147,9 @@ class RemoveInternalClusterShardingDataSpec extends AkkaSpec(RemoveInternalClust hasSnapshots("type1") should ===(false) hasEvents("type1") should ===(true) - val rm = system.actorOf(RemoveInternalClusterShardingData.RemoveOnePersistenceId.props( - journalPluginId = "", persistenceId("type1"), testActor)) + val rm = system.actorOf( + RemoveInternalClusterShardingData.RemoveOnePersistenceId + .props(journalPluginId = "", persistenceId("type1"), testActor)) watch(rm) expectMsg(Result(Success(Removals(events = true, snapshots = false)))) expectTerminated(rm) @@ -164,8 +167,9 @@ class 
RemoveInternalClusterShardingDataSpec extends AkkaSpec(RemoveInternalClust } hasEvents("type2") should ===(true) - val rm = system.actorOf(RemoveInternalClusterShardingData.RemoveOnePersistenceId.props( - journalPluginId = "", persistenceId("type2"), testActor)) + val rm = system.actorOf( + RemoveInternalClusterShardingData.RemoveOnePersistenceId + .props(journalPluginId = "", persistenceId("type2"), testActor)) watch(rm) expectMsg(Result(Success(Removals(events = true, snapshots = true)))) expectTerminated(rm) @@ -197,9 +201,11 @@ class RemoveInternalClusterShardingDataSpec extends AkkaSpec(RemoveInternalClust hasEvents(typeName) should ===(true) } - val result = RemoveInternalClusterShardingData.remove( - system, journalPluginId = "", typeNames.toSet, - terminateSystem = false, remove2dot3Data = true) + val result = RemoveInternalClusterShardingData.remove(system, + journalPluginId = "", + typeNames.toSet, + terminateSystem = false, + remove2dot3Data = true) Await.ready(result, remaining) typeNames.foreach { typeName => diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/SupervisionSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/SupervisionSpec.scala index 23853552f2..ce55889c4c 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/SupervisionSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/SupervisionSpec.scala @@ -15,8 +15,7 @@ import scala.concurrent.duration._ object SupervisionSpec { val config = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.actor.provider = "cluster" akka.loglevel = INFO """) @@ -68,23 +67,22 @@ class SupervisionSpec extends AkkaSpec(SupervisionSpec.config) with ImplicitSend "allow passivation" in { - val supervisedProps = BackoffSupervisor.props(Backoff.onStop( - Props(new PassivatingActor()), - childName = "child", - minBackoff = 1.seconds, - maxBackoff = 30.seconds, - randomFactor = 0.2, - maxNrOfRetries = -1 - 
).withFinalStopMessage(_ == StopMessage)) + val supervisedProps = BackoffSupervisor.props( + Backoff + .onStop(Props(new PassivatingActor()), + childName = "child", + minBackoff = 1.seconds, + maxBackoff = 30.seconds, + randomFactor = 0.2, + maxNrOfRetries = -1) + .withFinalStopMessage(_ == StopMessage)) Cluster(system).join(Cluster(system).selfAddress) - val region = ClusterSharding(system).start( - "passy", - supervisedProps, - ClusterShardingSettings(system), - idExtractor, - shardResolver - ) + val region = ClusterSharding(system).start("passy", + supervisedProps, + ClusterShardingSettings(system), + idExtractor, + shardResolver) region ! Msg(10, "hello") val response = expectMsgType[Response](5.seconds) @@ -103,22 +101,21 @@ class SupervisionSpec extends AkkaSpec(SupervisionSpec.config) with ImplicitSend "allow passivation" in { - val supervisedProps = BackoffSupervisor.props(BackoffOpts.onStop( - Props(new PassivatingActor()), - childName = "child", - minBackoff = 1.seconds, - maxBackoff = 30.seconds, - randomFactor = 0.2 - ).withFinalStopMessage(_ == StopMessage)) + val supervisedProps = BackoffSupervisor.props( + BackoffOpts + .onStop(Props(new PassivatingActor()), + childName = "child", + minBackoff = 1.seconds, + maxBackoff = 30.seconds, + randomFactor = 0.2) + .withFinalStopMessage(_ == StopMessage)) Cluster(system).join(Cluster(system).selfAddress) - val region = ClusterSharding(system).start( - "passy", - supervisedProps, - ClusterShardingSettings(system), - idExtractor, - shardResolver - ) + val region = ClusterSharding(system).start("passy", + supervisedProps, + ClusterShardingSettings(system), + idExtractor, + shardResolver) region ! 
Msg(10, "hello") val response = expectMsgType[Response](5.seconds) diff --git a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala index 4ef42ca059..2140ecc618 100644 --- a/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala +++ b/akka-cluster-sharding/src/test/scala/akka/cluster/sharding/protobuf/ClusterShardingMessageSerializerSpec.scala @@ -30,11 +30,11 @@ class ClusterShardingMessageSerializerSpec extends AkkaSpec { "ClusterShardingMessageSerializer" must { "be able to serialize ShardCoordinator snapshot State" in { - val state = State( - shards = Map("a" -> region1, "b" -> region2, "c" -> region2), - regions = Map(region1 -> Vector("a"), region2 -> Vector("b", "c"), region3 -> Vector.empty[String]), - regionProxies = Set(regionProxy1, regionProxy2), - unallocatedShards = Set("d")) + val state = State(shards = Map("a" -> region1, "b" -> region2, "c" -> region2), + regions = + Map(region1 -> Vector("a"), region2 -> Vector("b", "c"), region3 -> Vector.empty[String]), + regionProxies = Set(regionProxy1, regionProxy2), + unallocatedShards = Set("d")) checkSerialization(state) } diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala index 22db792c08..aacfc0d882 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/client/ClusterClient.scala @@ -42,6 +42,7 @@ import akka.util.ccompat._ import scala.collection.immutable.{ HashMap, HashSet } object ClusterClientSettings { + /** * Create settings from the default configuration * `akka.cluster.client`. 
@@ -108,29 +109,33 @@ object ClusterClientSettings { * to watch it from another actor and possibly acquire a new list of initialContacts from some * external service registry */ -final class ClusterClientSettings( - val initialContacts: Set[ActorPath], - val establishingGetContactsInterval: FiniteDuration, - val refreshContactsInterval: FiniteDuration, - val heartbeatInterval: FiniteDuration, - val acceptableHeartbeatPause: FiniteDuration, - val bufferSize: Int, - val reconnectTimeout: Option[FiniteDuration]) extends NoSerializationVerificationNeeded { +final class ClusterClientSettings(val initialContacts: Set[ActorPath], + val establishingGetContactsInterval: FiniteDuration, + val refreshContactsInterval: FiniteDuration, + val heartbeatInterval: FiniteDuration, + val acceptableHeartbeatPause: FiniteDuration, + val bufferSize: Int, + val reconnectTimeout: Option[FiniteDuration]) + extends NoSerializationVerificationNeeded { require(bufferSize >= 0 && bufferSize <= 10000, "bufferSize must be >= 0 and <= 10000") /** * For binary/source compatibility */ - def this( - initialContacts: Set[ActorPath], - establishingGetContactsInterval: FiniteDuration, - refreshContactsInterval: FiniteDuration, - heartbeatInterval: FiniteDuration, - acceptableHeartbeatPause: FiniteDuration, - bufferSize: Int) = - this(initialContacts, establishingGetContactsInterval, refreshContactsInterval, heartbeatInterval, - acceptableHeartbeatPause, bufferSize, None) + def this(initialContacts: Set[ActorPath], + establishingGetContactsInterval: FiniteDuration, + refreshContactsInterval: FiniteDuration, + heartbeatInterval: FiniteDuration, + acceptableHeartbeatPause: FiniteDuration, + bufferSize: Int) = + this(initialContacts, + establishingGetContactsInterval, + refreshContactsInterval, + heartbeatInterval, + acceptableHeartbeatPause, + bufferSize, + None) /** * Scala API @@ -154,7 +159,8 @@ final class ClusterClientSettings( def withRefreshContactsInterval(refreshContactsInterval: FiniteDuration): 
ClusterClientSettings = copy(refreshContactsInterval = refreshContactsInterval) - def withHeartbeat(heartbeatInterval: FiniteDuration, acceptableHeartbeatPause: FiniteDuration): ClusterClientSettings = + def withHeartbeat(heartbeatInterval: FiniteDuration, + acceptableHeartbeatPause: FiniteDuration): ClusterClientSettings = copy(heartbeatInterval = heartbeatInterval, acceptableHeartbeatPause = acceptableHeartbeatPause) def withBufferSize(bufferSize: Int): ClusterClientSettings = @@ -163,16 +169,20 @@ final class ClusterClientSettings( def withReconnectTimeout(reconnectTimeout: Option[FiniteDuration]): ClusterClientSettings = copy(reconnectTimeout = reconnectTimeout) - private def copy( - initialContacts: Set[ActorPath] = initialContacts, - establishingGetContactsInterval: FiniteDuration = establishingGetContactsInterval, - refreshContactsInterval: FiniteDuration = refreshContactsInterval, - heartbeatInterval: FiniteDuration = heartbeatInterval, - acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause, - bufferSize: Int = bufferSize, - reconnectTimeout: Option[FiniteDuration] = reconnectTimeout): ClusterClientSettings = - new ClusterClientSettings(initialContacts, establishingGetContactsInterval, refreshContactsInterval, - heartbeatInterval, acceptableHeartbeatPause, bufferSize, reconnectTimeout) + private def copy(initialContacts: Set[ActorPath] = initialContacts, + establishingGetContactsInterval: FiniteDuration = establishingGetContactsInterval, + refreshContactsInterval: FiniteDuration = refreshContactsInterval, + heartbeatInterval: FiniteDuration = heartbeatInterval, + acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause, + bufferSize: Int = bufferSize, + reconnectTimeout: Option[FiniteDuration] = reconnectTimeout): ClusterClientSettings = + new ClusterClientSettings(initialContacts, + establishingGetContactsInterval, + refreshContactsInterval, + heartbeatInterval, + acceptableHeartbeatPause, + bufferSize, + reconnectTimeout) } /** 
@@ -196,6 +206,7 @@ final case class ContactPointAdded(override val contactPoint: ActorPath) extends final case class ContactPointRemoved(override val contactPoint: ActorPath) extends ContactPointChange sealed abstract class SubscribeContactPoints + /** * Subscribe to a cluster client's contact point changes where * it is guaranteed that a sender receives the initial state @@ -205,6 +216,7 @@ sealed abstract class SubscribeContactPoints * terminates. */ case object SubscribeContactPoints extends SubscribeContactPoints { + /** * Java API: get the singleton instance */ @@ -212,10 +224,12 @@ case object SubscribeContactPoints extends SubscribeContactPoints { } sealed abstract class UnsubscribeContactPoints + /** * Explicitly unsubscribe from contact point change events. */ case object UnsubscribeContactPoints extends UnsubscribeContactPoints { + /** * Java API: get the singleton instance */ @@ -223,11 +237,13 @@ case object UnsubscribeContactPoints extends UnsubscribeContactPoints { } sealed abstract class GetContactPoints + /** * Get the contact points known to this client. A ``ContactPoints`` message * will be replied. */ case object GetContactPoints extends GetContactPoints { + /** * Java API: get the singleton instance */ @@ -259,6 +275,7 @@ object ClusterClient { @SerialVersionUID(1L) final case class Send(path: String, msg: Any, localAffinity: Boolean) { + /** * Convenience constructor with `localAffinity` false */ @@ -344,8 +361,7 @@ final class ClusterClient(settings: ClusterClientSettings) extends Actor with Ac var subscribers = Vector.empty[ActorRef] import context.dispatcher - val heartbeatTask = context.system.scheduler.schedule( - heartbeatInterval, heartbeatInterval, self, HeartbeatTick) + val heartbeatTask = context.system.scheduler.schedule(heartbeatInterval, heartbeatInterval, self, HeartbeatTick) var refreshContactsTask: Option[Cancellable] = None scheduleRefreshContactsTick(establishingGetContactsInterval) self ! 
RefreshContactsTick @@ -353,18 +369,17 @@ final class ClusterClient(settings: ClusterClientSettings) extends Actor with Ac var buffer = MessageBuffer.empty def scheduleRefreshContactsTick(interval: FiniteDuration): Unit = { - refreshContactsTask foreach { _.cancel() } - refreshContactsTask = Some(context.system.scheduler.schedule( - interval, interval, self, RefreshContactsTick)) + refreshContactsTask.foreach { _.cancel() } + refreshContactsTask = Some(context.system.scheduler.schedule(interval, interval, self, RefreshContactsTick)) } override def postStop(): Unit = { super.postStop() heartbeatTask.cancel() - refreshContactsTask foreach { _.cancel() } + refreshContactsTask.foreach { _.cancel() } } - def receive = establishing orElse contactPointMessages + def receive = establishing.orElse(contactPointMessages) def establishing: Actor.Receive = { val connectTimerCancelable = settings.reconnectTimeout.map { timeout => @@ -376,14 +391,14 @@ final class ClusterClient(settings: ClusterClientSettings) extends Actor with Ac if (contactPoints.nonEmpty) { contactPaths = contactPoints.map(ActorPath.fromString).to(HashSet) contacts = contactPaths.map(context.actorSelection) - contacts foreach { _ ! Identify(Array.emptyByteArray) } + contacts.foreach { _ ! Identify(Array.emptyByteArray) } } publishContactPoints() case ActorIdentity(_, Some(receptionist)) => log.info("Connected to [{}]", receptionist.path) scheduleRefreshContactsTick(refreshContactsInterval) sendBuffered(receptionist) - context.become(active(receptionist) orElse contactPointMessages) + context.become(active(receptionist).orElse(contactPointMessages)) connectTimerCancelable.foreach(_.cancel()) failureDetector.heartbeat() self ! 
HeartbeatTick // will register us as active client of the selected receptionist @@ -398,7 +413,8 @@ final class ClusterClient(settings: ClusterClientSettings) extends Actor with Ac case Publish(topic, msg) => buffer(DistributedPubSubMediator.Publish(topic, msg)) case ReconnectTimeout => - log.warning("Receptionist reconnect not successful within {} stopping cluster client", settings.reconnectTimeout) + log.warning("Receptionist reconnect not successful within {} stopping cluster client", + settings.reconnectTimeout) context.stop(self) case ReceptionistShutdown => // ok, haven't chosen a receptionist yet } @@ -406,11 +422,11 @@ final class ClusterClient(settings: ClusterClientSettings) extends Actor with Ac def active(receptionist: ActorRef): Actor.Receive = { case Send(path, msg, localAffinity) => - receptionist forward DistributedPubSubMediator.Send(path, msg, localAffinity) + receptionist.forward(DistributedPubSubMediator.Send(path, msg, localAffinity)) case SendToAll(path, msg) => - receptionist forward DistributedPubSubMediator.SendToAll(path, msg) + receptionist.forward(DistributedPubSubMediator.SendToAll(path, msg)) case Publish(topic, msg) => - receptionist forward DistributedPubSubMediator.Publish(topic, msg) + receptionist.forward(DistributedPubSubMediator.Publish(topic, msg)) case HeartbeatTick => if (!failureDetector.isAvailable) { log.info("Lost contact with [{}], reestablishing connection", receptionist) @@ -454,7 +470,7 @@ final class ClusterClient(settings: ClusterClientSettings) extends Actor with Ac def sendGetContacts(): Unit = { val sendTo = if (contacts.isEmpty) initialContactsSel - else if (contacts.size == 1) initialContactsSel union contacts + else if (contacts.size == 1) initialContactsSel.union(contacts) else contacts if (log.isDebugEnabled) log.debug(s"""Sending GetContacts to [${sendTo.mkString(",")}]""") @@ -495,7 +511,7 @@ final class ClusterClient(settings: ClusterClientSettings) extends Actor with Ac def reestablish(): Unit = { 
sendGetContacts() scheduleRefreshContactsTick(establishingGetContactsInterval) - context.become(establishing orElse contactPointMessages) + context.become(establishing.orElse(contactPointMessages)) failureDetector.heartbeat() } } @@ -578,8 +594,9 @@ final class ClusterClientReceptionist(system: ExtendedActorSystem) extends Exten } // important to use val mediator here to activate it outside of ClusterReceptionist constructor val mediator = pubSubMediator - system.systemActorOf(ClusterReceptionist.props(mediator, ClusterReceptionistSettings(config)) - .withDispatcher(dispatcher), name) + system.systemActorOf( + ClusterReceptionist.props(mediator, ClusterReceptionistSettings(config)).withDispatcher(dispatcher), + name) } } @@ -592,6 +609,7 @@ final class ClusterClientReceptionist(system: ExtendedActorSystem) extends Exten } object ClusterReceptionistSettings { + /** * Create settings from the default configuration * `akka.cluster.client.receptionist`. @@ -639,10 +657,10 @@ object ClusterReceptionistSettings { * @param responseTunnelReceiveTimeout The actor that tunnel response messages to the * client will be stopped after this time of inactivity. 
*/ -final class ClusterReceptionistSettings( - val role: Option[String], - val numberOfContacts: Int, - val responseTunnelReceiveTimeout: FiniteDuration) extends NoSerializationVerificationNeeded { +final class ClusterReceptionistSettings(val role: Option[String], + val numberOfContacts: Int, + val responseTunnelReceiveTimeout: FiniteDuration) + extends NoSerializationVerificationNeeded { def withRole(role: String): ClusterReceptionistSettings = copy(role = ClusterReceptionistSettings.roleOption(role)) @@ -654,14 +672,12 @@ final class ClusterReceptionistSettings( def withResponseTunnelReceiveTimeout(responseTunnelReceiveTimeout: FiniteDuration): ClusterReceptionistSettings = copy(responseTunnelReceiveTimeout = responseTunnelReceiveTimeout) - def withHeartbeat( - heartbeatInterval: FiniteDuration, - acceptableHeartbeatPause: FiniteDuration, - failureDetectionInterval: FiniteDuration): ClusterReceptionistSettings = - copy( - heartbeatInterval = heartbeatInterval, - acceptableHeartbeatPause = acceptableHeartbeatPause, - failureDetectionInterval = failureDetectionInterval) + def withHeartbeat(heartbeatInterval: FiniteDuration, + acceptableHeartbeatPause: FiniteDuration, + failureDetectionInterval: FiniteDuration): ClusterReceptionistSettings = + copy(heartbeatInterval = heartbeatInterval, + acceptableHeartbeatPause = acceptableHeartbeatPause, + failureDetectionInterval = failureDetectionInterval) // BEGIN BINARY COMPATIBILITY // The following is required in order to maintain binary @@ -681,13 +697,12 @@ final class ClusterReceptionistSettings( private var _acceptableHeartbeatPause: FiniteDuration = 13.seconds private var _failureDetectionInterval: FiniteDuration = 2.second - def this( - role: Option[String], - numberOfContacts: Int, - responseTunnelReceiveTimeout: FiniteDuration, - heartbeatInterval: FiniteDuration, - acceptableHeartbeatPause: FiniteDuration, - failureDetectionInterval: FiniteDuration) = { + def this(role: Option[String], + numberOfContacts: Int, + 
responseTunnelReceiveTimeout: FiniteDuration, + heartbeatInterval: FiniteDuration, + acceptableHeartbeatPause: FiniteDuration, + failureDetectionInterval: FiniteDuration) = { this(role, numberOfContacts, responseTunnelReceiveTimeout) this._heartbeatInterval = heartbeatInterval this._acceptableHeartbeatPause = acceptableHeartbeatPause @@ -696,20 +711,18 @@ final class ClusterReceptionistSettings( // END BINARY COMPATIBILITY - private def copy( - role: Option[String] = role, - numberOfContacts: Int = numberOfContacts, - responseTunnelReceiveTimeout: FiniteDuration = responseTunnelReceiveTimeout, - heartbeatInterval: FiniteDuration = heartbeatInterval, - acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause, - failureDetectionInterval: FiniteDuration = failureDetectionInterval): ClusterReceptionistSettings = - new ClusterReceptionistSettings( - role, - numberOfContacts, - responseTunnelReceiveTimeout, - heartbeatInterval, - acceptableHeartbeatPause, - failureDetectionInterval) + private def copy(role: Option[String] = role, + numberOfContacts: Int = numberOfContacts, + responseTunnelReceiveTimeout: FiniteDuration = responseTunnelReceiveTimeout, + heartbeatInterval: FiniteDuration = heartbeatInterval, + acceptableHeartbeatPause: FiniteDuration = acceptableHeartbeatPause, + failureDetectionInterval: FiniteDuration = failureDetectionInterval): ClusterReceptionistSettings = + new ClusterReceptionistSettings(role, + numberOfContacts, + responseTunnelReceiveTimeout, + heartbeatInterval, + acceptableHeartbeatPause, + failureDetectionInterval) } /** @@ -738,6 +751,7 @@ final case class ClusterClientUp(override val clusterClient: ActorRef) extends C final case class ClusterClientUnreachable(override val clusterClient: ActorRef) extends ClusterClientInteraction sealed abstract class SubscribeClusterClients + /** * Subscribe to a cluster receptionist's client interactions where * it is guaranteed that a sender receives the initial state @@ -747,6 +761,7 @@ sealed 
abstract class SubscribeClusterClients * terminates. */ case object SubscribeClusterClients extends SubscribeClusterClients { + /** * Java API: get the singleton instance */ @@ -754,10 +769,12 @@ case object SubscribeClusterClients extends SubscribeClusterClients { } sealed abstract class UnsubscribeClusterClients + /** * Explicitly unsubscribe from client interaction events. */ case object UnsubscribeClusterClients extends UnsubscribeClusterClients { + /** * Java API: get the singleton instance */ @@ -765,11 +782,13 @@ case object UnsubscribeClusterClients extends UnsubscribeClusterClients { } sealed abstract class GetClusterClients + /** * Get the cluster clients known to this receptionist. A ``ClusterClients`` message * will be replied. */ case object GetClusterClients extends GetClusterClients { + /** * Java API: get the singleton instance */ @@ -796,9 +815,7 @@ object ClusterReceptionist { /** * Scala API: Factory method for `ClusterReceptionist` [[akka.actor.Props]]. */ - def props( - pubSubMediator: ActorRef, - settings: ClusterReceptionistSettings): Props = + def props(pubSubMediator: ActorRef, settings: ClusterReceptionistSettings): Props = Props(new ClusterReceptionist(pubSubMediator, settings)).withDeploy(Deploy.local) /** @@ -835,11 +852,11 @@ object ClusterReceptionist { case Ping => // keep alive from client case ReceiveTimeout => log.debug("ClientResponseTunnel for client [{}] stopped due to inactivity", client.path) - context stop self + context.stop(self) case msg => client.tell(msg, Actor.noSender) if (isAsk) - context stop self + context.stop(self) } } } @@ -869,9 +886,10 @@ object ClusterReceptionist { * */ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterReceptionistSettings) - extends Actor with ActorLogging { + extends Actor + with ActorLogging { - import DistributedPubSubMediator.{ Send, SendToAll, Publish } + import DistributedPubSubMediator.{ Publish, Send, SendToAll } import ClusterReceptionist.Internal._ import 
settings._ @@ -880,9 +898,7 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep val verboseHeartbeat = cluster.settings.Debug.VerboseHeartbeatLogging import cluster.selfAddress - require( - role.forall(cluster.selfRoles.contains), - s"This cluster member [$selfAddress] doesn't have the role [$role]") + require(role.forall(cluster.selfRoles.contains), s"This cluster member [$selfAddress] doesn't have the role [$role]") var nodes: immutable.SortedSet[Address] = { def hashFor(node: Address): Int = node match { @@ -906,11 +922,9 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep var subscribers = Vector.empty[ActorRef] - val checkDeadlinesTask = context.system.scheduler.schedule( - failureDetectionInterval, - failureDetectionInterval, - self, - CheckDeadlines)(context.dispatcher) + val checkDeadlinesTask = + context.system.scheduler.schedule(failureDetectionInterval, failureDetectionInterval, self, CheckDeadlines)( + context.dispatcher) override def preStart(): Unit = { super.preStart() @@ -920,7 +934,7 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep override def postStop(): Unit = { super.postStop() - cluster unsubscribe self + cluster.unsubscribe(self) checkDeadlinesTask.cancel() clientInteractions.keySet.foreach(_ ! ReceptionistShutdown) } @@ -954,7 +968,9 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep if (numberOfContacts >= nodes.size) { val contacts = Contacts(nodes.iterator.map(a => self.path.toStringWithAddress(a)).to(immutable.IndexedSeq)) if (log.isDebugEnabled) - log.debug("Client [{}] gets contactPoints [{}] (all nodes)", sender().path, contacts.contactPoints.mkString(",")) + log.debug("Client [{}] gets contactPoints [{}] (all nodes)", + sender().path, + contacts.contactPoints.mkString(",")) sender() ! 
contacts } else { // using toStringWithAddress in case the client is local, normally it is not, and @@ -963,7 +979,7 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep val slice = { val first = nodes.rangeFrom(a).tail.take(numberOfContacts) if (first.size == numberOfContacts) first - else first union nodes.take(numberOfContacts - first.size) + else first.union(nodes.take(numberOfContacts - first.size)) } val contacts = Contacts(slice.iterator.map(a => self.path.toStringWithAddress(a)).to(immutable.IndexedSeq)) if (log.isDebugEnabled) @@ -972,7 +988,9 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep } case state: CurrentClusterState => - nodes = nodes.empty union state.members.collect { case m if m.status != MemberStatus.Joining && matchingRole(m) => m.address } + nodes = nodes.empty.union(state.members.collect { + case m if m.status != MemberStatus.Joining && matchingRole(m) => m.address + }) consistentHash = ConsistentHash(nodes, virtualNodesFactor) case MemberUp(m) => @@ -983,7 +1001,7 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep case MemberRemoved(m, _) => if (m.address == selfAddress) - context stop self + context.stop(self) else if (matchingRole(m)) { nodes -= m.address consistentHash = ConsistentHash(nodes, virtualNodesFactor) @@ -1039,4 +1057,3 @@ final class ClusterReceptionist(pubSubMediator: ActorRef, settings: ClusterRecep clientsPublished = publishableClients } } - diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala index da97d54f20..ca0fc09077 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializer.scala @@ -17,7 +17,8 @@ import 
java.io.NotSerializableException * INTERNAL API: Serializer of ClusterClient messages. */ private[akka] class ClusterClientMessageSerializer(val system: ExtendedActorSystem) - extends SerializerWithStringManifest with BaseSerializer { + extends SerializerWithStringManifest + with BaseSerializer { import ClusterReceptionist.Internal._ private lazy val serialization = SerializationExtension(system) @@ -32,10 +33,18 @@ private[akka] class ClusterClientMessageSerializer(val system: ExtendedActorSyst private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef]( ContactsManifest -> contactsFromBinary, - GetContactsManifest -> { _ => GetContacts }, - HeartbeatManifest -> { _ => Heartbeat }, - HeartbeatRspManifest -> { _ => HeartbeatRsp }, - ReceptionistShutdownManifest -> { _ => ReceptionistShutdown }) + GetContactsManifest -> { _ => + GetContacts + }, + HeartbeatManifest -> { _ => + Heartbeat + }, + HeartbeatRspManifest -> { _ => + HeartbeatRsp + }, + ReceptionistShutdownManifest -> { _ => + ReceptionistShutdown + }) override def manifest(obj: AnyRef): String = obj match { case _: Contacts => ContactsManifest @@ -60,8 +69,9 @@ private[akka] class ClusterClientMessageSerializer(val system: ExtendedActorSyst override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = fromBinaryMap.get(manifest) match { case Some(f) => f(bytes) - case None => throw new NotSerializableException( - s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") + case None => + throw new NotSerializableException( + s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") } private def contactsToProto(m: Contacts): cm.Contacts = diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala index 2278d7ee93..a58e10b881 100644 --- 
a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/DistributedPubSubMediator.scala @@ -32,6 +32,7 @@ import com.typesafe.config.Config import akka.dispatch.Dispatchers object DistributedPubSubSettings { + /** * Create settings from the default configuration * `akka.cluster.pub-sub`. @@ -47,11 +48,13 @@ object DistributedPubSubSettings { new DistributedPubSubSettings( role = roleOption(config.getString("role")), routingLogic = config.getString("routing-logic") match { - case "random" => RandomRoutingLogic() - case "round-robin" => RoundRobinRoutingLogic() - case "consistent-hashing" => throw new IllegalArgumentException(s"'consistent-hashing' routing logic can't be used by the pub-sub mediator") - case "broadcast" => BroadcastRoutingLogic() - case other => throw new IllegalArgumentException(s"Unknown 'routing-logic': [$other]") + case "random" => RandomRoutingLogic() + case "round-robin" => RoundRobinRoutingLogic() + case "consistent-hashing" => + throw new IllegalArgumentException( + s"'consistent-hashing' routing logic can't be used by the pub-sub mediator") + case "broadcast" => BroadcastRoutingLogic() + case other => throw new IllegalArgumentException(s"Unknown 'routing-logic': [$other]") }, gossipInterval = config.getDuration("gossip-interval", MILLISECONDS).millis, removedTimeToLive = config.getDuration("removed-time-to-live", MILLISECONDS).millis, @@ -87,27 +90,30 @@ object DistributedPubSubSettings { * the registries. Next chunk will be transferred in next round of gossip. * @param sendToDeadLettersWhenNoSubscribers When a message is published to a topic with no subscribers send it to the dead letters. 
*/ -final class DistributedPubSubSettings( - val role: Option[String], - val routingLogic: RoutingLogic, - val gossipInterval: FiniteDuration, - val removedTimeToLive: FiniteDuration, - val maxDeltaElements: Int, - val sendToDeadLettersWhenNoSubscribers: Boolean) extends NoSerializationVerificationNeeded { +final class DistributedPubSubSettings(val role: Option[String], + val routingLogic: RoutingLogic, + val gossipInterval: FiniteDuration, + val removedTimeToLive: FiniteDuration, + val maxDeltaElements: Int, + val sendToDeadLettersWhenNoSubscribers: Boolean) + extends NoSerializationVerificationNeeded { @deprecated("Use the other constructor instead.", "2.5.5") - def this( - role: Option[String], - routingLogic: RoutingLogic, - gossipInterval: FiniteDuration, - removedTimeToLive: FiniteDuration, - maxDeltaElements: Int) { - this(role, routingLogic, gossipInterval, removedTimeToLive, maxDeltaElements, sendToDeadLettersWhenNoSubscribers = true) + def this(role: Option[String], + routingLogic: RoutingLogic, + gossipInterval: FiniteDuration, + removedTimeToLive: FiniteDuration, + maxDeltaElements: Int) { + this(role, + routingLogic, + gossipInterval, + removedTimeToLive, + maxDeltaElements, + sendToDeadLettersWhenNoSubscribers = true) } - require( - !routingLogic.isInstanceOf[ConsistentHashingRoutingLogic], - "'ConsistentHashingRoutingLogic' can't be used by the pub-sub mediator") + require(!routingLogic.isInstanceOf[ConsistentHashingRoutingLogic], + "'ConsistentHashingRoutingLogic' can't be used by the pub-sub mediator") def withRole(role: String): DistributedPubSubSettings = copy(role = DistributedPubSubSettings.roleOption(role)) @@ -129,13 +135,18 @@ final class DistributedPubSubSettings( copy(sendToDeadLettersWhenNoSubscribers = sendToDeadLetterWhenNoSubscribers) private def copy( - role: Option[String] = role, - routingLogic: RoutingLogic = routingLogic, - gossipInterval: FiniteDuration = gossipInterval, - removedTimeToLive: FiniteDuration = removedTimeToLive, - 
maxDeltaElements: Int = maxDeltaElements, - sendToDeadLettersWhenNoSubscribers: Boolean = sendToDeadLettersWhenNoSubscribers): DistributedPubSubSettings = - new DistributedPubSubSettings(role, routingLogic, gossipInterval, removedTimeToLive, maxDeltaElements, sendToDeadLettersWhenNoSubscribers) + role: Option[String] = role, + routingLogic: RoutingLogic = routingLogic, + gossipInterval: FiniteDuration = gossipInterval, + removedTimeToLive: FiniteDuration = removedTimeToLive, + maxDeltaElements: Int = maxDeltaElements, + sendToDeadLettersWhenNoSubscribers: Boolean = sendToDeadLettersWhenNoSubscribers): DistributedPubSubSettings = + new DistributedPubSubSettings(role, + routingLogic, + gossipInterval, + removedTimeToLive, + maxDeltaElements, + sendToDeadLettersWhenNoSubscribers) } object DistributedPubSubMediator { @@ -150,6 +161,7 @@ object DistributedPubSubMediator { @SerialVersionUID(1L) final case class Remove(path: String) @SerialVersionUID(1L) final case class Subscribe(topic: String, group: Option[String], ref: ActorRef) { require(topic != null && topic != "", "topic must be defined") + /** * Convenience constructor with `group` None */ @@ -173,19 +185,23 @@ object DistributedPubSubMediator { } @SerialVersionUID(1L) final case class SubscribeAck(subscribe: Subscribe) extends DeadLetterSuppression @SerialVersionUID(1L) final case class UnsubscribeAck(unsubscribe: Unsubscribe) - @SerialVersionUID(1L) final case class Publish(topic: String, msg: Any, sendOneMessageToEachGroup: Boolean) extends DistributedPubSubMessage { + @SerialVersionUID(1L) final case class Publish(topic: String, msg: Any, sendOneMessageToEachGroup: Boolean) + extends DistributedPubSubMessage { def this(topic: String, msg: Any) = this(topic, msg, sendOneMessageToEachGroup = false) } object Publish { def apply(topic: String, msg: Any) = new Publish(topic, msg) } - @SerialVersionUID(1L) final case class Send(path: String, msg: Any, localAffinity: Boolean) extends DistributedPubSubMessage { + 
@SerialVersionUID(1L) final case class Send(path: String, msg: Any, localAffinity: Boolean) + extends DistributedPubSubMessage { + /** * Convenience constructor with `localAffinity` false */ def this(path: String, msg: Any) = this(path, msg, localAffinity = false) } - @SerialVersionUID(1L) final case class SendToAll(path: String, msg: Any, allButSelf: Boolean = false) extends DistributedPubSubMessage { + @SerialVersionUID(1L) final case class SendToAll(path: String, msg: Any, allButSelf: Boolean = false) + extends DistributedPubSubMessage { def this(path: String, msg: Any) = this(path, msg, allButSelf = false) } @@ -211,6 +227,7 @@ object DistributedPubSubMediator { */ @SerialVersionUID(1L) final case class CurrentTopics(topics: Set[String]) { + /** * Java API */ @@ -231,22 +248,21 @@ object DistributedPubSubMediator { case object Prune @SerialVersionUID(1L) - final case class Bucket( - owner: Address, - version: Long, - content: TreeMap[String, ValueHolder]) + final case class Bucket(owner: Address, version: Long, content: TreeMap[String, ValueHolder]) @SerialVersionUID(1L) final case class ValueHolder(version: Long, ref: Option[ActorRef]) { - @transient lazy val routee: Option[Routee] = ref map ActorRefRoutee + @transient lazy val routee: Option[Routee] = ref.map(ActorRefRoutee) } @SerialVersionUID(1L) - final case class Status(versions: Map[Address, Long], isReplyToStatus: Boolean) extends DistributedPubSubMessage - with DeadLetterSuppression + final case class Status(versions: Map[Address, Long], isReplyToStatus: Boolean) + extends DistributedPubSubMessage + with DeadLetterSuppression @SerialVersionUID(1L) - final case class Delta(buckets: immutable.Iterable[Bucket]) extends DistributedPubSubMessage - with DeadLetterSuppression + final case class Delta(buckets: immutable.Iterable[Bucket]) + extends DistributedPubSubMessage + with DeadLetterSuppression // Only for testing purposes, to verify replication case object DeltaCount @@ -322,12 +338,12 @@ object 
DistributedPubSubMediator { def defaultReceive: Receive = { case msg @ Subscribe(_, _, ref) => - context watch ref + context.watch(ref) subscribers += ref pruneDeadline = None context.parent ! Subscribed(SubscribeAck(msg), sender()) case msg @ Unsubscribe(_, _, ref) => - context unwatch ref + context.unwatch(ref) remove(ref) context.parent ! Unsubscribed(UnsubscribeAck(msg), sender()) case Terminated(ref) => @@ -339,13 +355,13 @@ object DistributedPubSubMediator { } case TerminateRequest => if (subscribers.isEmpty && context.children.isEmpty) - context stop self + context.stop(self) else context.parent ! NewSubscriberArrived case Count => sender() ! subscribers.size case msg => - subscribers foreach { _ forward msg } + subscribers.foreach { _.forward(msg) } } def business: Receive @@ -360,14 +376,16 @@ object DistributedPubSubMediator { } } - class Topic(val emptyTimeToLive: FiniteDuration, routingLogic: RoutingLogic) extends TopicLike with PerGroupingBuffer { + class Topic(val emptyTimeToLive: FiniteDuration, routingLogic: RoutingLogic) + extends TopicLike + with PerGroupingBuffer { def business = { case msg @ Subscribe(_, Some(group), _) => val encGroup = encName(group) bufferOr(mkKey(self.path / encGroup), msg, sender()) { context.child(encGroup) match { - case Some(g) => g forward msg - case None => newGroupActor(encGroup) forward msg + case Some(g) => g.forward(msg) + case None => newGroupActor(encGroup).forward(msg) } } pruneDeadline = None @@ -375,14 +393,14 @@ object DistributedPubSubMediator { val encGroup = encName(group) bufferOr(mkKey(self.path / encGroup), msg, sender()) { context.child(encGroup) match { - case Some(g) => g forward msg + case Some(g) => g.forward(msg) case None => // no such group here } } case msg: Subscribed => - context.parent forward msg + context.parent.forward(msg) case msg: Unsubscribed => - context.parent forward msg + context.parent.forward(msg) case NoMoreSubscribers => val key = mkKey(sender()) initializeGrouping(key) @@ 
-398,7 +416,7 @@ object DistributedPubSubMediator { def newGroupActor(encGroup: String): ActorRef = { val g = context.actorOf(Props(classOf[Group], emptyTimeToLive, routingLogic), name = encGroup) - context watch g + context.watch(g) context.parent ! RegisterTopic(g) g } @@ -408,7 +426,7 @@ object DistributedPubSubMediator { def business = { case SendToOneSubscriber(msg) => if (subscribers.nonEmpty) - Router(routingLogic, (subscribers map ActorRefRoutee).toVector).route(wrapIfNeeded(msg), sender()) + Router(routingLogic, subscribers.map(ActorRefRoutee).toVector).route(wrapIfNeeded(msg), sender()) } } @@ -504,22 +522,23 @@ trait DistributedPubSubMessage extends Serializable * Not intended for subclassing by user code. */ @DoNotInherit -class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Actor with ActorLogging with PerGroupingBuffer { +class DistributedPubSubMediator(settings: DistributedPubSubSettings) + extends Actor + with ActorLogging + with PerGroupingBuffer { import DistributedPubSubMediator._ import DistributedPubSubMediator.Internal._ import settings._ - require( - !routingLogic.isInstanceOf[ConsistentHashingRoutingLogic], - "'consistent-hashing' routing logic can't be used by the pub-sub mediator") + require(!routingLogic.isInstanceOf[ConsistentHashingRoutingLogic], + "'consistent-hashing' routing logic can't be used by the pub-sub mediator") val cluster = Cluster(context.system) import cluster.selfAddress - require( - role.forall(cluster.selfRoles.contains), - s"This cluster member [${selfAddress}] doesn't have the role [$role]") + require(role.forall(cluster.selfRoles.contains), + s"This cluster member [${selfAddress}] doesn't have the role [$role]") val removedTimeToLiveMillis = removedTimeToLive.toMillis @@ -551,7 +570,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act override def postStop(): Unit = { super.postStop() - cluster unsubscribe self + cluster.unsubscribe(self) gossipTask.cancel() 
pruneTask.cancel() } @@ -609,8 +628,8 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act bufferOr(mkKey(self.path / encTopic), msg, sender()) { context.child(encTopic) match { - case Some(t) => t forward msg - case None => newTopicActor(encTopic) forward msg + case Some(t) => t.forward(msg) + case None => newTopicActor(encTopic).forward(msg) } } @@ -636,7 +655,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act val encTopic = encName(topic) bufferOr(mkKey(self.path / encTopic), msg, sender()) { context.child(encTopic) match { - case Some(t) => t forward msg + case Some(t) => t.forward(msg) case None => // no such topic here } } @@ -664,7 +683,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act // only accept deltas/buckets from known nodes, otherwise there is a risk of // adding back entries when nodes are removed if (nodes(sender().path.address)) { - buckets foreach { b => + buckets.foreach { b => if (nodes(b.owner)) { val myBucket = registry(b.owner) if (b.version > myBucket.version) { @@ -676,7 +695,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act case GossipTick => gossip() - case Prune => prune() + case Prune => prune() case Terminated(a) => val key = mkKey(a) @@ -715,7 +734,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act case MemberRemoved(m, _) => if (m.address == selfAddress) - context stop self + context.stop(self) else if (matchingRole(m)) { nodes -= m.address registry -= m.address @@ -725,9 +744,10 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act case Count => val count = registry.map { - case (owner, bucket) => bucket.content.count { - case (_, valueHolder) => valueHolder.ref.isDefined - } + case (owner, bucket) => + bucket.content.count { + case (_, valueHolder) => valueHolder.ref.isDefined + } }.sum sender() ! 
count @@ -745,7 +765,8 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act } private def ignoreOrSendToDeadLetters(msg: Any) = - if (settings.sendToDeadLettersWhenNoSubscribers) context.system.deadLetters ! DeadLetter(msg, sender(), context.self) + if (settings.sendToDeadLettersWhenNoSubscribers) + context.system.deadLetters ! DeadLetter(msg, sender(), context.self) def publish(path: String, msg: Any, allButSelf: Boolean = false): Unit = { val refs = for { @@ -772,11 +793,10 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act ignoreOrSendToDeadLetters(msg) } else { val wrappedMsg = SendToOneSubscriber(msg) - groups foreach { - group => - val routees = group.map(_._2).toVector - if (routees.nonEmpty) - Router(routingLogic, routees).route(wrappedMsg, sender()) + groups.foreach { group => + val routees = group.map(_._2).toVector + if (routees.nonEmpty) + Router(routingLogic, routees).route(wrappedMsg, sender()) } } } @@ -784,9 +804,8 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act def put(key: String, valueOption: Option[ActorRef]): Unit = { val bucket = registry(selfAddress) val v = nextVersion() - registry += (selfAddress -> bucket.copy( - version = v, - content = bucket.content + (key -> ValueHolder(v, valueOption)))) + registry += (selfAddress -> bucket.copy(version = v, + content = bucket.content + (key -> ValueHolder(v, valueOption)))) } def getCurrentTopics(): Set[String] = { @@ -841,7 +860,7 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act /** * Gossip to peer nodes. 
*/ - def gossip(): Unit = selectRandomNode((nodes - selfAddress).toVector) foreach gossipTo + def gossip(): Unit = selectRandomNode((nodes - selfAddress).toVector).foreach(gossipTo) def gossipTo(address: Address): Unit = { val sel = context.actorSelection(self.path.toStringWithAddress(address)) @@ -849,10 +868,10 @@ class DistributedPubSubMediator(settings: DistributedPubSubSettings) extends Act } def selectRandomNode(addresses: immutable.IndexedSeq[Address]): Option[Address] = - if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current nextInt addresses.size)) + if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current.nextInt(addresses.size))) def prune(): Unit = { - registry foreach { + registry.foreach { case (owner, bucket) => val oldRemoved = bucket.content.collect { case (key, ValueHolder(version, None)) if (bucket.version - version > removedTimeToLiveMillis) => key diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala index 911d4d0083..5ec241d285 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializer.scala @@ -24,7 +24,8 @@ import java.io.NotSerializableException * INTERNAL API: Protobuf serializer of DistributedPubSubMediator messages. 
*/ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActorSystem) - extends SerializerWithStringManifest with BaseSerializer { + extends SerializerWithStringManifest + with BaseSerializer { private lazy val serialization = SerializationExtension(system) @@ -70,8 +71,9 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = fromBinaryMap.get(manifest) match { case Some(f) => f(bytes) - case None => throw new NotSerializableException( - s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") + case None => + throw new NotSerializableException( + s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") } private def compress(msg: MessageLite): Array[Byte] = { @@ -109,17 +111,14 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor Address(address.getProtocol, address.getSystem, address.getHostname, address.getPort) private def statusToProto(status: Status): dm.Status = { - val versions = status.versions.map { - case (a, v) => - dm.Status.Version.newBuilder(). - setAddress(addressToProto(a)). - setTimestamp(v). 
- build() - }.toVector.asJava - dm.Status.newBuilder() - .addAllVersions(versions) - .setReplyToStatus(status.isReplyToStatus) - .build() + val versions = status.versions + .map { + case (a, v) => + dm.Status.Version.newBuilder().setAddress(addressToProto(a)).setTimestamp(v).build() + } + .toVector + .asJava + dm.Status.newBuilder().addAllVersions(versions).setReplyToStatus(status.isReplyToStatus).build() } private def statusFromBinary(bytes: Array[Byte]): Status = @@ -127,25 +126,32 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor private def statusFromProto(status: dm.Status): Status = { val isReplyToStatus = if (status.hasReplyToStatus) status.getReplyToStatus else false - Status(status.getVersionsList.asScala.iterator.map(v => - addressFromProto(v.getAddress) -> v.getTimestamp).toMap, isReplyToStatus) + Status(status.getVersionsList.asScala.iterator.map(v => addressFromProto(v.getAddress) -> v.getTimestamp).toMap, + isReplyToStatus) } private def deltaToProto(delta: Delta): dm.Delta = { - val buckets = delta.buckets.map { b => - val entries = b.content.map { - case (key, value) => - val b = dm.Delta.Entry.newBuilder().setKey(key).setVersion(value.version) - value.ref.foreach(r => b.setRef(Serialization.serializedActorPath(r))) - b.build() - }.toVector.asJava + val buckets = delta.buckets + .map { b => + val entries = b.content + .map { + case (key, value) => + val b = dm.Delta.Entry.newBuilder().setKey(key).setVersion(value.version) + value.ref.foreach(r => b.setRef(Serialization.serializedActorPath(r))) + b.build() + } + .toVector + .asJava - dm.Delta.Bucket.newBuilder(). - setOwner(addressToProto(b.owner)). - setVersion(b.version). - addAllContent(entries). 
- build() - }.toVector.asJava + dm.Delta.Bucket + .newBuilder() + .setOwner(addressToProto(b.owner)) + .setVersion(b.version) + .addAllContent(entries) + .build() + } + .toVector + .asJava dm.Delta.newBuilder().addAllBuckets(buckets).build() } @@ -154,9 +160,10 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor private def deltaFromProto(delta: dm.Delta): Delta = Delta(delta.getBucketsList.asScala.toVector.map { b => - val content: TreeMap[String, ValueHolder] = scala.collection.immutable.TreeMap.from(b.getContentList.asScala.iterator.map { entry => - entry.getKey -> ValueHolder(entry.getVersion, if (entry.hasRef) Some(resolveActorRef(entry.getRef)) else None) - }) + val content: TreeMap[String, ValueHolder] = + scala.collection.immutable.TreeMap.from(b.getContentList.asScala.iterator.map { entry => + entry.getKey -> ValueHolder(entry.getVersion, if (entry.hasRef) Some(resolveActorRef(entry.getRef)) else None) + }) Bucket(addressFromProto(b.getOwner), b.getVersion, content) }) @@ -165,11 +172,12 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor } private def sendToProto(send: Send): dm.Send = { - dm.Send.newBuilder(). - setPath(send.path). - setLocalAffinity(send.localAffinity). - setPayload(payloadToProto(send.msg)). - build() + dm.Send + .newBuilder() + .setPath(send.path) + .setLocalAffinity(send.localAffinity) + .setPayload(payloadToProto(send.msg)) + .build() } private def sendFromBinary(bytes: Array[Byte]): Send = @@ -179,11 +187,12 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor Send(send.getPath, payloadFromProto(send.getPayload), send.getLocalAffinity) private def sendToAllToProto(sendToAll: SendToAll): dm.SendToAll = { - dm.SendToAll.newBuilder(). - setPath(sendToAll.path). - setAllButSelf(sendToAll.allButSelf). - setPayload(payloadToProto(sendToAll.msg)). 
- build() + dm.SendToAll + .newBuilder() + .setPath(sendToAll.path) + .setAllButSelf(sendToAll.allButSelf) + .setPayload(payloadToProto(sendToAll.msg)) + .build() } private def sendToAllFromBinary(bytes: Array[Byte]): SendToAll = @@ -193,10 +202,7 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor SendToAll(sendToAll.getPath, payloadFromProto(sendToAll.getPayload), sendToAll.getAllButSelf) private def publishToProto(publish: Publish): dm.Publish = { - dm.Publish.newBuilder(). - setTopic(publish.topic). - setPayload(payloadToProto(publish.msg)). - build() + dm.Publish.newBuilder().setTopic(publish.topic).setPayload(payloadToProto(publish.msg)).build() } private def publishFromBinary(bytes: Array[Byte]): Publish = @@ -206,9 +212,7 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor Publish(publish.getTopic, payloadFromProto(publish.getPayload)) private def sendToOneSubscriberToProto(sendToOneSubscriber: SendToOneSubscriber): dm.SendToOneSubscriber = { - dm.SendToOneSubscriber.newBuilder(). - setPayload(payloadToProto(sendToOneSubscriber.msg)). - build() + dm.SendToOneSubscriber.newBuilder().setPayload(payloadToProto(sendToOneSubscriber.msg)).build() } private def sendToOneSubscriberFromBinary(bytes: Array[Byte]): SendToOneSubscriber = @@ -220,8 +224,9 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor private def payloadToProto(msg: Any): dm.Payload = { val m = msg.asInstanceOf[AnyRef] val msgSerializer = serialization.findSerializerFor(m) - val builder = dm.Payload.newBuilder(). 
- setEnclosedMessage(ByteString.copyFrom(msgSerializer.toBinary(m))) + val builder = dm.Payload + .newBuilder() + .setEnclosedMessage(ByteString.copyFrom(msgSerializer.toBinary(m))) .setSerializerId(msgSerializer.identifier) val ms = Serializers.manifestFor(msgSerializer, m) @@ -232,10 +237,7 @@ private[akka] class DistributedPubSubMessageSerializer(val system: ExtendedActor private def payloadFromProto(payload: dm.Payload): AnyRef = { val manifest = if (payload.hasMessageManifest) payload.getMessageManifest.toStringUtf8 else "" - serialization.deserialize( - payload.getEnclosedMessage.toByteArray, - payload.getSerializerId, - manifest).get + serialization.deserialize(payload.getEnclosedMessage.toByteArray, payload.getSerializerId, manifest).get } } diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala index 234e0f0624..acb96abba6 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonManager.scala @@ -44,8 +44,8 @@ object ClusterSingletonManagerSettings { */ def apply(system: ActorSystem): ClusterSingletonManagerSettings = apply(system.settings.config.getConfig("akka.cluster.singleton")) - // note that this setting has some additional logic inside the ClusterSingletonManager - // falling back to DowningProvider.downRemovalMargin if it is off/Zero + // note that this setting has some additional logic inside the ClusterSingletonManager + // falling back to DowningProvider.downRemovalMargin if it is off/Zero .withRemovalMargin(Cluster(system).settings.DownRemovalMargin) /** @@ -53,11 +53,11 @@ object ClusterSingletonManagerSettings { * the default configuration `akka.cluster.singleton`. 
*/ def apply(config: Config): ClusterSingletonManagerSettings = - new ClusterSingletonManagerSettings( - singletonName = config.getString("singleton-name"), - role = roleOption(config.getString("role")), - removalMargin = Duration.Zero, // defaults to ClusterSettins.DownRemovalMargin - handOverRetryInterval = config.getDuration("hand-over-retry-interval", MILLISECONDS).millis) + new ClusterSingletonManagerSettings(singletonName = config.getString("singleton-name"), + role = roleOption(config.getString("role")), + removalMargin = Duration.Zero, // defaults to ClusterSettins.DownRemovalMargin + handOverRetryInterval = + config.getDuration("hand-over-retry-interval", MILLISECONDS).millis) /** * Java API: Create settings from the default configuration @@ -99,15 +99,16 @@ object ClusterSingletonManagerSettings { * over has started or the previous oldest member is removed from the cluster * (+ `removalMargin`). */ -final class ClusterSingletonManagerSettings( - val singletonName: String, - val role: Option[String], - val removalMargin: FiniteDuration, - val handOverRetryInterval: FiniteDuration) extends NoSerializationVerificationNeeded { +final class ClusterSingletonManagerSettings(val singletonName: String, + val role: Option[String], + val removalMargin: FiniteDuration, + val handOverRetryInterval: FiniteDuration) + extends NoSerializationVerificationNeeded { def withSingletonName(name: String): ClusterSingletonManagerSettings = copy(singletonName = name) - def withRole(role: String): ClusterSingletonManagerSettings = copy(role = ClusterSingletonManagerSettings.roleOption(role)) + def withRole(role: String): ClusterSingletonManagerSettings = + copy(role = ClusterSingletonManagerSettings.roleOption(role)) def withRole(role: Option[String]) = copy(role = role) @@ -117,11 +118,10 @@ final class ClusterSingletonManagerSettings( def withHandOverRetryInterval(retryInterval: FiniteDuration): ClusterSingletonManagerSettings = copy(handOverRetryInterval = retryInterval) - 
private def copy( - singletonName: String = singletonName, - role: Option[String] = role, - removalMargin: FiniteDuration = removalMargin, - handOverRetryInterval: FiniteDuration = handOverRetryInterval): ClusterSingletonManagerSettings = + private def copy(singletonName: String = singletonName, + role: Option[String] = role, + removalMargin: FiniteDuration = removalMargin, + handOverRetryInterval: FiniteDuration = handOverRetryInterval): ClusterSingletonManagerSettings = new ClusterSingletonManagerSettings(singletonName, role, removalMargin, handOverRetryInterval) } @@ -135,10 +135,7 @@ object ClusterSingletonManager { /** * Scala API: Factory method for `ClusterSingletonManager` [[akka.actor.Props]]. */ - def props( - singletonProps: Props, - terminationMessage: Any, - settings: ClusterSingletonManagerSettings): Props = + def props(singletonProps: Props, terminationMessage: Any, settings: ClusterSingletonManagerSettings): Props = Props(new ClusterSingletonManager(singletonProps, terminationMessage, settings)).withDeploy(Deploy.local) /** @@ -146,6 +143,7 @@ object ClusterSingletonManager { * public due to the `with FSM` type parameters */ sealed trait State + /** * INTERNAL API * public due to the `with FSM` type parameters @@ -156,24 +154,28 @@ object ClusterSingletonManager { * INTERNAL API */ private[akka] object Internal { + /** * Sent from new oldest to previous oldest to initiate the * hand-over process. `HandOverInProgress` and `HandOverDone` * are expected replies. */ case object HandOverToMe extends ClusterSingletonMessage with DeadLetterSuppression + /** * Confirmation by the previous oldest that the hand * over process, shut down of the singleton actor, has * started. */ case object HandOverInProgress extends ClusterSingletonMessage + /** * Confirmation by the previous oldest that the singleton * actor has been terminated and the hand-over process is * completed. 
*/ case object HandOverDone extends ClusterSingletonMessage + /** * Sent from from previous oldest to new oldest to * initiate the normal hand-over process. @@ -202,8 +204,10 @@ object ClusterSingletonManager { final case class YoungerData(oldestOption: Option[UniqueAddress]) extends Data final case class BecomingOldestData(previousOldestOption: Option[UniqueAddress]) extends Data final case class OldestData(singleton: ActorRef, singletonTerminated: Boolean = false) extends Data - final case class WasOldestData(singleton: ActorRef, singletonTerminated: Boolean, - newOldestOption: Option[UniqueAddress]) extends Data + final case class WasOldestData(singleton: ActorRef, + singletonTerminated: Boolean, + newOldestOption: Option[UniqueAddress]) + extends Data final case class HandingOverData(singleton: ActorRef, handOverTo: Option[ActorRef]) extends Data final case class StoppingData(singleton: ActorRef) extends Data case object EndData extends Data @@ -215,10 +219,12 @@ object ClusterSingletonManager { val CleanupTimer = "cleanup" object OldestChangedBuffer { + /** * Request to deliver one more event. */ case object GetNext + /** * The first event, corresponding to CurrentClusterState. */ @@ -281,12 +287,15 @@ object ClusterSingletonManager { } def handleInitial(state: CurrentClusterState): Unit = { - membersByAge = immutable.SortedSet.empty(ageOrdering) union state.members.filter(m => - m.status == MemberStatus.Up && matchingRole(m)) + membersByAge = immutable.SortedSet + .empty(ageOrdering) + .union(state.members.filter(m => m.status == MemberStatus.Up && matchingRole(m))) // If there is some removal in progress of an older node it's not safe to immediately become oldest, // removal of younger nodes doesn't matter. Note that it can also be started via restart after // ClusterSingletonManagerIsStuck. 
- val selfUpNumber = state.members.collectFirst { case m if m.uniqueAddress == cluster.selfUniqueAddress => m.upNumber }.getOrElse(Int.MaxValue) + val selfUpNumber = state.members + .collectFirst { case m if m.uniqueAddress == cluster.selfUniqueAddress => m.upNumber } + .getOrElse(Int.MaxValue) val safeToBeOldest = !state.members.exists { m => m.upNumber <= selfUpNumber && matchingRole(m) && (m.status == MemberStatus.Down || m.status == MemberStatus.Exiting || m.status == MemberStatus.Leaving) } @@ -428,11 +437,9 @@ class ClusterSingletonManagerIsStuck(message: String) extends AkkaException(mess * @param settings see [[ClusterSingletonManagerSettings]] */ @DoNotInherit -class ClusterSingletonManager( - singletonProps: Props, - terminationMessage: Any, - settings: ClusterSingletonManagerSettings) - extends Actor with FSM[ClusterSingletonManager.State, ClusterSingletonManager.Data] { +class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, settings: ClusterSingletonManagerSettings) + extends Actor + with FSM[ClusterSingletonManager.State, ClusterSingletonManager.Data] { import ClusterSingletonManager.Internal._ import ClusterSingletonManager.Internal.OldestChangedBuffer._ @@ -443,9 +450,8 @@ class ClusterSingletonManager( val selfUniqueAddressOption = Some(cluster.selfUniqueAddress) import cluster.settings.LogInfo - require( - role.forall(cluster.selfRoles.contains), - s"This cluster member [${cluster.selfAddress}] doesn't have the role [$role]") + require(role.forall(cluster.selfRoles.contains), + s"This cluster member [${cluster.selfAddress}] doesn't have the role [$role]") val removalMargin = if (settings.removalMargin <= Duration.Zero) cluster.downingProvider.downRemovalMargin @@ -453,8 +459,7 @@ class ClusterSingletonManager( val (maxHandOverRetries, maxTakeOverRetries) = { val n = (removalMargin.toMillis / handOverRetryInterval.toMillis).toInt - val minRetries = context.system.settings.config.getInt( - 
"akka.cluster.singleton.min-number-of-hand-over-retries") + val minRetries = context.system.settings.config.getInt("akka.cluster.singleton.min-number-of-hand-over-retries") require(minRetries >= 1, "min-number-of-hand-over-retries must be >= 1") val handOverRetries = math.max(minRetries, n + 3) val takeOverRetries = math.max(1, handOverRetries - 3) @@ -476,7 +481,7 @@ class ClusterSingletonManager( removed += node -> (Deadline.now + 15.minutes) def cleanupOverdueNotMemberAnyMore(): Unit = { - removed = removed filter { case (_, deadline) => deadline.hasTimeLeft } + removed = removed.filter { case (_, deadline) => deadline.hasTimeLeft } } // for CoordinatedShutdown @@ -539,8 +544,8 @@ class ClusterSingletonManager( when(Start) { case Event(StartOldestChangedBuffer, _) => - oldestChangedBuffer = context.actorOf(Props(classOf[OldestChangedBuffer], role). - withDispatcher(context.props.dispatcher)) + oldestChangedBuffer = + context.actorOf(Props(classOf[OldestChangedBuffer], role).withDispatcher(context.props.dispatcher)) getNextOldestChanged() stay @@ -550,9 +555,9 @@ class ClusterSingletonManager( // oldest immediately gotoOldest() else if (oldestOption == selfUniqueAddressOption) - goto(BecomingOldest) using BecomingOldestData(None) + goto(BecomingOldest).using(BecomingOldestData(None)) else - goto(Younger) using YoungerData(oldestOption) + goto(Younger).using(YoungerData(oldestOption)) } when(Younger) { @@ -565,12 +570,14 @@ class ClusterSingletonManager( case Some(prev) if removed.contains(prev) => gotoOldest() case Some(prev) => peer(prev.address) ! 
HandOverToMe - goto(BecomingOldest) using BecomingOldestData(previousOldestOption) + goto(BecomingOldest).using(BecomingOldestData(previousOldestOption)) } } else { - logInfo("Younger observed OldestChanged: [{} -> {}]", previousOldestOption.map(_.address), oldestOption.map(_.address)) + logInfo("Younger observed OldestChanged: [{} -> {}]", + previousOldestOption.map(_.address), + oldestOption.map(_.address)) getNextOldestChanged() - stay using YoungerData(oldestOption) + stay.using(YoungerData(oldestOption)) } case Event(MemberDowned(m), _) if m.uniqueAddress == cluster.selfUniqueAddress => @@ -589,7 +596,7 @@ class ClusterSingletonManager( logInfo("Previous oldest removed [{}]", m.address) addRemoved(m.uniqueAddress) // transition when OldestChanged - stay using YoungerData(None) + stay.using(YoungerData(None)) case Event(HandOverToMe, _) => // this node was probably quickly restarted with same hostname:port, @@ -610,9 +617,9 @@ class ClusterSingletonManager( if (sender().path.address == previousOldest.address) gotoOldest() else { - logInfo( - "Ignoring HandOverDone in BecomingOldest from [{}]. Expected previous oldest [{}]", - sender().path.address, previousOldest.address) + logInfo("Ignoring HandOverDone in BecomingOldest from [{}]. 
Expected previous oldest [{}]", + sender().path.address, + previousOldest.address) stay } @@ -628,7 +635,8 @@ class ClusterSingletonManager( scheduleDelayedMemberRemoved(m) stay - case Event(DelayedMemberRemoved(m), BecomingOldestData(Some(previousOldest))) if m.uniqueAddress == previousOldest => + case Event(DelayedMemberRemoved(m), BecomingOldestData(Some(previousOldest))) + if m.uniqueAddress == previousOldest => logInfo("Previous oldest [{}] removed", previousOldest.address) addRemoved(m.uniqueAddress) gotoOldest() @@ -640,20 +648,20 @@ class ClusterSingletonManager( cluster.state.members.collectFirst { case m if m.address == senderAddress => m.uniqueAddress } match { case None => // from unknown node, ignore - logInfo( - "Ignoring TakeOver request from unknown node in BecomingOldest from [{}].", senderAddress) + logInfo("Ignoring TakeOver request from unknown node in BecomingOldest from [{}].", senderAddress) stay case Some(senderUniqueAddress) => previousOldestOption match { case Some(previousOldest) => if (previousOldest == senderUniqueAddress) sender() ! HandOverToMe - else logInfo( - "Ignoring TakeOver request in BecomingOldest from [{}]. Expected previous oldest [{}]", - sender().path.address, previousOldest.address) + else + logInfo("Ignoring TakeOver request in BecomingOldest from [{}]. Expected previous oldest [{}]", + sender().path.address, + previousOldest.address) stay case None => sender() ! HandOverToMe - stay using BecomingOldestData(Some(senderUniqueAddress)) + stay.using(BecomingOldestData(Some(senderUniqueAddress))) } } @@ -663,7 +671,7 @@ class ClusterSingletonManager( previousOldestOption.foreach(node => peer(node.address) ! 
HandOverToMe) setTimer(HandOverRetryTimer, HandOverRetry(count + 1), handOverRetryInterval, repeat = false) stay() - } else if (previousOldestOption forall removed.contains) { + } else if (previousOldestOption.forall(removed.contains)) { // can't send HandOverToMe, previousOldest unknown for new node (or restart) // previous oldest might be down or removed, so no TakeOverFromMe message is received logInfo("Timeout in BecomingOldest. Previous oldest unknown, removed and no TakeOver request.") @@ -684,9 +692,9 @@ class ClusterSingletonManager( } def gotoOldest(): State = { - val singleton = context watch context.actorOf(singletonProps, singletonName) + val singleton = context.watch(context.actorOf(singletonProps, singletonName)) logInfo("Singleton manager starting singleton actor [{}]", singleton.path) - goto(Oldest) using OldestData(singleton) + goto(Oldest).using(OldestData(singleton)) } when(Oldest) { @@ -706,11 +714,11 @@ class ClusterSingletonManager( // send TakeOver request in case the new oldest doesn't know previous oldest peer(a.address) ! 
TakeOverFromMe setTimer(TakeOverRetryTimer, TakeOverRetry(1), handOverRetryInterval, repeat = false) - goto(WasOldest) using WasOldestData(singleton, singletonTerminated, newOldestOption = Some(a)) + goto(WasOldest).using(WasOldestData(singleton, singletonTerminated, newOldestOption = Some(a))) case None => // new oldest will initiate the hand-over setTimer(TakeOverRetryTimer, TakeOverRetry(1), handOverRetryInterval, repeat = false) - goto(WasOldest) using WasOldestData(singleton, singletonTerminated, newOldestOption = None) + goto(WasOldest).using(WasOldestData(singleton, singletonTerminated, newOldestOption = None)) } case Event(HandOverToMe, OldestData(singleton, singletonTerminated)) => @@ -723,7 +731,7 @@ class ClusterSingletonManager( case Event(Terminated(ref), d @ OldestData(singleton, _)) if ref == singleton => logInfo("Singleton actor [{}] was terminated", singleton.path) - stay using d.copy(singletonTerminated = true) + stay.using(d.copy(singletonTerminated = true)) case Event(SelfExiting, _) => selfMemberExited() @@ -731,7 +739,8 @@ class ClusterSingletonManager( sender() ! 
Done // reply to ask stay - case Event(MemberDowned(m), OldestData(singleton, singletonTerminated)) if m.uniqueAddress == cluster.selfUniqueAddress => + case Event(MemberDowned(m), OldestData(singleton, singletonTerminated)) + if m.uniqueAddress == cluster.selfUniqueAddress => if (singletonTerminated) { logInfo("Self downed, stopping ClusterSingletonManager") stop() @@ -764,13 +773,14 @@ class ClusterSingletonManager( logInfo("Self removed, stopping ClusterSingletonManager") stop() - case Event(MemberRemoved(m, _), WasOldestData(singleton, singletonTerminated, Some(newOldest))) if !selfExited && m.uniqueAddress == newOldest => + case Event(MemberRemoved(m, _), WasOldestData(singleton, singletonTerminated, Some(newOldest))) + if !selfExited && m.uniqueAddress == newOldest => addRemoved(m.uniqueAddress) gotoHandingOver(singleton, singletonTerminated, None) case Event(Terminated(ref), d @ WasOldestData(singleton, _, _)) if ref == singleton => logInfo("Singleton actor [{}] was terminated", singleton.path) - stay using d.copy(singletonTerminated = true) + stay.using(d.copy(singletonTerminated = true)) case Event(SelfExiting, _) => selfMemberExited() @@ -778,7 +788,8 @@ class ClusterSingletonManager( sender() ! Done // reply to ask stay - case Event(MemberDowned(m), OldestData(singleton, singletonTerminated)) if m.uniqueAddress == cluster.selfUniqueAddress => + case Event(MemberDowned(m), OldestData(singleton, singletonTerminated)) + if m.uniqueAddress == cluster.selfUniqueAddress => if (singletonTerminated) { logInfo("Self downed, stopping ClusterSingletonManager") stop() @@ -793,10 +804,10 @@ class ClusterSingletonManager( if (singletonTerminated) { handOverDone(handOverTo) } else { - handOverTo foreach { _ ! HandOverInProgress } + handOverTo.foreach { _ ! HandOverInProgress } logInfo("Singleton manager stopping singleton actor [{}]", singleton.path) singleton ! 
terminationMessage - goto(HandingOver) using HandingOverData(singleton, handOverTo) + goto(HandingOver).using(HandingOverData(singleton, handOverTo)) } } @@ -819,21 +830,21 @@ class ClusterSingletonManager( def handOverDone(handOverTo: Option[ActorRef]): State = { val newOldest = handOverTo.map(_.path.address) logInfo("Singleton terminated, hand-over done [{} -> {}]", cluster.selfAddress, newOldest) - handOverTo foreach { _ ! HandOverDone } + handOverTo.foreach { _ ! HandOverDone } memberExitingProgress.trySuccess(Done) if (removed.contains(cluster.selfUniqueAddress)) { logInfo("Self removed, stopping ClusterSingletonManager") stop() } else if (handOverTo.isEmpty) - goto(Younger) using YoungerData(None) + goto(Younger).using(YoungerData(None)) else - goto(End) using EndData + goto(End).using(EndData) } def gotoStopping(singleton: ActorRef): State = { logInfo("Singleton manager stopping singleton actor [{}]", singleton.path) singleton ! terminationMessage - goto(Stopping) using StoppingData(singleton) + goto(Stopping).using(StoppingData(singleton)) } when(Stopping) { diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala index 69b1f64a6f..58e1e3b394 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/ClusterSingletonProxy.scala @@ -36,11 +36,11 @@ object ClusterSingletonProxySettings { * the default configuration `akka.cluster.singleton-proxy`. 
*/ def apply(config: Config): ClusterSingletonProxySettings = - new ClusterSingletonProxySettings( - singletonName = config.getString("singleton-name"), - role = roleOption(config.getString("role")), - singletonIdentificationInterval = config.getDuration("singleton-identification-interval", MILLISECONDS).millis, - bufferSize = config.getInt("buffer-size")) + new ClusterSingletonProxySettings(singletonName = config.getString("singleton-name"), + role = roleOption(config.getString("role")), + singletonIdentificationInterval = + config.getDuration("singleton-identification-interval", MILLISECONDS).millis, + bufferSize = config.getInt("buffer-size")) /** * Java API: Create settings from the default configuration @@ -72,26 +72,26 @@ object ClusterSingletonProxySettings { * when new messages are sent viea the proxy. Use 0 to disable buffering, i.e. messages will be dropped * immediately if the location of the singleton is unknown. */ -final class ClusterSingletonProxySettings( - val singletonName: String, - val role: Option[String], - val dataCenter: Option[DataCenter], - val singletonIdentificationInterval: FiniteDuration, - val bufferSize: Int) extends NoSerializationVerificationNeeded { +final class ClusterSingletonProxySettings(val singletonName: String, + val role: Option[String], + val dataCenter: Option[DataCenter], + val singletonIdentificationInterval: FiniteDuration, + val bufferSize: Int) + extends NoSerializationVerificationNeeded { // for backwards compatibility - def this( - singletonName: String, - role: Option[String], - singletonIdentificationInterval: FiniteDuration, - bufferSize: Int) = + def this(singletonName: String, + role: Option[String], + singletonIdentificationInterval: FiniteDuration, + bufferSize: Int) = this(singletonName, role, None, singletonIdentificationInterval, bufferSize) require(bufferSize >= 0 && bufferSize <= 10000, "bufferSize must be >= 0 and <= 10000") def withSingletonName(name: String): ClusterSingletonProxySettings = 
copy(singletonName = name) - def withRole(role: String): ClusterSingletonProxySettings = copy(role = ClusterSingletonProxySettings.roleOption(role)) + def withRole(role: String): ClusterSingletonProxySettings = + copy(role = ClusterSingletonProxySettings.roleOption(role)) def withRole(role: Option[String]): ClusterSingletonProxySettings = copy(role = role) @@ -99,22 +99,23 @@ final class ClusterSingletonProxySettings( def withDataCenter(dataCenter: Option[DataCenter]): ClusterSingletonProxySettings = copy(dataCenter = dataCenter) - def withSingletonIdentificationInterval(singletonIdentificationInterval: FiniteDuration): ClusterSingletonProxySettings = + def withSingletonIdentificationInterval( + singletonIdentificationInterval: FiniteDuration): ClusterSingletonProxySettings = copy(singletonIdentificationInterval = singletonIdentificationInterval) def withBufferSize(bufferSize: Int): ClusterSingletonProxySettings = copy(bufferSize = bufferSize) - private def copy( - singletonName: String = singletonName, - role: Option[String] = role, - dataCenter: Option[DataCenter] = dataCenter, - singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval, - bufferSize: Int = bufferSize): ClusterSingletonProxySettings = + private def copy(singletonName: String = singletonName, + role: Option[String] = role, + dataCenter: Option[DataCenter] = dataCenter, + singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval, + bufferSize: Int = bufferSize): ClusterSingletonProxySettings = new ClusterSingletonProxySettings(singletonName, role, dataCenter, singletonIdentificationInterval, bufferSize) } object ClusterSingletonProxy { + /** * Scala API: Factory method for `ClusterSingletonProxy` [[akka.actor.Props]]. * @@ -147,7 +148,9 @@ object ClusterSingletonProxy { * Note that this is a best effort implementation: messages can always be lost due to the distributed nature of the * actors involved. 
*/ -final class ClusterSingletonProxy(singletonManagerPath: String, settings: ClusterSingletonProxySettings) extends Actor with ActorLogging { +final class ClusterSingletonProxy(singletonManagerPath: String, settings: ClusterSingletonProxySettings) + extends Actor + with ActorLogging { import settings._ val singletonPath = (singletonManagerPath + "/" + settings.singletonName).split("/") var identifyCounter = 0 @@ -188,11 +191,12 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste member.hasRole(targetDcRole) && role.forall(member.hasRole) def handleInitial(state: CurrentClusterState): Unit = { - trackChange { - () => - membersByAge = immutable.SortedSet.empty(ageOrdering) union state.members.collect { + trackChange { () => + membersByAge = immutable.SortedSet + .empty(ageOrdering) + .union(state.members.collect { case m if m.status == MemberStatus.Up && matchingRole(m) => m - } + }) } } @@ -206,7 +210,9 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste identifyId = createIdentifyId(identifyCounter) singleton = None cancelTimer() - identifyTimer = Some(context.system.scheduler.schedule(0 milliseconds, singletonIdentificationInterval, self, ClusterSingletonProxy.TryToIdentifySingleton)) + identifyTimer = Some( + context.system.scheduler + .schedule(0 milliseconds, singletonIdentificationInterval, self, ClusterSingletonProxy.TryToIdentifySingleton)) } def trackChange(block: () => Unit): Unit = { @@ -266,7 +272,7 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste case ClusterSingletonProxy.TryToIdentifySingleton => identifyTimer match { case Some(_) => - membersByAge.headOption foreach { oldest => + membersByAge.headOption.foreach { oldest => val singletonAddress = RootActorPath(oldest.address) / singletonPath log.debug("Trying to identify singleton at [{}]", singletonAddress) context.actorSelection(singletonAddress) ! 
Identify(identifyId) @@ -285,10 +291,10 @@ final class ClusterSingletonProxy(singletonManagerPath: String, settings: Cluste singleton match { case Some(s) => if (log.isDebugEnabled) - log.debug( - "Forwarding message of type [{}] to current singleton instance at [{}]", - Logging.simpleName(msg.getClass), s.path) - s forward msg + log.debug("Forwarding message of type [{}] to current singleton instance at [{}]", + Logging.simpleName(msg.getClass), + s.path) + s.forward(msg) case None => buffer(msg) } diff --git a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala index 33ac1038a1..3eff105825 100644 --- a/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala +++ b/akka-cluster-tools/src/main/scala/akka/cluster/singleton/protobuf/ClusterSingletonMessageSerializer.scala @@ -20,7 +20,8 @@ import java.io.NotSerializableException * the ClusterSingleton we want to make protobuf representations of them. 
*/ private[akka] class ClusterSingletonMessageSerializer(val system: ExtendedActorSystem) - extends SerializerWithStringManifest with BaseSerializer { + extends SerializerWithStringManifest + with BaseSerializer { private lazy val serialization = SerializationExtension(system) @@ -31,11 +32,15 @@ private[akka] class ClusterSingletonMessageSerializer(val system: ExtendedActorS private val emptyByteArray = Array.empty[Byte] - private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef]( - HandOverToMeManifest -> { _ => HandOverToMe }, - HandOverInProgressManifest -> { _ => HandOverInProgress }, - HandOverDoneManifest -> { _ => HandOverDone }, - TakeOverFromMeManifest -> { _ => TakeOverFromMe }) + private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef](HandOverToMeManifest -> { _ => + HandOverToMe + }, HandOverInProgressManifest -> { _ => + HandOverInProgress + }, HandOverDoneManifest -> { _ => + HandOverDone + }, TakeOverFromMeManifest -> { _ => + TakeOverFromMe + }) override def manifest(obj: AnyRef): String = obj match { case HandOverToMe => HandOverToMeManifest @@ -58,8 +63,9 @@ private[akka] class ClusterSingletonMessageSerializer(val system: ExtendedActorS override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = fromBinaryMap.get(manifest) match { case Some(f) => f(bytes) - case None => throw new NotSerializableException( - s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") + case None => + throw new NotSerializableException( + s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") } } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientHandoverSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientHandoverSpec.scala index 76cb2418d8..ff7a9a2f10 100644 --- 
a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientHandoverSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientHandoverSpec.scala @@ -36,7 +36,11 @@ class ClusterClientHandoverSpecMultiJvmNode1 extends ClusterClientHandoverSpec class ClusterClientHandoverSpecMultiJvmNode2 extends ClusterClientHandoverSpec class ClusterClientHandoverSpecMultiJvmNode3 extends ClusterClientHandoverSpec -class ClusterClientHandoverSpec extends MultiNodeSpec(ClusterClientHandoverSpec) with STMultiNodeSpec with ImplicitSender with MultiNodeClusterSpec { +class ClusterClientHandoverSpec + extends MultiNodeSpec(ClusterClientHandoverSpec) + with STMultiNodeSpec + with ImplicitSender + with MultiNodeClusterSpec { import ClusterClientHandoverSpec._ @@ -44,7 +48,7 @@ class ClusterClientHandoverSpec extends MultiNodeSpec(ClusterClientHandoverSpec) def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) ClusterClientReceptionist(system) } enterBarrier(from.name + "-joined") @@ -70,8 +74,9 @@ class ClusterClientHandoverSpec extends MultiNodeSpec(ClusterClientHandoverSpec) "establish connection to first node" in { runOn(client) { - clusterClient = system.actorOf(ClusterClient.props( - ClusterClientSettings(system).withInitialContacts(initialContacts)), "client1") + clusterClient = + system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)), + "client1") clusterClient ! 
ClusterClient.Send("/user/testService", "hello", localAffinity = true) expectMsgType[String](3.seconds) should be("hello") } @@ -90,7 +95,7 @@ class ClusterClientHandoverSpec extends MultiNodeSpec(ClusterClientHandoverSpec) "remove first node from the cluster" in { runOn(first) { - Cluster(system) leave node(first).address + Cluster(system).leave(node(first).address) } runOn(second) { diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala index 1277499c91..023cc3b784 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientSpec.scala @@ -7,7 +7,16 @@ package akka.cluster.client import language.postfixOps import scala.concurrent.duration._ import com.typesafe.config.ConfigFactory -import akka.actor.{ Actor, ActorPath, ActorRef, ActorSystem, Address, ExtendedActorSystem, NoSerializationVerificationNeeded, Props } +import akka.actor.{ + Actor, + ActorPath, + ActorRef, + ActorSystem, + Address, + ExtendedActorSystem, + NoSerializationVerificationNeeded, + Props +} import akka.cluster.Cluster import akka.cluster.client.ClusterClientSpec.TestClientListener.LatestContactPoints import akka.cluster.client.ClusterClientSpec.TestReceptionistListener.LatestClusterClients @@ -54,7 +63,7 @@ object ClusterClientSpec extends MultiNodeConfig { case "shutdown" => context.system.terminate() case msg => - testActor forward msg + testActor.forward(msg) sender() ! 
Reply(msg + "-ack", Cluster(context.system).selfAddress) } } @@ -157,7 +166,7 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) createReceptionist() } enterBarrier(from.name + "-joined") @@ -200,8 +209,8 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod "communicate to actor on any node in cluster" in within(10 seconds) { runOn(client) { - val c = system.actorOf(ClusterClient.props( - ClusterClientSettings(system).withInitialContacts(initialContacts)), "client1") + val c = system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)), + "client1") c ! ClusterClient.Send("/user/testService", "hello", localAffinity = true) expectMsgType[Reply].msg should be("hello-ack") system.stop(c) @@ -216,8 +225,8 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod "work with ask" in within(10 seconds) { runOn(client) { import akka.pattern.ask - val c = system.actorOf(ClusterClient.props( - ClusterClientSettings(system).withInitialContacts(initialContacts)), "ask-client") + val c = system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)), + "ask-client") implicit val timeout = Timeout(remaining) val reply = c ? ClusterClient.Send("/user/testService", "hello-request", localAffinity = true) Await.result(reply.mapTo[Reply], remaining).msg should be("hello-request-ack") @@ -254,8 +263,8 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod //#client runOn(client) { - val c = system.actorOf(ClusterClient.props( - ClusterClientSettings(system).withInitialContacts(initialContacts)), "client") + val c = system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)), + "client") c ! 
ClusterClient.Send("/user/serviceA", "hello", localAffinity = true) c ! ClusterClient.SendToAll("/user/serviceB", "hi") } @@ -268,11 +277,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod lazy val docOnly = { //not used, only demo //#initialContacts - val initialContacts = Set( - ActorPath.fromString("akka.tcp://OtherSys@host1:2552/system/receptionist"), - ActorPath.fromString("akka.tcp://OtherSys@host2:2552/system/receptionist")) - val settings = ClusterClientSettings(system) - .withInitialContacts(initialContacts) + val initialContacts = Set(ActorPath.fromString("akka.tcp://OtherSys@host1:2552/system/receptionist"), + ActorPath.fromString("akka.tcp://OtherSys@host2:2552/system/receptionist")) + val settings = ClusterClientSettings(system).withInitialContacts(initialContacts) //#initialContacts } @@ -307,7 +314,8 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod log.info("Testing that the receptionist has just one client") val l = system.actorOf(Props(classOf[TestReceptionistListener], r), "reporter-receptionist-listener") - val expectedClient = Await.result(system.actorSelection(node(client) / "user" / "client").resolveOne(), timeout.duration) + val expectedClient = + Await.result(system.actorSelection(node(client) / "user" / "client").resolveOne(), timeout.duration) awaitAssert({ val probe = TestProbe() l.tell(TestReceptionistListener.GetLatestClusterClients, probe.ref) @@ -333,8 +341,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod testConductor.blackhole(client, role, Direction.Both).await } - val c = system.actorOf(ClusterClient.props( - ClusterClientSettings(system).withInitialContacts(expectedContacts + unreachableContact)), "client5") + val c = system.actorOf( + ClusterClient.props(ClusterClientSettings(system).withInitialContacts(expectedContacts + unreachableContact)), + "client5") val probe = TestProbe() c.tell(SubscribeContactPoints, probe.ref) @@ 
-360,8 +369,9 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod enterBarrier("service2-replicated") runOn(client) { - val client = system.actorOf(ClusterClient.props( - ClusterClientSettings(system).withInitialContacts(initialContacts)), "client2") + val client = + system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)), + "client2") client ! ClusterClient.Send("/user/service2", "bonjour", localAffinity = true) val reply = expectMsgType[Reply] @@ -399,8 +409,8 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod "re-establish connection to receptionist after partition" in within(30 seconds) { runOn(client) { - val c = system.actorOf(ClusterClient.props( - ClusterClientSettings(system).withInitialContacts(initialContacts)), "client3") + val c = system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)), + "client3") c ! ClusterClient.Send("/user/service2", "bonjour2", localAffinity = true) val reply = expectMsgType[Reply] @@ -444,16 +454,17 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod val remainingContacts = remainingServerRoleNames.map { r => node(r) / "system" / "receptionist" } - val c = system.actorOf(ClusterClient.props( - ClusterClientSettings(system).withInitialContacts(remainingContacts)), "client4") + val c = + system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(remainingContacts)), + "client4") c ! 
ClusterClient.Send("/user/service2", "bonjour4", localAffinity = true) expectMsg(10.seconds, Reply("bonjour4-ack", remainingContacts.head.address)) val logSource = s"${system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress}/user/client4" - EventFilter.info(start = "Connected to", source = logSource, occurrences = 1) intercept { - EventFilter.info(start = "Lost contact", source = logSource, occurrences = 1) intercept { + EventFilter.info(start = "Connected to", source = logSource, occurrences = 1).intercept { + EventFilter.info(start = "Lost contact", source = logSource, occurrences = 1).intercept { // shutdown server testConductor.shutdown(remainingServerRoleNames.head).await } @@ -470,10 +481,8 @@ class ClusterClientSpec extends MultiNodeSpec(ClusterClientSpec) with STMultiNod Await.ready(system.whenTerminated, 20.seconds) // start new system on same port val port = Cluster(system).selfAddress.port.get - val sys2 = ActorSystem( - system.name, - ConfigFactory.parseString( - s""" + val sys2 = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.artery.canonical.port=$port akka.remote.netty.tcp.port=$port """).withFallback(system.settings.config)) diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientStopSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientStopSpec.scala index b22e01819c..9d57cd5fdb 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientStopSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/client/ClusterClientStopSpec.scala @@ -8,7 +8,7 @@ import akka.actor.{ Actor, Props } import akka.cluster.Cluster import akka.cluster.pubsub.{ DistributedPubSub, DistributedPubSubMediator } import akka.remote.testconductor.RoleName -import akka.remote.testkit.{ STMultiNodeSpec, MultiNodeSpec, MultiNodeConfig } +import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec, STMultiNodeSpec } import akka.testkit.{ EventFilter, 
ImplicitSender } import com.typesafe.config.ConfigFactory import scala.concurrent.Await @@ -51,7 +51,7 @@ class ClusterClientStopSpec extends MultiNodeSpec(ClusterClientStopSpec) with ST def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) ClusterClientReceptionist(system) } enterBarrier(from.name + "-joined") @@ -86,8 +86,8 @@ class ClusterClientStopSpec extends MultiNodeSpec(ClusterClientStopSpec) with ST "stop if re-establish fails for too long time" in within(20.seconds) { runOn(client) { - val c = system.actorOf(ClusterClient.props( - ClusterClientSettings(system).withInitialContacts(initialContacts)), "client1") + val c = system.actorOf(ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)), + "client1") c ! ClusterClient.Send("/user/testService", "hello", localAffinity = true) expectMsgType[String](3.seconds) should be("hello") enterBarrier("was-in-contact") diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala index 2855b2c739..91e1725632 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubMediatorSpec.scala @@ -125,7 +125,10 @@ class DistributedPubSubMediatorMultiJvmNode1 extends DistributedPubSubMediatorSp class DistributedPubSubMediatorMultiJvmNode2 extends DistributedPubSubMediatorSpec class DistributedPubSubMediatorMultiJvmNode3 extends DistributedPubSubMediatorSpec -class DistributedPubSubMediatorSpec extends MultiNodeSpec(DistributedPubSubMediatorSpec) with STMultiNodeSpec with ImplicitSender { +class DistributedPubSubMediatorSpec + extends MultiNodeSpec(DistributedPubSubMediatorSpec) + with STMultiNodeSpec + with ImplicitSender { import 
DistributedPubSubMediatorSpec._ import DistributedPubSubMediatorSpec.TestChatUser._ import DistributedPubSubMediator._ @@ -134,7 +137,7 @@ class DistributedPubSubMediatorSpec extends MultiNodeSpec(DistributedPubSubMedia def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) createMediator() } enterBarrier(from.name + "-joined") @@ -558,8 +561,10 @@ class DistributedPubSubMediatorSpec extends MultiNodeSpec(DistributedPubSubMedia runOn(first) { mediator ! GetTopics expectMsgPF() { - case CurrentTopics(topics) if topics.contains("topic_a1") - && topics.contains("topic_a2") => true + case CurrentTopics(topics) + if topics.contains("topic_a1") + && topics.contains("topic_a2") => + true } } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubRestartSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubRestartSpec.scala index 3e951e2f88..220473052d 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubRestartSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/pubsub/DistributedPubSubRestartSpec.scala @@ -50,7 +50,10 @@ class DistributedPubSubRestartMultiJvmNode1 extends DistributedPubSubRestartSpec class DistributedPubSubRestartMultiJvmNode2 extends DistributedPubSubRestartSpec class DistributedPubSubRestartMultiJvmNode3 extends DistributedPubSubRestartSpec -class DistributedPubSubRestartSpec extends MultiNodeSpec(DistributedPubSubRestartSpec) with STMultiNodeSpec with ImplicitSender { +class DistributedPubSubRestartSpec + extends MultiNodeSpec(DistributedPubSubRestartSpec) + with STMultiNodeSpec + with ImplicitSender { import DistributedPubSubRestartSpec._ import DistributedPubSubMediator._ @@ -58,7 +61,7 @@ class DistributedPubSubRestartSpec extends MultiNodeSpec(DistributedPubSubRestar def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - 
Cluster(system) join node(to).address + Cluster(system).join(node(to).address) createMediator() } enterBarrier(from.name + "-joined") @@ -136,8 +139,7 @@ class DistributedPubSubRestartSpec extends MultiNodeSpec(DistributedPubSubRestar Await.result(system.whenTerminated, 10.seconds) val newSystem = { val port = Cluster(system).selfAddress.port.get - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka.remote.artery.canonical.port=$port akka.remote.netty.tcp.port=$port """).withFallback(system.settings.config) diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala index 44d0cbe4a8..3c3ae8fecd 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerChaosSpec.scala @@ -39,6 +39,7 @@ object ClusterSingletonManagerChaosSpec extends MultiNodeConfig { """)) case object EchoStarted + /** * The singleton actor */ @@ -59,30 +60,32 @@ class ClusterSingletonManagerChaosMultiJvmNode5 extends ClusterSingletonManagerC class ClusterSingletonManagerChaosMultiJvmNode6 extends ClusterSingletonManagerChaosSpec class ClusterSingletonManagerChaosMultiJvmNode7 extends ClusterSingletonManagerChaosSpec -class ClusterSingletonManagerChaosSpec extends MultiNodeSpec(ClusterSingletonManagerChaosSpec) with STMultiNodeSpec with ImplicitSender { +class ClusterSingletonManagerChaosSpec + extends MultiNodeSpec(ClusterSingletonManagerChaosSpec) + with STMultiNodeSpec + with ImplicitSender { import ClusterSingletonManagerChaosSpec._ override def initialParticipants = roles.size def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) createSingleton() } } def 
createSingleton(): ActorRef = { system.actorOf( - ClusterSingletonManager.props( - singletonProps = Props(classOf[Echo], testActor), - terminationMessage = PoisonPill, - settings = ClusterSingletonManagerSettings(system)), + ClusterSingletonManager.props(singletonProps = Props(classOf[Echo], testActor), + terminationMessage = PoisonPill, + settings = ClusterSingletonManagerSettings(system)), name = "echo") } def crash(roles: RoleName*): Unit = { runOn(controller) { - roles foreach { r => + roles.foreach { r => log.info("Shutdown [{}]", node(r).address) testConductor.exit(r, 0).await } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala index 5568d4aa57..083d05fb48 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerDownedSpec.scala @@ -35,6 +35,7 @@ object ClusterSingletonManagerDownedSpec extends MultiNodeConfig { case object EchoStarted case object EchoStopped + /** * The singleton actor */ @@ -55,7 +56,10 @@ class ClusterSingletonManagerDownedMultiJvmNode1 extends ClusterSingletonManager class ClusterSingletonManagerDownedMultiJvmNode2 extends ClusterSingletonManagerDownedSpec class ClusterSingletonManagerDownedMultiJvmNode3 extends ClusterSingletonManagerDownedSpec -class ClusterSingletonManagerDownedSpec extends MultiNodeSpec(ClusterSingletonManagerDownedSpec) with STMultiNodeSpec with ImplicitSender { +class ClusterSingletonManagerDownedSpec + extends MultiNodeSpec(ClusterSingletonManagerDownedSpec) + with STMultiNodeSpec + with ImplicitSender { import ClusterSingletonManagerDownedSpec._ override def initialParticipants = roles.size @@ -71,10 +75,9 @@ class ClusterSingletonManagerDownedSpec extends MultiNodeSpec(ClusterSingletonMa def 
createSingleton(): ActorRef = { system.actorOf( - ClusterSingletonManager.props( - singletonProps = Props(classOf[Echo], testActor), - terminationMessage = PoisonPill, - settings = ClusterSingletonManagerSettings(system)), + ClusterSingletonManager.props(singletonProps = Props(classOf[Echo], testActor), + terminationMessage = PoisonPill, + settings = ClusterSingletonManagerSettings(system)), name = "echo") } diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala index e642d77d29..9668febb25 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerLeaveSpec.scala @@ -30,6 +30,7 @@ object ClusterSingletonManagerLeaveSpec extends MultiNodeConfig { """)) case object EchoStarted + /** * The singleton actor */ @@ -55,7 +56,10 @@ class ClusterSingletonManagerLeaveMultiJvmNode1 extends ClusterSingletonManagerL class ClusterSingletonManagerLeaveMultiJvmNode2 extends ClusterSingletonManagerLeaveSpec class ClusterSingletonManagerLeaveMultiJvmNode3 extends ClusterSingletonManagerLeaveSpec -class ClusterSingletonManagerLeaveSpec extends MultiNodeSpec(ClusterSingletonManagerLeaveSpec) with STMultiNodeSpec with ImplicitSender { +class ClusterSingletonManagerLeaveSpec + extends MultiNodeSpec(ClusterSingletonManagerLeaveSpec) + with STMultiNodeSpec + with ImplicitSender { import ClusterSingletonManagerLeaveSpec._ override def initialParticipants = roles.size @@ -64,28 +68,26 @@ class ClusterSingletonManagerLeaveSpec extends MultiNodeSpec(ClusterSingletonMan def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) createSingleton() } } def createSingleton(): ActorRef = { system.actorOf( - 
ClusterSingletonManager.props( - singletonProps = Props(classOf[Echo], testActor), - terminationMessage = "stop", - settings = ClusterSingletonManagerSettings(system)), + ClusterSingletonManager.props(singletonProps = Props(classOf[Echo], testActor), + terminationMessage = "stop", + settings = ClusterSingletonManagerSettings(system)), name = "echo") } val echoProxyTerminatedProbe = TestProbe() lazy val echoProxy: ActorRef = { - echoProxyTerminatedProbe.watch(system.actorOf( - ClusterSingletonProxy.props( - singletonManagerPath = "/user/echo", - settings = ClusterSingletonProxySettings(system)), - name = "echoProxy")) + echoProxyTerminatedProbe.watch( + system.actorOf(ClusterSingletonProxy.props(singletonManagerPath = "/user/echo", + settings = ClusterSingletonProxySettings(system)), + name = "echoProxy")) } "Leaving ClusterSingletonManager" must { diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala index 8d329766cf..e37252faca 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerSpec.scala @@ -41,8 +41,7 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig { akka.cluster.auto-down-unreachable-after = 0s """)) - nodeConfig(first, second, third, fourth, fifth, sixth)( - ConfigFactory.parseString("akka.cluster.roles =[worker]")) + nodeConfig(first, second, third, fourth, fifth, sixth)(ConfigFactory.parseString("akka.cluster.roles =[worker]")) //#singleton-message-classes object PointToPointChannel { @@ -79,7 +78,7 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig { case UnregisterConsumer => log.info("UnexpectedUnregistration: [{}]", sender().path) sender() ! UnexpectedUnregistration - context stop self + context.stop(self) case Reset => sender() ! 
ResetOk case msg => // no consumer, drop } @@ -92,11 +91,11 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig { case UnregisterConsumer => log.info("UnexpectedUnregistration: [{}], expected [{}]", sender().path, consumer.path) sender() ! UnexpectedUnregistration - context stop self + context.stop(self) case RegisterConsumer => log.info("Unexpected RegisterConsumer [{}], active consumer [{}]", sender().path, consumer.path) sender() ! UnexpectedRegistration - context stop self + context.stop(self) case Reset => context.become(idle) sender() ! ResetOk @@ -146,7 +145,7 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig { queue ! UnregisterConsumer case UnregistrationOk => stoppedBeforeUnregistration = false - context stop self + context.stop(self) case Ping => sender() ! Pong //#consumer-end @@ -164,7 +163,10 @@ class ClusterSingletonManagerMultiJvmNode6 extends ClusterSingletonManagerSpec class ClusterSingletonManagerMultiJvmNode7 extends ClusterSingletonManagerSpec class ClusterSingletonManagerMultiJvmNode8 extends ClusterSingletonManagerSpec -class ClusterSingletonManagerSpec extends MultiNodeSpec(ClusterSingletonManagerSpec) with STMultiNodeSpec with ImplicitSender { +class ClusterSingletonManagerSpec + extends MultiNodeSpec(ClusterSingletonManagerSpec) + with STMultiNodeSpec + with ImplicitSender { import ClusterSingletonManagerSpec._ import ClusterSingletonManagerSpec.PointToPointChannel._ @@ -191,7 +193,7 @@ class ClusterSingletonManagerSpec extends MultiNodeSpec(ClusterSingletonManagerS def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) if (Cluster(system).selfRoles.contains("worker")) { createSingleton() createSingletonProxy() @@ -213,10 +215,9 @@ class ClusterSingletonManagerSpec extends MultiNodeSpec(ClusterSingletonManagerS def createSingleton(): ActorRef = { //#create-singleton-manager system.actorOf( - ClusterSingletonManager.props( - 
singletonProps = Props(classOf[Consumer], queue, testActor), - terminationMessage = End, - settings = ClusterSingletonManagerSettings(system).withRole("worker")), + ClusterSingletonManager.props(singletonProps = Props(classOf[Consumer], queue, testActor), + terminationMessage = End, + settings = ClusterSingletonManagerSettings(system).withRole("worker")), name = "consumer") //#create-singleton-manager } @@ -224,9 +225,8 @@ class ClusterSingletonManagerSpec extends MultiNodeSpec(ClusterSingletonManagerS def createSingletonProxy(): ActorRef = { //#create-singleton-proxy val proxy = system.actorOf( - ClusterSingletonProxy.props( - singletonManagerPath = "/user/consumer", - settings = ClusterSingletonProxySettings(system).withRole("worker")), + ClusterSingletonProxy.props(singletonManagerPath = "/user/consumer", + settings = ClusterSingletonProxySettings(system).withRole("worker")), name = "consumerProxy") //#create-singleton-proxy proxy @@ -235,11 +235,9 @@ class ClusterSingletonManagerSpec extends MultiNodeSpec(ClusterSingletonManagerS def createSingletonProxyDc(): ActorRef = { //#create-singleton-proxy-dc val proxyDcB = system.actorOf( - ClusterSingletonProxy.props( - singletonManagerPath = "/user/consumer", - settings = ClusterSingletonProxySettings(system) - .withRole("worker") - .withDataCenter("B")), + ClusterSingletonProxy.props(singletonManagerPath = "/user/consumer", + settings = + ClusterSingletonProxySettings(system).withRole("worker").withDataCenter("B")), name = "consumerProxyDcB") //#create-singleton-proxy-dc proxyDcB @@ -313,7 +311,7 @@ class ClusterSingletonManagerSpec extends MultiNodeSpec(ClusterSingletonManagerS runOn(controller) { queue ! 
Reset expectMsg(ResetOk) - roles foreach { r => + roles.foreach { r => log.info("Shutdown [{}]", node(r).address) testConductor.exit(r, 0).await } @@ -386,7 +384,7 @@ class ClusterSingletonManagerSpec extends MultiNodeSpec(ClusterSingletonManagerS val newOldestRole = second runOn(leaveRole) { - Cluster(system) leave node(leaveRole).address + Cluster(system).leave(node(leaveRole).address) } verifyRegistration(second) diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala index b6de14cbf1..d4b4f2c3de 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/ClusterSingletonManagerStartupSpec.scala @@ -31,6 +31,7 @@ object ClusterSingletonManagerStartupSpec extends MultiNodeConfig { """)) case object EchoStarted + /** * The singleton actor */ @@ -46,33 +47,33 @@ class ClusterSingletonManagerStartupMultiJvmNode1 extends ClusterSingletonManage class ClusterSingletonManagerStartupMultiJvmNode2 extends ClusterSingletonManagerStartupSpec class ClusterSingletonManagerStartupMultiJvmNode3 extends ClusterSingletonManagerStartupSpec -class ClusterSingletonManagerStartupSpec extends MultiNodeSpec(ClusterSingletonManagerStartupSpec) with STMultiNodeSpec with ImplicitSender { +class ClusterSingletonManagerStartupSpec + extends MultiNodeSpec(ClusterSingletonManagerStartupSpec) + with STMultiNodeSpec + with ImplicitSender { import ClusterSingletonManagerStartupSpec._ override def initialParticipants = roles.size def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - Cluster(system) join node(to).address + Cluster(system).join(node(to).address) createSingleton() } } def createSingleton(): ActorRef = { system.actorOf( - ClusterSingletonManager.props( - singletonProps = 
Props(classOf[Echo], testActor), - terminationMessage = PoisonPill, - settings = ClusterSingletonManagerSettings(system)), + ClusterSingletonManager.props(singletonProps = Props(classOf[Echo], testActor), + terminationMessage = PoisonPill, + settings = ClusterSingletonManagerSettings(system)), name = "echo") } lazy val echoProxy: ActorRef = { - system.actorOf( - ClusterSingletonProxy.props( - singletonManagerPath = "/user/echo", - settings = ClusterSingletonProxySettings(system)), - name = "echoProxy") + system.actorOf(ClusterSingletonProxy.props(singletonManagerPath = "/user/echo", + settings = ClusterSingletonProxySettings(system)), + name = "echoProxy") } "Startup of Cluster Singleton" must { diff --git a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala index 4f5b1566ba..34cae2c2b2 100644 --- a/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala +++ b/akka-cluster-tools/src/multi-jvm/scala/akka/cluster/singleton/MultiDcSingletonManagerSpec.scala @@ -64,7 +64,10 @@ object MultiDcSingleton { case class Pong(fromDc: String, fromAddress: Address, roles: Set[String]) } -abstract class MultiDcSingletonManagerSpec extends MultiNodeSpec(MultiDcSingletonManagerSpec) with STMultiNodeSpec with ImplicitSender { +abstract class MultiDcSingletonManagerSpec + extends MultiNodeSpec(MultiDcSingletonManagerSpec) + with STMultiNodeSpec + with ImplicitSender { import MultiDcSingletonManagerSpec._ override def initialParticipants = roles.size @@ -79,17 +82,14 @@ abstract class MultiDcSingletonManagerSpec extends MultiNodeSpec(MultiDcSingleto "start a singleton instance for each data center" in { runOn(first, second, third) { - system.actorOf( - ClusterSingletonManager.props( - Props[MultiDcSingleton](), - PoisonPill, - ClusterSingletonManagerSettings(system).withRole(worker)), - 
"singletonManager") + system.actorOf(ClusterSingletonManager.props(Props[MultiDcSingleton](), + PoisonPill, + ClusterSingletonManagerSettings(system).withRole(worker)), + "singletonManager") } - val proxy = system.actorOf(ClusterSingletonProxy.props( - "/user/singletonManager", - ClusterSingletonProxySettings(system).withRole(worker))) + val proxy = system.actorOf( + ClusterSingletonProxy.props("/user/singletonManager", ClusterSingletonProxySettings(system).withRole(worker))) enterBarrier("managers-started") @@ -112,9 +112,9 @@ abstract class MultiDcSingletonManagerSpec extends MultiNodeSpec(MultiDcSingleto "be able to use proxy across different data centers" in { runOn(third) { - val proxy = system.actorOf(ClusterSingletonProxy.props( - "/user/singletonManager", - ClusterSingletonProxySettings(system).withRole(worker).withDataCenter("one"))) + val proxy = system.actorOf( + ClusterSingletonProxy.props("/user/singletonManager", + ClusterSingletonProxySettings(system).withRole(worker).withDataCenter("one"))) proxy ! 
MultiDcSingleton.Ping val pong = expectMsgType[MultiDcSingleton.Pong](10.seconds) pong.fromDc should ===("one") diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializerSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializerSpec.scala index a7289399a2..5926bd9040 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializerSpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/client/protobuf/ClusterClientMessageSerializerSpec.scala @@ -21,10 +21,9 @@ class ClusterClientMessageSerializerSpec extends AkkaSpec { "ClusterClientMessages" must { "be serializable" in { - val contactPoints = Vector( - "akka.tcp://system@node-1:2552/system/receptionist", - "akka.tcp://system@node-2:2552/system/receptionist", - "akka.tcp://system@node-3:2552/system/receptionist") + val contactPoints = Vector("akka.tcp://system@node-1:2552/system/receptionist", + "akka.tcp://system@node-2:2552/system/receptionist", + "akka.tcp://system@node-3:2552/system/receptionist") checkSerialization(Contacts(contactPoints)) checkSerialization(GetContacts) checkSerialization(Heartbeat) diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/DistributedPubSubMediatorDeadLettersSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/DistributedPubSubMediatorDeadLettersSpec.scala index a03357fb8c..21e81769ed 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/DistributedPubSubMediatorDeadLettersSpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/DistributedPubSubMediatorDeadLettersSpec.scala @@ -30,8 +30,8 @@ trait DeadLettersProbe { this: TestKitBase => } class DistributedPubSubMediatorSendingToDeadLettersSpec - extends AkkaSpec(DistributedPubSubMediatorDeadLettersSpec.config(sendToDeadLettersWhenNoSubscribers = true)) - with DeadLettersProbe { + extends 
AkkaSpec(DistributedPubSubMediatorDeadLettersSpec.config(sendToDeadLettersWhenNoSubscribers = true)) + with DeadLettersProbe { val mediator = DistributedPubSub(system).mediator val msg = "hello" @@ -78,8 +78,8 @@ class DistributedPubSubMediatorSendingToDeadLettersSpec } class DistributedPubSubMediatorNotSendingToDeadLettersSpec - extends AkkaSpec(DistributedPubSubMediatorDeadLettersSpec.config(sendToDeadLettersWhenNoSubscribers = false)) - with DeadLettersProbe { + extends AkkaSpec(DistributedPubSubMediatorDeadLettersSpec.config(sendToDeadLettersWhenNoSubscribers = false)) + with DeadLettersProbe { val mediator = DistributedPubSub(system).mediator val msg = "hello" diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/DistributedPubSubMediatorRouterSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/DistributedPubSubMediatorRouterSpec.scala index c6fe3d1093..ce13f5f6bd 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/DistributedPubSubMediatorRouterSpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/DistributedPubSubMediatorRouterSpec.scala @@ -90,25 +90,29 @@ trait DistributedPubSubMediatorRouterSpec { this: WordSpecLike with TestKit with } class DistributedPubSubMediatorWithRandomRouterSpec - extends AkkaSpec(DistributedPubSubMediatorRouterSpec.config("random")) - with DistributedPubSubMediatorRouterSpec with DefaultTimeout with ImplicitSender { + extends AkkaSpec(DistributedPubSubMediatorRouterSpec.config("random")) + with DistributedPubSubMediatorRouterSpec + with DefaultTimeout + with ImplicitSender { val mediator = DistributedPubSub(system).mediator "DistributedPubSubMediator when sending wrapped message" must { val msg = WrappedMessage("hello") - behave like nonUnwrappingPubSub(mediator, testActor, msg) + behave.like(nonUnwrappingPubSub(mediator, testActor, msg)) } "DistributedPubSubMediator when sending unwrapped message" must { val msg = UnwrappedMessage("hello") - behave like 
nonUnwrappingPubSub(mediator, testActor, msg) + behave.like(nonUnwrappingPubSub(mediator, testActor, msg)) } } class DistributedPubSubMediatorWithHashRouterSpec - extends AkkaSpec(DistributedPubSubMediatorRouterSpec.config("consistent-hashing")) - with DistributedPubSubMediatorRouterSpec with DefaultTimeout with ImplicitSender { + extends AkkaSpec(DistributedPubSubMediatorRouterSpec.config("consistent-hashing")) + with DistributedPubSubMediatorRouterSpec + with DefaultTimeout + with ImplicitSender { "DistributedPubSubMediator with Consistent Hash router" must { "not be allowed" when { @@ -119,8 +123,10 @@ class DistributedPubSubMediatorWithHashRouterSpec } "constructed by settings" in { intercept[IllegalArgumentException] { - val config = ConfigFactory.parseString(DistributedPubSubMediatorRouterSpec.config("random")) - .withFallback(system.settings.config).getConfig("akka.cluster.pub-sub") + val config = ConfigFactory + .parseString(DistributedPubSubMediatorRouterSpec.config("random")) + .withFallback(system.settings.config) + .getConfig("akka.cluster.pub-sub") DistributedPubSubSettings(config).withRoutingLogic(ConsistentHashingRoutingLogic(system)) } } diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala index a821810907..741789cda7 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/pubsub/protobuf/DistributedPubSubMessageSerializerSpec.scala @@ -4,7 +4,7 @@ package akka.cluster.pubsub.protobuf -import akka.actor.{ ExtendedActorSystem, Address } +import akka.actor.{ Address, ExtendedActorSystem } import akka.testkit.AkkaSpec import akka.cluster.pubsub.DistributedPubSubMediator._ import akka.cluster.pubsub.DistributedPubSubMediator.Internal._ @@ -32,10 +32,11 @@ class 
DistributedPubSubMessageSerializerSpec extends AkkaSpec { val u3 = system.actorOf(Props.empty, "u3") val u4 = system.actorOf(Props.empty, "u4") checkSerialization(Status(Map(address1 -> 3, address2 -> 17, address3 -> 5), isReplyToStatus = true)) - checkSerialization(Delta(List( - Bucket(address1, 3, TreeMap("/user/u1" -> ValueHolder(2, Some(u1)), "/user/u2" -> ValueHolder(3, Some(u2)))), - Bucket(address2, 17, TreeMap("/user/u3" -> ValueHolder(17, Some(u3)))), - Bucket(address3, 5, TreeMap("/user/u4" -> ValueHolder(4, Some(u4)), "/user/u5" -> ValueHolder(5, None)))))) + checkSerialization( + Delta(List( + Bucket(address1, 3, TreeMap("/user/u1" -> ValueHolder(2, Some(u1)), "/user/u2" -> ValueHolder(3, Some(u2)))), + Bucket(address2, 17, TreeMap("/user/u3" -> ValueHolder(17, Some(u3)))), + Bucket(address3, 5, TreeMap("/user/u4" -> ValueHolder(4, Some(u4)), "/user/u5" -> ValueHolder(5, None)))))) checkSerialization(Send("/user/u3", "hello", localAffinity = true)) checkSerialization(SendToAll("/user/u3", "hello", allButSelf = true)) checkSerialization(Publish("mytopic", "hello")) diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala index ab45001640..bec2b9436b 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonLeavingSpeedSpec.scala @@ -39,7 +39,9 @@ object ClusterSingletonLeavingSpeedSpec { } } -class ClusterSingletonLeavingSpeedSpec extends AkkaSpec(""" +class ClusterSingletonLeavingSpeedSpec + extends AkkaSpec( + """ akka.loglevel = DEBUG akka.actor.provider = akka.cluster.ClusterActorRefProvider akka.cluster.auto-down-unreachable-after = 2s @@ -72,10 +74,9 @@ class ClusterSingletonLeavingSpeedSpec extends AkkaSpec(""" def join(from: ActorSystem, to: ActorSystem, probe: ActorRef): 
Unit = { from.actorOf( - ClusterSingletonManager.props( - singletonProps = TheSingleton.props(probe), - terminationMessage = PoisonPill, - settings = ClusterSingletonManagerSettings(from)), + ClusterSingletonManager.props(singletonProps = TheSingleton.props(probe), + terminationMessage = PoisonPill, + settings = ClusterSingletonManagerSettings(from)), name = "echo") Cluster(from).join(Cluster(to).selfAddress) @@ -122,7 +123,8 @@ class ClusterSingletonLeavingSpeedSpec extends AkkaSpec(""" } } - println(s"Singleton $i stopped in ${stoppedDuration.toMillis} ms, started in ${startedDuration.toMillis} ms, " + + println( + s"Singleton $i stopped in ${stoppedDuration.toMillis} ms, started in ${startedDuration.toMillis} ms, " + s"diff ${(startedDuration - stoppedDuration).toMillis} ms") (stoppedDuration, startedDuration) @@ -130,7 +132,8 @@ class ClusterSingletonLeavingSpeedSpec extends AkkaSpec(""" durations.zipWithIndex.foreach { case ((stoppedDuration, startedDuration), i) => - println(s"Singleton $i stopped in ${stoppedDuration.toMillis} ms, started in ${startedDuration.toMillis} ms, " + + println( + s"Singleton $i stopped in ${stoppedDuration.toMillis} ms, started in ${startedDuration.toMillis} ms, " + s"diff ${(startedDuration - stoppedDuration).toMillis} ms") } @@ -141,4 +144,3 @@ class ClusterSingletonLeavingSpeedSpec extends AkkaSpec(""" systems.foreach(shutdown(_)) } } - diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala index 46995df172..3548b68147 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonProxySpec.scala @@ -5,7 +5,7 @@ package akka.cluster.singleton import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } -import akka.testkit.{ TestProbe, TestKit } +import akka.testkit.{ 
TestKit, TestProbe } import akka.actor._ import com.typesafe.config.ConfigFactory import akka.cluster.Cluster @@ -37,23 +37,22 @@ class ClusterSingletonProxySpec extends WordSpecLike with Matchers with BeforeAn object ClusterSingletonProxySpec { class ActorSys(name: String = "ClusterSingletonProxySystem", joinTo: Option[Address] = None) - extends TestKit(ActorSystem(name, ConfigFactory.parseString(cfg))) { + extends TestKit(ActorSystem(name, ConfigFactory.parseString(cfg))) { val cluster = Cluster(system) cluster.join(joinTo.getOrElse(cluster.selfAddress)) cluster.registerOnMemberUp { system.actorOf( - ClusterSingletonManager.props( - singletonProps = Props[Singleton], - terminationMessage = PoisonPill, - settings = ClusterSingletonManagerSettings(system).withRemovalMargin(5.seconds)), + ClusterSingletonManager.props(singletonProps = Props[Singleton], + terminationMessage = PoisonPill, + settings = ClusterSingletonManagerSettings(system).withRemovalMargin(5.seconds)), name = "singletonManager") } - val proxy = system.actorOf(ClusterSingletonProxy.props( - "user/singletonManager", - settings = ClusterSingletonProxySettings(system)), s"singletonProxy-${cluster.selfAddress.port.getOrElse(0)}") + val proxy = system.actorOf( + ClusterSingletonProxy.props("user/singletonManager", settings = ClusterSingletonProxySettings(system)), + s"singletonProxy-${cluster.selfAddress.port.getOrElse(0)}") def testProxy(msg: String): Unit = { val probe = TestProbe() diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala index 81478bc822..a900cb09f3 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestart2Spec.scala @@ -47,24 +47,22 @@ class ClusterSingletonRestart2Spec extends AkkaSpec(""" val sys1 = 
ActorSystem(system.name, system.settings.config) val sys2 = ActorSystem(system.name, system.settings.config) - val sys3 = ActorSystem( - system.name, - ConfigFactory.parseString("akka.cluster.roles = [other]").withFallback(system.settings.config)) + val sys3 = ActorSystem(system.name, + ConfigFactory.parseString("akka.cluster.roles = [other]").withFallback(system.settings.config)) var sys4: ActorSystem = null def join(from: ActorSystem, to: ActorSystem): Unit = { if (Cluster(from).selfRoles.contains("singleton")) from.actorOf( - ClusterSingletonManager.props( - singletonProps = ClusterSingletonRestart2Spec.singletonActorProps, - terminationMessage = PoisonPill, - settings = ClusterSingletonManagerSettings(from).withRole("singleton")), + ClusterSingletonManager.props(singletonProps = ClusterSingletonRestart2Spec.singletonActorProps, + terminationMessage = PoisonPill, + settings = ClusterSingletonManagerSettings(from).withRole("singleton")), name = "echo") within(45.seconds) { import akka.util.ccompat.imm._ awaitAssert { - Cluster(from) join Cluster(to).selfAddress + Cluster(from).join(Cluster(to).selfAddress) Cluster(from).state.members.map(_.uniqueAddress) should contain(Cluster(from).selfUniqueAddress) Cluster(from).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up)) } @@ -77,9 +75,9 @@ class ClusterSingletonRestart2Spec extends AkkaSpec(""" join(sys2, sys1) join(sys3, sys1) - val proxy3 = sys3.actorOf(ClusterSingletonProxy.props( - "user/echo", - ClusterSingletonProxySettings(sys3).withRole("singleton")), "proxy3") + val proxy3 = sys3.actorOf( + ClusterSingletonProxy.props("user/echo", ClusterSingletonProxySettings(sys3).withRole("singleton")), + "proxy3") within(5.seconds) { awaitAssert { @@ -100,8 +98,7 @@ class ClusterSingletonRestart2Spec extends AkkaSpec(""" val sys2port = Cluster(sys2).selfAddress.port.get val sys4Config = - ConfigFactory.parseString( - s""" + ConfigFactory.parseString(s""" akka.remote.artery.canonical.port=$sys2port 
akka.remote.netty.tcp.port=$sys2port """).withFallback(system.settings.config) @@ -134,4 +131,3 @@ class ClusterSingletonRestart2Spec extends AkkaSpec(""" shutdown(sys4) } } - diff --git a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala index 78035543fc..a20c41c326 100644 --- a/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala +++ b/akka-cluster-tools/src/test/scala/akka/cluster/singleton/ClusterSingletonRestartSpec.scala @@ -36,16 +36,15 @@ class ClusterSingletonRestartSpec extends AkkaSpec(""" def join(from: ActorSystem, to: ActorSystem): Unit = { from.actorOf( - ClusterSingletonManager.props( - singletonProps = TestActors.echoActorProps, - terminationMessage = PoisonPill, - settings = ClusterSingletonManagerSettings(from)), + ClusterSingletonManager.props(singletonProps = TestActors.echoActorProps, + terminationMessage = PoisonPill, + settings = ClusterSingletonManagerSettings(from)), name = "echo") within(10.seconds) { import akka.util.ccompat.imm._ awaitAssert { - Cluster(from) join Cluster(to).selfAddress + Cluster(from).join(Cluster(to).selfAddress) Cluster(from).state.members.map(_.uniqueAddress) should contain(Cluster(from).selfUniqueAddress) Cluster(from).state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up)) } @@ -74,8 +73,7 @@ class ClusterSingletonRestartSpec extends AkkaSpec(""" val sys1port = Cluster(sys1).selfAddress.port.get val sys3Config = - ConfigFactory.parseString( - s""" + ConfigFactory.parseString(s""" akka.remote.artery.canonical.port=$sys1port akka.remote.netty.tcp.port=$sys1port """).withFallback(system.settings.config) @@ -120,4 +118,3 @@ class ClusterSingletonRestartSpec extends AkkaSpec(""" shutdown(sys3) } } - diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala 
b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala index 52a4b5c03b..ba9ee5771f 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/internal/ReplicatorBehavior.scala @@ -26,27 +26,31 @@ import akka.actor.typed.Terminated import akka.cluster.ddata.typed.javadsl.{ Replicator => JReplicator } import akka.cluster.ddata.typed.scaladsl.{ Replicator => SReplicator } - private case class InternalChanged[A <: ReplicatedData](chg: dd.Replicator.Changed[A], subscriber: ActorRef[JReplicator.Changed[A]]) - extends JReplicator.Command + private case class InternalChanged[A <: ReplicatedData](chg: dd.Replicator.Changed[A], + subscriber: ActorRef[JReplicator.Changed[A]]) + extends JReplicator.Command val localAskTimeout = 60.seconds // ReadLocal, WriteLocal shouldn't timeout val additionalAskTimeout = 1.second - def behavior(settings: dd.ReplicatorSettings, underlyingReplicator: Option[akka.actor.ActorRef]): Behavior[SReplicator.Command] = { + def behavior(settings: dd.ReplicatorSettings, + underlyingReplicator: Option[akka.actor.ActorRef]): Behavior[SReplicator.Command] = { Behaviors.setup { ctx => val untypedReplicator = underlyingReplicator match { case Some(ref) => ref - case None => + case None => // FIXME perhaps add supervisor for restarting val untypedReplicatorProps = dd.Replicator.props(settings) ctx.actorOf(untypedReplicatorProps, name = "underlying") } def withState( - subscribeAdapters: Map[ActorRef[JReplicator.Changed[ReplicatedData]], ActorRef[dd.Replicator.Changed[ReplicatedData]]]): Behavior[SReplicator.Command] = { + subscribeAdapters: Map[ActorRef[JReplicator.Changed[ReplicatedData]], + ActorRef[dd.Replicator.Changed[ReplicatedData]]]): Behavior[SReplicator.Command] = { - def stopSubscribeAdapter(subscriber: ActorRef[JReplicator.Changed[ReplicatedData]]): Behavior[SReplicator.Command] = { + def 
stopSubscribeAdapter( + subscriber: ActorRef[JReplicator.Changed[ReplicatedData]]): Behavior[SReplicator.Command] = { subscribeAdapters.get(subscriber) match { case Some(adapter) => // will be unsubscribed from untypedReplicator via Terminated @@ -57,131 +61,139 @@ import akka.actor.typed.Terminated } } - Behaviors.receive[SReplicator.Command] { (ctx, msg) => - msg match { - case cmd: SReplicator.Get[_] => - untypedReplicator.tell( - dd.Replicator.Get(cmd.key, cmd.consistency, cmd.request), - sender = cmd.replyTo.toUntyped) - Behaviors.same + Behaviors + .receive[SReplicator.Command] { (ctx, msg) => + msg match { + case cmd: SReplicator.Get[_] => + untypedReplicator.tell(dd.Replicator.Get(cmd.key, cmd.consistency, cmd.request), + sender = cmd.replyTo.toUntyped) + Behaviors.same - case cmd: JReplicator.Get[d] => - implicit val timeout = Timeout(cmd.consistency.timeout match { - case java.time.Duration.ZERO => localAskTimeout - case t => t.asScala + additionalAskTimeout - }) - import ctx.executionContext - val reply = - (untypedReplicator ? dd.Replicator.Get(cmd.key, cmd.consistency.toUntyped, cmd.request.asScala)) - .mapTo[dd.Replicator.GetResponse[d]].map { - case rsp: dd.Replicator.GetSuccess[d] => JReplicator.GetSuccess(rsp.key, rsp.request.asJava)(rsp.dataValue) - case rsp: dd.Replicator.NotFound[d] => JReplicator.NotFound(rsp.key, rsp.request.asJava) - case rsp: dd.Replicator.GetFailure[d] => JReplicator.GetFailure(rsp.key, rsp.request.asJava) - }.recover { - case _ => JReplicator.GetFailure(cmd.key, cmd.request) - } - reply.foreach { cmd.replyTo ! _ } - Behaviors.same + case cmd: JReplicator.Get[d] => + implicit val timeout = Timeout(cmd.consistency.timeout match { + case java.time.Duration.ZERO => localAskTimeout + case t => t.asScala + additionalAskTimeout + }) + import ctx.executionContext + val reply = + (untypedReplicator ? 
dd.Replicator.Get(cmd.key, cmd.consistency.toUntyped, cmd.request.asScala)) + .mapTo[dd.Replicator.GetResponse[d]] + .map { + case rsp: dd.Replicator.GetSuccess[d] => + JReplicator.GetSuccess(rsp.key, rsp.request.asJava)(rsp.dataValue) + case rsp: dd.Replicator.NotFound[d] => JReplicator.NotFound(rsp.key, rsp.request.asJava) + case rsp: dd.Replicator.GetFailure[d] => JReplicator.GetFailure(rsp.key, rsp.request.asJava) + } + .recover { + case _ => JReplicator.GetFailure(cmd.key, cmd.request) + } + reply.foreach { cmd.replyTo ! _ } + Behaviors.same - case cmd: SReplicator.Update[_] => - untypedReplicator.tell( - dd.Replicator.Update(cmd.key, cmd.writeConsistency, cmd.request)(cmd.modify), - sender = cmd.replyTo.toUntyped) - Behaviors.same + case cmd: SReplicator.Update[_] => + untypedReplicator.tell(dd.Replicator.Update(cmd.key, cmd.writeConsistency, cmd.request)(cmd.modify), + sender = cmd.replyTo.toUntyped) + Behaviors.same - case cmd: JReplicator.Update[d] => - implicit val timeout = Timeout(cmd.writeConsistency.timeout match { - case java.time.Duration.ZERO => localAskTimeout - case t => t.asScala + additionalAskTimeout - }) - import ctx.executionContext - val reply = - (untypedReplicator ? dd.Replicator.Update(cmd.key, cmd.writeConsistency.toUntyped, cmd.request.asScala)(cmd.modify)) - .mapTo[dd.Replicator.UpdateResponse[d]].map { - case rsp: dd.Replicator.UpdateSuccess[d] => JReplicator.UpdateSuccess(rsp.key, rsp.request.asJava) - case rsp: dd.Replicator.UpdateTimeout[d] => JReplicator.UpdateTimeout(rsp.key, rsp.request.asJava) - case rsp: dd.Replicator.ModifyFailure[d] => JReplicator.ModifyFailure(rsp.key, rsp.errorMessage, rsp.cause, rsp.request.asJava) - case rsp: dd.Replicator.StoreFailure[d] => JReplicator.StoreFailure(rsp.key, rsp.request.asJava) - }.recover { - case _ => JReplicator.UpdateTimeout(cmd.key, cmd.request) - } - reply.foreach { cmd.replyTo ! 
_ } - Behaviors.same + case cmd: JReplicator.Update[d] => + implicit val timeout = Timeout(cmd.writeConsistency.timeout match { + case java.time.Duration.ZERO => localAskTimeout + case t => t.asScala + additionalAskTimeout + }) + import ctx.executionContext + val reply = + (untypedReplicator ? dd.Replicator.Update(cmd.key, + cmd.writeConsistency.toUntyped, + cmd.request.asScala)(cmd.modify)) + .mapTo[dd.Replicator.UpdateResponse[d]] + .map { + case rsp: dd.Replicator.UpdateSuccess[d] => JReplicator.UpdateSuccess(rsp.key, rsp.request.asJava) + case rsp: dd.Replicator.UpdateTimeout[d] => JReplicator.UpdateTimeout(rsp.key, rsp.request.asJava) + case rsp: dd.Replicator.ModifyFailure[d] => + JReplicator.ModifyFailure(rsp.key, rsp.errorMessage, rsp.cause, rsp.request.asJava) + case rsp: dd.Replicator.StoreFailure[d] => JReplicator.StoreFailure(rsp.key, rsp.request.asJava) + } + .recover { + case _ => JReplicator.UpdateTimeout(cmd.key, cmd.request) + } + reply.foreach { cmd.replyTo ! _ } + Behaviors.same - case cmd: SReplicator.Subscribe[_] => - // For the Scala API the Changed messages can be sent directly to the subscriber - untypedReplicator.tell( - dd.Replicator.Subscribe(cmd.key, cmd.subscriber.toUntyped), - sender = cmd.subscriber.toUntyped) - Behaviors.same + case cmd: SReplicator.Subscribe[_] => + // For the Scala API the Changed messages can be sent directly to the subscriber + untypedReplicator.tell(dd.Replicator.Subscribe(cmd.key, cmd.subscriber.toUntyped), + sender = cmd.subscriber.toUntyped) + Behaviors.same - case cmd: JReplicator.Subscribe[ReplicatedData] @unchecked => - // For the Java API the Changed messages must be mapped to the JReplicator.Changed class. - // That is done with an adapter, and we have to keep track of the lifecycle of the original - // subscriber and stop the adapter when the original subscriber is stopped. 
- val adapter: ActorRef[dd.Replicator.Changed[ReplicatedData]] = ctx.spawnMessageAdapter { - chg => InternalChanged(chg, cmd.subscriber) - } + case cmd: JReplicator.Subscribe[ReplicatedData] @unchecked => + // For the Java API the Changed messages must be mapped to the JReplicator.Changed class. + // That is done with an adapter, and we have to keep track of the lifecycle of the original + // subscriber and stop the adapter when the original subscriber is stopped. + val adapter: ActorRef[dd.Replicator.Changed[ReplicatedData]] = ctx.spawnMessageAdapter { chg => + InternalChanged(chg, cmd.subscriber) + } - untypedReplicator.tell( - dd.Replicator.Subscribe(cmd.key, adapter.toUntyped), - sender = akka.actor.ActorRef.noSender) + untypedReplicator.tell(dd.Replicator.Subscribe(cmd.key, adapter.toUntyped), + sender = akka.actor.ActorRef.noSender) - ctx.watch(cmd.subscriber) + ctx.watch(cmd.subscriber) - withState(subscribeAdapters.updated(cmd.subscriber, adapter)) + withState(subscribeAdapters.updated(cmd.subscriber, adapter)) - case InternalChanged(chg, subscriber) => - subscriber ! JReplicator.Changed(chg.key)(chg.dataValue) - Behaviors.same + case InternalChanged(chg, subscriber) => + subscriber ! 
JReplicator.Changed(chg.key)(chg.dataValue) + Behaviors.same - case cmd: JReplicator.Unsubscribe[ReplicatedData] @unchecked => - stopSubscribeAdapter(cmd.subscriber) + case cmd: JReplicator.Unsubscribe[ReplicatedData] @unchecked => + stopSubscribeAdapter(cmd.subscriber) - case cmd: SReplicator.Delete[_] => - untypedReplicator.tell( - dd.Replicator.Delete(cmd.key, cmd.consistency, cmd.request), - sender = cmd.replyTo.toUntyped) - Behaviors.same + case cmd: SReplicator.Delete[_] => + untypedReplicator.tell(dd.Replicator.Delete(cmd.key, cmd.consistency, cmd.request), + sender = cmd.replyTo.toUntyped) + Behaviors.same - case cmd: JReplicator.Delete[d] => - implicit val timeout = Timeout(cmd.consistency.timeout match { - case java.time.Duration.ZERO => localAskTimeout - case t => t.asScala + additionalAskTimeout - }) - import ctx.executionContext - val reply = - (untypedReplicator ? dd.Replicator.Delete(cmd.key, cmd.consistency.toUntyped, cmd.request.asScala)) - .mapTo[dd.Replicator.DeleteResponse[d]].map { - case rsp: dd.Replicator.DeleteSuccess[d] => JReplicator.DeleteSuccess(rsp.key, rsp.request.asJava) - case rsp: dd.Replicator.ReplicationDeleteFailure[d] => JReplicator.ReplicationDeleteFailure(rsp.key, rsp.request.asJava) - case rsp: dd.Replicator.DataDeleted[d] => JReplicator.DataDeleted(rsp.key, rsp.request.asJava) - case rsp: dd.Replicator.StoreFailure[d] => JReplicator.StoreFailure(rsp.key, rsp.request.asJava) - }.recover { - case _ => JReplicator.ReplicationDeleteFailure(cmd.key, cmd.request) - } - reply.foreach { cmd.replyTo ! _ } - Behaviors.same + case cmd: JReplicator.Delete[d] => + implicit val timeout = Timeout(cmd.consistency.timeout match { + case java.time.Duration.ZERO => localAskTimeout + case t => t.asScala + additionalAskTimeout + }) + import ctx.executionContext + val reply = + (untypedReplicator ? 
dd.Replicator.Delete(cmd.key, cmd.consistency.toUntyped, cmd.request.asScala)) + .mapTo[dd.Replicator.DeleteResponse[d]] + .map { + case rsp: dd.Replicator.DeleteSuccess[d] => JReplicator.DeleteSuccess(rsp.key, rsp.request.asJava) + case rsp: dd.Replicator.ReplicationDeleteFailure[d] => + JReplicator.ReplicationDeleteFailure(rsp.key, rsp.request.asJava) + case rsp: dd.Replicator.DataDeleted[d] => JReplicator.DataDeleted(rsp.key, rsp.request.asJava) + case rsp: dd.Replicator.StoreFailure[d] => JReplicator.StoreFailure(rsp.key, rsp.request.asJava) + } + .recover { + case _ => JReplicator.ReplicationDeleteFailure(cmd.key, cmd.request) + } + reply.foreach { cmd.replyTo ! _ } + Behaviors.same - case SReplicator.GetReplicaCount(replyTo) => - untypedReplicator.tell(dd.Replicator.GetReplicaCount, sender = replyTo.toUntyped) - Behaviors.same + case SReplicator.GetReplicaCount(replyTo) => + untypedReplicator.tell(dd.Replicator.GetReplicaCount, sender = replyTo.toUntyped) + Behaviors.same - case JReplicator.GetReplicaCount(replyTo) => - implicit val timeout = Timeout(localAskTimeout) - import ctx.executionContext - val reply = - (untypedReplicator ? dd.Replicator.GetReplicaCount) - .mapTo[dd.Replicator.ReplicaCount].map(rsp => JReplicator.ReplicaCount(rsp.n)) - reply.foreach { replyTo ! _ } - Behaviors.same + case JReplicator.GetReplicaCount(replyTo) => + implicit val timeout = Timeout(localAskTimeout) + import ctx.executionContext + val reply = + (untypedReplicator ? dd.Replicator.GetReplicaCount) + .mapTo[dd.Replicator.ReplicaCount] + .map(rsp => JReplicator.ReplicaCount(rsp.n)) + reply.foreach { replyTo ! 
_ } + Behaviors.same - case SReplicator.FlushChanges | JReplicator.FlushChanges => - untypedReplicator.tell(dd.Replicator.FlushChanges, sender = akka.actor.ActorRef.noSender) - Behaviors.same + case SReplicator.FlushChanges | JReplicator.FlushChanges => + untypedReplicator.tell(dd.Replicator.FlushChanges, sender = akka.actor.ActorRef.noSender) + Behaviors.same + } } - } .receiveSignal { case (_, Terminated(ref: ActorRef[JReplicator.Changed[ReplicatedData]] @unchecked)) => stopSubscribeAdapter(ref) diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/DistributedData.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/DistributedData.scala index 2f72fb51d9..4ce8a8bfd2 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/DistributedData.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/DistributedData.scala @@ -35,6 +35,7 @@ object DistributedData extends ExtensionId[DistributedData] { */ @DoNotInherit abstract class DistributedData extends Extension { + /** * `ActorRef` of the [[Replicator]] . */ @@ -70,4 +71,4 @@ object DistributedDataSetup { * for tests that need to replace extension with stub/mock implementations. 
*/ final class DistributedDataSetup(createExtension: java.util.function.Function[ActorSystem[_], DistributedData]) - extends ExtensionSetup[DistributedData](DistributedData, createExtension) + extends ExtensionSetup[DistributedData](DistributedData, createExtension) diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala index 3bc7a326ae..77645907a3 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/Replicator.scala @@ -68,6 +68,7 @@ object Replicator { @InternalApi private[akka] override def toUntyped = dd.Replicator.ReadMajority(timeout.asScala, minCap) } final case class ReadAll(timeout: Duration) extends ReadConsistency { + /** INTERNAL API */ @InternalApi private[akka] override def toUntyped = dd.Replicator.ReadAll(timeout.asScala) } @@ -97,6 +98,7 @@ object Replicator { @InternalApi private[akka] override def toUntyped = dd.Replicator.WriteMajority(timeout.asScala, minCap) } final case class WriteAll(timeout: Duration) extends WriteConsistency { + /** INTERNAL API */ @InternalApi private[akka] override def toUntyped = dd.Replicator.WriteAll(timeout.asScala) } @@ -119,8 +121,11 @@ object Replicator { * way to pass contextual information (e.g. original sender) without having to use `ask` * or maintain local correlation data structures. 
*/ - final case class Get[A <: ReplicatedData](key: Key[A], consistency: ReadConsistency, replyTo: ActorRef[GetResponse[A]], request: Optional[Any]) - extends Command { + final case class Get[A <: ReplicatedData](key: Key[A], + consistency: ReadConsistency, + replyTo: ActorRef[GetResponse[A]], + request: Optional[Any]) + extends Command { def this(key: Key[A], consistency: ReadConsistency, replyTo: ActorRef[GetResponse[A]]) = this(key, consistency, replyTo, Optional.empty[Any]) @@ -131,11 +136,12 @@ object Replicator { def request: Optional[Any] def getRequest: Optional[Any] = request } + /** * Reply from `Get`. The data value is retrieved with [[#get]] using the typed key. */ final case class GetSuccess[A <: ReplicatedData](key: Key[A], request: Optional[Any])(data: A) - extends GetResponse[A] { + extends GetResponse[A] { /** * The data value, with correct type. @@ -150,14 +156,13 @@ object Replicator { */ def dataValue: A = data } - final case class NotFound[A <: ReplicatedData](key: Key[A], request: Optional[Any]) - extends GetResponse[A] + final case class NotFound[A <: ReplicatedData](key: Key[A], request: Optional[Any]) extends GetResponse[A] + /** * The [[Get]] request could not be fulfill according to the given * [[ReadConsistency consistency level]] and [[ReadConsistency#timeout timeout]]. */ - final case class GetFailure[A <: ReplicatedData](key: Key[A], request: Optional[Any]) - extends GetResponse[A] + final case class GetFailure[A <: ReplicatedData](key: Key[A], request: Optional[Any]) extends GetResponse[A] object Update { @@ -166,6 +171,7 @@ object Replicator { case None => modify(initial) } } + /** * Send this message to the local `Replicator` to update a data value for the * given `key`. The `Replicator` will reply with one of the [[UpdateResponse]] messages. @@ -179,9 +185,12 @@ object Replicator { * function that only uses the data parameter and stable fields from enclosing scope. 
It must * for example not access `sender()` reference of an enclosing actor. */ - final case class Update[A <: ReplicatedData] private (key: Key[A], writeConsistency: WriteConsistency, - replyTo: ActorRef[UpdateResponse[A]], request: Optional[Any])(val modify: Option[A] => A) - extends Command with NoSerializationVerificationNeeded { + final case class Update[A <: ReplicatedData] private (key: Key[A], + writeConsistency: WriteConsistency, + replyTo: ActorRef[UpdateResponse[A]], + request: Optional[Any])(val modify: Option[A] => A) + extends Command + with NoSerializationVerificationNeeded { /** * Modify value of local `Replicator` and replicate with given `writeConsistency`. @@ -190,8 +199,11 @@ object Replicator { * If there is no current data value for the `key` the `initial` value will be * passed to the `modify` function. */ - def this( - key: Key[A], initial: A, writeConsistency: WriteConsistency, replyTo: ActorRef[UpdateResponse[A]], modify: JFunction[A, A]) = + def this(key: Key[A], + initial: A, + writeConsistency: WriteConsistency, + replyTo: ActorRef[UpdateResponse[A]], + modify: JFunction[A, A]) = this(key, writeConsistency, replyTo, Optional.empty[Any])( Update.modifyWithInitial(initial, data => modify.apply(data))) @@ -206,9 +218,12 @@ object Replicator { * way to pass contextual information (e.g. original sender) without having to use `ask` * or local correlation data structures. 
*/ - def this( - key: Key[A], initial: A, writeConsistency: WriteConsistency, replyTo: ActorRef[UpdateResponse[A]], - request: Optional[Any], modify: JFunction[A, A]) = + def this(key: Key[A], + initial: A, + writeConsistency: WriteConsistency, + replyTo: ActorRef[UpdateResponse[A]], + request: Optional[Any], + modify: JFunction[A, A]) = this(key, writeConsistency, replyTo, request)(Update.modifyWithInitial(initial, data => modify.apply(data))) } @@ -219,7 +234,8 @@ object Replicator { def getRequest: Optional[Any] = request } final case class UpdateSuccess[A <: ReplicatedData](key: Key[A], request: Optional[Any]) - extends UpdateResponse[A] with DeadLetterSuppression + extends UpdateResponse[A] + with DeadLetterSuppression @DoNotInherit sealed abstract class UpdateFailure[A <: ReplicatedData] extends UpdateResponse[A] @@ -233,14 +249,19 @@ object Replicator { * crashes before it has been able to communicate with other replicas. */ final case class UpdateTimeout[A <: ReplicatedData](key: Key[A], request: Optional[Any]) extends UpdateFailure[A] + /** * If the `modify` function of the [[Update]] throws an exception the reply message * will be this `ModifyFailure` message. The original exception is included as `cause`. */ - final case class ModifyFailure[A <: ReplicatedData](key: Key[A], errorMessage: String, cause: Throwable, request: Optional[Any]) - extends UpdateFailure[A] { + final case class ModifyFailure[A <: ReplicatedData](key: Key[A], + errorMessage: String, + cause: Throwable, + request: Optional[Any]) + extends UpdateFailure[A] { override def toString: String = s"ModifyFailure [$key]: $errorMessage" } + /** * The local store or direct replication of the [[Update]] could not be fulfill according to * the given [[WriteConsistency consistency level]] due to durable store errors. This is @@ -252,7 +273,8 @@ object Replicator { * crashes before it has been able to communicate with other replicas. 
*/ final case class StoreFailure[A <: ReplicatedData](key: Key[A], request: Optional[Any]) - extends UpdateFailure[A] with DeleteResponse[A] { + extends UpdateFailure[A] + with DeleteResponse[A] { override def getRequest: Optional[Any] = request } @@ -272,18 +294,21 @@ object Replicator { * message. */ final case class Subscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef[Changed[A]]) extends Command + /** * Unregister a subscriber. * * @see [[Replicator.Subscribe]] */ final case class Unsubscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef[Changed[A]]) extends Command + /** * The data value is retrieved with [[#get]] using the typed key. * * @see [[Replicator.Subscribe]] */ final case class Changed[A <: ReplicatedData](key: Key[A])(data: A) { + /** * The data value, with correct type. * Scala pattern matching cannot infer the type from the `key` parameter. @@ -307,9 +332,12 @@ object Replicator { * way to pass contextual information (e.g. original sender) without having to use `ask` * or maintain local correlation data structures. 
*/ - final case class Delete[A <: ReplicatedData](key: Key[A], consistency: WriteConsistency, - replyTo: ActorRef[DeleteResponse[A]], request: Optional[Any]) - extends Command with NoSerializationVerificationNeeded { + final case class Delete[A <: ReplicatedData](key: Key[A], + consistency: WriteConsistency, + replyTo: ActorRef[DeleteResponse[A]], + request: Optional[Any]) + extends Command + with NoSerializationVerificationNeeded { def this(key: Key[A], consistency: WriteConsistency, replyTo: ActorRef[DeleteResponse[A]]) = this(key, consistency, replyTo, Optional.empty()) @@ -321,9 +349,12 @@ object Replicator { def getRequest: Optional[Any] = request } final case class DeleteSuccess[A <: ReplicatedData](key: Key[A], request: Optional[Any]) extends DeleteResponse[A] - final case class ReplicationDeleteFailure[A <: ReplicatedData](key: Key[A], request: Optional[Any]) extends DeleteResponse[A] + final case class ReplicationDeleteFailure[A <: ReplicatedData](key: Key[A], request: Optional[Any]) + extends DeleteResponse[A] final case class DataDeleted[A <: ReplicatedData](key: Key[A], request: Optional[Any]) - extends RuntimeException with NoStackTrace with DeleteResponse[A] { + extends RuntimeException + with NoStackTrace + with DeleteResponse[A] { override def toString: String = s"DataDeleted [$key]" } diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/ReplicatorSettings.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/ReplicatorSettings.scala index 4239949fd1..1deb640e37 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/ReplicatorSettings.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/javadsl/ReplicatorSettings.scala @@ -10,6 +10,7 @@ import akka.actor.typed.scaladsl.adapter._ import com.typesafe.config.Config object ReplicatorSettings { + /** * Create settings from the default configuration * `akka.cluster.distributed-data`. 
diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/DistributedData.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/DistributedData.scala index ae56b80f03..9bdfa1a856 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/DistributedData.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/DistributedData.scala @@ -43,7 +43,8 @@ class DistributedData(system: ActorSystem[_]) extends Extension { */ val replicator: ActorRef[Replicator.Command] = if (isTerminated) { - system.log.warning("Replicator points to dead letters: Make sure the cluster node is not terminated and has the proper role!") + system.log.warning( + "Replicator points to dead letters: Make sure the cluster node is not terminated and has the proper role!") system.deadLetters } else { val underlyingReplicator = dd.DistributedData(untypedSystem).replicator @@ -58,4 +59,3 @@ class DistributedData(system: ActorSystem[_]) extends Extension { private def isTerminated: Boolean = dd.DistributedData(system.toUntyped).isTerminated } - diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala index 13b4d61298..3574de80cb 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/Replicator.scala @@ -46,6 +46,7 @@ object Replicator { trait Command object Get { + /** * Convenience for `ask`. */ @@ -61,8 +62,11 @@ object Replicator { * way to pass contextual information (e.g. original sender) without having to use `ask` * or maintain local correlation data structures. 
*/ - final case class Get[A <: ReplicatedData](key: Key[A], consistency: ReadConsistency, - replyTo: ActorRef[GetResponse[A]], request: Option[Any] = None) extends Command + final case class Get[A <: ReplicatedData](key: Key[A], + consistency: ReadConsistency, + replyTo: ActorRef[GetResponse[A]], + request: Option[Any] = None) + extends Command /** * Reply from `Get`. The data value is retrieved with [[#get]] using the typed key. @@ -73,6 +77,7 @@ object Replicator { } type GetSuccess[A <: ReplicatedData] = dd.Replicator.GetSuccess[A] type NotFound[A <: ReplicatedData] = dd.Replicator.NotFound[A] + /** * The [[Get]] request could not be fulfill according to the given * [[ReadConsistency consistency level]] and [[ReadConsistency#timeout timeout]]. @@ -92,16 +97,18 @@ object Replicator { * way to pass contextual information (e.g. original sender) without having to use `ask` * or local correlation data structures. */ - def apply[A <: ReplicatedData]( - key: Key[A], initial: A, writeConsistency: WriteConsistency, replyTo: ActorRef[UpdateResponse[A]], - request: Option[Any] = None)(modify: A => A): Update[A] = + def apply[A <: ReplicatedData](key: Key[A], + initial: A, + writeConsistency: WriteConsistency, + replyTo: ActorRef[UpdateResponse[A]], + request: Option[Any] = None)(modify: A => A): Update[A] = Update(key, writeConsistency, replyTo, request)(modifyWithInitial(initial, modify)) /** * Convenience for `ask`. 
*/ - def apply[A <: ReplicatedData](key: Key[A], initial: A, - writeConsistency: WriteConsistency)(modify: A => A): ActorRef[UpdateResponse[A]] => Update[A] = + def apply[A <: ReplicatedData](key: Key[A], initial: A, writeConsistency: WriteConsistency)( + modify: A => A): ActorRef[UpdateResponse[A]] => Update[A] = (replyTo => Update(key, writeConsistency, replyTo, None)(modifyWithInitial(initial, modify))) private def modifyWithInitial[A <: ReplicatedData](initial: A, modify: A => A): Option[A] => A = { @@ -126,15 +133,17 @@ object Replicator { * function that only uses the data parameter and stable fields from enclosing scope. It must * for example not access `sender()` reference of an enclosing actor. */ - final case class Update[A <: ReplicatedData](key: Key[A], writeConsistency: WriteConsistency, + final case class Update[A <: ReplicatedData](key: Key[A], + writeConsistency: WriteConsistency, replyTo: ActorRef[UpdateResponse[A]], request: Option[Any])(val modify: Option[A] => A) - extends Command with NoSerializationVerificationNeeded { - } + extends Command + with NoSerializationVerificationNeeded {} type UpdateResponse[A <: ReplicatedData] = dd.Replicator.UpdateResponse[A] type UpdateSuccess[A <: ReplicatedData] = dd.Replicator.UpdateSuccess[A] type UpdateFailure[A <: ReplicatedData] = dd.Replicator.UpdateFailure[A] + /** * The direct replication of the [[Update]] could not be fulfill according to * the given [[WriteConsistency consistency level]] and @@ -145,11 +154,13 @@ object Replicator { * crashes before it has been able to communicate with other replicas. */ type UpdateTimeout[A <: ReplicatedData] = dd.Replicator.UpdateTimeout[A] + /** * If the `modify` function of the [[Update]] throws an exception the reply message * will be this `ModifyFailure` message. The original exception is included as `cause`. 
*/ type ModifyFailure[A <: ReplicatedData] = dd.Replicator.ModifyFailure[A] + /** * The local store or direct replication of the [[Update]] could not be fulfill according to * the given [[WriteConsistency consistency level]] due to durable store errors. This is @@ -176,20 +187,19 @@ object Replicator { * If the key is deleted the subscriber is notified with a [[Deleted]] * message. */ - final case class Subscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef[Changed[A]]) - extends Command + final case class Subscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef[Changed[A]]) extends Command /** * Unregister a subscriber. * * @see [[Replicator.Subscribe]] */ - final case class Unsubscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef[Changed[A]]) - extends Command + final case class Unsubscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef[Changed[A]]) extends Command object Changed { def unapply[A <: ReplicatedData](chg: Changed[A]): Option[Key[A]] = Some(chg.key) } + /** * The data value is retrieved with [[#get]] using the typed key. * @@ -198,12 +208,15 @@ object Replicator { type Changed[A <: ReplicatedData] = dd.Replicator.Changed[A] object Delete { + /** * Convenience for `ask`. */ - def apply[A <: ReplicatedData](key: Key[A], consistency: WriteConsistency): ActorRef[DeleteResponse[A]] => Delete[A] = + def apply[A <: ReplicatedData](key: Key[A], + consistency: WriteConsistency): ActorRef[DeleteResponse[A]] => Delete[A] = (replyTo => Delete(key, consistency, replyTo, None)) } + /** * Send this message to the local `Replicator` to delete a data value for the * given `key`. The `Replicator` will reply with one of the [[DeleteResponse]] messages. @@ -212,9 +225,12 @@ object Replicator { * way to pass contextual information (e.g. original sender) without having to use `ask` * or maintain local correlation data structures. 
*/ - final case class Delete[A <: ReplicatedData](key: Key[A], consistency: WriteConsistency, - replyTo: ActorRef[DeleteResponse[A]], request: Option[Any]) - extends Command with NoSerializationVerificationNeeded + final case class Delete[A <: ReplicatedData](key: Key[A], + consistency: WriteConsistency, + replyTo: ActorRef[DeleteResponse[A]], + request: Option[Any]) + extends Command + with NoSerializationVerificationNeeded type DeleteResponse[A <: ReplicatedData] = dd.Replicator.DeleteResponse[A] type DeleteSuccess[A <: ReplicatedData] = dd.Replicator.DeleteSuccess[A] @@ -222,6 +238,7 @@ object Replicator { type DataDeleted[A <: ReplicatedData] = dd.Replicator.DataDeleted[A] object GetReplicaCount { + /** * Convenience for `ask`. */ diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala index 231c96c36f..9c5f51aee9 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSettings.scala @@ -14,6 +14,7 @@ import com.typesafe.config.Config * @see [[akka.cluster.ddata.ReplicatorSettings]]. */ object ReplicatorSettings { + /** * Create settings from the default configuration * `akka.cluster.distributed-data`. diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/package.scala b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/package.scala index 72104d5889..c8849dfb3a 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/package.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/ddata/typed/scaladsl/package.scala @@ -7,6 +7,7 @@ package akka.cluster.ddata.typed import akka.cluster.{ ddata => dd } package object scaladsl { + /** * @see [[akka.cluster.ddata.ReplicatorSettings]]. 
*/ diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/Cluster.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/Cluster.scala index e356e06baa..cd182315a6 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/Cluster.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/Cluster.scala @@ -32,11 +32,11 @@ sealed trait ClusterStateSubscription * `ReachabilityEvent` or one of the common supertypes, such as `MemberEvent` to get * all the subtypes of events. */ -final case class Subscribe[A <: ClusterDomainEvent]( - subscriber: ActorRef[A], - eventClass: Class[A]) extends ClusterStateSubscription +final case class Subscribe[A <: ClusterDomainEvent](subscriber: ActorRef[A], eventClass: Class[A]) + extends ClusterStateSubscription object Subscribe { + /** * Java API */ @@ -84,6 +84,7 @@ sealed trait ClusterCommand final case class Join(address: Address) extends ClusterCommand object Join { + /** * Java API */ @@ -129,6 +130,7 @@ final case class JoinSeedNodes(seedNodes: immutable.Seq[Address]) extends Cluste final case class Leave(address: Address) extends ClusterCommand object Leave { + /** * Java API */ @@ -201,4 +203,4 @@ object ClusterSetup { * for tests that need to replace extension with stub/mock implementations. 
*/ final class ClusterSetup(createExtension: java.util.function.Function[ActorSystem[_], Cluster]) - extends ExtensionSetup[Cluster](Cluster, createExtension) + extends ExtensionSetup[Cluster](Cluster, createExtension) diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala index ece31d0e5b..e79919ab11 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/ClusterSingleton.scala @@ -7,7 +7,10 @@ package akka.cluster.typed import akka.actor.NoSerializationVerificationNeeded import akka.annotation.{ DoNotInherit, InternalApi } import akka.cluster.ClusterSettings.DataCenter -import akka.cluster.singleton.{ ClusterSingletonProxySettings, ClusterSingletonManagerSettings => UntypedClusterSingletonManagerSettings } +import akka.cluster.singleton.{ + ClusterSingletonProxySettings, + ClusterSingletonManagerSettings => UntypedClusterSingletonManagerSettings +} import akka.cluster.typed.internal.AdaptedClusterSingletonImpl import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, Extension, ExtensionId, Props } import akka.util.JavaDurationConverters._ @@ -18,40 +21,35 @@ import scala.concurrent.duration.{ Duration, FiniteDuration } import akka.actor.typed.ExtensionSetup object ClusterSingletonSettings { - def apply( - system: ActorSystem[_] - ): ClusterSingletonSettings = fromConfig(system.settings.config.getConfig("akka.cluster")) + def apply(system: ActorSystem[_]): ClusterSingletonSettings = + fromConfig(system.settings.config.getConfig("akka.cluster")) /** * Java API */ def create(system: ActorSystem[_]): ClusterSingletonSettings = apply(system) - def fromConfig( - config: Config - ): ClusterSingletonSettings = { + def fromConfig(config: Config): ClusterSingletonSettings = { // TODO introduce a config namespace for typed singleton and read that? 
// currently singleton name is required and then discarded, for example val mgrSettings = ClusterSingletonManagerSettings(config.getConfig("singleton")) val proxySettings = ClusterSingletonProxySettings(config.getConfig("singleton-proxy")) - new ClusterSingletonSettings( - mgrSettings.role, - proxySettings.dataCenter, - proxySettings.singletonIdentificationInterval, - mgrSettings.removalMargin, - mgrSettings.handOverRetryInterval, - proxySettings.bufferSize - ) + new ClusterSingletonSettings(mgrSettings.role, + proxySettings.dataCenter, + proxySettings.singletonIdentificationInterval, + mgrSettings.removalMargin, + mgrSettings.handOverRetryInterval, + proxySettings.bufferSize) } } -final class ClusterSingletonSettings( - val role: Option[String], - val dataCenter: Option[DataCenter], - val singletonIdentificationInterval: FiniteDuration, - val removalMargin: FiniteDuration, - val handOverRetryInterval: FiniteDuration, - val bufferSize: Int) extends NoSerializationVerificationNeeded { +final class ClusterSingletonSettings(val role: Option[String], + val dataCenter: Option[DataCenter], + val singletonIdentificationInterval: FiniteDuration, + val removalMargin: FiniteDuration, + val handOverRetryInterval: FiniteDuration, + val bufferSize: Int) + extends NoSerializationVerificationNeeded { def withRole(role: String): ClusterSingletonSettings = copy(role = Some(role)) @@ -62,21 +60,28 @@ final class ClusterSingletonSettings( def withNoDataCenter(): ClusterSingletonSettings = copy(dataCenter = None) def withRemovalMargin(removalMargin: FiniteDuration): ClusterSingletonSettings = copy(removalMargin = removalMargin) - def withRemovalMargin(removalMargin: java.time.Duration): ClusterSingletonSettings = withRemovalMargin(removalMargin.asScala) + def withRemovalMargin(removalMargin: java.time.Duration): ClusterSingletonSettings = + withRemovalMargin(removalMargin.asScala) - def withHandoverRetryInterval(handOverRetryInterval: FiniteDuration): ClusterSingletonSettings = 
copy(handOverRetryInterval = handOverRetryInterval) - def withHandoverRetryInterval(handOverRetryInterval: java.time.Duration): ClusterSingletonSettings = withHandoverRetryInterval(handOverRetryInterval.asScala) + def withHandoverRetryInterval(handOverRetryInterval: FiniteDuration): ClusterSingletonSettings = + copy(handOverRetryInterval = handOverRetryInterval) + def withHandoverRetryInterval(handOverRetryInterval: java.time.Duration): ClusterSingletonSettings = + withHandoverRetryInterval(handOverRetryInterval.asScala) def withBufferSize(bufferSize: Int): ClusterSingletonSettings = copy(bufferSize = bufferSize) - private def copy( - role: Option[String] = role, - dataCenter: Option[DataCenter] = dataCenter, - singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval, - removalMargin: FiniteDuration = removalMargin, - handOverRetryInterval: FiniteDuration = handOverRetryInterval, - bufferSize: Int = bufferSize) = - new ClusterSingletonSettings(role, dataCenter, singletonIdentificationInterval, removalMargin, handOverRetryInterval, bufferSize) + private def copy(role: Option[String] = role, + dataCenter: Option[DataCenter] = dataCenter, + singletonIdentificationInterval: FiniteDuration = singletonIdentificationInterval, + removalMargin: FiniteDuration = removalMargin, + handOverRetryInterval: FiniteDuration = handOverRetryInterval, + bufferSize: Int = bufferSize) = + new ClusterSingletonSettings(role, + dataCenter, + singletonIdentificationInterval, + removalMargin, + handOverRetryInterval, + bufferSize) /** * INTERNAL API: @@ -100,10 +105,11 @@ final class ClusterSingletonSettings( @InternalApi private[akka] def shouldRunManager(cluster: Cluster): Boolean = { (role.isEmpty || cluster.selfMember.roles(role.get)) && - (dataCenter.isEmpty || dataCenter.contains(cluster.selfMember.dataCenter)) + (dataCenter.isEmpty || dataCenter.contains(cluster.selfMember.dataCenter)) } - override def toString = s"ClusterSingletonSettings($role, $dataCenter, 
$singletonIdentificationInterval, $removalMargin, $handOverRetryInterval, $bufferSize)" + override def toString = + s"ClusterSingletonSettings($role, $dataCenter, $singletonIdentificationInterval, $removalMargin, $handOverRetryInterval, $bufferSize)" } object ClusterSingleton extends ExtensionId[ClusterSingleton] { @@ -125,11 +131,13 @@ private[akka] object ClusterSingletonImpl { } object SingletonActor { + /** * @param name Unique name for the singleton * @param behavior Behavior for the singleton */ - def apply[M](behavior: Behavior[M], name: String): SingletonActor[M] = new SingletonActor[M](behavior, name, Props.empty, None, None) + def apply[M](behavior: Behavior[M], name: String): SingletonActor[M] = + new SingletonActor[M](behavior, name, Props.empty, None, None) /** * Java API @@ -140,13 +148,11 @@ object SingletonActor { def of[M](behavior: Behavior[M], name: String): SingletonActor[M] = apply(behavior, name) } -final class SingletonActor[M] private ( - val behavior: Behavior[M], - val name: String, - val props: Props, - val stopMessage: Option[M], - val settings: Option[ClusterSingletonSettings] -) { +final class SingletonActor[M] private (val behavior: Behavior[M], + val name: String, + val props: Props, + val stopMessage: Option[M], + val settings: Option[ClusterSingletonSettings]) { /** * [[akka.actor.typed.Props]] of the singleton actor, such as dispatcher settings. 
@@ -166,12 +172,11 @@ final class SingletonActor[M] private ( */ def withSettings(settings: ClusterSingletonSettings): SingletonActor[M] = copy(settings = Option(settings)) - private def copy( - behavior: Behavior[M] = behavior, - props: Props = props, - stopMessage: Option[M] = stopMessage, - settings: Option[ClusterSingletonSettings] = settings - ): SingletonActor[M] = new SingletonActor[M](behavior, name, props, stopMessage, settings) + private def copy(behavior: Behavior[M] = behavior, + props: Props = props, + stopMessage: Option[M] = stopMessage, + settings: Option[ClusterSingletonSettings] = settings): SingletonActor[M] = + new SingletonActor[M](behavior, name, props, stopMessage, settings) } /** @@ -209,11 +214,11 @@ object ClusterSingletonManagerSettings { * the default configuration `akka.cluster.singleton`. */ def apply(config: Config): ClusterSingletonManagerSettings = - new ClusterSingletonManagerSettings( - singletonName = config.getString("singleton-name"), - role = roleOption(config.getString("role")), - removalMargin = Duration.Zero, // defaults to ClusterSettins.DownRemovalMargin - handOverRetryInterval = config.getDuration("hand-over-retry-interval", MILLISECONDS).millis) + new ClusterSingletonManagerSettings(singletonName = config.getString("singleton-name"), + role = roleOption(config.getString("role")), + removalMargin = Duration.Zero, // defaults to ClusterSettins.DownRemovalMargin + handOverRetryInterval = + config.getDuration("hand-over-retry-interval", MILLISECONDS).millis) /** * Java API: Create settings from the default configuration @@ -255,15 +260,16 @@ object ClusterSingletonManagerSettings { * over has started or the previous oldest member is removed from the cluster * (+ `removalMargin`). 
*/ -final class ClusterSingletonManagerSettings( - val singletonName: String, - val role: Option[String], - val removalMargin: FiniteDuration, - val handOverRetryInterval: FiniteDuration) extends NoSerializationVerificationNeeded { +final class ClusterSingletonManagerSettings(val singletonName: String, + val role: Option[String], + val removalMargin: FiniteDuration, + val handOverRetryInterval: FiniteDuration) + extends NoSerializationVerificationNeeded { def withSingletonName(name: String): ClusterSingletonManagerSettings = copy(singletonName = name) - def withRole(role: String): ClusterSingletonManagerSettings = copy(role = UntypedClusterSingletonManagerSettings.roleOption(role)) + def withRole(role: String): ClusterSingletonManagerSettings = + copy(role = UntypedClusterSingletonManagerSettings.roleOption(role)) def withRole(role: Option[String]): ClusterSingletonManagerSettings = copy(role = role) @@ -277,11 +283,10 @@ final class ClusterSingletonManagerSettings( def withHandOverRetryInterval(retryInterval: java.time.Duration): ClusterSingletonManagerSettings = withHandOverRetryInterval(retryInterval.asScala) - private def copy( - singletonName: String = singletonName, - role: Option[String] = role, - removalMargin: FiniteDuration = removalMargin, - handOverRetryInterval: FiniteDuration = handOverRetryInterval): ClusterSingletonManagerSettings = + private def copy(singletonName: String = singletonName, + role: Option[String] = role, + removalMargin: FiniteDuration = removalMargin, + handOverRetryInterval: FiniteDuration = handOverRetryInterval): ClusterSingletonManagerSettings = new ClusterSingletonManagerSettings(singletonName, role, removalMargin, handOverRetryInterval) } @@ -299,4 +304,4 @@ object ClusterSingletonSetup { * for tests that need to replace extension with stub/mock implementations. 
*/ final class ClusterSingletonSetup(createExtension: java.util.function.Function[ActorSystem[_], ClusterSingleton]) - extends ExtensionSetup[ClusterSingleton](ClusterSingleton, createExtension) + extends ExtensionSetup[ClusterSingleton](ClusterSingleton, createExtension) diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala index 4ace983941..1a0a7b4965 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterImpl.scala @@ -25,106 +25,110 @@ private[akka] object AdapterClusterImpl { private case object Up extends SeenState private case class Removed(previousStatus: MemberStatus) extends SeenState - private def subscriptionsBehavior(adaptedCluster: akka.cluster.Cluster) = Behaviors.setup[ClusterStateSubscription] { ctx => - var seenState: SeenState = BeforeUp - var upSubscribers: List[ActorRef[SelfUp]] = Nil - var removedSubscribers: List[ActorRef[SelfRemoved]] = Nil + private def subscriptionsBehavior(adaptedCluster: akka.cluster.Cluster) = Behaviors.setup[ClusterStateSubscription] { + ctx => + var seenState: SeenState = BeforeUp + var upSubscribers: List[ActorRef[SelfUp]] = Nil + var removedSubscribers: List[ActorRef[SelfRemoved]] = Nil - adaptedCluster.subscribe(ctx.self.toUntyped, ClusterEvent.initialStateAsEvents, classOf[MemberEvent]) + adaptedCluster.subscribe(ctx.self.toUntyped, ClusterEvent.initialStateAsEvents, classOf[MemberEvent]) - // important to not eagerly refer to it or we get a cycle here - lazy val cluster = Cluster(ctx.system) - def onSelfMemberEvent(event: MemberEvent): Unit = { - event match { - case ClusterEvent.MemberUp(_) => - seenState = Up - val upMessage = SelfUp(cluster.state) - upSubscribers.foreach(_ ! 
upMessage) - upSubscribers = Nil + // important to not eagerly refer to it or we get a cycle here + lazy val cluster = Cluster(ctx.system) + def onSelfMemberEvent(event: MemberEvent): Unit = { + event match { + case ClusterEvent.MemberUp(_) => + seenState = Up + val upMessage = SelfUp(cluster.state) + upSubscribers.foreach(_ ! upMessage) + upSubscribers = Nil - case ClusterEvent.MemberRemoved(_, previousStatus) => - seenState = Removed(previousStatus) - val removedMessage = SelfRemoved(previousStatus) - removedSubscribers.foreach(_ ! removedMessage) - removedSubscribers = Nil + case ClusterEvent.MemberRemoved(_, previousStatus) => + seenState = Removed(previousStatus) + val removedMessage = SelfRemoved(previousStatus) + removedSubscribers.foreach(_ ! removedMessage) + removedSubscribers = Nil - case _ => // This is fine. + case _ => // This is fine. + } } - } - Behaviors.receive[AnyRef] { (ctx, msg) => + Behaviors + .receive[AnyRef] { (ctx, msg) => + msg match { + case Subscribe(subscriber: ActorRef[SelfUp] @unchecked, clazz) if clazz == classOf[SelfUp] => + seenState match { + case Up => subscriber ! SelfUp(adaptedCluster.state) + case BeforeUp => + ctx.watch(subscriber) + upSubscribers = subscriber :: upSubscribers + case _: Removed => + // self did join, but is now no longer up, we want to avoid subscribing + // to not get a memory leak, but also not signal anything + } + Behaviors.same + + case Subscribe(subscriber: ActorRef[SelfRemoved] @unchecked, clazz) if clazz == classOf[SelfRemoved] => + seenState match { + case BeforeUp | Up => removedSubscribers = subscriber :: removedSubscribers + case Removed(s) => subscriber ! 
SelfRemoved(s) + } + Behaviors.same + + case Subscribe(subscriber, eventClass) => + adaptedCluster.subscribe(subscriber.toUntyped, + initialStateMode = ClusterEvent.initialStateAsEvents, + eventClass) + Behaviors.same + + case Unsubscribe(subscriber) => + adaptedCluster.unsubscribe(subscriber.toUntyped) + Behaviors.same + + case GetCurrentState(sender) => + adaptedCluster.sendCurrentClusterState(sender.toUntyped) + Behaviors.same + + case evt: MemberEvent if evt.member.uniqueAddress == cluster.selfMember.uniqueAddress => + onSelfMemberEvent(evt) + Behaviors.same + + case _: MemberEvent => + Behaviors.same - msg match { - case Subscribe(subscriber: ActorRef[SelfUp] @unchecked, clazz) if clazz == classOf[SelfUp] => - seenState match { - case Up => subscriber ! SelfUp(adaptedCluster.state) - case BeforeUp => - ctx.watch(subscriber) - upSubscribers = subscriber :: upSubscribers - case _: Removed => - // self did join, but is now no longer up, we want to avoid subscribing - // to not get a memory leak, but also not signal anything } - Behaviors.same + } + .receiveSignal { - case Subscribe(subscriber: ActorRef[SelfRemoved] @unchecked, clazz) if clazz == classOf[SelfRemoved] => - seenState match { - case BeforeUp | Up => removedSubscribers = subscriber :: removedSubscribers - case Removed(s) => subscriber ! 
SelfRemoved(s) - } - Behaviors.same + case (_, Terminated(ref)) => + upSubscribers = upSubscribers.filterNot(_ == ref) + removedSubscribers = removedSubscribers.filterNot(_ == ref) + Behaviors.same - case Subscribe(subscriber, eventClass) => - adaptedCluster.subscribe(subscriber.toUntyped, initialStateMode = ClusterEvent.initialStateAsEvents, eventClass) - Behaviors.same - - case Unsubscribe(subscriber) => - adaptedCluster.unsubscribe(subscriber.toUntyped) - Behaviors.same - - case GetCurrentState(sender) => - adaptedCluster.sendCurrentClusterState(sender.toUntyped) - Behaviors.same - - case evt: MemberEvent if evt.member.uniqueAddress == cluster.selfMember.uniqueAddress => - onSelfMemberEvent(evt) - Behaviors.same - - case _: MemberEvent => - Behaviors.same - - } - }.receiveSignal { - - case (_, Terminated(ref)) => - upSubscribers = upSubscribers.filterNot(_ == ref) - removedSubscribers = removedSubscribers.filterNot(_ == ref) - Behaviors.same - - }.narrow[ClusterStateSubscription] + } + .narrow[ClusterStateSubscription] } - private def managerBehavior(adaptedCluster: akka.cluster.Cluster) = Behaviors.receive[ClusterCommand]((_, msg) => - msg match { - case Join(address) => - adaptedCluster.join(address) - Behaviors.same + private def managerBehavior(adaptedCluster: akka.cluster.Cluster) = + Behaviors.receive[ClusterCommand]((_, msg) => + msg match { + case Join(address) => + adaptedCluster.join(address) + Behaviors.same - case Leave(address) => - adaptedCluster.leave(address) - Behaviors.same + case Leave(address) => + adaptedCluster.leave(address) + Behaviors.same - case Down(address) => - adaptedCluster.down(address) - Behaviors.same + case Down(address) => + adaptedCluster.down(address) + Behaviors.same - case JoinSeedNodes(addresses) => - adaptedCluster.joinSeedNodes(addresses) - Behaviors.same + case JoinSeedNodes(addresses) => + adaptedCluster.joinSeedNodes(addresses) + Behaviors.same - } - - ) + }) } @@ -144,11 +148,9 @@ private[akka] final class 
AdapterClusterImpl(system: ActorSystem[_]) extends Clu // must not be lazy as it also updates the cached selfMember override val subscriptions: ActorRef[ClusterStateSubscription] = - system.internalSystemActorOf( - subscriptionsBehavior(untypedCluster), "clusterStateSubscriptions", Props.empty) + system.internalSystemActorOf(subscriptionsBehavior(untypedCluster), "clusterStateSubscriptions", Props.empty) override lazy val manager: ActorRef[ClusterCommand] = - system.internalSystemActorOf( - managerBehavior(untypedCluster), "clusterCommandManager", Props.empty) + system.internalSystemActorOf(managerBehavior(untypedCluster), "clusterCommandManager", Props.empty) } diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala index 3d372542b7..cd85c38874 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AdaptedClusterSingletonImpl.scala @@ -23,7 +23,8 @@ import akka.cluster.typed */ @InternalApi private[akka] final class AdaptedClusterSingletonImpl(system: ActorSystem[_]) extends ClusterSingleton { - require(system.isInstanceOf[ActorSystemAdapter[_]], "only adapted actor systems can be used for the typed cluster singleton") + require(system.isInstanceOf[ActorSystemAdapter[_]], + "only adapted actor systems can be used for the typed cluster singleton") import ClusterSingletonImpl._ import akka.actor.typed.scaladsl.adapter._ @@ -50,9 +51,10 @@ private[akka] final class AdaptedClusterSingletonImpl(system: ActorSystem[_]) ex // start singleton on this node val untypedProps = PropsAdapter(poisonPillInterceptor(singleton.behavior), singleton.props) try { - untypedSystem.systemActorOf( - OldSingletonManager.props(untypedProps, singleton.stopMessage.getOrElse(PoisonPill), 
settings.toManagerSettings(singleton.name)), - managerName) + untypedSystem.systemActorOf(OldSingletonManager.props(untypedProps, + singleton.stopMessage.getOrElse(PoisonPill), + settings.toManagerSettings(singleton.name)), + managerName) } catch { case ex: InvalidActorNameException if ex.getMessage.endsWith("is not unique!") => // This is fine. We just wanted to make sure it is running and it already is @@ -68,9 +70,9 @@ private[akka] final class AdaptedClusterSingletonImpl(system: ActorSystem[_]) ex println("Creating for " + singletonNameAndDc) val (singletonName, _) = singletonNameAndDc val proxyName = s"singletonProxy$singletonName-${settings.dataCenter.getOrElse("no-dc")}" - untypedSystem.systemActorOf( - ClusterSingletonProxy.props(s"/system/${managerNameFor(singletonName)}", settings.toProxySettings(singletonName)), - proxyName) + untypedSystem.systemActorOf(ClusterSingletonProxy.props(s"/system/${managerNameFor(singletonName)}", + settings.toProxySettings(singletonName)), + proxyName) } } proxies.computeIfAbsent((name, settings.dataCenter), proxyCreator).asInstanceOf[ActorRef[T]] diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializer.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializer.scala index 8ecaf47530..5cef82a8b2 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializer.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializer.scala @@ -1,7 +1,6 @@ /** * Copyright (C) 2009-${YEAR} Lightbend Inc. 
*/ - package akka.cluster.typed.internal import java.io.NotSerializableException @@ -19,7 +18,8 @@ import akka.cluster.typed.internal.receptionist.ClusterReceptionist.Entry */ @InternalApi private[akka] final class AkkaClusterTypedSerializer(override val system: ExtendedActorSystem) - extends SerializerWithStringManifest with BaseSerializer { + extends SerializerWithStringManifest + with BaseSerializer { // Serializers are initialized early on. `toTyped` might then try to initialize the untyped ActorSystemAdapter extension. private lazy val resolver = ActorRefResolver(system.toTyped) @@ -45,7 +45,8 @@ private[akka] final class AkkaClusterTypedSerializer(override val system: Extend } private def receptionistEntryToBinary(e: Entry): Array[Byte] = - ClusterMessages.ReceptionistEntry.newBuilder() + ClusterMessages.ReceptionistEntry + .newBuilder() .setActorRef(resolver.toSerializationFormat(e.ref)) .setSystemUid(e.systemUid) .build() @@ -53,9 +54,6 @@ private[akka] final class AkkaClusterTypedSerializer(override val system: Extend private def receptionistEntryFromBinary(bytes: Array[Byte]): Entry = { val re = ClusterMessages.ReceptionistEntry.parseFrom(bytes) - Entry( - resolver.resolveActorRef(re.getActorRef), - re.getSystemUid - ) + Entry(resolver.resolveActorRef(re.getActorRef), re.getSystemUid) } } diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala index 1b4bbe08af..3d0605d0e9 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionist.scala @@ -47,11 +47,11 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { private sealed trait InternalCommand extends Command private final case class RegisteredActorTerminated[T](key: 
ServiceKey[T], ref: ActorRef[T]) extends InternalCommand - private final case class SubscriberTerminated[T](key: ServiceKey[T], ref: ActorRef[ReceptionistMessages.Listing[T]]) extends InternalCommand + private final case class SubscriberTerminated[T](key: ServiceKey[T], ref: ActorRef[ReceptionistMessages.Listing[T]]) + extends InternalCommand private final case class NodeRemoved(addresses: UniqueAddress) extends InternalCommand - private final case class ChangeFromReplicator( - key: DDataKey, - value: ORMultiMap[ServiceKey[_], Entry]) extends InternalCommand + private final case class ChangeFromReplicator(key: DDataKey, value: ORMultiMap[ServiceKey[_], Entry]) + extends InternalCommand private case object RemoveTick extends InternalCommand private case object PruneTombstonesTick extends InternalCommand @@ -75,7 +75,6 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { Behaviors.setup { ctx => ctx.setLoggerClass(classOf[ClusterReceptionist]) Behaviors.withTimers { timers => - val setup = new Setup(ctx) val registry = ShardedServiceRegistry(setup.settings.distributedKeyCount) @@ -83,19 +82,19 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { val replicatorMessageAdapter: ActorRef[Replicator.ReplicatorMessage] = ctx.messageAdapter[Replicator.ReplicatorMessage] { case changed: Replicator.Changed[_] @unchecked => - ChangeFromReplicator( - changed.key.asInstanceOf[DDataKey], - changed.dataValue.asInstanceOf[ORMultiMap[ServiceKey[_], Entry]]) + ChangeFromReplicator(changed.key.asInstanceOf[DDataKey], + changed.dataValue.asInstanceOf[ORMultiMap[ServiceKey[_], Entry]]) } registry.allDdataKeys.foreach(key => - setup.replicator ! Replicator.Subscribe(key, replicatorMessageAdapter.toUntyped) - ) + setup.replicator ! 
Replicator.Subscribe(key, replicatorMessageAdapter.toUntyped)) // remove entries when members are removed val clusterEventMessageAdapter: ActorRef[MemberRemoved] = ctx.messageAdapter[MemberRemoved] { case MemberRemoved(member, _) => NodeRemoved(member.uniqueAddress) } - setup.cluster.subscribe(clusterEventMessageAdapter.toUntyped, ClusterEvent.InitialStateAsEvents, classOf[MemberRemoved]) + setup.cluster.subscribe(clusterEventMessageAdapter.toUntyped, + ClusterEvent.InitialStateAsEvents, + classOf[MemberRemoved]) // also periodic cleanup in case removal from ORMultiMap is skipped due to concurrent update, // which is possible for OR CRDTs - done with an adapter to leverage the existing NodesRemoved message @@ -105,11 +104,7 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { // around isn't very costly so don't prune often timers.startPeriodicTimer("prune-tombstones", PruneTombstonesTick, setup.keepTombstonesFor / 24) - behavior( - setup, - registry, - TypedMultiMap.empty[AbstractServiceKey, SubscriptionsKV] - ) + behavior(setup, registry, TypedMultiMap.empty[AbstractServiceKey, SubscriptionsKV]) } } @@ -117,17 +112,12 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { * @param registry The last seen state from the replicator - only updated when we get an update from th replicator * @param subscriptions Locally subscriptions, not replicated */ - def behavior( - setup: Setup, - registry: ShardedServiceRegistry, - subscriptions: SubscriptionRegistry): Behavior[Command] = + def behavior(setup: Setup, registry: ShardedServiceRegistry, subscriptions: SubscriptionRegistry): Behavior[Command] = Behaviors.setup { ctx => import setup._ // Helper to create new behavior - def next( - newState: ShardedServiceRegistry = registry, - newSubscriptions: SubscriptionRegistry = subscriptions) = + def next(newState: ShardedServiceRegistry = registry, newSubscriptions: SubscriptionRegistry = subscriptions) = 
behavior(setup, newState, newSubscriptions) /* @@ -137,12 +127,11 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { def watchWith(ctx: ActorContext[Command], target: ActorRef[_], msg: InternalCommand): Unit = ctx.spawnAnonymous[Nothing](Behaviors.setup[Nothing] { innerCtx => innerCtx.watch(target) - Behaviors.receive[Nothing]((_, _) => Behaviors.same) - .receiveSignal { - case (_, Terminated(`target`)) => - ctx.self ! msg - Behaviors.stopped - } + Behaviors.receive[Nothing]((_, _) => Behaviors.same).receiveSignal { + case (_, Terminated(`target`)) => + ctx.self ! msg + Behaviors.stopped + } }) def notifySubscribersFor(key: AbstractServiceKey, state: ServiceRegistry): Unit = { @@ -172,12 +161,13 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { if (removals.nonEmpty) { if (ctx.log.isDebugEnabled) - ctx.log.debug( - "Node(s) [{}] removed, updating registry removing: [{}]", - addresses.mkString(","), - removals.map { - case (key, entries) => key.asServiceKey.id -> entries.mkString("[", ", ", "]") - }.mkString(",")) + ctx.log.debug("Node(s) [{}] removed, updating registry removing: [{}]", + addresses.mkString(","), + removals + .map { + case (key, entries) => key.asServiceKey.id -> entries.mkString("[", ", ", "]") + } + .mkString(",")) // shard changes over the ddata keys they belong to val removalsPerDdataKey = registry.entriesPerDdataKey(removals) @@ -246,14 +236,12 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { val newRegistry = registry.withServiceRegistry(ddataKey, newState) if (changedKeys.nonEmpty) { if (ctx.log.isDebugEnabled) { - ctx.log.debug( - "Change from replicator: [{}], changes: [{}], tombstones [{}]", - newState.entries.entries, - changedKeys.map(key => - key.asServiceKey.id -> newState.entriesFor(key).mkString("[", ", ", "]") - ).mkString(", "), - registry.tombstones.mkString(", ") - ) + ctx.log.debug("Change from replicator: [{}], changes: [{}], 
tombstones [{}]", + newState.entries.entries, + changedKeys + .map(key => key.asServiceKey.id -> newState.entriesFor(key).mkString("[", ", ", "]")) + .mkString(", "), + registry.tombstones.mkString(", ")) } changedKeys.foreach { changedKey => notifySubscribersFor(changedKey, newState) @@ -288,7 +276,7 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider { if (cluster.state.leader.contains(cluster.selfAddress)) { val allAddressesInState: Set[UniqueAddress] = registry.allUniqueAddressesInState(setup.selfUniqueAddress) val clusterAddresses = cluster.state.members.map(_.uniqueAddress) - val notInCluster = allAddressesInState diff clusterAddresses + val notInCluster = allAddressesInState.diff(clusterAddresses) if (notInCluster.isEmpty) Behavior.same else { diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistConfigCompatChecker.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistConfigCompatChecker.scala index c232d6578c..f9b7b317e6 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistConfigCompatChecker.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistConfigCompatChecker.scala @@ -21,4 +21,3 @@ final class ClusterReceptionistConfigCompatChecker extends JoinConfigCompatCheck override def check(toCheck: Config, actualConfig: Config): ConfigValidation = JoinConfigCompatChecker.fullMatch(requiredKeys, toCheck, actualConfig) } - diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala index 7cc5995039..9a30f89cce 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala +++ 
b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSettings.scala @@ -33,11 +33,9 @@ private[akka] object ClusterReceptionistSettings { case _ => Replicator.WriteTo(config.getInt(key), writeTimeout) } } - ClusterReceptionistSettings( - writeConsistency, - pruningInterval = config.getDuration("pruning-interval", MILLISECONDS).millis, - config.getInt("distributed-key-count") - ) + ClusterReceptionistSettings(writeConsistency, + pruningInterval = config.getDuration("pruning-interval", MILLISECONDS).millis, + config.getInt("distributed-key-count")) } } @@ -45,8 +43,6 @@ private[akka] object ClusterReceptionistSettings { * Internal API */ @InternalApi -private[akka] case class ClusterReceptionistSettings( - writeConsistency: WriteConsistency, - pruningInterval: FiniteDuration, - distributedKeyCount: Int) - +private[akka] case class ClusterReceptionistSettings(writeConsistency: WriteConsistency, + pruningInterval: FiniteDuration, + distributedKeyCount: Int) diff --git a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala index 48d07b57a2..e53d67fae8 100644 --- a/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala +++ b/akka-cluster-typed/src/main/scala/akka/cluster/typed/internal/receptionist/Registry.scala @@ -37,9 +37,8 @@ import scala.concurrent.duration.Deadline * the service key * INTERNAL API */ -@InternalApi private[akka] final case class ShardedServiceRegistry( - serviceRegistries: Map[DDataKey, ServiceRegistry], - tombstones: Map[ActorRef[_], Deadline]) { +@InternalApi private[akka] final case class ShardedServiceRegistry(serviceRegistries: Map[DDataKey, ServiceRegistry], + tombstones: Map[ActorRef[_], Deadline]) { private val keys = serviceRegistries.keySet.toArray @@ -75,7 +74,8 @@ import scala.concurrent.duration.Deadline 
ServiceRegistry.collectChangedKeys(previousRegistry, newRegistry) } - def entriesPerDdataKey(entries: Map[AbstractServiceKey, Set[Entry]]): Map[DDataKey, Map[AbstractServiceKey, Set[Entry]]] = + def entriesPerDdataKey( + entries: Map[AbstractServiceKey, Set[Entry]]): Map[DDataKey, Map[AbstractServiceKey, Set[Entry]]] = entries.foldLeft(Map.empty[DDataKey, Map[AbstractServiceKey, Set[Entry]]]) { case (acc, (key, entries)) => val ddataKey = ddataKeyFor(key.asServiceKey) diff --git a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiDcClusterSingletonSpec.scala b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiDcClusterSingletonSpec.scala index eb7c068a26..b34df5d56a 100644 --- a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiDcClusterSingletonSpec.scala +++ b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiDcClusterSingletonSpec.scala @@ -18,20 +18,15 @@ object MultiDcClusterSingletonSpecConfig extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig( - ConfigFactory.parseString( - """ + commonConfig(ConfigFactory.parseString(""" akka.loglevel = DEBUG - """).withFallback( - MultiNodeClusterSpec.clusterConfig)) + """).withFallback(MultiNodeClusterSpec.clusterConfig)) - nodeConfig(first)(ConfigFactory.parseString( - """ + nodeConfig(first)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc1" """)) - nodeConfig(second, third)(ConfigFactory.parseString( - """ + nodeConfig(second, third)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc2" """)) @@ -42,8 +37,9 @@ class MultiDcClusterSingletonMultiJvmNode1 extends MultiDcClusterSingletonSpec class MultiDcClusterSingletonMultiJvmNode2 extends MultiDcClusterSingletonSpec class MultiDcClusterSingletonMultiJvmNode3 extends MultiDcClusterSingletonSpec -abstract class MultiDcClusterSingletonSpec extends MultiNodeSpec(MultiDcClusterSingletonSpecConfig) - with 
MultiNodeTypedClusterSpec { +abstract class MultiDcClusterSingletonSpec + extends MultiNodeSpec(MultiDcClusterSingletonSpecConfig) + with MultiNodeTypedClusterSpec { import MultiDcClusterActors._ import MultiDcClusterSingletonSpecConfig._ @@ -68,9 +64,7 @@ abstract class MultiDcClusterSingletonSpec extends MultiNodeSpec(MultiDcClusterS "be able to create and ping singleton in same DC" in { runOn(first) { val singleton = ClusterSingleton(typedSystem) - val pinger = singleton.init( - SingletonActor(multiDcPinger, "ping").withStopMessage(NoMore) - ) + val pinger = singleton.init(SingletonActor(multiDcPinger, "ping").withStopMessage(NoMore)) val probe = TestProbe[Pong] pinger ! Ping(probe.ref) probe.expectMessage(Pong("dc1")) @@ -85,10 +79,9 @@ abstract class MultiDcClusterSingletonSpec extends MultiNodeSpec(MultiDcClusterS runOn(second) { val singleton = ClusterSingleton(system.toTyped) val pinger = singleton.init( - SingletonActor(multiDcPinger, "ping").withStopMessage(NoMore).withSettings( - ClusterSingletonSettings(typedSystem).withDataCenter("dc1") - ) - ) + SingletonActor(multiDcPinger, "ping") + .withStopMessage(NoMore) + .withSettings(ClusterSingletonSettings(typedSystem).withDataCenter("dc1"))) val probe = TestProbe[Pong] pinger ! Ping(probe.ref) probe.expectMessage(Pong("dc1")) @@ -100,9 +93,7 @@ abstract class MultiDcClusterSingletonSpec extends MultiNodeSpec(MultiDcClusterS "be able to target singleton with the same name in own dc " in { runOn(second, third) { val singleton = ClusterSingleton(typedSystem) - val pinger = singleton.init( - SingletonActor(multiDcPinger, "ping").withStopMessage(NoMore) - ) + val pinger = singleton.init(SingletonActor(multiDcPinger, "ping").withStopMessage(NoMore)) val probe = TestProbe[Pong] pinger ! 
Ping(probe.ref) probe.expectMessage(Pong("dc2")) @@ -112,4 +103,3 @@ abstract class MultiDcClusterSingletonSpec extends MultiNodeSpec(MultiDcClusterS } } } - diff --git a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiNodeTypedClusterSpec.scala b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiNodeTypedClusterSpec.scala index 8192151a69..816547ee60 100644 --- a/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiNodeTypedClusterSpec.scala +++ b/akka-cluster-typed/src/multi-jvm/scala/akka/cluster/typed/MultiNodeTypedClusterSpec.scala @@ -18,7 +18,12 @@ import akka.remote.testconductor.RoleName import scala.concurrent.duration._ import scala.language.implicitConversions -trait MultiNodeTypedClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoroner with FlightRecordingSupport with Matchers { +trait MultiNodeTypedClusterSpec + extends Suite + with STMultiNodeSpec + with WatchedByCoroner + with FlightRecordingSupport + with Matchers { self: MultiNodeSpec => override def initialParticipants: Int = roles.size @@ -63,7 +68,7 @@ trait MultiNodeTypedClusterSpec extends Suite with STMultiNodeSpec with WatchedB } enterBarrier(first.name + "-joined") - rest foreach { node => + rest.foreach { node => runOn(node) { cluster.manager ! 
Join(address(first)) awaitAssert(cluster.state.members.exists { m => diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSpec.scala index 6bc62410d0..0b4da73d74 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/ddata/typed/scaladsl/ReplicatorSpec.scala @@ -27,8 +27,7 @@ import scala.concurrent.duration._ object ReplicatorSpec { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.actor.provider = "cluster" akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 @@ -49,7 +48,6 @@ object ReplicatorSpec { def client(replicator: ActorRef[Replicator.Command])(implicit node: SelfUniqueAddress): Behavior[ClientCommand] = Behaviors.setup[ClientCommand] { ctx => - val updateResponseAdapter: ActorRef[Replicator.UpdateResponse[GCounter]] = ctx.messageAdapter(InternalUpdateResponse.apply) @@ -76,21 +74,22 @@ object ReplicatorSpec { replicator ! Replicator.Get(Key, Replicator.ReadLocal, getResponseAdapter, Some(replyTo)) Behaviors.same - case internal: InternalMsg => internal match { - case InternalUpdateResponse(_) => Behaviors.same // ok + case internal: InternalMsg => + internal match { + case InternalUpdateResponse(_) => Behaviors.same // ok - case InternalGetResponse(rsp @ Replicator.GetSuccess(Key, Some(replyTo: ActorRef[Int] @unchecked))) => - val value = rsp.get(Key).value.toInt - replyTo ! value - Behaviors.same + case InternalGetResponse(rsp @ Replicator.GetSuccess(Key, Some(replyTo: ActorRef[Int] @unchecked))) => + val value = rsp.get(Key).value.toInt + replyTo ! 
value + Behaviors.same - case InternalGetResponse(rsp) => - Behaviors.unhandled // not dealing with failures + case InternalGetResponse(rsp) => + Behaviors.unhandled // not dealing with failures - case InternalChanged(chg @ Replicator.Changed(Key)) => - val value = chg.get(Key).value.intValue - behavior(value) - } + case InternalChanged(chg @ Replicator.Changed(Key)) => + val value = chg.get(Key).value.intValue + behavior(value) + } } } } @@ -175,4 +174,3 @@ class ReplicatorSpec extends ScalaTestWithActorTestKit(ReplicatorSpec.config) wi } } } - diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala index 48ba6e4234..7ab26cde54 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ActorSystemSpec.scala @@ -16,12 +16,10 @@ import scala.concurrent.{ Future, Promise } import scala.concurrent.duration._ import scala.util.control.NonFatal -class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll - with ScalaFutures with Eventually { +class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll with ScalaFutures with Eventually { override implicit val patienceConfig = PatienceConfig(1.second) - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.actor.provider = "akka.remote.RemoteActorRefProvider" """).withFallback(ConfigFactory.load()) def system[T](behavior: Behavior[T], name: String) = ActorSystem(behavior, name, config) @@ -29,7 +27,8 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll case class Probe(msg: String, replyTo: ActorRef[String]) - def withSystem[T](name: String, behavior: Behavior[T], doTerminate: Boolean = true)(block: ActorSystem[T] => Unit): Terminated = { + def withSystem[T](name: String, behavior: Behavior[T], doTerminate: Boolean = true)( + 
block: ActorSystem[T] => Unit): Terminated = { val sys = system(behavior, s"$suite-$name") try { block(sys) @@ -43,16 +42,16 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll "An ActorSystem" must { "start the guardian actor and terminate when it terminates" in { - val t = withSystem( - "a", - Behaviors.receive[Probe] { case (_, p) => p.replyTo ! p.msg; Behaviors.stopped }, doTerminate = false) { sys => - val inbox = TestInbox[String]("a") - sys ! Probe("hello", inbox.ref) - eventually { - inbox.hasMessages should ===(true) - } - inbox.receiveAll() should ===("hello" :: Nil) + val t = withSystem("a", + Behaviors.receive[Probe] { case (_, p) => p.replyTo ! p.msg; Behaviors.stopped }, + doTerminate = false) { sys => + val inbox = TestInbox[String]("a") + sys ! Probe("hello", inbox.ref) + eventually { + inbox.hasMessages should ===(true) } + inbox.receiveAll() should ===("hello" :: Nil) + } val p = t.ref.path p.name should ===("/") p.address.system should ===(suite + "-a") @@ -68,15 +67,16 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll "terminate the guardian actor" in { val inbox = TestInbox[String]("terminate") - val sys = system( - Behaviors.receive[Probe] { - case (_, _) => Behaviors.unhandled - } receiveSignal { - case (_, PostStop) => - inbox.ref ! "done" - Behaviors.same - }, - "terminate") + val sys = system(Behaviors + .receive[Probe] { + case (_, _) => Behaviors.unhandled + } + .receiveSignal { + case (_, PostStop) => + inbox.ref ! 
"done" + Behaviors.same + }, + "terminate") sys.terminate().futureValue inbox.receiveAll() should ===("done" :: Nil) } @@ -102,9 +102,11 @@ class ActorSystemSpec extends WordSpec with Matchers with BeforeAndAfterAll "have a working thread factory" in { withSystem("thread", Behaviors.empty[String]) { sys => val p = Promise[Int] - sys.threadFactory.newThread(new Runnable { - def run(): Unit = p.success(42) - }).start() + sys.threadFactory + .newThread(new Runnable { + def run(): Unit = p.success(42) + }) + .start() p.future.futureValue should ===(42) } } diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterApiSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterApiSpec.scala index 1084570b3a..abe5e56f08 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterApiSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterApiSpec.scala @@ -15,8 +15,8 @@ import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit import org.scalatest.WordSpecLike object ClusterApiSpec { - val config = ConfigFactory.parseString( - """ + val config = + ConfigFactory.parseString(""" akka.actor.provider = cluster akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala index 2da4cf31db..2a41a17f3d 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonApiSpec.scala @@ -22,8 +22,7 @@ import org.scalatest.WordSpecLike object ClusterSingletonApiSpec { - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka.actor { provider = cluster serialize-messages = off @@ -51,7 +50,6 @@ object ClusterSingletonApiSpec { case object Perish extends PingProtocol val pingPong = 
Behaviors.receive[PingProtocol] { (_, msg) => - msg match { case Ping(respondTo) => respondTo ! Pong @@ -95,10 +93,8 @@ class ClusterSingletonApiSpec extends ScalaTestWithActorTestKit(ClusterSingleton val clusterNode1 = Cluster(system) val untypedSystem1 = system.toUntyped - val system2 = akka.actor.ActorSystem( - system.name, - ConfigFactory.parseString( - """ + val system2 = akka.actor.ActorSystem(system.name, + ConfigFactory.parseString(""" akka.cluster.roles = ["singleton"] """).withFallback(system.settings.config)) val adaptedSystem2 = system2.toTyped @@ -127,8 +123,10 @@ class ClusterSingletonApiSpec extends ScalaTestWithActorTestKit(ClusterSingleton val node2ref = cs2.init(SingletonActor(pingPong, "ping-pong").withStopMessage(Perish).withSettings(settings)) // subsequent spawning returns the same refs - cs1.init(SingletonActor(pingPong, "ping-pong").withStopMessage(Perish).withSettings(settings)) should ===(node1ref) - cs2.init(SingletonActor(pingPong, "ping-pong").withStopMessage(Perish).withSettings(settings)) should ===(node2ref) + cs1.init(SingletonActor(pingPong, "ping-pong").withStopMessage(Perish).withSettings(settings)) should ===( + node1ref) + cs2.init(SingletonActor(pingPong, "ping-pong").withStopMessage(Perish).withSettings(settings)) should ===( + node2ref) val node1PongProbe = TestProbe[Pong.type]()(system) val node2PongProbe = TestProbe[Pong.type]()(adaptedSystem2) diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPersistenceSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPersistenceSpec.scala index 320fa43a7e..3464fbedf9 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPersistenceSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPersistenceSpec.scala @@ -13,8 +13,7 @@ import com.typesafe.config.ConfigFactory import org.scalatest.WordSpecLike object ClusterSingletonPersistenceSpec { - val config = 
ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.actor.provider = cluster akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 @@ -36,21 +35,24 @@ object ClusterSingletonPersistenceSpec { private final case object StopPlz extends Command val persistentActor: Behavior[Command] = - EventSourcedBehavior[Command, String, String]( - persistenceId = PersistenceId("TheSingleton"), - emptyState = "", - commandHandler = (state, cmd) => cmd match { - case Add(s) => Effect.persist(s) - case Get(replyTo) => - replyTo ! state - Effect.none - case StopPlz => Effect.stop() - }, - eventHandler = (state, evt) => if (state.isEmpty) evt else state + "|" + evt) + EventSourcedBehavior[Command, String, String](persistenceId = PersistenceId("TheSingleton"), + emptyState = "", + commandHandler = (state, cmd) => + cmd match { + case Add(s) => Effect.persist(s) + case Get(replyTo) => + replyTo ! state + Effect.none + case StopPlz => Effect.stop() + }, + eventHandler = + (state, evt) => if (state.isEmpty) evt else state + "|" + evt) } -class ClusterSingletonPersistenceSpec extends ScalaTestWithActorTestKit(ClusterSingletonPersistenceSpec.config) with WordSpecLike { +class ClusterSingletonPersistenceSpec + extends ScalaTestWithActorTestKit(ClusterSingletonPersistenceSpec.config) + with WordSpecLike { import ClusterSingletonPersistenceSpec._ import akka.actor.typed.scaladsl.adapter._ diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPoisonPillSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPoisonPillSpec.scala index 0d348f919a..5ac6e0b68d 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPoisonPillSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/ClusterSingletonPoisonPillSpec.scala @@ -25,7 +25,9 @@ object ClusterSingletonPoisonPillSpec { } } -class ClusterSingletonPoisonPillSpec extends 
ScalaTestWithActorTestKit(ClusterSingletonApiSpec.config) with WordSpecLike { +class ClusterSingletonPoisonPillSpec + extends ScalaTestWithActorTestKit(ClusterSingletonApiSpec.config) + with WordSpecLike { implicit val testSettings = TestKitSettings(system) val clusterNode1 = Cluster(system) @@ -35,7 +37,8 @@ class ClusterSingletonPoisonPillSpec extends ScalaTestWithActorTestKit(ClusterSi "support using PoisonPill to stop" in { val probe = TestProbe[ActorRef[Any]] - val singleton = ClusterSingleton(system).init(SingletonActor(ClusterSingletonPoisonPillSpec.sneakyBehavior, "sneaky")) + val singleton = + ClusterSingleton(system).init(SingletonActor(ClusterSingletonPoisonPillSpec.sneakyBehavior, "sneaky")) singleton ! GetSelf(probe.ref) val singletonRef = probe.receiveMessage() singletonRef ! PoisonPill diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteContextAskSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteContextAskSpec.scala index 0bb7ed55cc..d33bb36375 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteContextAskSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteContextAskSpec.scala @@ -43,8 +43,7 @@ class RemoteContextAskSpecSerializer(system: ExtendedActorSystem) extends Serial } object RemoteContextAskSpec { - def config = ConfigFactory.parseString( - s""" + def config = ConfigFactory.parseString(s""" akka { loglevel = debug actor { diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala index fb4028c9e3..48e6cdaae0 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteDeployNotAllowedSpec.scala @@ -15,8 +15,7 @@ import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit import org.scalatest.WordSpecLike object 
RemoteDeployNotAllowedSpec { - def config = ConfigFactory.parseString( - s""" + def config = ConfigFactory.parseString(s""" akka { loglevel = warning actor { @@ -35,8 +34,7 @@ object RemoteDeployNotAllowedSpec { } """) - def configWithRemoteDeployment(otherSystemPort: Int) = ConfigFactory.parseString( - s""" + def configWithRemoteDeployment(otherSystemPort: Int) = ConfigFactory.parseString(s""" akka.actor.deployment { "/*" { remote = "akka://sampleActorSystem@127.0.0.1:$otherSystemPort" @@ -45,7 +43,9 @@ object RemoteDeployNotAllowedSpec { """).withFallback(config) } -class RemoteDeployNotAllowedSpec extends ScalaTestWithActorTestKit(RemoteDeployNotAllowedSpec.config) with WordSpecLike { +class RemoteDeployNotAllowedSpec + extends ScalaTestWithActorTestKit(RemoteDeployNotAllowedSpec.config) + with WordSpecLike { "Typed cluster" must { @@ -59,14 +59,13 @@ class RemoteDeployNotAllowedSpec extends ScalaTestWithActorTestKit(RemoteDeployN case object SpawnAnonymous extends GuardianProtocol val guardianBehavior = Behaviors.receive[GuardianProtocol] { (ctx, msg) => - msg match { case SpawnChild(name) => // this should throw try { - ctx.spawn( - Behaviors.setup[AnyRef] { ctx => Behaviors.empty }, - name) + ctx.spawn(Behaviors.setup[AnyRef] { ctx => + Behaviors.empty + }, name) } catch { case ex: Exception => probe.ref ! ex } @@ -75,7 +74,9 @@ class RemoteDeployNotAllowedSpec extends ScalaTestWithActorTestKit(RemoteDeployN case SpawnAnonymous => // this should throw try { - ctx.spawnAnonymous(Behaviors.setup[AnyRef] { ctx => Behaviors.empty }) + ctx.spawnAnonymous(Behaviors.setup[AnyRef] { ctx => + Behaviors.empty + }) } catch { case ex: Exception => probe.ref ! 
ex } @@ -84,8 +85,10 @@ class RemoteDeployNotAllowedSpec extends ScalaTestWithActorTestKit(RemoteDeployN } - val system2 = ActorSystem(guardianBehavior, system.name, - RemoteDeployNotAllowedSpec.configWithRemoteDeployment(node1.selfMember.address.port.get)) + val system2 = + ActorSystem(guardianBehavior, + system.name, + RemoteDeployNotAllowedSpec.configWithRemoteDeployment(node1.selfMember.address.port.get)) try { val node2 = Cluster(system2) node2.manager ! Join(node1.selfMember.address) diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala index bee93b2791..c4469745bb 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/RemoteMessageSpec.scala @@ -32,8 +32,7 @@ class PingSerializer(system: ExtendedActorSystem) extends SerializerWithStringMa } object RemoteMessageSpec { - def config = ConfigFactory.parseString( - s""" + def config = ConfigFactory.parseString(s""" akka { loglevel = debug actor { diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializerSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializerSpec.scala index beecbd9328..57b7a1c0c1 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializerSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/AkkaClusterTypedSerializerSpec.scala @@ -1,7 +1,6 @@ /** * Copyright (C) 2009-${YEAR} Lightbend Inc. 
*/ - package akka.cluster.typed.internal import akka.actor.ExtendedActorSystem @@ -20,19 +19,17 @@ class AkkaClusterTypedSerializerSpec extends ScalaTestWithActorTestKit with Word "AkkaClusterTypedSerializer" must { - Seq( - "ReceptionistEntry" -> ClusterReceptionist.Entry(ref, 666L) - ).foreach { - case (scenario, item) => - s"resolve serializer for $scenario" in { - val serializer = SerializationExtension(untypedSystem) - serializer.serializerFor(item.getClass).getClass should be(classOf[AkkaClusterTypedSerializer]) - } + Seq("ReceptionistEntry" -> ClusterReceptionist.Entry(ref, 666L)).foreach { + case (scenario, item) => + s"resolve serializer for $scenario" in { + val serializer = SerializationExtension(untypedSystem) + serializer.serializerFor(item.getClass).getClass should be(classOf[AkkaClusterTypedSerializer]) + } - s"serialize and de-serialize $scenario" in { - verifySerialization(item) - } - } + s"serialize and de-serialize $scenario" in { + verifySerialization(item) + } + } } def verifySerialization(msg: AnyRef): Unit = { diff --git a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala index 88ef3d81eb..92deca471e 100644 --- a/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala +++ b/akka-cluster-typed/src/test/scala/akka/cluster/typed/internal/receptionist/ClusterReceptionistSpec.scala @@ -23,8 +23,7 @@ import scala.concurrent.Await import scala.concurrent.duration._ object ClusterReceptionistSpec { - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka.loglevel = DEBUG # issue #24960 akka.actor { provider = cluster @@ -74,9 +73,10 @@ object ClusterReceptionistSpec { } def toBinary(o: AnyRef): Array[Byte] = o match { - case p: Ping => 
ActorRefResolver(system.toTyped).toSerializationFormat(p.respondTo).getBytes(StandardCharsets.UTF_8) - case Pong => Array.emptyByteArray - case Perish => Array.emptyByteArray + case p: Ping => + ActorRefResolver(system.toTyped).toSerializationFormat(p.respondTo).getBytes(StandardCharsets.UTF_8) + case Pong => Array.emptyByteArray + case Perish => Array.emptyByteArray } def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match { @@ -252,25 +252,28 @@ class ClusterReceptionistSpec extends WordSpec with Matchers { try { val system3 = testKit3.system - system1.log.debug("Starting system3 at same hostname port as system2, uid: [{}]", Cluster(system3).selfMember.uniqueAddress.longUid) + system1.log.debug("Starting system3 at same hostname port as system2, uid: [{}]", + Cluster(system3).selfMember.uniqueAddress.longUid) val clusterNode3 = Cluster(system3) clusterNode3.manager ! Join(clusterNode1.selfMember.address) val regProbe3 = TestProbe[Any]()(system3) // and registers the same service key val service3 = testKit3.spawn(pingPongBehavior, "instance") - system3.log.debug("Spawning/registering ping service in new incarnation {}#{}", service3.path, service3.path.uid) + system3.log.debug("Spawning/registering ping service in new incarnation {}#{}", + service3.path, + service3.path.uid) system3.receptionist ! 
Register(PingKey, service3, regProbe3.ref) regProbe3.expectMessage(Registered(PingKey, service3)) system3.log.debug("Registered actor [{}#{}] for system3", service3.path, service3.path.uid) // make sure it joined fine and node1 has upped it regProbe1.awaitAssert { - clusterNode1.state.members.exists(m => - m.uniqueAddress == clusterNode3.selfMember.uniqueAddress && + clusterNode1.state.members.exists( + m => + m.uniqueAddress == clusterNode3.selfMember.uniqueAddress && m.status == MemberStatus.Up && - !clusterNode1.state.unreachable(m) - ) + !clusterNode1.state.unreachable(m)) } // we should get either empty message and then updated with the new incarnation actor @@ -297,8 +300,7 @@ class ClusterReceptionistSpec extends WordSpec with Matchers { } "not lose removals on concurrent updates to same key" in { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" # disable delta propagation so we can have repeatable concurrent writes # without delta reaching between nodes already akka.cluster.distributed-data.delta-crdt.enabled=false @@ -331,10 +333,7 @@ class ClusterReceptionistSpec extends WordSpec with Matchers { system1.receptionist ! Register(TheKey, actor1) system1.receptionist ! Subscribe(TheKey, regProbe1.ref) - regProbe1.awaitAssert( - regProbe1.expectMessage(Listing(TheKey, Set(actor1))), - 5.seconds - ) + regProbe1.awaitAssert(regProbe1.expectMessage(Listing(TheKey, Set(actor1))), 5.seconds) system2.receptionist ! 
Subscribe(TheKey, regProbe2.ref) regProbe2.fishForMessage(10.seconds) { diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala index fdc811472b..2ccacfae88 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/BasicClusterExampleSpec.scala @@ -21,8 +21,7 @@ import org.scalatest.time.{ Millis, Seconds, Span } import scala.concurrent.duration._ object BasicClusterExampleSpec { - val configSystem1 = ConfigFactory.parseString( - s""" + val configSystem1 = ConfigFactory.parseString(s""" #config-seeds akka { actor { @@ -44,12 +43,10 @@ akka { #config-seeds """) - val configSystem2 = ConfigFactory.parseString( - s""" + val configSystem2 = ConfigFactory.parseString(s""" akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 - """ - ).withFallback(configSystem1) + """).withFallback(configSystem1) } class BasicClusterConfigSpec extends WordSpec with ScalaFutures with Eventually with Matchers { @@ -83,8 +80,7 @@ class BasicClusterConfigSpec extends WordSpec with ScalaFutures with Eventually } object BasicClusterManualSpec { - val clusterConfig = ConfigFactory.parseString( - s""" + val clusterConfig = ConfigFactory.parseString(s""" #config akka { actor.provider = "cluster" @@ -185,9 +181,15 @@ class BasicClusterManualSpec extends WordSpec with ScalaFutures with Eventually probe1.expectMessageType[MemberUp].member.address shouldEqual cluster3.selfMember.address } eventually { - cluster1.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up, MemberStatus.up, MemberStatus.up) - cluster2.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up, MemberStatus.up, MemberStatus.up) - cluster3.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up, MemberStatus.up, MemberStatus.up) + 
cluster1.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up, + MemberStatus.up, + MemberStatus.up) + cluster2.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up, + MemberStatus.up, + MemberStatus.up) + cluster3.state.members.toList.map(_.status) shouldEqual List(MemberStatus.up, + MemberStatus.up, + MemberStatus.up) } //#cluster-leave-example diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExampleSpec.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExampleSpec.scala index 2c4d172c46..6cad9dfa67 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExampleSpec.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/ReceptionistExampleSpec.scala @@ -45,27 +45,30 @@ object PingPongExample { //#pinger //#pinger-guardian - val guardian: Behavior[Nothing] = Behaviors.setup[Listing] { ctx => - ctx.system.receptionist ! Receptionist.Subscribe(PingServiceKey, ctx.self) - val ps = ctx.spawnAnonymous(pingService) - ctx.watch(ps) - Behaviors.receiveMessagePartial[Listing] { - case PingServiceKey.Listing(listings) if listings.nonEmpty => - listings.foreach(ps => ctx.spawnAnonymous(pinger(ps))) - Behaviors.same - } receiveSignal { - case (_, Terminated(`ps`)) => - println("Ping service has shut down") - Behaviors.stopped + val guardian: Behavior[Nothing] = Behaviors + .setup[Listing] { ctx => + ctx.system.receptionist ! 
Receptionist.Subscribe(PingServiceKey, ctx.self) + val ps = ctx.spawnAnonymous(pingService) + ctx.watch(ps) + Behaviors + .receiveMessagePartial[Listing] { + case PingServiceKey.Listing(listings) if listings.nonEmpty => + listings.foreach(ps => ctx.spawnAnonymous(pinger(ps))) + Behaviors.same + } + .receiveSignal { + case (_, Terminated(`ps`)) => + println("Ping service has shut down") + Behaviors.stopped + } } - }.narrow + .narrow //#pinger-guardian } object ReceptionistExampleSpec { - val clusterConfig = ConfigFactory.parseString( - s""" + val clusterConfig = ConfigFactory.parseString(s""" #config akka { actor { diff --git a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala index b9085809cd..d37b5da8e1 100644 --- a/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala +++ b/akka-cluster-typed/src/test/scala/docs/akka/cluster/typed/SingletonCompileOnlySpec.scala @@ -39,9 +39,7 @@ object SingletonCompileOnlySpec { val singletonManager = ClusterSingleton(system) // Start if needed and provide a proxy to a named singleton val proxy: ActorRef[CounterCommand] = singletonManager.init( - SingletonActor(Behaviors.supervise(counter(0)) - .onFailure[Exception](SupervisorStrategy.restart), "GlobalCounter") - ) + SingletonActor(Behaviors.supervise(counter(0)).onFailure[Exception](SupervisorStrategy.restart), "GlobalCounter")) proxy ! 
Increment //#singleton @@ -53,8 +51,9 @@ object SingletonCompileOnlySpec { //#backoff val proxyBackOff: ActorRef[CounterCommand] = singletonManager.init( - SingletonActor(Behaviors.supervise(counter(0)) - .onFailure[Exception](SupervisorStrategy.restartWithBackoff(1.second, 10.seconds, 0.2)), "GlobalCounter") - ) + SingletonActor(Behaviors + .supervise(counter(0)) + .onFailure[Exception](SupervisorStrategy.restartWithBackoff(1.second, 10.seconds, 0.2)), + "GlobalCounter")) //#backoff } diff --git a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala index 5e04353efb..34490d67e5 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala @@ -36,9 +36,10 @@ final class AutoDowning(system: ActorSystem) extends DowningProvider { override def downingActorProps: Option[Props] = clusterSettings.AutoDownUnreachableAfter match { case d: FiniteDuration => Some(AutoDown.props(d)) - case _ => + case _ => // I don't think this can actually happen - throw new ConfigurationException("AutoDowning downing provider selected but 'akka.cluster.auto-down-unreachable-after' not set") + throw new ConfigurationException( + "AutoDowning downing provider selected but 'akka.cluster.auto-down-unreachable-after' not set") } } @@ -53,7 +54,8 @@ final class AutoDowning(system: ActorSystem) extends DowningProvider { * able to unit test the logic without running cluster. 
*/ private[cluster] class AutoDown(autoDownUnreachableAfter: FiniteDuration) - extends AutoDownBase(autoDownUnreachableAfter) with ActorLogging { + extends AutoDownBase(autoDownUnreachableAfter) + with ActorLogging { val cluster = Cluster(context.system) import cluster.ClusterLogger._ @@ -64,7 +66,8 @@ private[cluster] class AutoDown(autoDownUnreachableAfter: FiniteDuration) // re-subscribe when restart override def preStart(): Unit = { - log.warning("Don't use auto-down feature of Akka Cluster in production. " + + log.warning( + "Don't use auto-down feature of Akka Cluster in production. " + "See 'Auto-downing (DO NOT USE)' section of Akka Cluster documentation.") cluster.subscribe(self, classOf[ClusterDomainEvent]) super.preStart() @@ -77,8 +80,9 @@ private[cluster] class AutoDown(autoDownUnreachableAfter: FiniteDuration) override def down(node: Address): Unit = { require(leader) logInfo("Leader is auto-downing unreachable node [{}]. " + - "Don't use auto-down feature of Akka Cluster in production. " + - "See 'Auto-downing (DO NOT USE)' section of Akka Cluster documentation.", node) + "Don't use auto-down feature of Akka Cluster in production. 
" + + "See 'Auto-downing (DO NOT USE)' section of Akka Cluster documentation.", + node) cluster.down(node) } @@ -109,18 +113,18 @@ private[cluster] abstract class AutoDownBase(autoDownUnreachableAfter: FiniteDur var leader = false override def postStop(): Unit = { - scheduledUnreachable.values foreach { _.cancel } + scheduledUnreachable.values.foreach { _.cancel } } def receive = { case state: CurrentClusterState => leader = state.leader.exists(_ == selfAddress) - state.unreachable foreach unreachableMember + state.unreachable.foreach(unreachableMember) case UnreachableMember(m) => unreachableMember(m) - case ReachableMember(m) => remove(m.uniqueAddress) - case MemberRemoved(m, _) => remove(m.uniqueAddress) + case ReachableMember(m) => remove(m.uniqueAddress) + case MemberRemoved(m, _) => remove(m.uniqueAddress) case LeaderChanged(leaderOption) => leader = leaderOption.exists(_ == selfAddress) @@ -163,7 +167,7 @@ private[cluster] abstract class AutoDownBase(autoDownUnreachableAfter: FiniteDur } def remove(node: UniqueAddress): Unit = { - scheduledUnreachable.get(node) foreach { _.cancel } + scheduledUnreachable.get(node).foreach { _.cancel } scheduledUnreachable -= node pendingUnreachable -= node } diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index d501812dbd..006e7ce093 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -64,6 +64,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { import settings._ private val joinConfigCompatChecker: JoinConfigCompatChecker = JoinConfigCompatChecker.load(system, settings) + /** * The address including a `uid` of this cluster member. 
* The `uid` is needed to be able to distinguish different @@ -72,8 +73,9 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { val selfUniqueAddress: UniqueAddress = system.provider match { case c: ClusterActorRefProvider => UniqueAddress(c.transport.defaultAddress, AddressUidExtension(system).longAddressUid) - case other => throw new ConfigurationException( - s"ActorSystem [${system}] needs to have 'akka.actor.provider' set to 'cluster' in the configuration, currently uses [${other.getClass.getName}]") + case other => + throw new ConfigurationException( + s"ActorSystem [${system}] needs to have 'akka.actor.provider' set to 'cluster' in the configuration, currently uses [${other.getClass.getName}]") } /** @@ -111,9 +113,9 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { val crossDcFailureDetector: FailureDetectorRegistry[Address] = { val createFailureDetector = () => - FailureDetectorLoader.load( - settings.MultiDataCenter.CrossDcFailureDetectorSettings.ImplementationClass, - settings.MultiDataCenter.CrossDcFailureDetectorSettings.config, system) + FailureDetectorLoader.load(settings.MultiDataCenter.CrossDcFailureDetectorSettings.ImplementationClass, + settings.MultiDataCenter.CrossDcFailureDetectorSettings.config, + system) new DefaultFailureDetectorRegistry(createFailureDetector) } @@ -131,22 +133,24 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { */ private[cluster] val scheduler: Scheduler = { if (system.scheduler.maxFrequency < 1.second / SchedulerTickDuration) { - logInfo( - "Using a dedicated scheduler for cluster. Default scheduler can be used if configured " + - "with 'akka.scheduler.tick-duration' [{} ms] <= 'akka.cluster.scheduler.tick-duration' [{} ms].", - (1000 / system.scheduler.maxFrequency).toInt, SchedulerTickDuration.toMillis) + logInfo("Using a dedicated scheduler for cluster. 
Default scheduler can be used if configured " + + "with 'akka.scheduler.tick-duration' [{} ms] <= 'akka.cluster.scheduler.tick-duration' [{} ms].", + (1000 / system.scheduler.maxFrequency).toInt, + SchedulerTickDuration.toMillis) - val cfg = ConfigFactory.parseString( - s"akka.scheduler.tick-duration=${SchedulerTickDuration.toMillis}ms").withFallback( - system.settings.config) + val cfg = ConfigFactory + .parseString(s"akka.scheduler.tick-duration=${SchedulerTickDuration.toMillis}ms") + .withFallback(system.settings.config) val threadFactory = system.threadFactory match { case tf: MonitorableThreadFactory => tf.withName(tf.name + "-cluster-scheduler") case tf => tf } - system.dynamicAccess.createInstanceFor[Scheduler](system.settings.SchedulerClass, immutable.Seq( - classOf[Config] -> cfg, - classOf[LoggingAdapter] -> log, - classOf[ThreadFactory] -> threadFactory)).get + system.dynamicAccess + .createInstanceFor[Scheduler](system.settings.SchedulerClass, + immutable.Seq(classOf[Config] -> cfg, + classOf[LoggingAdapter] -> log, + classOf[ThreadFactory] -> threadFactory)) + .get } else { // delegate to system.scheduler, but don't close over system val systemScheduler = system.scheduler @@ -155,13 +159,12 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { override def maxFrequency: Double = systemScheduler.maxFrequency - override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, - runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = + override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)( + implicit executor: ExecutionContext): Cancellable = systemScheduler.schedule(initialDelay, interval, runnable) - override def scheduleOnce( - delay: FiniteDuration, - runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = + override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)( + implicit executor: ExecutionContext): Cancellable = 
systemScheduler.scheduleOnce(delay, runnable) } } @@ -169,8 +172,9 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { // create supervisor for daemons under path "/system/cluster" private val clusterDaemons: ActorRef = { - system.systemActorOf(Props(classOf[ClusterDaemon], joinConfigCompatChecker). - withDispatcher(UseDispatcher).withDeploy(Deploy.local), name = "cluster") + system.systemActorOf( + Props(classOf[ClusterDaemon], joinConfigCompatChecker).withDispatcher(UseDispatcher).withDeploy(Deploy.local), + name = "cluster") } /** @@ -251,9 +255,8 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { */ @varargs def subscribe(subscriber: ActorRef, initialStateMode: SubscriptionInitialStateMode, to: Class[_]*): Unit = { require(to.length > 0, "at least one `ClusterDomainEvent` class is required") - require( - to.forall(classOf[ClusterDomainEvent].isAssignableFrom), - s"subscribe to `akka.cluster.ClusterEvent.ClusterDomainEvent` or subclasses, was [${to.map(_.getName).mkString(", ")}]") + require(to.forall(classOf[ClusterDomainEvent].isAssignableFrom), + s"subscribe to `akka.cluster.ClusterEvent.ClusterDomainEvent` or subclasses, was [${to.map(_.getName).mkString(", ")}]") clusterCore ! 
InternalClusterAction.Subscribe(subscriber, initialStateMode, to.toSet) } @@ -396,7 +399,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { if (path.address.host.isDefined) { path } else { - path.root.copy(selfAddress) / path.elements withUid path.uid + (path.root.copy(selfAddress) / path.elements).withUid(path.uid) } } @@ -424,7 +427,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { closeScheduler() - clusterJmx foreach { _.unregisterMBean() } + clusterJmx.foreach { _.unregisterMBean() } logInfo("Successfully shut down") } @@ -515,7 +518,12 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { if (settings.SelfDataCenter == ClusterSettings.DefaultDataCenter) log.error(cause, "Cluster Node [{}] - " + template, selfAddress, arg1, arg2, arg3) else - log.error(cause, "Cluster Node [{}] dc [" + settings.SelfDataCenter + "] - " + template, selfAddress, arg1, arg2, arg3) + log.error(cause, + "Cluster Node [{}] dc [" + settings.SelfDataCenter + "] - " + template, + selfAddress, + arg1, + arg2, + arg3) } private def logAtLevel(logLevel: LogLevel, message: String): Unit = { @@ -546,7 +554,12 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { if (settings.SelfDataCenter == ClusterSettings.DefaultDataCenter) log.log(logLevel, "Cluster Node [{}] - " + template, selfAddress, arg1, arg2, arg3) else - log.log(logLevel, "Cluster Node [{}] dc [" + settings.SelfDataCenter + "] - " + template, selfAddress, arg1, arg2, arg3) + log.log(logLevel, + "Cluster Node [{}] dc [" + settings.SelfDataCenter + "] - " + template, + selfAddress, + arg1, + arg2, + arg3) private def isLevelEnabled(logLevel: LogLevel): Boolean = LogInfo || logLevel < Logging.InfoLevel diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala index 145e029305..9b458732a9 100644 --- 
a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala @@ -6,7 +6,12 @@ package akka.cluster import akka.ConfigurationException import akka.actor.{ ActorRef, ActorSystem, ActorSystemImpl, Deploy, DynamicAccess, NoScopeGiven, Scope } -import akka.cluster.routing.{ ClusterRouterGroup, ClusterRouterGroupSettings, ClusterRouterPool, ClusterRouterPoolSettings } +import akka.cluster.routing.{ + ClusterRouterGroup, + ClusterRouterGroupSettings, + ClusterRouterPool, + ClusterRouterPoolSettings +} import akka.event.EventStream import akka.remote.{ RemoteActorRefProvider, RemoteDeployer } import akka.remote.routing.RemoteRouterConfig @@ -21,12 +26,11 @@ import com.typesafe.config.ConfigFactory * extension, i.e. the cluster will automatically be started when * the `ClusterActorRefProvider` is used. */ -private[akka] class ClusterActorRefProvider( - _systemName: String, - _settings: ActorSystem.Settings, - _eventStream: EventStream, - _dynamicAccess: DynamicAccess) extends RemoteActorRefProvider( - _systemName, _settings, _eventStream, _dynamicAccess) { +private[akka] class ClusterActorRefProvider(_systemName: String, + _settings: ActorSystem.Settings, + _eventStream: EventStream, + _dynamicAccess: DynamicAccess) + extends RemoteActorRefProvider(_systemName, _settings, _eventStream, _dynamicAccess) { override def init(system: ActorSystemImpl): Unit = { super.init(system) @@ -41,11 +45,12 @@ private[akka] class ClusterActorRefProvider( import remoteSettings._ val failureDetector = createRemoteWatcherFailureDetector(system) - system.systemActorOf(ClusterRemoteWatcher.props( - failureDetector, - heartbeatInterval = WatchHeartBeatInterval, - unreachableReaperInterval = WatchUnreachableReaperInterval, - heartbeatExpectedResponseAfter = WatchHeartbeatExpectedResponseAfter), "remote-watcher") + system.systemActorOf( + ClusterRemoteWatcher.props(failureDetector, + heartbeatInterval = 
WatchHeartBeatInterval, + unreachableReaperInterval = WatchUnreachableReaperInterval, + heartbeatExpectedResponseAfter = WatchHeartbeatExpectedResponseAfter), + "remote-watcher") } /** @@ -61,36 +66,42 @@ private[akka] class ClusterActorRefProvider( * * Deployer of cluster aware routers. */ -private[akka] class ClusterDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends RemoteDeployer(_settings, _pm) { +private[akka] class ClusterDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) + extends RemoteDeployer(_settings, _pm) { override def parseConfig(path: String, config: Config): Option[Deploy] = { // config is the user supplied section, no defaults // amend it to use max-total-nr-of-instances as nr-of-instances if cluster.enabled and // user has not specified nr-of-instances val config2 = - if (config.hasPath("cluster.enabled") && config.getBoolean("cluster.enabled") && !config.hasPath("nr-of-instances")) { + if (config.hasPath("cluster.enabled") && config.getBoolean("cluster.enabled") && !config.hasPath( + "nr-of-instances")) { val maxTotalNrOfInstances = config.withFallback(default).getInt("cluster.max-total-nr-of-instances") - ConfigFactory.parseString("nr-of-instances=" + maxTotalNrOfInstances) - .withFallback(config) + ConfigFactory.parseString("nr-of-instances=" + maxTotalNrOfInstances).withFallback(config) } else config super.parseConfig(path, config2) match { case d @ Some(deploy) => if (deploy.config.getBoolean("cluster.enabled")) { if (deploy.scope != NoScopeGiven) - throw new ConfigurationException("Cluster deployment can't be combined with scope [%s]".format(deploy.scope)) + throw new ConfigurationException( + "Cluster deployment can't be combined with scope [%s]".format(deploy.scope)) if (deploy.routerConfig.isInstanceOf[RemoteRouterConfig]) - throw new ConfigurationException("Cluster deployment can't be combined with [%s]".format(deploy.routerConfig)) + throw new ConfigurationException( + "Cluster deployment can't be combined 
with [%s]".format(deploy.routerConfig)) deploy.routerConfig match { case r: Pool => - Some(deploy.copy( - routerConfig = ClusterRouterPool(r, ClusterRouterPoolSettings.fromConfig(deploy.config)), scope = ClusterScope)) + Some( + deploy.copy(routerConfig = ClusterRouterPool(r, ClusterRouterPoolSettings.fromConfig(deploy.config)), + scope = ClusterScope)) case r: Group => - Some(deploy.copy( - routerConfig = ClusterRouterGroup(r, ClusterRouterGroupSettings.fromConfig(deploy.config)), scope = ClusterScope)) + Some( + deploy.copy(routerConfig = ClusterRouterGroup(r, ClusterRouterGroupSettings.fromConfig(deploy.config)), + scope = ClusterScope)) case other => - throw new IllegalArgumentException(s"Cluster aware router can only wrap Pool or Group, got [${other.getClass.getName}]") + throw new IllegalArgumentException( + s"Cluster aware router can only wrap Pool or Group, got [${other.getClass.getName}]") } } else d case None => None @@ -106,6 +117,7 @@ abstract class ClusterScope extends Scope * Cluster aware scope of a [[akka.actor.Deploy]] */ case object ClusterScope extends ClusterScope { + /** * Java API: get the singleton instance */ diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index 19d2d7efc1..f6bdc2626d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -99,6 +99,7 @@ private[cluster] object InternalClusterAction { sealed trait ConfigCheck case object UncheckedConfig extends ConfigCheck case object IncompatibleConfig extends ConfigCheck + /** * Node with version 2.5.9 or earlier is joining. 
The serialized * representation of `InitJoinAck` must be a plain `Address` for @@ -118,7 +119,9 @@ private[cluster] object InternalClusterAction { * see JoinSeedNode */ @SerialVersionUID(1L) - final case class InitJoinAck(address: Address, configCheck: ConfigCheck) extends ClusterMessage with DeadLetterSuppression + final case class InitJoinAck(address: Address, configCheck: ConfigCheck) + extends ClusterMessage + with DeadLetterSuppression /** * see JoinSeedNode @@ -156,10 +159,12 @@ private[cluster] object InternalClusterAction { final case class AddOnMemberRemovedListener(callback: Runnable) extends NoSerializationVerificationNeeded sealed trait SubscriptionMessage - final case class Subscribe(subscriber: ActorRef, initialStateMode: SubscriptionInitialStateMode, - to: Set[Class[_]]) extends SubscriptionMessage + final case class Subscribe(subscriber: ActorRef, initialStateMode: SubscriptionInitialStateMode, to: Set[Class[_]]) + extends SubscriptionMessage final case class Unsubscribe(subscriber: ActorRef, to: Option[Class[_]]) - extends SubscriptionMessage with DeadLetterSuppression + extends SubscriptionMessage + with DeadLetterSuppression + /** * @param receiver [[akka.cluster.ClusterEvent.CurrentClusterState]] will be sent to the `receiver` */ @@ -179,8 +184,9 @@ private[cluster] object InternalClusterAction { * Supervisor managing the different Cluster daemons. */ @InternalApi -private[cluster] final class ClusterDaemon(joinConfigCompatChecker: JoinConfigCompatChecker) extends Actor - with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[cluster] final class ClusterDaemon(joinConfigCompatChecker: JoinConfigCompatChecker) + extends Actor + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import InternalClusterAction._ // Important - don't use Cluster(context.system) in constructor because that would // cause deadlock. 
The Cluster extension is currently being created and is waiting @@ -213,17 +219,19 @@ private[cluster] final class ClusterDaemon(joinConfigCompatChecker: JoinConfigCo } def createChildren(): Unit = { - coreSupervisor = Some(context.actorOf(Props(classOf[ClusterCoreSupervisor], joinConfigCompatChecker). - withDispatcher(context.props.dispatcher), name = "core")) - context.actorOf(Props[ClusterHeartbeatReceiver]. - withDispatcher(context.props.dispatcher), name = "heartbeatReceiver") + coreSupervisor = Some( + context.actorOf( + Props(classOf[ClusterCoreSupervisor], joinConfigCompatChecker).withDispatcher(context.props.dispatcher), + name = "core")) + context.actorOf(Props[ClusterHeartbeatReceiver].withDispatcher(context.props.dispatcher), + name = "heartbeatReceiver") } def receive = { case msg: GetClusterCoreRef.type => if (coreSupervisor.isEmpty) createChildren() - coreSupervisor.foreach(_ forward msg) + coreSupervisor.foreach(_.forward(msg)) case AddOnMemberUpListener(code) => context.actorOf(Props(classOf[OnMemberStatusChangedListener], code, Up).withDeploy(Deploy.local)) case AddOnMemberRemovedListener(code) => @@ -243,8 +251,9 @@ private[cluster] final class ClusterDaemon(joinConfigCompatChecker: JoinConfigCo * would be obsolete. Shutdown the member if any those actors crashed. */ @InternalApi -private[cluster] final class ClusterCoreSupervisor(joinConfigCompatChecker: JoinConfigCompatChecker) extends Actor - with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[cluster] final class ClusterCoreSupervisor(joinConfigCompatChecker: JoinConfigCompatChecker) + extends Actor + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { // Important - don't use Cluster(context.system) in constructor because that would // cause deadlock. 
The Cluster extension is currently being created and is waiting @@ -254,17 +263,19 @@ private[cluster] final class ClusterCoreSupervisor(joinConfigCompatChecker: Join var coreDaemon: Option[ActorRef] = None def createChildren(): Unit = { - val publisher = context.actorOf(Props[ClusterDomainEventPublisher]. - withDispatcher(context.props.dispatcher), name = "publisher") - coreDaemon = Some(context.watch(context.actorOf(Props(classOf[ClusterCoreDaemon], publisher, joinConfigCompatChecker). - withDispatcher(context.props.dispatcher), name = "daemon"))) + val publisher = + context.actorOf(Props[ClusterDomainEventPublisher].withDispatcher(context.props.dispatcher), name = "publisher") + coreDaemon = Some( + context.watch( + context.actorOf(Props(classOf[ClusterCoreDaemon], publisher, joinConfigCompatChecker) + .withDispatcher(context.props.dispatcher), + name = "daemon"))) } override val supervisorStrategy = OneForOneStrategy() { case NonFatal(e) => - Cluster(context.system).ClusterLogger.logError( - e, "crashed, [{}] - shutting down...", e.getMessage) + Cluster(context.system).ClusterLogger.logError(e, "crashed, [{}] - shutting down...", e.getMessage) self ! PoisonPill Stop } @@ -294,15 +305,16 @@ private[cluster] object ClusterCoreDaemon { * INTERNAL API. 
*/ @InternalApi -private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatChecker: JoinConfigCompatChecker) extends Actor - with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatChecker: JoinConfigCompatChecker) + extends Actor + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import InternalClusterAction._ import ClusterCoreDaemon._ import MembershipState._ val cluster = Cluster(context.system) import cluster.ClusterLogger._ - import cluster.{ selfAddress, selfRoles, scheduler, failureDetector, crossDcFailureDetector } + import cluster.{ crossDcFailureDetector, failureDetector, scheduler, selfAddress, selfRoles } import cluster.settings._ val selfDc = cluster.selfDataCenter @@ -310,17 +322,15 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh protected def selfUniqueAddress = cluster.selfUniqueAddress val vclockNode = VectorClock.Node(Gossip.vclockName(selfUniqueAddress)) - val gossipTargetSelector = new GossipTargetSelector( - ReduceGossipDifferentViewProbability, - cluster.settings.MultiDataCenter.CrossDcGossipProbability) + val gossipTargetSelector = new GossipTargetSelector(ReduceGossipDifferentViewProbability, + cluster.settings.MultiDataCenter.CrossDcGossipProbability) // note that self is not initially member, // and the Gossip is not versioned for this 'Node' yet - var membershipState = MembershipState( - Gossip.empty, - cluster.selfUniqueAddress, - cluster.settings.SelfDataCenter, - cluster.settings.MultiDataCenter.CrossDcConnections) + var membershipState = MembershipState(Gossip.empty, + cluster.selfUniqueAddress, + cluster.settings.SelfDataCenter, + cluster.settings.MultiDataCenter.CrossDcConnections) var isCurrentlyLeader = false @@ -366,19 +376,19 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh import context.dispatcher // start periodic gossip to random nodes in 
cluster - val gossipTask = scheduler.schedule( - PeriodicTasksInitialDelay.max(GossipInterval), - GossipInterval, self, GossipTick) + val gossipTask = scheduler.schedule(PeriodicTasksInitialDelay.max(GossipInterval), GossipInterval, self, GossipTick) // start periodic cluster failure detector reaping (moving nodes condemned by the failure detector to unreachable list) - val failureDetectorReaperTask = scheduler.schedule( - PeriodicTasksInitialDelay.max(UnreachableNodesReaperInterval), - UnreachableNodesReaperInterval, self, ReapUnreachableTick) + val failureDetectorReaperTask = scheduler.schedule(PeriodicTasksInitialDelay.max(UnreachableNodesReaperInterval), + UnreachableNodesReaperInterval, + self, + ReapUnreachableTick) // start periodic leader action management (only applies for the current leader) - val leaderActionsTask = scheduler.schedule( - PeriodicTasksInitialDelay.max(LeaderActionsInterval), - LeaderActionsInterval, self, LeaderActionsTick) + val leaderActionsTask = scheduler.schedule(PeriodicTasksInitialDelay.max(LeaderActionsInterval), + LeaderActionsInterval, + self, + LeaderActionsTick) // start periodic publish of current stats val publishStatsTask: Option[Cancellable] = PublishStatsInterval match { @@ -402,7 +412,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh if (isClusterBootstrapUsed) logDebug("Cluster Bootstrap is used for joining") else - logInfo("No seed-nodes configured, manual cluster join required, see " + + logInfo( + "No seed-nodes configured, manual cluster join required, see " + "https://doc.akka.io/docs/akka/current/cluster-usage.html#joining-to-seed-nodes") } else { self ! 
JoinSeedNodes(seedNodes) @@ -412,8 +423,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh private def isClusterBootstrapUsed: Boolean = { val conf = context.system.settings.config conf.hasPath("akka.management.cluster.bootstrap") && - conf.hasPath("akka.management.http.route-providers") && - conf.getStringList("akka.management.http.route-providers") + conf.hasPath("akka.management.http.route-providers") && + conf + .getStringList("akka.management.http.route-providers") .contains("akka.management.cluster.bootstrap.ClusterBootstrap$") } @@ -422,52 +434,54 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh gossipTask.cancel() failureDetectorReaperTask.cancel() leaderActionsTask.cancel() - publishStatsTask foreach { _.cancel() } + publishStatsTask.foreach { _.cancel() } selfExiting.trySuccess(Done) } - def uninitialized: Actor.Receive = ({ - case InitJoin => - logInfo("Received InitJoin message from [{}], but this node is not initialized yet", sender()) - sender() ! InitJoinNack(selfAddress) - case ClusterUserAction.JoinTo(address) => - join(address) - case JoinSeedNodes(newSeedNodes) => - resetJoinSeedNodesDeadline() - joinSeedNodes(newSeedNodes) - case msg: SubscriptionMessage => - publisher forward msg - case Welcome(from, gossip) => - welcome(from.address, from, gossip) - case _: Tick => - if (joinSeedNodesDeadline.exists(_.isOverdue)) - joinSeedNodesWasUnsuccessful() - }: Actor.Receive).orElse(receiveExitingCompleted) + def uninitialized: Actor.Receive = + ({ + case InitJoin => + logInfo("Received InitJoin message from [{}], but this node is not initialized yet", sender()) + sender() ! 
InitJoinNack(selfAddress) + case ClusterUserAction.JoinTo(address) => + join(address) + case JoinSeedNodes(newSeedNodes) => + resetJoinSeedNodesDeadline() + joinSeedNodes(newSeedNodes) + case msg: SubscriptionMessage => + publisher.forward(msg) + case Welcome(from, gossip) => + welcome(from.address, from, gossip) + case _: Tick => + if (joinSeedNodesDeadline.exists(_.isOverdue)) + joinSeedNodesWasUnsuccessful() + }: Actor.Receive).orElse(receiveExitingCompleted) - def tryingToJoin(joinWith: Address, deadline: Option[Deadline]): Actor.Receive = ({ - case Welcome(from, gossip) => - welcome(joinWith, from, gossip) - case InitJoin => - logInfo("Received InitJoin message from [{}], but this node is not a member yet", sender()) - sender() ! InitJoinNack(selfAddress) - case ClusterUserAction.JoinTo(address) => - becomeUninitialized() - join(address) - case JoinSeedNodes(newSeedNodes) => - resetJoinSeedNodesDeadline() - becomeUninitialized() - joinSeedNodes(newSeedNodes) - case msg: SubscriptionMessage => publisher forward msg - case _: Tick => - if (joinSeedNodesDeadline.exists(_.isOverdue)) - joinSeedNodesWasUnsuccessful() - else if (deadline.exists(_.isOverdue)) { - // join attempt failed, retry + def tryingToJoin(joinWith: Address, deadline: Option[Deadline]): Actor.Receive = + ({ + case Welcome(from, gossip) => + welcome(joinWith, from, gossip) + case InitJoin => + logInfo("Received InitJoin message from [{}], but this node is not a member yet", sender()) + sender() ! 
InitJoinNack(selfAddress) + case ClusterUserAction.JoinTo(address) => becomeUninitialized() - if (seedNodes.nonEmpty) joinSeedNodes(seedNodes) - else join(joinWith) - } - }: Actor.Receive).orElse(receiveExitingCompleted) + join(address) + case JoinSeedNodes(newSeedNodes) => + resetJoinSeedNodesDeadline() + becomeUninitialized() + joinSeedNodes(newSeedNodes) + case msg: SubscriptionMessage => publisher.forward(msg) + case _: Tick => + if (joinSeedNodesDeadline.exists(_.isOverdue)) + joinSeedNodesWasUnsuccessful() + else if (deadline.exists(_.isOverdue)) { + // join attempt failed, retry + becomeUninitialized() + if (seedNodes.nonEmpty) joinSeedNodes(seedNodes) + else join(joinWith) + } + }: Actor.Receive).orElse(receiveExitingCompleted) private def resetJoinSeedNodesDeadline(): Unit = { joinSeedNodesDeadline = ShutdownAfterUnsuccessfulJoinSeedNodes match { @@ -477,10 +491,10 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } private def joinSeedNodesWasUnsuccessful(): Unit = { - logWarning( - "Joining of seed-nodes [{}] was unsuccessful after configured " + - "shutdown-after-unsuccessful-join-seed-nodes [{}]. Running CoordinatedShutdown.", - seedNodes.mkString(", "), ShutdownAfterUnsuccessfulJoinSeedNodes) + logWarning("Joining of seed-nodes [{}] was unsuccessful after configured " + + "shutdown-after-unsuccessful-join-seed-nodes [{}]. 
Running CoordinatedShutdown.", + seedNodes.mkString(", "), + ShutdownAfterUnsuccessfulJoinSeedNodes) joinSeedNodesDeadline = None CoordinatedShutdown(context.system).run(CoordinatedShutdown.ClusterDowningReason) } @@ -506,31 +520,30 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh context.become(initialized) } - def initialized: Actor.Receive = ({ - case msg: GossipEnvelope => receiveGossip(msg) - case msg: GossipStatus => receiveGossipStatus(msg) - case GossipTick => gossipTick() - case GossipSpeedupTick => gossipSpeedupTick() - case ReapUnreachableTick => reapUnreachableMembers() - case LeaderActionsTick => leaderActions() - case PublishStatsTick => publishInternalStats() - case InitJoin(joiningNodeConfig) => - logInfo("Received InitJoin message from [{}] to [{}]", sender(), selfAddress) - initJoin(joiningNodeConfig) - case Join(node, roles) => joining(node, roles) - case ClusterUserAction.Down(address) => downing(address) - case ClusterUserAction.Leave(address) => leaving(address) - case SendGossipTo(address) => sendGossipTo(address) - case msg: SubscriptionMessage => publisher forward msg - case QuarantinedEvent(address, uid) => quarantined(UniqueAddress(address, uid)) - case ClusterUserAction.JoinTo(address) => - logInfo("Trying to join [{}] when already part of a cluster, ignoring", address) - case JoinSeedNodes(nodes) => - logInfo( - "Trying to join seed nodes [{}] when already part of a cluster, ignoring", - nodes.mkString(", ")) - case ExitingConfirmed(address) => receiveExitingConfirmed(address) - }: Actor.Receive).orElse(receiveExitingCompleted) + def initialized: Actor.Receive = + ({ + case msg: GossipEnvelope => receiveGossip(msg) + case msg: GossipStatus => receiveGossipStatus(msg) + case GossipTick => gossipTick() + case GossipSpeedupTick => gossipSpeedupTick() + case ReapUnreachableTick => reapUnreachableMembers() + case LeaderActionsTick => leaderActions() + case PublishStatsTick => publishInternalStats() + case 
InitJoin(joiningNodeConfig) => + logInfo("Received InitJoin message from [{}] to [{}]", sender(), selfAddress) + initJoin(joiningNodeConfig) + case Join(node, roles) => joining(node, roles) + case ClusterUserAction.Down(address) => downing(address) + case ClusterUserAction.Leave(address) => leaving(address) + case SendGossipTo(address) => sendGossipTo(address) + case msg: SubscriptionMessage => publisher.forward(msg) + case QuarantinedEvent(address, uid) => quarantined(UniqueAddress(address, uid)) + case ClusterUserAction.JoinTo(address) => + logInfo("Trying to join [{}] when already part of a cluster, ignoring", address) + case JoinSeedNodes(nodes) => + logInfo("Trying to join seed nodes [{}] when already part of a cluster, ignoring", nodes.mkString(", ")) + case ExitingConfirmed(address) => receiveExitingConfirmed(address) + }: Actor.Receive).orElse(receiveExitingCompleted) def receiveExitingCompleted: Actor.Receive = { case ExitingCompleted => @@ -559,17 +572,22 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh if (removeUnreachableWithMemberStatus.contains(selfStatus)) { // prevents a Down and Exiting node from being used for joining - logInfo("Sending InitJoinNack message from node [{}] to [{}] (version [{}])", selfAddress, sender(), - joiningNodeVersion) + logInfo("Sending InitJoinNack message from node [{}] to [{}] (version [{}])", + selfAddress, + sender(), + joiningNodeVersion) sender() ! 
InitJoinNack(selfAddress) } else { - logInfo("Sending InitJoinAck message from node [{}] to [{}] (version [{}])", selfAddress, sender(), - joiningNodeVersion) + logInfo("Sending InitJoinAck message from node [{}] to [{}] (version [{}])", + selfAddress, + sender(), + joiningNodeVersion) // run config compatibility check using config provided by // joining node and current (full) config on cluster side val configWithoutSensitiveKeys = { - val allowedConfigPaths = JoinConfigCompatChecker.removeSensitiveKeys(context.system.settings.config, cluster.settings) + val allowedConfigPaths = + JoinConfigCompatChecker.removeSensitiveKeys(context.system.settings.config, cluster.settings) // build a stripped down config instead where sensitive config paths are removed // we don't want any check to happen on those keys JoinConfigCompatChecker.filterWithKeys(allowedConfigPaths, context.system.settings.config) @@ -585,16 +603,17 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh // Send back to joining node a subset of current configuration // containing the keys initially sent by the joining node minus // any sensitive keys as defined by this node configuration - val clusterConfig = JoinConfigCompatChecker.filterWithKeys(nonSensitiveKeys, context.system.settings.config) + val clusterConfig = + JoinConfigCompatChecker.filterWithKeys(nonSensitiveKeys, context.system.settings.config) CompatibleConfig(clusterConfig) } case Invalid(messages) => // messages are only logged on the cluster side - logWarning( - "Found incompatible settings when [{}] tried to join: {}. " + - s"Self version [{}], Joining version [$joiningNodeVersion].", - sender().path.address, messages.mkString(", "), - context.system.settings.ConfigVersion) + logWarning("Found incompatible settings when [{}] tried to join: {}. 
" + + s"Self version [{}], Joining version [$joiningNodeVersion].", + sender().path.address, + messages.mkString(", "), + context.system.settings.ConfigVersion) if (configCheckUnsupportedByJoiningNode) ConfigCheckUnsupportedByJoiningNode else @@ -611,21 +630,24 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh stopSeedNodeProcess() seedNodes = newSeedNodes // keep them for retry - seedNodeProcess = - if (newSeedNodes == immutable.IndexedSeq(selfAddress)) { - self ! ClusterUserAction.JoinTo(selfAddress) - None + seedNodeProcess = if (newSeedNodes == immutable.IndexedSeq(selfAddress)) { + self ! ClusterUserAction.JoinTo(selfAddress) + None + } else { + // use unique name of this actor, stopSeedNodeProcess doesn't wait for termination + seedNodeProcessCounter += 1 + if (newSeedNodes.head == selfAddress) { + Some( + context.actorOf( + Props(classOf[FirstSeedNodeProcess], newSeedNodes, joinConfigCompatChecker).withDispatcher(UseDispatcher), + name = "firstSeedNodeProcess-" + seedNodeProcessCounter)) } else { - // use unique name of this actor, stopSeedNodeProcess doesn't wait for termination - seedNodeProcessCounter += 1 - if (newSeedNodes.head == selfAddress) { - Some(context.actorOf(Props(classOf[FirstSeedNodeProcess], newSeedNodes, joinConfigCompatChecker). - withDispatcher(UseDispatcher), name = "firstSeedNodeProcess-" + seedNodeProcessCounter)) - } else { - Some(context.actorOf(Props(classOf[JoinSeedNodeProcess], newSeedNodes, joinConfigCompatChecker). 
- withDispatcher(UseDispatcher), name = "joinSeedNodeProcess-" + seedNodeProcessCounter)) - } + Some( + context.actorOf( + Props(classOf[JoinSeedNodeProcess], newSeedNodes, joinConfigCompatChecker).withDispatcher(UseDispatcher), + name = "joinSeedNodeProcess-" + seedNodeProcessCounter)) } + } } } @@ -637,13 +659,13 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh */ def join(address: Address): Unit = { if (address.protocol != selfAddress.protocol) - logWarning( - "Trying to join member with wrong protocol, but was ignored, expected [{}] but was [{}]", - selfAddress.protocol, address.protocol) + logWarning("Trying to join member with wrong protocol, but was ignored, expected [{}] but was [{}]", + selfAddress.protocol, + address.protocol) else if (address.system != selfAddress.system) - logWarning( - "Trying to join member with wrong ActorSystem name, but was ignored, expected [{}] but was [{}]", - selfAddress.system, address.system) + logWarning("Trying to join member with wrong ActorSystem name, but was ignored, expected [{}] but was [{}]", + selfAddress.system, + address.system) else { require(latestGossip.members.isEmpty, "Join can only be done from empty state") @@ -668,7 +690,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh seedNodeProcess match { case Some(s) => // manual join, abort current seedNodeProcess - context stop s + context.stop(s) seedNodeProcess = None case None => // no seedNodeProcess in progress } @@ -682,13 +704,13 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh def joining(joiningNode: UniqueAddress, roles: Set[String]): Unit = { val selfStatus = latestGossip.member(selfUniqueAddress).status if (joiningNode.address.protocol != selfAddress.protocol) - logWarning( - "Member with wrong protocol tried to join, but was ignored, expected [{}] but was [{}]", - selfAddress.protocol, joiningNode.address.protocol) + logWarning("Member with wrong 
protocol tried to join, but was ignored, expected [{}] but was [{}]", + selfAddress.protocol, + joiningNode.address.protocol) else if (joiningNode.address.system != selfAddress.system) - logWarning( - "Member with wrong ActorSystem name tried to join, but was ignored, expected [{}] but was [{}]", - selfAddress.system, joiningNode.address.system) + logWarning("Member with wrong ActorSystem name tried to join, but was ignored, expected [{}] but was [{}]", + selfAddress.system, + joiningNode.address.system) else if (removeUnreachableWithMemberStatus.contains(selfStatus)) logInfo("Trying to join [{}] to [{}] member, ignoring. Use a member that is Up instead.", joiningNode, selfStatus) else { @@ -707,7 +729,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh // safe to down and later remove existing member // new node will retry join logInfo("New incarnation of existing member [{}] is trying to join. " + - "Existing will be removed from the cluster and then new member will be allowed to join.", m) + "Existing will be removed from the cluster and then new member will be allowed to join.", + m) if (m.status != Down) { // we can confirm it as terminated/unreachable immediately val newReachability = latestGossip.overview.reachability.terminated(selfUniqueAddress, m.uniqueAddress) @@ -725,12 +748,14 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh // add joining node as Joining // add self in case someone else joins before self has joined (Set discards duplicates) val newMembers = localMembers + Member(joiningNode, roles) + Member(selfUniqueAddress, cluster.selfRoles) - val newGossip = latestGossip copy (members = newMembers) + val newGossip = latestGossip.copy(members = newMembers) updateLatestGossip(newGossip) if (joiningNode == selfUniqueAddress) { - logInfo("Node [{}] is JOINING itself (with roles [{}]) and forming new cluster", joiningNode.address, roles.mkString(", ")) + logInfo("Node [{}] is JOINING 
itself (with roles [{}]) and forming new cluster", + joiningNode.address, + roles.mkString(", ")) if (localMembers.isEmpty) leaderActions() // important for deterministic oldest when bootstrapping } else { @@ -768,9 +793,12 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh */ def leaving(address: Address): Unit = { // only try to update if the node is available (in the member ring) - if (latestGossip.members.exists(m => m.address == address && (m.status == Joining || m.status == WeaklyUp || m.status == Up))) { - val newMembers = latestGossip.members map { m => if (m.address == address) m.copy(status = Leaving) else m } // mark node as LEAVING - val newGossip = latestGossip copy (members = newMembers) + if (latestGossip.members.exists( + m => m.address == address && (m.status == Joining || m.status == WeaklyUp || m.status == Up))) { + val newMembers = latestGossip.members.map { m => + if (m.address == address) m.copy(status = Leaving) else m + } // mark node as LEAVING + val newGossip = latestGossip.copy(members = newMembers) updateLatestGossip(newGossip) @@ -867,13 +895,13 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh val localGossip = latestGossip if (localGossip.hasMember(node)) { val newReachability = latestGossip.overview.reachability.terminated(selfUniqueAddress, node) - val newOverview = localGossip.overview copy (reachability = newReachability) - val newGossip = localGossip copy (overview = newOverview) + val newOverview = localGossip.overview.copy(reachability = newReachability) + val newGossip = localGossip.copy(overview = newOverview) updateLatestGossip(newGossip) - logWarning( - "Marking node as TERMINATED [{}], due to quarantine. Node roles [{}]. " + - "It must still be marked as down before it's removed.", - node.address, selfRoles.mkString(",")) + logWarning("Marking node as TERMINATED [{}], due to quarantine. Node roles [{}]. 
" + + "It must still be marked as down before it's removed.", + node.address, + selfRoles.mkString(",")) publishMembershipState() } } @@ -885,7 +913,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh else if (!latestGossip.isReachable(selfUniqueAddress, from)) logInfo("Ignoring received gossip status from unreachable [{}] ", from) else { - (status.version compareTo latestGossip.version) match { + status.version.compareTo(latestGossip.version) match { case VectorClock.Same => // same version case VectorClock.After => gossipStatusTo(from, sender()) // remote is newer case _ => gossipTo(from, sender()) // conflicting or local is newer @@ -928,13 +956,13 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh logInfo("Ignoring received gossip that does not contain myself, from [{}]", from) Ignored } else { - val comparison = remoteGossip.version compareTo localGossip.version + val comparison = remoteGossip.version.compareTo(localGossip.version) val (winningGossip, talkback, gossipType) = comparison match { case VectorClock.Same => // same version val talkback = !exitingTasksInProgress && !remoteGossip.seenByNode(selfUniqueAddress) - (remoteGossip mergeSeen localGossip, talkback, Same) + (remoteGossip.mergeSeen(localGossip), talkback, Same) case VectorClock.Before => // local is newer (localGossip, true, Older) @@ -963,24 +991,24 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh g } - (prunedRemoteGossip merge prunedLocalGossip, true, Merge) + (prunedRemoteGossip.merge(prunedLocalGossip), true, Merge) } // Don't mark gossip state as seen while exiting is in progress, e.g. // shutting down singleton actors. This delays removal of the member until // the exiting tasks have been completed. 
- membershipState = membershipState.copy(latestGossip = - if (exitingTasksInProgress) winningGossip - else winningGossip seen selfUniqueAddress) + membershipState = membershipState.copy( + latestGossip = + if (exitingTasksInProgress) winningGossip + else winningGossip.seen(selfUniqueAddress)) assertLatestGossip() // for all new nodes we remove them from the failure detector - latestGossip.members foreach { - node => - if (!localGossip.members(node)) { - failureDetector.remove(node.address) - crossDcFailureDetector.remove(node.address) - } + latestGossip.members.foreach { node => + if (!localGossip.members(node)) { + failureDetector.remove(node.address) + crossDcFailureDetector.remove(node.address) + } } logDebug("Receiving gossip from [{}]", from) @@ -988,7 +1016,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh if (comparison == VectorClock.Concurrent && cluster.settings.Debug.VerboseGossipLogging) { logDebug( """Couldn't establish a causal relationship between "remote" gossip and "local" gossip - Remote[{}] - Local[{}] - merged them into [{}]""", - remoteGossip, localGossip, winningGossip) + remoteGossip, + localGossip, + winningGossip) } if (statsEnabled) { @@ -1036,7 +1066,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh def isGossipSpeedupNeeded: Boolean = { if (latestGossip.isMultiDc) - latestGossip.overview.seen.count(membershipState.isInSameDc) < latestGossip.members.count(_.dataCenter == cluster.selfDataCenter) / 2 + latestGossip.overview.seen + .count(membershipState.isInSameDc) < latestGossip.members.count(_.dataCenter == cluster.selfDataCenter) / 2 else latestGossip.overview.seen.size < latestGossip.members.size / 2 } @@ -1092,13 +1123,14 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh moveJoiningToWeaklyUp() if (leaderActionCounter == firstNotice || leaderActionCounter % periodicNotice == 0) - logInfo( - "Leader can currently not perform its 
duties, reachability status: [{}], member status: [{}]", - membershipState.dcReachabilityExcludingDownedObservers, - latestGossip.members.collect { - case m if m.dataCenter == selfDc => - s"${m.address} ${m.status} seen=${latestGossip.seenByNode(m.uniqueAddress)}" - }.mkString(", ")) + logInfo("Leader can currently not perform its duties, reachability status: [{}], member status: [{}]", + membershipState.dcReachabilityExcludingDownedObservers, + latestGossip.members + .collect { + case m if m.dataCenter == selfDc => + s"${m.address} ${m.status} seen=${latestGossip.seenByNode(m.uniqueAddress)}" + } + .mkString(", ")) } } else if (isCurrentlyLeader) { logInfo("is no longer leader") @@ -1114,7 +1146,8 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh // status Down. The down commands should spread before we shutdown. val unreachable = membershipState.dcReachability.allUnreachableOrTerminated val downed = membershipState.dcMembers.collect { case m if m.status == Down => m.uniqueAddress } - if (selfDownCounter >= MaxTicksBeforeShuttingDownMyself || downed.forall(node => unreachable(node) || latestGossip.seenByNode(node))) { + if (selfDownCounter >= MaxTicksBeforeShuttingDownMyself || downed.forall( + node => unreachable(node) || latestGossip.seenByNode(node))) { // the reason for not shutting down immediately is to give the gossip a chance to spread // the downing information to other downed nodes, so that they can shutdown themselves logInfo("Shutting down myself") @@ -1172,7 +1205,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh val enoughMembers: Boolean = isMinNrOfMembersFulfilled def isJoiningToUp(m: Member): Boolean = (m.status == Joining || m.status == WeaklyUp) && enoughMembers - latestGossip.members collect { + latestGossip.members.collect { var upNumber = 0 { @@ -1191,17 +1224,19 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh case m if m.dataCenter == 
selfDc && m.status == Leaving => // Move LEAVING => EXITING (once we have a convergence on LEAVING) - m copy (status = Exiting) + m.copy(status = Exiting) } } } val updatedGossip: Gossip = if (removedUnreachable.nonEmpty || removedExitingConfirmed.nonEmpty || changedMembers.nonEmpty || - removedOtherDc.nonEmpty) { + removedOtherDc.nonEmpty) { // replace changed members - val removed = removedUnreachable.map(_.uniqueAddress).union(removedExitingConfirmed) + val removed = removedUnreachable + .map(_.uniqueAddress) + .union(removedExitingConfirmed) .union(removedOtherDc.map(_.uniqueAddress)) val newGossip = latestGossip.update(changedMembers).removeAll(removed, System.currentTimeMillis()) @@ -1219,17 +1254,17 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh exitingConfirmed = exitingConfirmed.filterNot(removedExitingConfirmed) - changedMembers foreach { m => + changedMembers.foreach { m => logInfo("Leader is moving node [{}] to [{}]", m.address, m.status) } - removedUnreachable foreach { m => + removedUnreachable.foreach { m => val status = if (m.status == Exiting) "exiting" else "unreachable" logInfo("Leader is removing {} node [{}]", status, m.address) } removedExitingConfirmed.foreach { n => logInfo("Leader is removing confirmed Exiting node [{}]", n.address) } - removedOtherDc foreach { m => + removedOtherDc.foreach { m => logInfo("Leader is removing {} node [{}] in DC [{}]", m.status, m.address, m.dataCenter) } @@ -1253,9 +1288,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh if (targets.nonEmpty) { if (isDebugEnabled) - logDebug( - "Gossip exiting members [{}] to the two oldest (per role) [{}] (singleton optimization).", - exitingMembers.mkString(", "), targets.mkString(", ")) + logDebug("Gossip exiting members [{}] to the two oldest (per role) [{}] (singleton optimization).", + exitingMembers.mkString(", "), + targets.mkString(", ")) targets.foreach(m => gossipTo(m.uniqueAddress)) } @@ 
-1268,9 +1303,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh val enoughMembers: Boolean = isMinNrOfMembersFulfilled def isJoiningToWeaklyUp(m: Member): Boolean = m.dataCenter == selfDc && - m.status == Joining && - enoughMembers && - membershipState.dcReachabilityExcludingDownedObservers.isReachable(m.uniqueAddress) + m.status == Joining && + enoughMembers && + membershipState.dcReachabilityExcludingDownedObservers.isReachable(m.uniqueAddress) val changedMembers = localMembers.collect { case m if isJoiningToWeaklyUp(m) => m.copy(status = WeaklyUp) } @@ -1280,7 +1315,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh updateLatestGossip(newGossip) // log status changes - changedMembers foreach { m => + changedMembers.foreach { m => logInfo("Leader is moving node [{}] to [{}]", m.address, m.status) } @@ -1305,14 +1340,14 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh else crossDcFailureDetector.isAvailable(member.address) } - val newlyDetectedUnreachableMembers = localMembers filterNot { member => + val newlyDetectedUnreachableMembers = localMembers.filterNot { member => member.uniqueAddress == selfUniqueAddress || - localOverview.reachability.status(selfUniqueAddress, member.uniqueAddress) == Reachability.Unreachable || - localOverview.reachability.status(selfUniqueAddress, member.uniqueAddress) == Reachability.Terminated || - isAvailable(member) + localOverview.reachability.status(selfUniqueAddress, member.uniqueAddress) == Reachability.Unreachable || + localOverview.reachability.status(selfUniqueAddress, member.uniqueAddress) == Reachability.Terminated || + isAvailable(member) } - val newlyDetectedReachableMembers = localOverview.reachability.allUnreachableFrom(selfUniqueAddress) collect { + val newlyDetectedReachableMembers = localOverview.reachability.allUnreachableFrom(selfUniqueAddress).collect { case node if node != selfUniqueAddress && 
isAvailable(localGossip.member(node)) => localGossip.member(node) } @@ -1320,27 +1355,31 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh if (newlyDetectedUnreachableMembers.nonEmpty || newlyDetectedReachableMembers.nonEmpty) { val newReachability1 = newlyDetectedUnreachableMembers.foldLeft(localOverview.reachability) { - (reachability, m) => reachability.unreachable(selfUniqueAddress, m.uniqueAddress) + (reachability, m) => + reachability.unreachable(selfUniqueAddress, m.uniqueAddress) } - val newReachability2 = newlyDetectedReachableMembers.foldLeft(newReachability1) { - (reachability, m) => reachability.reachable(selfUniqueAddress, m.uniqueAddress) + val newReachability2 = newlyDetectedReachableMembers.foldLeft(newReachability1) { (reachability, m) => + reachability.reachable(selfUniqueAddress, m.uniqueAddress) } if (newReachability2 ne localOverview.reachability) { - val newOverview = localOverview copy (reachability = newReachability2) - val newGossip = localGossip copy (overview = newOverview) + val newOverview = localOverview.copy(reachability = newReachability2) + val newGossip = localGossip.copy(overview = newOverview) updateLatestGossip(newGossip) val (exiting, nonExiting) = newlyDetectedUnreachableMembers.partition(_.status == Exiting) if (nonExiting.nonEmpty) - logWarning("Marking node(s) as UNREACHABLE [{}]. Node roles [{}]", nonExiting.mkString(", "), selfRoles.mkString(", ")) + logWarning("Marking node(s) as UNREACHABLE [{}]. Node roles [{}]", + nonExiting.mkString(", "), + selfRoles.mkString(", ")) if (exiting.nonEmpty) - logInfo( - "Marking exiting node(s) as UNREACHABLE [{}]. This is expected and they will be removed.", - exiting.mkString(", ")) + logInfo("Marking exiting node(s) as UNREACHABLE [{}]. This is expected and they will be removed.", + exiting.mkString(", ")) if (newlyDetectedReachableMembers.nonEmpty) - logInfo("Marking node(s) as REACHABLE [{}]. 
Node roles [{}]", newlyDetectedReachableMembers.mkString(", "), selfRoles.mkString(",")) + logInfo("Marking node(s) as REACHABLE [{}]. Node roles [{}]", + newlyDetectedReachableMembers.mkString(", "), + selfRoles.mkString(",")) publishMembershipState() } @@ -1352,9 +1391,10 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh // needed for tests def sendGossipTo(address: Address): Unit = { - latestGossip.members.foreach(m => - if (m.address == address) - gossipTo(m.uniqueAddress)) + latestGossip.members.foreach( + m => + if (m.address == address) + gossipTo(m.uniqueAddress)) } /** @@ -1388,7 +1428,7 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh versionedGossip.clearSeen() else { // Nobody else has seen this gossip but us - val seenVersionedGossip = versionedGossip onlySeen selfUniqueAddress + val seenVersionedGossip = versionedGossip.onlySeen(selfUniqueAddress) // Update the state with the new gossip seenVersionedGossip } @@ -1409,9 +1449,9 @@ private[cluster] class ClusterCoreDaemon(publisher: ActorRef, joinConfigCompatCh } def publishInternalStats(): Unit = { - val vclockStats = VectorClockStats( - versionSize = latestGossip.version.versions.size, - seenLatest = latestGossip.members.count(m => latestGossip.seenByNode(m.uniqueAddress))) + val vclockStats = VectorClockStats(versionSize = latestGossip.version.versions.size, + seenLatest = + latestGossip.members.count(m => latestGossip.seenByNode(m.uniqueAddress))) publisher ! CurrentInternalStats(gossipStats, vclockStats) } @@ -1435,7 +1475,9 @@ private[cluster] case object IncompatibleConfigurationDetected extends Reason * that other seed node to join existing cluster. 
*/ @InternalApi -private[cluster] final class FirstSeedNodeProcess(seedNodes: immutable.IndexedSeq[Address], joinConfigCompatChecker: JoinConfigCompatChecker) extends Actor { +private[cluster] final class FirstSeedNodeProcess(seedNodes: immutable.IndexedSeq[Address], + joinConfigCompatChecker: JoinConfigCompatChecker) + extends Actor { import InternalClusterAction._ import ClusterUserAction.JoinTo @@ -1462,17 +1504,19 @@ private[cluster] final class FirstSeedNodeProcess(seedNodes: immutable.IndexedSe def receive = { case JoinSeedNode => if (timeout.hasTimeLeft) { - val requiredNonSensitiveKeys = JoinConfigCompatChecker.removeSensitiveKeys(joinConfigCompatChecker.requiredKeys, cluster.settings) + val requiredNonSensitiveKeys = + JoinConfigCompatChecker.removeSensitiveKeys(joinConfigCompatChecker.requiredKeys, cluster.settings) // configToValidate only contains the keys that are required according to JoinConfigCompatChecker on this node - val configToValidate = JoinConfigCompatChecker.filterWithKeys(requiredNonSensitiveKeys, context.system.settings.config) + val configToValidate = + JoinConfigCompatChecker.filterWithKeys(requiredNonSensitiveKeys, context.system.settings.config) // send InitJoin to remaining seed nodes (except myself) - remainingSeedNodes foreach { a => context.actorSelection(context.parent.path.toStringWithAddress(a)) ! InitJoin(configToValidate) } + remainingSeedNodes.foreach { a => + context.actorSelection(context.parent.path.toStringWithAddress(a)) ! InitJoin(configToValidate) + } } else { // no InitJoinAck received, initialize new cluster by joining myself if (isDebugEnabled) - logDebug( - "Couldn't join other seed nodes, will join myself. seed-nodes=[{}]", - seedNodes.mkString(", ")) + logDebug("Couldn't join other seed nodes, will join myself. seed-nodes=[{}]", seedNodes.mkString(", ")) context.parent ! 
JoinTo(selfAddress) context.stop(self) } @@ -1488,17 +1532,20 @@ private[cluster] final class FirstSeedNodeProcess(seedNodes: immutable.IndexedSe case Invalid(messages) if ByPassConfigCompatCheck => logWarning("Cluster validated this node config, but sent back incompatible settings: {}. " + - "Join will be performed because compatibility check is configured to not be enforced.", messages.mkString(", ")) + "Join will be performed because compatibility check is configured to not be enforced.", + messages.mkString(", ")) context.parent ! JoinTo(address) context.stop(self) case Invalid(messages) => - logError("Cluster validated this node config, but sent back incompatible settings: {}. " + + logError( + "Cluster validated this node config, but sent back incompatible settings: {}. " + "It's recommended to perform a full cluster shutdown in order to deploy this new version. " + "If a cluster shutdown isn't an option, you may want to disable this protection by setting " + "'akka.cluster.configuration-compatibility-check.enforce-on-join = off'. " + "Note that disabling it will allow the formation of a cluster with nodes having incompatible configuration settings. " + - "This node will be shutdown!", messages.mkString(", ")) + "This node will be shutdown!", + messages.mkString(", ")) context.stop(self) CoordinatedShutdown(context.system).run(IncompatibleConfigurationDetected) } @@ -1514,18 +1561,19 @@ private[cluster] final class FirstSeedNodeProcess(seedNodes: immutable.IndexedSe if (ByPassConfigCompatCheck) { // only join if set to ignore config validation logInfo("Received InitJoinAck message from [{}] to [{}]", sender(), selfAddress) - logWarning("Joining cluster with incompatible configurations. " + + logWarning( + "Joining cluster with incompatible configurations. " + "Join will be performed because compatibility check is configured to not be enforced.") context.parent ! 
JoinTo(address) context.stop(self) } else { logError( "Couldn't join seed nodes because of incompatible cluster configuration. " + - "It's recommended to perform a full cluster shutdown in order to deploy this new version." + - "If a cluster shutdown isn't an option, you may want to disable this protection by setting " + - "'akka.cluster.configuration-compatibility-check.enforce-on-join = off'. " + - "Note that disabling it will allow the formation of a cluster with nodes having incompatible configuration settings. " + - "This node will be shutdown!") + "It's recommended to perform a full cluster shutdown in order to deploy this new version." + + "If a cluster shutdown isn't an option, you may want to disable this protection by setting " + + "'akka.cluster.configuration-compatibility-check.enforce-on-join = off'. " + + "Note that disabling it will allow the formation of a cluster with nodes having incompatible configuration settings. " + + "This node will be shutdown!") context.stop(self) CoordinatedShutdown(context.system).run(IncompatibleConfigurationDetected) } @@ -1567,7 +1615,9 @@ private[cluster] final class FirstSeedNodeProcess(seedNodes: immutable.IndexedSe * */ @InternalApi -private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq[Address], joinConfigCompatChecker: JoinConfigCompatChecker) extends Actor { +private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq[Address], + joinConfigCompatChecker: JoinConfigCompatChecker) + extends Actor { import InternalClusterAction._ import ClusterUserAction.JoinTo @@ -1590,12 +1640,16 @@ private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq def receive = { case JoinSeedNode => - val requiredNonSensitiveKeys = JoinConfigCompatChecker.removeSensitiveKeys(joinConfigCompatChecker.requiredKeys, cluster.settings) + val requiredNonSensitiveKeys = + JoinConfigCompatChecker.removeSensitiveKeys(joinConfigCompatChecker.requiredKeys, cluster.settings) // 
configToValidate only contains the keys that are required according to JoinConfigCompatChecker on this node - val configToValidate = JoinConfigCompatChecker.filterWithKeys(requiredNonSensitiveKeys, context.system.settings.config) + val configToValidate = + JoinConfigCompatChecker.filterWithKeys(requiredNonSensitiveKeys, context.system.settings.config) // send InitJoin to all seed nodes (except myself) attempt += 1 - otherSeedNodes.foreach { a => context.actorSelection(context.parent.path.toStringWithAddress(a)) ! InitJoin(configToValidate) } + otherSeedNodes.foreach { a => + context.actorSelection(context.parent.path.toStringWithAddress(a)) ! InitJoin(configToValidate) + } case InitJoinAck(address, CompatibleConfig(clusterConfig)) => logInfo("Received InitJoinAck message from [{}] to [{}]", sender(), selfAddress) @@ -1608,17 +1662,20 @@ private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq case Invalid(messages) if ByPassConfigCompatCheck => logWarning("Cluster validated this node config, but sent back incompatible settings: {}. " + - "Join will be performed because compatibility check is configured to not be enforced.", messages.mkString(", ")) + "Join will be performed because compatibility check is configured to not be enforced.", + messages.mkString(", ")) context.parent ! JoinTo(address) context.become(done) case Invalid(messages) => - logError("Cluster validated this node config, but sent back incompatible settings: {}. " + + logError( + "Cluster validated this node config, but sent back incompatible settings: {}. " + "It's recommended to perform a full cluster shutdown in order to deploy this new version. " + "If a cluster shutdown isn't an option, you may want to disable this protection by setting " + "'akka.cluster.configuration-compatibility-check.enforce-on-join = off'. " + "Note that disabling it will allow the formation of a cluster with nodes having incompatible configuration settings. 
" + - "This node will be shutdown!", messages.mkString(", ")) + "This node will be shutdown!", + messages.mkString(", ")) context.stop(self) CoordinatedShutdown(context.system).run(IncompatibleConfigurationDetected) } @@ -1632,7 +1689,8 @@ private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq // first InitJoinAck reply, but incompatible if (ByPassConfigCompatCheck) { logInfo("Received InitJoinAck message from [{}] to [{}]", sender(), selfAddress) - logWarning("Joining cluster with incompatible configurations. " + + logWarning( + "Joining cluster with incompatible configurations. " + "Join will be performed because compatibility check is configured to not be enforced.") // only join if set to ignore config validation context.parent ! JoinTo(address) @@ -1640,11 +1698,11 @@ private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq } else { logError( "Couldn't join seed nodes because of incompatible cluster configuration. " + - "It's recommended to perform a full cluster shutdown in order to deploy this new version." + - "If a cluster shutdown isn't an option, you may want to disable this protection by setting " + - "'akka.cluster.configuration-compatibility-check.enforce-on-join = off'. " + - "Note that disabling it will allow the formation of a cluster with nodes having incompatible configuration settings. " + - "This node will be shutdown!") + "It's recommended to perform a full cluster shutdown in order to deploy this new version." + + "If a cluster shutdown isn't an option, you may want to disable this protection by setting " + + "'akka.cluster.configuration-compatibility-check.enforce-on-join = off'. " + + "Note that disabling it will allow the formation of a cluster with nodes having incompatible configuration settings. 
" + + "This node will be shutdown!") context.stop(self) CoordinatedShutdown(context.system).run(IncompatibleConfigurationDetected) } @@ -1653,9 +1711,9 @@ private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq case ReceiveTimeout => if (attempt >= 2) - logWarning( - "Couldn't join seed nodes after [{}] attempts, will try again. seed-nodes=[{}]", - attempt, seedNodes.filterNot(_ == selfAddress).mkString(", ")) + logWarning("Couldn't join seed nodes after [{}] attempts, will try again. seed-nodes=[{}]", + attempt, + seedNodes.filterNot(_ == selfAddress).mkString(", ")) // no InitJoinAck received, try again self ! JoinSeedNode } @@ -1680,8 +1738,8 @@ private[cluster] class OnMemberStatusChangedListener(callback: Runnable, status: private val to = status match { case Up => classOf[MemberUp] case Removed => classOf[MemberRemoved] - case other => throw new IllegalArgumentException( - s"Expected Up or Removed in OnMemberStatusChangedListener, got [$other]") + case other => + throw new IllegalArgumentException(s"Expected Up or Removed in OnMemberStatusChangedListener, got [$other]") } override def preStart(): Unit = @@ -1706,10 +1764,11 @@ private[cluster] class OnMemberStatusChangedListener(callback: Runnable, status: } private def done(): Unit = { - try callback.run() catch { + try callback.run() + catch { case NonFatal(e) => logError(e, "[{}] callback failed with [{}]", s"On${to.getSimpleName}", e.getMessage) } finally { - context stop self + context.stop(self) } } @@ -1723,12 +1782,11 @@ private[cluster] class OnMemberStatusChangedListener(callback: Runnable, status: */ @InternalApi @SerialVersionUID(1L) -private[cluster] final case class GossipStats( - receivedGossipCount: Long = 0L, - mergeCount: Long = 0L, - sameCount: Long = 0L, - newerCount: Long = 0L, - olderCount: Long = 0L) { +private[cluster] final case class GossipStats(receivedGossipCount: Long = 0L, + mergeCount: Long = 0L, + sameCount: Long = 0L, + newerCount: Long = 0L, + 
olderCount: Long = 0L) { def incrementMergeCount(): GossipStats = copy(mergeCount = mergeCount + 1, receivedGossipCount = receivedGossipCount + 1) @@ -1743,21 +1801,19 @@ private[cluster] final case class GossipStats( copy(olderCount = olderCount + 1, receivedGossipCount = receivedGossipCount + 1) def :+(that: GossipStats): GossipStats = { - GossipStats( - this.receivedGossipCount + that.receivedGossipCount, - this.mergeCount + that.mergeCount, - this.sameCount + that.sameCount, - this.newerCount + that.newerCount, - this.olderCount + that.olderCount) + GossipStats(this.receivedGossipCount + that.receivedGossipCount, + this.mergeCount + that.mergeCount, + this.sameCount + that.sameCount, + this.newerCount + that.newerCount, + this.olderCount + that.olderCount) } def :-(that: GossipStats): GossipStats = { - GossipStats( - this.receivedGossipCount - that.receivedGossipCount, - this.mergeCount - that.mergeCount, - this.sameCount - that.sameCount, - this.newerCount - that.newerCount, - this.olderCount - that.olderCount) + GossipStats(this.receivedGossipCount - that.receivedGossipCount, + this.mergeCount - that.mergeCount, + this.sameCount - that.sameCount, + this.newerCount - that.newerCount, + this.olderCount - that.olderCount) } } @@ -1767,6 +1823,4 @@ private[cluster] final case class GossipStats( */ @InternalApi @SerialVersionUID(1L) -private[cluster] final case class VectorClockStats( - versionSize: Int = 0, - seenLatest: Int = 0) +private[cluster] final case class VectorClockStats(versionSize: Int = 0, seenLatest: Int = 0) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index c80189e88c..f479efa1fa 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -29,12 +29,14 @@ import scala.runtime.AbstractFunction5 object ClusterEvent { sealed abstract class SubscriptionInitialStateMode + /** * When 
using this subscription mode a snapshot of * [[akka.cluster.ClusterEvent.CurrentClusterState]] will be sent to the * subscriber as the first message. */ case object InitialStateAsSnapshot extends SubscriptionInitialStateMode + /** * When using this subscription mode the events corresponding * to the current state will be sent to the subscriber to mimic what you would @@ -61,23 +63,24 @@ object ClusterEvent { trait ClusterDomainEvent extends DeadLetterSuppression // for binary compatibility (used to be a case class) - object CurrentClusterState extends AbstractFunction5[immutable.SortedSet[Member], Set[Member], Set[Address], Option[Address], Map[String, Option[Address]], CurrentClusterState] { + object CurrentClusterState + extends AbstractFunction5[immutable.SortedSet[Member], + Set[Member], + Set[Address], + Option[Address], + Map[String, Option[Address]], + CurrentClusterState] { - def apply( - members: immutable.SortedSet[Member] = immutable.SortedSet.empty, - unreachable: Set[Member] = Set.empty, - seenBy: Set[Address] = Set.empty, - leader: Option[Address] = None, - roleLeaderMap: Map[String, Option[Address]] = Map.empty): CurrentClusterState = + def apply(members: immutable.SortedSet[Member] = immutable.SortedSet.empty, + unreachable: Set[Member] = Set.empty, + seenBy: Set[Address] = Set.empty, + leader: Option[Address] = None, + roleLeaderMap: Map[String, Option[Address]] = Map.empty): CurrentClusterState = new CurrentClusterState(members, unreachable, seenBy, leader, roleLeaderMap) - def unapply(cs: CurrentClusterState): Option[(immutable.SortedSet[Member], Set[Member], Set[Address], Option[Address], Map[String, Option[Address]])] = - Some(( - cs.members, - cs.unreachable, - cs.seenBy, - cs.leader, - cs.roleLeaderMap)) + def unapply(cs: CurrentClusterState): Option[ + (immutable.SortedSet[Member], Set[Member], Set[Address], Option[Address], Map[String, Option[Address]])] = + Some((cs.members, cs.unreachable, cs.seenBy, cs.leader, cs.roleLeaderMap)) } @@ 
-87,23 +90,25 @@ object ClusterEvent { * @param leader leader of the data center of this node */ @SerialVersionUID(2) - final class CurrentClusterState( - val members: immutable.SortedSet[Member], - val unreachable: Set[Member], - val seenBy: Set[Address], - val leader: Option[Address], - val roleLeaderMap: Map[String, Option[Address]], - val unreachableDataCenters: Set[DataCenter]) - extends Product5[immutable.SortedSet[Member], Set[Member], Set[Address], Option[Address], Map[String, Option[Address]]] - with Serializable { + final class CurrentClusterState(val members: immutable.SortedSet[Member], + val unreachable: Set[Member], + val seenBy: Set[Address], + val leader: Option[Address], + val roleLeaderMap: Map[String, Option[Address]], + val unreachableDataCenters: Set[DataCenter]) + extends Product5[immutable.SortedSet[Member], + Set[Member], + Set[Address], + Option[Address], + Map[String, Option[Address]]] + with Serializable { // for binary compatibility - def this( - members: immutable.SortedSet[Member] = immutable.SortedSet.empty, - unreachable: Set[Member] = Set.empty, - seenBy: Set[Address] = Set.empty, - leader: Option[Address] = None, - roleLeaderMap: Map[String, Option[Address]] = Map.empty) = + def this(members: immutable.SortedSet[Member] = immutable.SortedSet.empty, + unreachable: Set[Member] = Set.empty, + seenBy: Set[Address] = Set.empty, + leader: Option[Address] = None, + roleLeaderMap: Map[String, Option[Address]] = Map.empty) = this(members, unreachable, seenBy, leader, roleLeaderMap, Set.empty) /** @@ -177,22 +182,20 @@ object ClusterEvent { new CurrentClusterState(members, unreachable, seenBy, leader, roleLeaderMap, unreachableDataCenters) // for binary compatibility (used to be a case class) - def copy( - members: immutable.SortedSet[Member] = this.members, - unreachable: Set[Member] = this.unreachable, - seenBy: Set[Address] = this.seenBy, - leader: Option[Address] = this.leader, - roleLeaderMap: Map[String, Option[Address]] = 
this.roleLeaderMap) = + def copy(members: immutable.SortedSet[Member] = this.members, + unreachable: Set[Member] = this.unreachable, + seenBy: Set[Address] = this.seenBy, + leader: Option[Address] = this.leader, + roleLeaderMap: Map[String, Option[Address]] = this.roleLeaderMap) = new CurrentClusterState(members, unreachable, seenBy, leader, roleLeaderMap, unreachableDataCenters) override def equals(other: Any): Boolean = other match { case that: CurrentClusterState => - (this eq that) || ( - members == that.members && - unreachable == that.unreachable && - seenBy == that.seenBy && - leader == that.leader && - roleLeaderMap == that.roleLeaderMap) + (this eq that) || (members == that.members && + unreachable == that.unreachable && + seenBy == that.seenBy && + leader == that.leader && + roleLeaderMap == that.roleLeaderMap) case _ => false } @@ -287,6 +290,7 @@ object ClusterEvent { * is first seen on a node. */ final case class LeaderChanged(leader: Option[Address]) extends ClusterDomainEvent { + /** * Java API * @return address of current leader, or null if none @@ -300,6 +304,7 @@ object ClusterEvent { * Published when the state change is first seen on a node. 
*/ final case class RoleLeaderChanged(role: String, leader: Option[Address]) extends ClusterDomainEvent { + /** * Java API * @return address of current leader, or null if none @@ -368,41 +373,48 @@ object ClusterEvent { /** * INTERNAL API */ - private[cluster] final case class CurrentInternalStats( - gossipStats: GossipStats, - vclockStats: VectorClockStats) extends ClusterDomainEvent + private[cluster] final case class CurrentInternalStats(gossipStats: GossipStats, vclockStats: VectorClockStats) + extends ClusterDomainEvent /** * INTERNAL API */ - private[cluster] def diffUnreachable(oldState: MembershipState, newState: MembershipState): immutable.Seq[UnreachableMember] = + private[cluster] def diffUnreachable(oldState: MembershipState, + newState: MembershipState): immutable.Seq[UnreachableMember] = if (newState eq oldState) Nil else { val newGossip = newState.latestGossip val oldUnreachableNodes = oldState.dcReachabilityNoOutsideNodes.allUnreachableOrTerminated - newState.dcReachabilityNoOutsideNodes.allUnreachableOrTerminated.iterator.collect { - case node if !oldUnreachableNodes.contains(node) && node != newState.selfUniqueAddress => - UnreachableMember(newGossip.member(node)) - }.to(immutable.IndexedSeq) + newState.dcReachabilityNoOutsideNodes.allUnreachableOrTerminated.iterator + .collect { + case node if !oldUnreachableNodes.contains(node) && node != newState.selfUniqueAddress => + UnreachableMember(newGossip.member(node)) + } + .to(immutable.IndexedSeq) } /** * INTERNAL API */ - private[cluster] def diffReachable(oldState: MembershipState, newState: MembershipState): immutable.Seq[ReachableMember] = + private[cluster] def diffReachable(oldState: MembershipState, + newState: MembershipState): immutable.Seq[ReachableMember] = if (newState eq oldState) Nil else { val newGossip = newState.latestGossip - oldState.dcReachabilityNoOutsideNodes.allUnreachable.iterator.collect { - case node if newGossip.hasMember(node) && 
newState.dcReachabilityNoOutsideNodes.isReachable(node) && node != newState.selfUniqueAddress => - ReachableMember(newGossip.member(node)) - }.to(immutable.IndexedSeq) + oldState.dcReachabilityNoOutsideNodes.allUnreachable.iterator + .collect { + case node + if newGossip.hasMember(node) && newState.dcReachabilityNoOutsideNodes.isReachable(node) && node != newState.selfUniqueAddress => + ReachableMember(newGossip.member(node)) + } + .to(immutable.IndexedSeq) } /** * Internal API */ - private[cluster] def isReachable(state: MembershipState, oldUnreachableNodes: Set[UniqueAddress])(otherDc: DataCenter): Boolean = { + private[cluster] def isReachable(state: MembershipState, oldUnreachableNodes: Set[UniqueAddress])( + otherDc: DataCenter): Boolean = { val unrelatedDcNodes = state.latestGossip.members.collect { case m if m.dataCenter != otherDc && m.dataCenter != state.selfDc => m.uniqueAddress } @@ -414,45 +426,55 @@ object ClusterEvent { /** * INTERNAL API */ - private[cluster] def diffUnreachableDataCenter(oldState: MembershipState, newState: MembershipState): immutable.Seq[UnreachableDataCenter] = { + private[cluster] def diffUnreachableDataCenter(oldState: MembershipState, + newState: MembershipState): immutable.Seq[UnreachableDataCenter] = { if (newState eq oldState) Nil else { - val otherDcs = (oldState.latestGossip.allDataCenters union newState.latestGossip.allDataCenters) - newState.selfDc - otherDcs.filterNot(isReachable(newState, oldState.dcReachability.allUnreachableOrTerminated)).iterator.map(UnreachableDataCenter).to(immutable.IndexedSeq) + val otherDcs = (oldState.latestGossip.allDataCenters + .union(newState.latestGossip.allDataCenters)) - newState.selfDc + otherDcs + .filterNot(isReachable(newState, oldState.dcReachability.allUnreachableOrTerminated)) + .iterator + .map(UnreachableDataCenter) + .to(immutable.IndexedSeq) } } /** * INTERNAL API */ - private[cluster] def diffReachableDataCenter(oldState: MembershipState, newState: MembershipState): 
immutable.Seq[ReachableDataCenter] = { + private[cluster] def diffReachableDataCenter(oldState: MembershipState, + newState: MembershipState): immutable.Seq[ReachableDataCenter] = { if (newState eq oldState) Nil else { - val otherDcs = (oldState.latestGossip.allDataCenters union newState.latestGossip.allDataCenters) - newState.selfDc + val otherDcs = (oldState.latestGossip.allDataCenters + .union(newState.latestGossip.allDataCenters)) - newState.selfDc val oldUnreachableDcs = otherDcs.filterNot(isReachable(oldState, Set())) val currentUnreachableDcs = otherDcs.filterNot(isReachable(newState, Set())) - (oldUnreachableDcs diff currentUnreachableDcs).iterator.map(ReachableDataCenter).to(immutable.IndexedSeq) + oldUnreachableDcs.diff(currentUnreachableDcs).iterator.map(ReachableDataCenter).to(immutable.IndexedSeq) } } /** * INTERNAL API. */ - private[cluster] def diffMemberEvents(oldState: MembershipState, newState: MembershipState): immutable.Seq[MemberEvent] = + private[cluster] def diffMemberEvents(oldState: MembershipState, + newState: MembershipState): immutable.Seq[MemberEvent] = if (newState eq oldState) Nil else { val oldGossip = oldState.latestGossip val newGossip = newState.latestGossip - val newMembers = newGossip.members diff oldGossip.members + val newMembers = newGossip.members.diff(oldGossip.members) val membersGroupedByAddress = List(newGossip.members, oldGossip.members).flatten.groupBy(_.uniqueAddress) - val changedMembers = membersGroupedByAddress collect { - case (_, newMember :: oldMember :: Nil) if newMember.status != oldMember.status || newMember.upNumber != oldMember.upNumber => + val changedMembers = membersGroupedByAddress.collect { + case (_, newMember :: oldMember :: Nil) + if newMember.status != oldMember.status || newMember.upNumber != oldMember.upNumber => newMember } import akka.util.ccompat.imm._ - val memberEvents = (newMembers ++ changedMembers).unsorted collect { + val memberEvents = (newMembers ++ changedMembers).unsorted.collect { 
case m if m.status == Joining => MemberJoined(m) case m if m.status == WeaklyUp => MemberWeaklyUp(m) case m if m.status == Up => MemberUp(m) @@ -462,7 +484,7 @@ object ClusterEvent { // no events for other transitions } - val removedMembers = oldGossip.members diff newGossip.members + val removedMembers = oldGossip.members.diff(newGossip.members) val removedEvents = removedMembers.unsorted.map(m => MemberRemoved(m.copy(status = Removed), m.status)) (new VectorBuilder[MemberEvent]() ++= removedEvents ++= memberEvents).result() @@ -472,7 +494,8 @@ object ClusterEvent { * INTERNAL API */ @InternalApi - private[cluster] def diffLeader(oldState: MembershipState, newState: MembershipState): immutable.Seq[LeaderChanged] = { + private[cluster] def diffLeader(oldState: MembershipState, + newState: MembershipState): immutable.Seq[LeaderChanged] = { val newLeader = newState.leader if (newLeader != oldState.leader) List(LeaderChanged(newLeader.map(_.address))) else Nil @@ -484,7 +507,7 @@ object ClusterEvent { @InternalApi private[cluster] def diffRolesLeader(oldState: MembershipState, newState: MembershipState): Set[RoleLeaderChanged] = { for { - role <- oldState.latestGossip.allRoles union newState.latestGossip.allRoles + role <- oldState.latestGossip.allRoles.union(newState.latestGossip.allRoles) newLeader = newState.roleLeader(role) if newLeader != oldState.roleLeader(role) } yield RoleLeaderChanged(role, newLeader.map(_.address)) @@ -508,7 +531,8 @@ object ClusterEvent { * INTERNAL API */ @InternalApi - private[cluster] def diffReachability(oldState: MembershipState, newState: MembershipState): immutable.Seq[ReachabilityChanged] = + private[cluster] def diffReachability(oldState: MembershipState, + newState: MembershipState): immutable.Seq[ReachabilityChanged] = if (newState.overview.reachability eq oldState.overview.reachability) Nil else List(ReachabilityChanged(newState.overview.reachability)) @@ -519,17 +543,18 @@ object ClusterEvent { * Responsible for domain event 
subscriptions and publishing of * domain events to event bus. */ -private[cluster] final class ClusterDomainEventPublisher extends Actor with ActorLogging - with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[cluster] final class ClusterDomainEventPublisher + extends Actor + with ActorLogging + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import InternalClusterAction._ val cluster = Cluster(context.system) val selfUniqueAddress = cluster.selfUniqueAddress - val emptyMembershipState = MembershipState( - Gossip.empty, - cluster.selfUniqueAddress, - cluster.settings.SelfDataCenter, - cluster.settings.MultiDataCenter.CrossDcConnections) + val emptyMembershipState = MembershipState(Gossip.empty, + cluster.selfUniqueAddress, + cluster.settings.SelfDataCenter, + cluster.settings.MultiDataCenter.CrossDcConnections) var membershipState: MembershipState = emptyMembershipState def selfDc = cluster.settings.SelfDataCenter @@ -568,14 +593,14 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto if (!membershipState.latestGossip.isMultiDc) Set.empty else membershipState.latestGossip.allDataCenters.filterNot(isReachable(membershipState, Set.empty)) - val state = new CurrentClusterState( - members = membershipState.latestGossip.members, - unreachable = unreachable, - seenBy = membershipState.latestGossip.seenBy.map(_.address), - leader = membershipState.leader.map(_.address), - roleLeaderMap = membershipState.latestGossip.allRoles.iterator.map(r => - r -> membershipState.roleLeader(r).map(_.address)).toMap, - unreachableDataCenters) + val state = new CurrentClusterState(members = membershipState.latestGossip.members, + unreachable = unreachable, + seenBy = membershipState.latestGossip.seenBy.map(_.address), + leader = membershipState.leader.map(_.address), + roleLeaderMap = membershipState.latestGossip.allRoles.iterator + .map(r => r -> membershipState.roleLeader(r).map(_.address)) + .toMap, + unreachableDataCenters) 
receiver ! state } @@ -591,7 +616,7 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto sendCurrentClusterState(subscriber) } - to foreach { eventStream.subscribe(subscriber, _) } + to.foreach { eventStream.subscribe(subscriber, _) } } def unsubscribe(subscriber: ActorRef, to: Option[Class[_]]): Unit = to match { @@ -607,21 +632,21 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto } def publishDiff(oldState: MembershipState, newState: MembershipState, pub: AnyRef => Unit): Unit = { - diffMemberEvents(oldState, newState) foreach pub - diffUnreachable(oldState, newState) foreach pub - diffReachable(oldState, newState) foreach pub - diffUnreachableDataCenter(oldState, newState) foreach pub - diffReachableDataCenter(oldState, newState) foreach pub - diffLeader(oldState, newState) foreach pub - diffRolesLeader(oldState, newState) foreach pub + diffMemberEvents(oldState, newState).foreach(pub) + diffUnreachable(oldState, newState).foreach(pub) + diffReachable(oldState, newState).foreach(pub) + diffUnreachableDataCenter(oldState, newState).foreach(pub) + diffReachableDataCenter(oldState, newState).foreach(pub) + diffLeader(oldState, newState).foreach(pub) + diffRolesLeader(oldState, newState).foreach(pub) // publish internal SeenState for testing purposes - diffSeen(oldState, newState) foreach pub - diffReachability(oldState, newState) foreach pub + diffSeen(oldState, newState).foreach(pub) + diffReachability(oldState, newState).foreach(pub) } def publishInternalStats(currentStats: CurrentInternalStats): Unit = publish(currentStats) - def publish(event: AnyRef): Unit = eventStream publish event + def publish(event: AnyRef): Unit = eventStream.publish(event) def clearState(): Unit = { membershipState = emptyMembershipState diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala index 5126bb814d..8215733762 100644 --- 
a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala @@ -52,6 +52,7 @@ private[cluster] object ClusterHeartbeatReceiver { * INTERNAL API */ private[cluster] object ClusterHeartbeatSender { + /** * Sent at regular intervals for failure detection. */ @@ -60,7 +61,10 @@ private[cluster] object ClusterHeartbeatSender { /** * Sent as reply to [[Heartbeat]] messages. */ - final case class HeartbeatRsp(from: UniqueAddress) extends ClusterMessage with HeartbeatMessage with DeadLetterSuppression + final case class HeartbeatRsp(from: UniqueAddress) + extends ClusterMessage + with HeartbeatMessage + with DeadLetterSuppression // sent to self only case object HeartbeatTick @@ -81,7 +85,7 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg val cluster = Cluster(context.system) import cluster.ClusterLogger._ val verboseHeartbeat = cluster.settings.Debug.VerboseHeartbeatLogging - import cluster.{ selfAddress, selfUniqueAddress, scheduler } + import cluster.{ scheduler, selfAddress, selfUniqueAddress } import cluster.settings._ import context.dispatcher @@ -98,9 +102,8 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg failureDetector) // start periodic heartbeat to other nodes in cluster - val heartbeatTask = scheduler.schedule( - PeriodicTasksInitialDelay max HeartbeatInterval, - HeartbeatInterval, self, HeartbeatTick) + val heartbeatTask = + scheduler.schedule(PeriodicTasksInitialDelay max HeartbeatInterval, HeartbeatInterval, self, HeartbeatTick) // used for logging warning if actual tick interval is unexpected (e.g. 
due to starvation) private var tickTimestamp = System.nanoTime() + (PeriodicTasksInitialDelay max HeartbeatInterval).toNanos @@ -142,16 +145,16 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg } def init(snapshot: CurrentClusterState): Unit = { - val nodes = snapshot.members.collect { case m if filterInternalClusterMembers(m) => m.uniqueAddress } + val nodes = snapshot.members.collect { case m if filterInternalClusterMembers(m) => m.uniqueAddress } val unreachable = snapshot.unreachable.collect { case m if filterInternalClusterMembers(m) => m.uniqueAddress } state = state.init(nodes, unreachable) } def addMember(m: Member): Unit = if (m.uniqueAddress != selfUniqueAddress && // is not self - !state.contains(m.uniqueAddress) && // not already added - filterInternalClusterMembers(m) // should be watching members from this DC (internal / external) - ) { + !state.contains(m.uniqueAddress) && // not already added + filterInternalClusterMembers(m) // should be watching members from this DC (internal / external) + ) { state = state.addMember(m.uniqueAddress) } @@ -160,7 +163,7 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg if (m.uniqueAddress == cluster.selfUniqueAddress) { // This cluster node will be shutdown, but stop this actor immediately // to avoid further updates - context stop self + context.stop(self) } else { state = state.removeMember(m.uniqueAddress) } @@ -173,7 +176,7 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg state = state.reachableMember(m.uniqueAddress) def heartbeat(): Unit = { - state.activeReceivers foreach { to => + state.activeReceivers.foreach { to => if (failureDetector.isMonitoring(to.address)) { if (verboseHeartbeat) logDebug("Heartbeat to [{}]", to.address) } else { @@ -193,9 +196,11 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg if ((now - tickTimestamp) >= (HeartbeatInterval.toNanos * 2)) 
logWarning( "Scheduled sending of heartbeat was delayed. " + - "Previous heartbeat was sent [{}] ms ago, expected interval is [{}] ms. This may cause failure detection " + - "to mark members as unreachable. The reason can be thread starvation, e.g. by running blocking tasks on the " + - "default dispatcher, CPU overload, or GC.", TimeUnit.NANOSECONDS.toMillis(now - tickTimestamp), HeartbeatInterval.toMillis) + "Previous heartbeat was sent [{}] ms ago, expected interval is [{}] ms. This may cause failure detection " + + "to mark members as unreachable. The reason can be thread starvation, e.g. by running blocking tasks on the " + + "default dispatcher, CPU overload, or GC.", + TimeUnit.NANOSECONDS.toMillis(now - tickTimestamp), + HeartbeatInterval.toMillis) tickTimestamp = now } @@ -219,12 +224,11 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg * It is immutable, but it updates the failureDetector. */ @InternalApi -private[cluster] final case class ClusterHeartbeatSenderState( - ring: HeartbeatNodeRing, - oldReceiversNowUnreachable: Set[UniqueAddress], - failureDetector: FailureDetectorRegistry[Address]) { +private[cluster] final case class ClusterHeartbeatSenderState(ring: HeartbeatNodeRing, + oldReceiversNowUnreachable: Set[UniqueAddress], + failureDetector: FailureDetectorRegistry[Address]) { - val activeReceivers: Set[UniqueAddress] = ring.myReceivers union oldReceiversNowUnreachable + val activeReceivers: Set[UniqueAddress] = ring.myReceivers.union(oldReceiversNowUnreachable) def selfAddress = ring.selfAddress @@ -239,7 +243,7 @@ private[cluster] final case class ClusterHeartbeatSenderState( def removeMember(node: UniqueAddress): ClusterHeartbeatSenderState = { val newState = membershipChange(ring :- node) - failureDetector remove node.address + failureDetector.remove(node.address) if (newState.oldReceiversNowUnreachable(node)) newState.copy(oldReceiversNowUnreachable = newState.oldReceiversNowUnreachable - node) else @@ 
-254,11 +258,11 @@ private[cluster] final case class ClusterHeartbeatSenderState( private def membershipChange(newRing: HeartbeatNodeRing): ClusterHeartbeatSenderState = { val oldReceivers = ring.myReceivers - val removedReceivers = oldReceivers diff newRing.myReceivers + val removedReceivers = oldReceivers.diff(newRing.myReceivers) var adjustedOldReceiversNowUnreachable = oldReceiversNowUnreachable - removedReceivers foreach { a => + removedReceivers.foreach { a => if (failureDetector.isAvailable(a.address)) - failureDetector remove a.address + failureDetector.remove(a.address) else adjustedOldReceiversNowUnreachable += a } @@ -267,11 +271,11 @@ private[cluster] final case class ClusterHeartbeatSenderState( def heartbeatRsp(from: UniqueAddress): ClusterHeartbeatSenderState = if (activeReceivers(from)) { - failureDetector heartbeat from.address + failureDetector.heartbeat(from.address) if (oldReceiversNowUnreachable(from)) { // back from unreachable, ok to stop heartbeating to it if (!ring.myReceivers(from)) - failureDetector remove from.address + failureDetector.remove(from.address) copy(oldReceiversNowUnreachable = oldReceiversNowUnreachable - from) } else this } else this @@ -287,11 +291,10 @@ private[cluster] final case class ClusterHeartbeatSenderState( * * It is immutable, i.e. the methods return new instances. 
*/ -private[cluster] final case class HeartbeatNodeRing( - selfAddress: UniqueAddress, - nodes: Set[UniqueAddress], - unreachable: Set[UniqueAddress], - monitoredByNrOfMembers: Int) { +private[cluster] final case class HeartbeatNodeRing(selfAddress: UniqueAddress, + nodes: Set[UniqueAddress], + unreachable: Set[UniqueAddress], + monitoredByNrOfMembers: Int) { require(nodes contains selfAddress, s"nodes [${nodes.mkString(", ")}] must contain selfAddress [${selfAddress}]") @@ -302,7 +305,7 @@ private[cluster] final case class HeartbeatNodeRing( ha < hb || (ha == hb && Member.addressOrdering.compare(a.address, b.address) < 0) } - immutable.SortedSet() union nodes + immutable.SortedSet().union(nodes) } /** diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala index 5db449ab72..47166a2484 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala @@ -151,24 +151,32 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { // JMX attributes (bean-style) def getClusterStatus: String = { - val members = clusterView.members.toSeq.sorted(Member.ordering).map { m => - s"""{ + val members = clusterView.members.toSeq + .sorted(Member.ordering) + .map { m => + s"""{ | "address": "${m.address}", - | "roles": [${if (m.roles.isEmpty) "" else m.roles.toList.sorted.map("\"" + _ + "\"").mkString("\n ", ",\n ", "\n ")}], + | "roles": [${if (m.roles.isEmpty) "" + else m.roles.toList.sorted.map("\"" + _ + "\"").mkString("\n ", ",\n ", "\n ")}], | "status": "${m.status}" | }""".stripMargin - } mkString (",\n ") - - val unreachable = clusterView.reachability.observersGroupedByUnreachable.toSeq.sortBy(_._1).map { - case (subject, observers) => { - val observerAddresses = observers.toSeq.sorted.map("\"" + _.address + "\"") - s"""{ - | "node": "${subject.address}", - | "observed-by": [${if (observerAddresses.isEmpty) "" else 
observerAddresses.mkString("\n ", ",\n ", "\n ")}] - | }""".stripMargin } + .mkString(",\n ") - } mkString (",\n ") + val unreachable = clusterView.reachability.observersGroupedByUnreachable.toSeq + .sortBy(_._1) + .map { + case (subject, observers) => { + val observerAddresses = observers.toSeq.sorted.map("\"" + _.address + "\"") + s"""{ + | "node": "${subject.address}", + | "observed-by": [${if (observerAddresses.isEmpty) "" + else observerAddresses.mkString("\n ", ",\n ", "\n ")}] + | }""".stripMargin + } + + } + .mkString(",\n ") s"""{ | "members": [${if (members.isEmpty) "" else "\n " + members + "\n "}], @@ -210,7 +218,7 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { } else { log.warning( s"Could not register Cluster JMX MBean with name=$clusterMBeanName as it is already registered. " + - "If you are running multiple clusters in the same JVM, set 'akka.cluster.jmx.multi-mbeans-in-same-jvm = on' in config") + "If you are running multiple clusters in the same JVM, set 'akka.cluster.jmx.multi-mbeans-in-same-jvm = on' in config") } } } @@ -229,7 +237,7 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { } else { log.warning( s"Could not unregister Cluster JMX MBean with name=$clusterMBeanName as it was not found. 
" + - "If you are running multiple clusters in the same JVM, set 'akka.cluster.jmx.multi-mbeans-in-same-jvm = on' in config") + "If you are running multiple clusters in the same JVM, set 'akka.cluster.jmx.multi-mbeans-in-same-jvm = on' in config") } } } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala index 3c56975c90..b4d18df189 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala @@ -49,68 +49,67 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { // create actor that subscribes to the cluster eventBus to update current read view state private val eventBusListener: ActorRef = { - cluster.system.systemActorOf(Props(new Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] { - override def preStart(): Unit = cluster.subscribe(self, classOf[ClusterDomainEvent]) - override def postStop(): Unit = cluster.unsubscribe(self) + cluster.system + .systemActorOf(Props(new Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] { + override def preStart(): Unit = cluster.subscribe(self, classOf[ClusterDomainEvent]) + override def postStop(): Unit = cluster.unsubscribe(self) - def receive = { - case e: ClusterDomainEvent => - e match { - case SeenChanged(_, seenBy) => - _state = _state.copy(seenBy = seenBy) - case ReachabilityChanged(reachability) => - _reachability = reachability - case MemberRemoved(member, _) => - _state = _state.copy(members = _state.members - member, unreachable = _state.unreachable - member) - case UnreachableMember(member) => - // replace current member with new member (might have different status, only address is used in equals) - _state = _state.copy(unreachable = _state.unreachable - member + member) - case ReachableMember(member) => - _state = _state.copy(unreachable = _state.unreachable - member) - case event: MemberEvent => 
- // replace current member with new member (might have different status, only address is used in equals) - val newUnreachable = - if (_state.unreachable.contains(event.member)) _state.unreachable - event.member + event.member - else _state.unreachable - _state = _state.copy( - members = _state.members - event.member + event.member, - unreachable = newUnreachable) - case LeaderChanged(leader) => - _state = _state.copy(leader = leader) - case RoleLeaderChanged(role, leader) => - _state = _state.copy(roleLeaderMap = _state.roleLeaderMap + (role -> leader)) - case stats: CurrentInternalStats => _latestStats = stats - case ClusterShuttingDown => + def receive = { + case e: ClusterDomainEvent => + e match { + case SeenChanged(_, seenBy) => + _state = _state.copy(seenBy = seenBy) + case ReachabilityChanged(reachability) => + _reachability = reachability + case MemberRemoved(member, _) => + _state = _state.copy(members = _state.members - member, unreachable = _state.unreachable - member) + case UnreachableMember(member) => + // replace current member with new member (might have different status, only address is used in equals) + _state = _state.copy(unreachable = _state.unreachable - member + member) + case ReachableMember(member) => + _state = _state.copy(unreachable = _state.unreachable - member) + case event: MemberEvent => + // replace current member with new member (might have different status, only address is used in equals) + val newUnreachable = + if (_state.unreachable.contains(event.member)) _state.unreachable - event.member + event.member + else _state.unreachable + _state = + _state.copy(members = _state.members - event.member + event.member, unreachable = newUnreachable) + case LeaderChanged(leader) => + _state = _state.copy(leader = leader) + case RoleLeaderChanged(role, leader) => + _state = _state.copy(roleLeaderMap = _state.roleLeaderMap + (role -> leader)) + case stats: CurrentInternalStats => _latestStats = stats + case ClusterShuttingDown => + case r: 
ReachableDataCenter => + _state = _state.withUnreachableDataCenters(_state.unreachableDataCenters - r.dataCenter) + case r: UnreachableDataCenter => + _state = _state.withUnreachableDataCenters(_state.unreachableDataCenters + r.dataCenter) - case r: ReachableDataCenter => - _state = _state.withUnreachableDataCenters(_state.unreachableDataCenters - r.dataCenter) - case r: UnreachableDataCenter => - _state = _state.withUnreachableDataCenters(_state.unreachableDataCenters + r.dataCenter) + } - } + e match { + case e: MemberEvent if e.member.address == selfAddress => + _cachedSelf match { + case OptionVal.Some(s) if s.status == MemberStatus.Removed && _closed => + // ignore as Cluster.close has been called + case _ => + _cachedSelf = OptionVal.Some(e.member) + } + case _ => + } - e match { - case e: MemberEvent if e.member.address == selfAddress => - _cachedSelf match { - case OptionVal.Some(s) if s.status == MemberStatus.Removed && _closed => - // ignore as Cluster.close has been called - case _ => - _cachedSelf = OptionVal.Some(e.member) - } - case _ => - } + // once captured, optional verbose logging of event + e match { + case _: SeenChanged => // ignore + case event => + if (cluster.settings.LogInfoVerbose) + logInfo("event {}", event) + } - // once captured, optional verbose logging of event - e match { - case _: SeenChanged => // ignore - case event => - if (cluster.settings.LogInfoVerbose) - logInfo("event {}", event) - } - - case s: CurrentClusterState => _state = s - } - }).withDispatcher(cluster.settings.UseDispatcher).withDeploy(Deploy.local), name = "clusterEventBusListener") + case s: CurrentClusterState => _state = s + } + }).withDispatcher(cluster.settings.UseDispatcher).withDeploy(Deploy.local), name = "clusterEventBusListener") } def state: CurrentClusterState = _state @@ -127,7 +126,8 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { private def selfFromStateOrPlaceholder = { import cluster.selfUniqueAddress - 
state.members.find(_.uniqueAddress == selfUniqueAddress) + state.members + .find(_.uniqueAddress == selfUniqueAddress) .getOrElse(Member(selfUniqueAddress, cluster.selfRoles).copy(status = MemberStatus.Removed)) } @@ -177,8 +177,8 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { def isAvailable: Boolean = { val myself = self !unreachableMembers.contains(myself) && - myself.status != MemberStatus.Down && - myself.status != MemberStatus.Removed + myself.status != MemberStatus.Down && + myself.status != MemberStatus.Removed } def reachability: Reachability = _reachability diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala index 5ed99cd96a..109161ef2d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterRemoteWatcher.scala @@ -21,18 +21,22 @@ import akka.remote.RARP * INTERNAL API */ private[cluster] object ClusterRemoteWatcher { + /** * Factory method for `ClusterRemoteWatcher` [[akka.actor.Props]]. 
*/ - def props( - failureDetector: FailureDetectorRegistry[Address], - heartbeatInterval: FiniteDuration, - unreachableReaperInterval: FiniteDuration, - heartbeatExpectedResponseAfter: FiniteDuration): Props = - Props(classOf[ClusterRemoteWatcher], failureDetector, heartbeatInterval, unreachableReaperInterval, - heartbeatExpectedResponseAfter).withDeploy(Deploy.local) + def props(failureDetector: FailureDetectorRegistry[Address], + heartbeatInterval: FiniteDuration, + unreachableReaperInterval: FiniteDuration, + heartbeatExpectedResponseAfter: FiniteDuration): Props = + Props(classOf[ClusterRemoteWatcher], + failureDetector, + heartbeatInterval, + unreachableReaperInterval, + heartbeatExpectedResponseAfter).withDeploy(Deploy.local) - private final case class DelayedQuarantine(m: Member, previousStatus: MemberStatus) extends NoSerializationVerificationNeeded + private final case class DelayedQuarantine(m: Member, previousStatus: MemberStatus) + extends NoSerializationVerificationNeeded } @@ -47,16 +51,11 @@ private[cluster] object ClusterRemoteWatcher { * over responsibility from `RemoteWatcher` if a watch is added before a node is member * of the cluster and then later becomes cluster member. 
*/ -private[cluster] class ClusterRemoteWatcher( - failureDetector: FailureDetectorRegistry[Address], - heartbeatInterval: FiniteDuration, - unreachableReaperInterval: FiniteDuration, - heartbeatExpectedResponseAfter: FiniteDuration) - extends RemoteWatcher( - failureDetector, - heartbeatInterval, - unreachableReaperInterval, - heartbeatExpectedResponseAfter) { +private[cluster] class ClusterRemoteWatcher(failureDetector: FailureDetectorRegistry[Address], + heartbeatInterval: FiniteDuration, + unreachableReaperInterval: FiniteDuration, + heartbeatExpectedResponseAfter: FiniteDuration) + extends RemoteWatcher(failureDetector, heartbeatInterval, unreachableReaperInterval, heartbeatExpectedResponseAfter) { import ClusterRemoteWatcher.DelayedQuarantine @@ -78,13 +77,13 @@ private[cluster] class ClusterRemoteWatcher( cluster.unsubscribe(self) } - override def receive = receiveClusterEvent orElse super.receive + override def receive = receiveClusterEvent.orElse(super.receive) def receiveClusterEvent: Actor.Receive = { case state: CurrentClusterState => clusterNodes = state.members.collect { case m if m.address != selfAddress => m.address } - clusterNodes foreach takeOverResponsibility - unreachable = unreachable diff clusterNodes + clusterNodes.foreach(takeOverResponsibility) + unreachable = unreachable.diff(clusterNodes) case MemberJoined(m) => memberJoined(m) case MemberUp(m) => memberUp(m) case MemberWeaklyUp(m) => memberUp(m) @@ -111,8 +110,10 @@ private[cluster] class ClusterRemoteWatcher( clusterNodes -= m.address if (previousStatus == MemberStatus.Down) { - quarantine(m.address, Some(m.uniqueAddress.longUid), - s"Cluster member removed, previous status [$previousStatus]", harmless = false) + quarantine(m.address, + Some(m.uniqueAddress.longUid), + s"Cluster member removed, previous status [$previousStatus]", + harmless = false) } else if (arteryEnabled) { // Don't quarantine gracefully removed members (leaving) directly, // give Cluster Singleton some time to 
exchange TakeOver/HandOver messages. @@ -120,7 +121,8 @@ private[cluster] class ClusterRemoteWatcher( // is triggered earlier. pendingDelayedQuarantine += m.uniqueAddress import context.dispatcher - context.system.scheduler.scheduleOnce(cluster.settings.QuarantineRemovedNodeAfter, self, DelayedQuarantine(m, previousStatus)) + context.system.scheduler + .scheduleOnce(cluster.settings.QuarantineRemovedNodeAfter, self, DelayedQuarantine(m, previousStatus)) } publishAddressTerminated(m.address) @@ -131,16 +133,20 @@ private[cluster] class ClusterRemoteWatcher( if (pendingDelayedQuarantine.nonEmpty) pendingDelayedQuarantine.find(_.address == newIncarnation.address).foreach { oldIncarnation => pendingDelayedQuarantine -= oldIncarnation - quarantine(oldIncarnation.address, Some(oldIncarnation.longUid), - s"Cluster member removed, new incarnation joined", harmless = true) + quarantine(oldIncarnation.address, + Some(oldIncarnation.longUid), + s"Cluster member removed, new incarnation joined", + harmless = true) } } def delayedQuarantine(m: Member, previousStatus: MemberStatus): Unit = { if (pendingDelayedQuarantine(m.uniqueAddress)) { pendingDelayedQuarantine -= m.uniqueAddress - quarantine(m.address, Some(m.uniqueAddress.longUid), s"Cluster member removed, previous status [$previousStatus]", - harmless = true) + quarantine(m.address, + Some(m.uniqueAddress.longUid), + s"Cluster member removed, previous status [$previousStatus]", + harmless = true) } } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 463d6eb7cf..f26624c335 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -13,13 +13,14 @@ import akka.actor.Address import akka.actor.AddressFromURIString import akka.annotation.InternalApi import akka.dispatch.Dispatchers -import akka.util.Helpers.{ ConfigOps, Requiring, 
toRootLowerCase } +import akka.util.Helpers.{ toRootLowerCase, ConfigOps, Requiring } import scala.concurrent.duration.FiniteDuration import akka.japi.Util.immutableSeq object ClusterSettings { type DataCenter = String + /** * INTERNAL API. */ @@ -44,31 +45,33 @@ final class ClusterSettings(val config: Config, val systemName: String) { val FailureDetectorImplementationClass: String = FailureDetectorConfig.getString("implementation-class") val HeartbeatInterval: FiniteDuration = { FailureDetectorConfig.getMillisDuration("heartbeat-interval") - } requiring (_ > Duration.Zero, "failure-detector.heartbeat-interval must be > 0") + }.requiring(_ > Duration.Zero, "failure-detector.heartbeat-interval must be > 0") val HeartbeatExpectedResponseAfter: FiniteDuration = { FailureDetectorConfig.getMillisDuration("expected-response-after") - } requiring (_ > Duration.Zero, "failure-detector.expected-response-after > 0") + }.requiring(_ > Duration.Zero, "failure-detector.expected-response-after > 0") val MonitoredByNrOfMembers: Int = { FailureDetectorConfig.getInt("monitored-by-nr-of-members") - } requiring (_ > 0, "failure-detector.monitored-by-nr-of-members must be > 0") + }.requiring(_ > 0, "failure-detector.monitored-by-nr-of-members must be > 0") final class CrossDcFailureDetectorSettings(val config: Config) { val ImplementationClass: String = config.getString("implementation-class") val HeartbeatInterval: FiniteDuration = { config.getMillisDuration("heartbeat-interval") - } requiring (_ > Duration.Zero, "failure-detector.heartbeat-interval must be > 0") + }.requiring(_ > Duration.Zero, "failure-detector.heartbeat-interval must be > 0") val HeartbeatExpectedResponseAfter: FiniteDuration = { config.getMillisDuration("expected-response-after") - } requiring (_ > Duration.Zero, "failure-detector.expected-response-after > 0") + }.requiring(_ > Duration.Zero, "failure-detector.expected-response-after > 0") def NrOfMonitoringActors: Int = MultiDataCenter.CrossDcConnections } 
object MultiDataCenter { - val CrossDcConnections: Int = cc.getInt("multi-data-center.cross-data-center-connections") + val CrossDcConnections: Int = cc + .getInt("multi-data-center.cross-data-center-connections") .requiring(_ > 0, "cross-data-center-connections must be > 0") - val CrossDcGossipProbability: Double = cc.getDouble("multi-data-center.cross-data-center-gossip-probability") - .requiring(d => d >= 0.0D && d <= 1.0D, "cross-data-center-gossip-probability must be >= 0.0 and <= 1.0") + val CrossDcGossipProbability: Double = cc + .getDouble("multi-data-center.cross-data-center-gossip-probability") + .requiring(d => d >= 0.0d && d <= 1.0d, "cross-data-center-gossip-probability must be >= 0.0 and <= 1.0") val CrossDcFailureDetectorSettings: CrossDcFailureDetectorSettings = new CrossDcFailureDetectorSettings(cc.getConfig("multi-data-center.failure-detector")) @@ -81,28 +84,28 @@ final class ClusterSettings(val config: Config, val systemName: String) { val key = "retry-unsuccessful-join-after" toRootLowerCase(cc.getString(key)) match { case "off" => Duration.Undefined - case _ => cc.getMillisDuration(key) requiring (_ > Duration.Zero, key + " > 0s, or off") + case _ => cc.getMillisDuration(key).requiring(_ > Duration.Zero, key + " > 0s, or off") } } val ShutdownAfterUnsuccessfulJoinSeedNodes: Duration = { val key = "shutdown-after-unsuccessful-join-seed-nodes" toRootLowerCase(cc.getString(key)) match { case "off" => Duration.Undefined - case _ => cc.getMillisDuration(key) requiring (_ > Duration.Zero, key + " > 0s, or off") + case _ => cc.getMillisDuration(key).requiring(_ > Duration.Zero, key + " > 0s, or off") } } val PeriodicTasksInitialDelay: FiniteDuration = cc.getMillisDuration("periodic-tasks-initial-delay") val GossipInterval: FiniteDuration = cc.getMillisDuration("gossip-interval") val GossipTimeToLive: FiniteDuration = { cc.getMillisDuration("gossip-time-to-live") - } requiring (_ > Duration.Zero, "gossip-time-to-live must be > 0") + }.requiring(_ > 
Duration.Zero, "gossip-time-to-live must be > 0") val LeaderActionsInterval: FiniteDuration = cc.getMillisDuration("leader-actions-interval") val UnreachableNodesReaperInterval: FiniteDuration = cc.getMillisDuration("unreachable-nodes-reaper-interval") val PublishStatsInterval: Duration = { val key = "publish-stats-interval" toRootLowerCase(cc.getString(key)) match { case "off" => Duration.Undefined - case _ => cc.getMillisDuration(key) requiring (_ >= Duration.Zero, key + " >= 0s, or off") + case _ => cc.getMillisDuration(key).requiring(_ >= Duration.Zero, key + " >= 0s, or off") } } @@ -111,7 +114,7 @@ final class ClusterSettings(val config: Config, val systemName: String) { */ val PruneGossipTombstonesAfter: Duration = { val key = "prune-gossip-tombstones-after" - cc.getMillisDuration(key) requiring (_ >= Duration.Zero, key + " >= 0s") + cc.getMillisDuration(key).requiring(_ >= Duration.Zero, key + " >= 0s") } // specific to the [[akka.cluster.DefaultDowningProvider]] @@ -119,7 +122,7 @@ final class ClusterSettings(val config: Config, val systemName: String) { val key = "auto-down-unreachable-after" toRootLowerCase(cc.getString(key)) match { case "off" => Duration.Undefined - case _ => cc.getMillisDuration(key) requiring (_ >= Duration.Zero, key + " >= 0s, or off") + case _ => cc.getMillisDuration(key).requiring(_ >= Duration.Zero, key + " >= 0s, or off") } } @@ -133,7 +136,7 @@ final class ClusterSettings(val config: Config, val systemName: String) { val key = "down-removal-margin" toRootLowerCase(cc.getString(key)) match { case "off" => Duration.Zero - case _ => cc.getMillisDuration(key) requiring (_ >= Duration.Zero, key + " >= 0s, or off") + case _ => cc.getMillisDuration(key).requiring(_ >= Duration.Zero, key + " >= 0s, or off") } } @@ -145,14 +148,15 @@ final class ClusterSettings(val config: Config, val systemName: String) { } val QuarantineRemovedNodeAfter: FiniteDuration = - cc.getMillisDuration("quarantine-removed-node-after") requiring (_ > 
Duration.Zero, "quarantine-removed-node-after must be > 0") + cc.getMillisDuration("quarantine-removed-node-after") + .requiring(_ > Duration.Zero, "quarantine-removed-node-after must be > 0") val AllowWeaklyUpMembers: Boolean = cc.getBoolean("allow-weakly-up-members") val SelfDataCenter: DataCenter = cc.getString("multi-data-center.self-data-center") val Roles: Set[String] = { - val configuredRoles = immutableSeq(cc.getStringList("roles")).toSet requiring ( + val configuredRoles = immutableSeq(cc.getStringList("roles")).toSet.requiring( _.forall(!_.startsWith(DcRolePrefix)), s"Roles must not start with '$DcRolePrefix' as that is reserved for the cluster self-data-center setting") @@ -161,12 +165,16 @@ final class ClusterSettings(val config: Config, val systemName: String) { val MinNrOfMembers: Int = { cc.getInt("min-nr-of-members") - } requiring (_ > 0, "min-nr-of-members must be > 0") + }.requiring(_ > 0, "min-nr-of-members must be > 0") val MinNrOfMembersOfRole: Map[String, Int] = { import scala.collection.JavaConverters._ - cc.getConfig("role").root.asScala.collect { - case (key, value: ConfigObject) => key -> value.toConfig.getInt("min-nr-of-members") - }.toMap + cc.getConfig("role") + .root + .asScala + .collect { + case (key, value: ConfigObject) => key -> value.toConfig.getInt("min-nr-of-members") + } + .toMap } val RunCoordinatedShutdownWhenDown: Boolean = cc.getBoolean("run-coordinated-shutdown-when-down") val JmxEnabled: Boolean = cc.getBoolean("jmx.enabled") @@ -183,9 +191,7 @@ final class ClusterSettings(val config: Config, val systemName: String) { val ByPassConfigCompatCheck: Boolean = !cc.getBoolean("configuration-compatibility-check.enforce-on-join") val ConfigCompatCheckers: Set[String] = { import scala.collection.JavaConverters._ - cc.getConfig("configuration-compatibility-check.checkers") - .root.unwrapped.values().asScala - .map(_.toString).toSet + 
cc.getConfig("configuration-compatibility-check.checkers").root.unwrapped.values().asScala.map(_.toString).toSet } val SensitiveConfigPaths = { @@ -193,7 +199,10 @@ final class ClusterSettings(val config: Config, val systemName: String) { val sensitiveKeys = cc.getConfig("configuration-compatibility-check.sensitive-config-paths") - .root.unwrapped.values().asScala + .root + .unwrapped + .values() + .asScala .flatMap(_.asInstanceOf[java.util.List[String]].asScala) sensitiveKeys.toSet @@ -205,4 +214,3 @@ final class ClusterSettings(val config: Config, val systemName: String) { } } - diff --git a/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala b/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala index 51b39dc9ab..4afae28d5e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala +++ b/akka-cluster/src/main/scala/akka/cluster/CoordinatedShutdownLeave.scala @@ -45,8 +45,9 @@ private[akka] class CoordinatedShutdownLeave extends Actor { if (s.members.isEmpty) { // not joined yet done(replyTo) - } else if (s.members.exists(m => m.uniqueAddress == cluster.selfUniqueAddress && - (m.status == Leaving || m.status == Exiting || m.status == Down))) { + } else if (s.members.exists(m => + m.uniqueAddress == cluster.selfUniqueAddress && + (m.status == Leaving || m.status == Exiting || m.status == Down))) { done(replyTo) } case MemberLeft(m) => diff --git a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala index 7cbfd40d2b..8b29f28a4e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala @@ -57,16 +57,16 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg val selfHeartbeat = ClusterHeartbeatSender.Heartbeat(selfAddress) - var dataCentersState: CrossDcHeartbeatingState = 
CrossDcHeartbeatingState.init( - selfDataCenter, - crossDcFailureDetector, - crossDcSettings.NrOfMonitoringActors, - SortedSet.empty) + var dataCentersState: CrossDcHeartbeatingState = CrossDcHeartbeatingState.init(selfDataCenter, + crossDcFailureDetector, + crossDcSettings.NrOfMonitoringActors, + SortedSet.empty) // start periodic heartbeat to other nodes in cluster - val heartbeatTask = scheduler.schedule( - PeriodicTasksInitialDelay max HeartbeatInterval, - HeartbeatInterval, self, ClusterHeartbeatSender.HeartbeatTick) + val heartbeatTask = scheduler.schedule(PeriodicTasksInitialDelay max HeartbeatInterval, + HeartbeatInterval, + self, + ClusterHeartbeatSender.HeartbeatTick) override def preStart(): Unit = { cluster.subscribe(self, classOf[MemberEvent]) @@ -86,7 +86,7 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg context.actorSelection(ClusterHeartbeatReceiver.path(address)) def receive: Actor.Receive = - dormant orElse introspecting + dormant.orElse(introspecting) /** * In this state no cross-datacenter heartbeats are sent by this actor. 
@@ -143,14 +143,14 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg def removeMember(m: Member): Unit = if (m.uniqueAddress == cluster.selfUniqueAddress) { // This cluster node will be shutdown, but stop this actor immediately to avoid further updates - context stop self + context.stop(self) } else { dataCentersState = dataCentersState.removeMember(m) becomeActiveIfResponsibleForHeartbeat() } def heartbeat(): Unit = { - dataCentersState.activeReceivers foreach { to => + dataCentersState.activeReceivers.foreach { to => if (crossDcFailureDetector.isMonitoring(to.address)) { if (verboseHeartbeat) logDebug("(Cross) Heartbeat to [{}]", to.address) } else { @@ -183,10 +183,11 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg /** Idempotent, become active if this node is n-th oldest and should monitor other nodes */ private def becomeActiveIfResponsibleForHeartbeat(): Unit = { if (!activelyMonitoring && selfIsResponsibleForCrossDcHeartbeat()) { - log.info("Cross DC heartbeat becoming ACTIVE on this node (for DC: {}), monitoring other DCs oldest nodes", selfDataCenter) + log.info("Cross DC heartbeat becoming ACTIVE on this node (for DC: {}), monitoring other DCs oldest nodes", + selfDataCenter) activelyMonitoring = true - context.become(active orElse introspecting) + context.become(active.orElse(introspecting)) } else if (!activelyMonitoring) if (verboseHeartbeat) log.info("Remaining DORMANT; others in {} handle heartbeating other DCs", selfDataCenter) } @@ -210,11 +211,10 @@ private[akka] object CrossDcHeartbeatSender { /** INTERNAL API */ @InternalApi -private[cluster] final case class CrossDcHeartbeatingState( - selfDataCenter: DataCenter, - failureDetector: FailureDetectorRegistry[Address], - nrOfMonitoredNodesPerDc: Int, - state: Map[ClusterSettings.DataCenter, SortedSet[Member]]) { +private[cluster] final case class CrossDcHeartbeatingState(selfDataCenter: DataCenter, + failureDetector: 
FailureDetectorRegistry[Address], + nrOfMonitoredNodesPerDc: Int, + state: Map[ClusterSettings.DataCenter, SortedSet[Member]]) { import CrossDcHeartbeatingState._ /** @@ -238,8 +238,7 @@ private[cluster] final case class CrossDcHeartbeatingState( // we need to remove the member first, to avoid having "duplicates" // this is because the removal and uniqueness we need is only by uniqueAddress // which is not used by the `ageOrdering` - val oldMembersWithoutM = state.getOrElse(dc, emptyMembersSortedSet) - .filterNot(_.uniqueAddress == m.uniqueAddress) + val oldMembersWithoutM = state.getOrElse(dc, emptyMembersSortedSet).filterNot(_.uniqueAddress == m.uniqueAddress) val updatedMembers = oldMembersWithoutM + m val updatedState = this.copy(state = state.updated(dc, updatedMembers)) @@ -249,7 +248,7 @@ private[cluster] final case class CrossDcHeartbeatingState( // should happen rarely, since upNumbers are assigned sequentially, and we only ever compare nodes // in the same DC. If it happens though, we need to remove the previously monitored node from the failure // detector, to prevent both a resource leak and that node actually appearing as unreachable in the gossip (!) 
- val stoppedMonitoringReceivers = updatedState.activeReceiversIn(dc) diff this.activeReceiversIn(dc) + val stoppedMonitoringReceivers = updatedState.activeReceiversIn(dc).diff(this.activeReceiversIn(dc)) stoppedMonitoringReceivers.foreach(m => failureDetector.remove(m.address)) // at most one element difference updatedState @@ -273,9 +272,7 @@ private[cluster] final case class CrossDcHeartbeatingState( val otherDcs = state.filter(_._1 != selfDataCenter) val allOtherNodes = otherDcs.values - allOtherNodes.flatMap( - _.take(nrOfMonitoredNodesPerDc).iterator - .map(_.uniqueAddress).to(immutable.IndexedSeq)).toSet + allOtherNodes.flatMap(_.take(nrOfMonitoredNodesPerDc).iterator.map(_.uniqueAddress).to(immutable.IndexedSeq)).toSet } /** Lists addresses in given DataCenter that this node should send heartbeats to */ @@ -283,9 +280,7 @@ private[cluster] final case class CrossDcHeartbeatingState( if (dc == selfDataCenter) Set.empty // CrossDcHeartbeatSender is not supposed to send within its own Dc else { val otherNodes = state.getOrElse(dc, emptyMembersSortedSet) - otherNodes - .take(nrOfMonitoredNodesPerDc).iterator - .map(_.uniqueAddress).to(immutable.Set) + otherNodes.take(nrOfMonitoredNodesPerDc).iterator.map(_.uniqueAddress).to(immutable.Set) } def allMembers: Iterable[Member] = @@ -293,7 +288,7 @@ private[cluster] final case class CrossDcHeartbeatingState( def heartbeatRsp(from: UniqueAddress): CrossDcHeartbeatingState = { if (activeReceivers.contains(from)) { - failureDetector heartbeat from.address + failureDetector.heartbeat(from.address) } this } @@ -314,30 +309,25 @@ private[cluster] object CrossDcHeartbeatingState { def atLeastInUpState(m: Member): Boolean = m.status != MemberStatus.WeaklyUp && m.status != MemberStatus.Joining - def init( - selfDataCenter: DataCenter, - crossDcFailureDetector: FailureDetectorRegistry[Address], - nrOfMonitoredNodesPerDc: Int, - members: SortedSet[Member]): CrossDcHeartbeatingState = { - new CrossDcHeartbeatingState( - 
selfDataCenter, - crossDcFailureDetector, - nrOfMonitoredNodesPerDc, - state = { - // TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc - val groupedByDc = members.filter(atLeastInUpState).groupBy(_.dataCenter) + def init(selfDataCenter: DataCenter, + crossDcFailureDetector: FailureDetectorRegistry[Address], + nrOfMonitoredNodesPerDc: Int, + members: SortedSet[Member]): CrossDcHeartbeatingState = { + new CrossDcHeartbeatingState(selfDataCenter, crossDcFailureDetector, nrOfMonitoredNodesPerDc, state = { + // TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc + val groupedByDc = members.filter(atLeastInUpState).groupBy(_.dataCenter) - if (members.ordering == Member.ageOrdering) { - // we already have the right ordering - groupedByDc - } else { - // we need to enforce the ageOrdering for the SortedSet in each DC - groupedByDc.map { - case (dc, ms) => - dc -> (SortedSet.empty[Member](Member.ageOrdering) union ms) - } + if (members.ordering == Member.ageOrdering) { + // we already have the right ordering + groupedByDc + } else { + // we need to enforce the ageOrdering for the SortedSet in each DC + groupedByDc.map { + case (dc, ms) => + dc -> (SortedSet.empty[Member](Member.ageOrdering).union(ms)) } - }) + } + }) } } diff --git a/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala b/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala index a8930b6a57..4246da8d96 100644 --- a/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala +++ b/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala @@ -21,12 +21,12 @@ private[cluster] object DowningProvider { */ def load(fqcn: String, system: ActorSystem): DowningProvider = { val eas = system.asInstanceOf[ExtendedActorSystem] - eas.dynamicAccess.createInstanceFor[DowningProvider]( - fqcn, - List((classOf[ActorSystem], system))).recover { - case e => throw new ConfigurationException( - s"Could not create cluster downing 
provider [$fqcn]", e) - }.get + eas.dynamicAccess + .createInstanceFor[DowningProvider](fqcn, List((classOf[ActorSystem], system))) + .recover { + case e => throw new ConfigurationException(s"Could not create cluster downing provider [$fqcn]", e) + } + .get } } diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala index 6d6f4c3edc..b67968d864 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala @@ -62,10 +62,10 @@ private[cluster] object Gossip { @SerialVersionUID(1L) @InternalApi private[cluster] final case class Gossip( - members: immutable.SortedSet[Member], // sorted set of members with their status, sorted by address - overview: GossipOverview = GossipOverview(), - version: VectorClock = VectorClock(), // vector clock version - tombstones: Map[UniqueAddress, Gossip.Timestamp] = Map.empty) { + members: immutable.SortedSet[Member], // sorted set of members with their status, sorted by address + overview: GossipOverview = GossipOverview(), + version: VectorClock = VectorClock(), // vector clock version + tombstones: Map[UniqueAddress, Gossip.Timestamp] = Map.empty) { if (Cluster.isAssertInvariantsEnabled) assertInvariants() @@ -74,28 +74,24 @@ private[cluster] final case class Gossip( def ifTrueThrow(func: => Boolean, expected: String, actual: String): Unit = if (func) throw new IllegalArgumentException(s"$expected, but found [$actual]") - ifTrueThrow( - members.exists(_.status == Removed), - expected = s"Live members must not have status [$Removed]", - actual = s"${members.filter(_.status == Removed)}") + ifTrueThrow(members.exists(_.status == Removed), + expected = s"Live members must not have status [$Removed]", + actual = s"${members.filter(_.status == Removed)}") - val inReachabilityButNotMember = overview.reachability.allObservers diff members.map(_.uniqueAddress) - ifTrueThrow( - inReachabilityButNotMember.nonEmpty, 
- expected = "Nodes not part of cluster in reachability table", - actual = inReachabilityButNotMember.mkString(", ")) + val inReachabilityButNotMember = overview.reachability.allObservers.diff(members.map(_.uniqueAddress)) + ifTrueThrow(inReachabilityButNotMember.nonEmpty, + expected = "Nodes not part of cluster in reachability table", + actual = inReachabilityButNotMember.mkString(", ")) - val inReachabilityVersionsButNotMember = overview.reachability.versions.keySet diff members.map(_.uniqueAddress) - ifTrueThrow( - inReachabilityVersionsButNotMember.nonEmpty, - expected = "Nodes not part of cluster in reachability versions table", - actual = inReachabilityVersionsButNotMember.mkString(", ")) + val inReachabilityVersionsButNotMember = overview.reachability.versions.keySet.diff(members.map(_.uniqueAddress)) + ifTrueThrow(inReachabilityVersionsButNotMember.nonEmpty, + expected = "Nodes not part of cluster in reachability versions table", + actual = inReachabilityVersionsButNotMember.mkString(", ")) - val seenButNotMember = overview.seen diff members.map(_.uniqueAddress) - ifTrueThrow( - seenButNotMember.nonEmpty, - expected = "Nodes not part of cluster have marked the Gossip as seen", - actual = seenButNotMember.mkString(", ")) + val seenButNotMember = overview.seen.diff(members.map(_.uniqueAddress)) + ifTrueThrow(seenButNotMember.nonEmpty, + expected = "Nodes not part of cluster have marked the Gossip as seen", + actual = seenButNotMember.mkString(", ")) } @transient private lazy val membersMap: Map[UniqueAddress, Member] = @@ -118,7 +114,7 @@ private[cluster] final case class Gossip( */ def :+(member: Member): Gossip = { if (members contains member) this - else this copy (members = members + member) + else this.copy(members = members + member) } /** @@ -126,21 +122,21 @@ private[cluster] final case class Gossip( */ def seen(node: UniqueAddress): Gossip = { if (seenByNode(node)) this - else this copy (overview = overview copy (seen = overview.seen + node)) + else 
this.copy(overview = overview.copy(seen = overview.seen + node)) } /** * Marks the gossip as seen by only this node (address) by replacing the 'gossip.overview.seen' */ def onlySeen(node: UniqueAddress): Gossip = { - this copy (overview = overview copy (seen = Set(node))) + this.copy(overview = overview.copy(seen = Set(node))) } /** * Remove all seen entries */ def clearSeen(): Gossip = { - this copy (overview = overview copy (seen = Set.empty)) + this.copy(overview = overview.copy(seen = Set.empty)) } /** @@ -157,7 +153,7 @@ private[cluster] final case class Gossip( * Merges the seen table of two Gossip instances. */ def mergeSeen(that: Gossip): Gossip = - this copy (overview = overview copy (seen = overview.seen union that.overview.seen)) + this.copy(overview = overview.copy(seen = overview.seen.union(that.overview.seen))) /** * Merges two Gossip instances including membership tables, tombstones, and the VectorClock histories. @@ -168,17 +164,17 @@ private[cluster] final case class Gossip( val mergedTombstones = tombstones ++ that.tombstones // 2. merge vector clocks (but remove entries for tombstoned nodes) - val mergedVClock = mergedTombstones.keys.foldLeft(this.version merge that.version) { (vclock, node) => + val mergedVClock = mergedTombstones.keys.foldLeft(this.version.merge(that.version)) { (vclock, node) => vclock.prune(VectorClock.Node(Gossip.vclockName(node))) } // 2. merge members by selecting the single Member with highest MemberStatus out of the Member groups - val mergedMembers = Gossip.emptyMembers union Member.pickHighestPriority(this.members, that.members, mergedTombstones) + val mergedMembers = + Gossip.emptyMembers.union(Member.pickHighestPriority(this.members, that.members, mergedTombstones)) // 3. 
merge reachability table by picking records with highest version - val mergedReachability = this.overview.reachability.merge( - mergedMembers.map(_.uniqueAddress), - that.overview.reachability) + val mergedReachability = + this.overview.reachability.merge(mergedMembers.map(_.uniqueAddress), that.overview.reachability) // 4. Nobody can have seen this new gossip yet val mergedSeen = Set.empty[UniqueAddress] @@ -209,9 +205,7 @@ private[cluster] final case class Gossip( } def member(node: UniqueAddress): Member = { - membersMap.getOrElse( - node, - Member.removed(node)) // placeholder for removed member + membersMap.getOrElse(node, Member.removed(node)) // placeholder for removed member } def hasMember(node: UniqueAddress): Boolean = membersMap.contains(node) @@ -221,7 +215,7 @@ private[cluster] final case class Gossip( } def update(updatedMembers: immutable.SortedSet[Member]): Gossip = { - copy(members = updatedMembers union (members diff updatedMembers)) + copy(members = updatedMembers.union(members.diff(updatedMembers))) } /** @@ -252,7 +246,7 @@ private[cluster] final case class Gossip( val newSeen = overview.seen - member.uniqueAddress // update gossip overview - val newOverview = overview copy (seen = newSeen) + val newOverview = overview.copy(seen = newSeen) copy(members = newMembers, overview = newOverview) // update gossip } @@ -277,9 +271,8 @@ private[cluster] final case class Gossip( * Represents the overview of the cluster, holds the cluster convergence table and set with unreachable nodes. 
*/ @SerialVersionUID(1L) -private[cluster] final case class GossipOverview( - seen: Set[UniqueAddress] = Set.empty, - reachability: Reachability = Reachability.empty) { +private[cluster] final case class GossipOverview(seen: Set[UniqueAddress] = Set.empty, + reachability: Reachability = Reachability.empty) { override def toString = s"GossipOverview(reachability = [$reachability], seen = [${seen.mkString(", ")}])" @@ -302,12 +295,12 @@ object GossipEnvelope { * different in that case. */ @SerialVersionUID(2L) -private[cluster] class GossipEnvelope private ( - val from: UniqueAddress, - val to: UniqueAddress, - @volatile var g: Gossip, - serDeadline: Deadline, - @transient @volatile var ser:() => Gossip) extends ClusterMessage { +private[cluster] class GossipEnvelope private (val from: UniqueAddress, + val to: UniqueAddress, + @volatile var g: Gossip, + serDeadline: Deadline, + @transient @volatile var ser: () => Gossip) + extends ClusterMessage { def gossip: Gossip = { deserialize() diff --git a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala index 393225a99c..8cd2baedc9 100644 --- a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala +++ b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala @@ -73,10 +73,9 @@ object JoinConfigCompatChecker { // NOTE: we only check the key if effectively required // because config may contain more keys than required for this checker val incompatibleKeys = - toCheck.entrySet().asScala - .collect { - case entry if requiredKeys.contains(entry.getKey) && !checkCompat(entry) => s"${entry.getKey} is incompatible" - } + toCheck.entrySet().asScala.collect { + case entry if requiredKeys.contains(entry.getKey) && !checkCompat(entry) => s"${entry.getKey} is incompatible" + } if (incompatibleKeys.isEmpty) Valid else Invalid(incompatibleKeys.to(im.Seq)) @@ -97,10 +96,9 @@ object JoinConfigCompatChecker { 
private[cluster] def filterWithKeys(requiredKeys: im.Seq[String], config: Config): Config = { val filtered = - config.entrySet().asScala - .collect { - case e if requiredKeys.contains(e.getKey) => (e.getKey, e.getValue) - } + config.entrySet().asScala.collect { + case e if requiredKeys.contains(e.getKey) => (e.getKey, e.getValue) + } ConfigFactory.parseMap(filtered.toMap.asJava) } @@ -111,7 +109,8 @@ object JoinConfigCompatChecker { * from the passed `requiredKeys` Seq. */ @InternalApi - private[cluster] def removeSensitiveKeys(requiredKeys: im.Seq[String], clusterSettings: ClusterSettings): im.Seq[String] = { + private[cluster] def removeSensitiveKeys(requiredKeys: im.Seq[String], + clusterSettings: ClusterSettings): im.Seq[String] = { requiredKeys.filter { key => !clusterSettings.SensitiveConfigPaths.exists(s => key.startsWith(s)) } @@ -174,6 +173,7 @@ sealed trait ConfigValidation { } case object Valid extends ConfigValidation { + /** * Java API: get the singleton instance */ diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index a4c3a2677d..c6e51696db 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -18,13 +18,14 @@ import scala.runtime.AbstractFunction2 * and roles. 
*/ @SerialVersionUID(1L) -class Member private[cluster] ( - val uniqueAddress: UniqueAddress, - private[cluster] val upNumber: Int, // INTERNAL API - val status: MemberStatus, - val roles: Set[String]) extends Serializable { +class Member private[cluster] (val uniqueAddress: UniqueAddress, + private[cluster] val upNumber: Int, // INTERNAL API + val status: MemberStatus, + val roles: Set[String]) + extends Serializable { - lazy val dataCenter: DataCenter = roles.find(_.startsWith(ClusterSettings.DcRolePrefix)) + lazy val dataCenter: DataCenter = roles + .find(_.startsWith(ClusterSettings.DcRolePrefix)) .getOrElse(throw new IllegalStateException("DataCenter undefined, should not be possible")) .substring(ClusterSettings.DcRolePrefix.length) @@ -66,7 +67,7 @@ class Member private[cluster] ( if (dataCenter != other.dataCenter) throw new IllegalArgumentException( "Comparing members of different data centers with isOlderThan is not allowed. " + - s"[$this] vs. [$other]") + s"[$this] vs. [$other]") if (upNumber == other.upNumber) Member.addressOrdering.compare(address, other.address) < 0 else @@ -77,9 +78,7 @@ class Member private[cluster] ( val oldStatus = this.status if (status == oldStatus) this else { - require( - allowedTransitions(oldStatus)(status), - s"Invalid member status transition [ ${this} -> ${status}]") + require(allowedTransitions(oldStatus)(status), s"Invalid member status transition [ ${this} -> ${status}]") new Member(uniqueAddress, upNumber, status, roles) } } @@ -145,7 +144,7 @@ object Member { */ implicit val ordering: Ordering[Member] = new Ordering[Member] { def compare(a: Member, b: Member): Int = { - a.uniqueAddress compare b.uniqueAddress + a.uniqueAddress.compare(b.uniqueAddress) } } @@ -157,8 +156,8 @@ object Member { * data centers it will throw `IllegalArgumentException` if the * members belong to different data centers. 
*/ - val ageOrdering: Ordering[Member] = Ordering.fromLessThan[Member] { - (a, b) => a.isOlderThan(b) + val ageOrdering: Ordering[Member] = Ordering.fromLessThan[Member] { (a, b) => + a.isOlderThan(b) } @deprecated("Was accidentally made a public API, internal", since = "2.5.4") @@ -169,7 +168,9 @@ object Member { * INTERNAL API. */ @InternalApi - private[akka] def pickHighestPriority(a: Set[Member], b: Set[Member], tombstones: Map[UniqueAddress, Long]): Set[Member] = { + private[akka] def pickHighestPriority(a: Set[Member], + b: Set[Member], + tombstones: Map[UniqueAddress, Long]): Set[Member] = { // group all members by Address => Seq[Member] val groupedByAddress = (a.toSeq ++ b.toSeq).groupBy(_.uniqueAddress) // pick highest MemberStatus @@ -178,7 +179,8 @@ object Member { if (members.size == 2) acc + members.reduceLeft(highestPriorityOf) else { val m = members.head - if (tombstones.contains(m.uniqueAddress) || MembershipState.removeUnreachableWithMemberStatus(m.status)) acc // removed + if (tombstones.contains(m.uniqueAddress) || MembershipState.removeUnreachableWithMemberStatus(m.status)) + acc // removed else acc + m } } @@ -191,21 +193,22 @@ object Member { if (m1.status == m2.status) // preserve the oldest in case of different upNumber if (m1.isOlderThan(m2)) m1 else m2 - else (m1.status, m2.status) match { - case (Removed, _) => m1 - case (_, Removed) => m2 - case (Down, _) => m1 - case (_, Down) => m2 - case (Exiting, _) => m1 - case (_, Exiting) => m2 - case (Leaving, _) => m1 - case (_, Leaving) => m2 - case (Joining, _) => m2 - case (_, Joining) => m1 - case (WeaklyUp, _) => m2 - case (_, WeaklyUp) => m1 - case (Up, Up) => m1 - } + else + (m1.status, m2.status) match { + case (Removed, _) => m1 + case (_, Removed) => m2 + case (Down, _) => m1 + case (_, Down) => m2 + case (Exiting, _) => m1 + case (_, Exiting) => m2 + case (Leaving, _) => m1 + case (_, Leaving) => m2 + case (Joining, _) => m2 + case (_, Joining) => m1 + case (WeaklyUp, _) => m2 + case 
(_, WeaklyUp) => m1 + case (Up, Up) => m1 + } } } @@ -265,14 +268,13 @@ object MemberStatus { * INTERNAL API */ private[cluster] val allowedTransitions: Map[MemberStatus, Set[MemberStatus]] = - Map( - Joining -> Set(WeaklyUp, Up, Leaving, Down, Removed), - WeaklyUp -> Set(Up, Leaving, Down, Removed), - Up -> Set(Leaving, Down, Removed), - Leaving -> Set(Exiting, Down, Removed), - Down -> Set(Removed), - Exiting -> Set(Removed, Down), - Removed -> Set.empty[MemberStatus]) + Map(Joining -> Set(WeaklyUp, Up, Leaving, Down, Removed), + WeaklyUp -> Set(Up, Leaving, Down, Removed), + Up -> Set(Leaving, Down, Removed), + Leaving -> Set(Exiting, Down, Removed), + Down -> Set(Removed), + Exiting -> Set(Removed, Down), + Removed -> Set.empty[MemberStatus]) } object UniqueAddress extends AbstractFunction2[Address, Int, UniqueAddress] { diff --git a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala index 5cf8c5574c..5fb6046adb 100644 --- a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala +++ b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala @@ -30,11 +30,10 @@ import scala.util.Random /** * INTERNAL API */ -@InternalApi private[akka] final case class MembershipState( - latestGossip: Gossip, - selfUniqueAddress: UniqueAddress, - selfDc: DataCenter, - crossDcConnections: Int) { +@InternalApi private[akka] final case class MembershipState(latestGossip: Gossip, + selfUniqueAddress: UniqueAddress, + selfDc: DataCenter, + crossDcConnections: Int) { import MembershipState._ @@ -57,8 +56,9 @@ import scala.util.Random // If another member in the data center that is UP or LEAVING and has not seen this gossip or is exiting // convergence cannot be reached def memberHinderingConvergenceExists = - members.exists(member => - member.dataCenter == selfDc && + members.exists( + member => + member.dataCenter == selfDc && convergenceMemberStatus(member.status) && 
!(latestGossip.seenByNode(member.uniqueAddress) || exitingConfirmed(member.uniqueAddress))) @@ -86,14 +86,18 @@ import scala.util.Random * but including observed unreachable nodes outside of the data center */ lazy val dcReachabilityWithoutObservationsWithin: Reachability = - dcReachability.filterRecords { r => latestGossip.member(r.subject).dataCenter != selfDc } + dcReachability.filterRecords { r => + latestGossip.member(r.subject).dataCenter != selfDc + } /** * @return reachability for data center nodes, with observations from outside the data center or from downed nodes filtered out */ lazy val dcReachabilityExcludingDownedObservers: Reachability = { val membersToExclude = members.collect { case m if m.status == Down || m.dataCenter != selfDc => m.uniqueAddress } - overview.reachability.removeObservers(membersToExclude).remove(members.collect { case m if m.dataCenter != selfDc => m.uniqueAddress }) + overview.reachability + .removeObservers(membersToExclude) + .remove(members.collect { case m if m.dataCenter != selfDc => m.uniqueAddress }) } lazy val dcReachabilityNoOutsideNodes: Reachability = @@ -155,14 +159,18 @@ import scala.util.Random val reachableMembersInDc = if (reachability.isAllReachable) mbrs.filter(m => m.dataCenter == selfDc && m.status != Down) - else mbrs.filter(m => - m.dataCenter == selfDc && - m.status != Down && - (reachability.isReachable(m.uniqueAddress) || m.uniqueAddress == selfUniqueAddress)) + else + mbrs.filter( + m => + m.dataCenter == selfDc && + m.status != Down && + (reachability.isReachable(m.uniqueAddress) || m.uniqueAddress == selfUniqueAddress)) if (reachableMembersInDc.isEmpty) None - else reachableMembersInDc.find(m => leaderMemberStatus(m.status)) - .orElse(Some(reachableMembersInDc.min(Member.leaderStatusOrdering))) - .map(_.uniqueAddress) + else + reachableMembersInDc + .find(m => leaderMemberStatus(m.status)) + .orElse(Some(reachableMembersInDc.min(Member.leaderStatusOrdering))) + .map(_.uniqueAddress) } def 
isInSameDc(node: UniqueAddress): Boolean = @@ -213,9 +221,8 @@ import scala.util.Random /** * INTERNAL API */ -@InternalApi private[akka] class GossipTargetSelector( - reduceGossipDifferentViewProbability: Double, - crossDcGossipProbability: Double) { +@InternalApi private[akka] class GossipTargetSelector(reduceGossipDifferentViewProbability: Double, + crossDcGossipProbability: Double) { final def gossipTarget(state: MembershipState): Option[UniqueAddress] = { selectRandomNode(gossipTargets(state)) @@ -275,10 +282,14 @@ import scala.util.Random if (preferNodesWithDifferentView(state)) { // If it's time to try to gossip to some nodes with a different view // gossip to a random alive same dc member with preference to a member with older gossip version - latestGossip.members.iterator.collect { - case m if m.dataCenter == state.selfDc && !latestGossip.seenByNode(m.uniqueAddress) && state.validNodeForGossip(m.uniqueAddress) => - m.uniqueAddress - }.to(Vector) + latestGossip.members.iterator + .collect { + case m + if m.dataCenter == state.selfDc && !latestGossip.seenByNode(m.uniqueAddress) && state + .validNodeForGossip(m.uniqueAddress) => + m.uniqueAddress + } + .to(Vector) } else Vector.empty // Fall back to localGossip @@ -308,7 +319,6 @@ import scala.util.Random def findFirstDcWithValidNodes(left: List[DataCenter]): Vector[UniqueAddress] = left match { case dc :: tail => - val validNodes = nodesPerDc(dc).collect { case member if state.validNodeForGossip(member.uniqueAddress) => member.uniqueAddress @@ -365,12 +375,13 @@ import scala.util.Random */ protected def selectDcLocalNodes(state: MembershipState): Boolean = { val localMembers = state.dcMembers.size - val probability = if (localMembers > 4) - crossDcGossipProbability - else { - // don't go below the configured probability - math.max((5 - localMembers) * 0.25, crossDcGossipProbability) - } + val probability = + if (localMembers > 4) + crossDcGossipProbability + else { + // don't go below the configured 
probability + math.max((5 - localMembers) * 0.25, crossDcGossipProbability) + } ThreadLocalRandom.current.nextDouble() > probability } diff --git a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala index aa3b5be486..492c1b2bcb 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala @@ -55,9 +55,9 @@ private[cluster] object Reachability { */ @SerialVersionUID(1L) @InternalApi -private[cluster] class Reachability private ( - val records: immutable.IndexedSeq[Reachability.Record], - val versions: Map[UniqueAddress, Long]) extends Serializable { +private[cluster] class Reachability private (val records: immutable.IndexedSeq[Reachability.Record], + val versions: Map[UniqueAddress, Long]) + extends Serializable { import Reachability._ @@ -75,7 +75,7 @@ private[cluster] class Reachability private ( var allTerminated = Set.empty[UniqueAddress] var allUnreachable = Set.empty[UniqueAddress] - records foreach { r => + records.foreach { r => val m = mapBuilder.get(r.observer) match { case None => Map(r.subject -> r) case Some(m) => m.updated(r.subject, r) @@ -88,13 +88,13 @@ private[cluster] class Reachability private ( val observerRowsMap: Map[UniqueAddress, Map[UniqueAddress, Reachability.Record]] = mapBuilder.toMap - (observerRowsMap, allUnreachable diff allTerminated, allTerminated) + (observerRowsMap, allUnreachable.diff(allTerminated), allTerminated) } } val allUnreachableOrTerminated: Set[UniqueAddress] = if (allTerminated.isEmpty) allUnreachable - else allUnreachable union allTerminated + else allUnreachable.union(allTerminated) } @@ -132,7 +132,6 @@ private[cluster] class Reachability private ( // otherwise, update old observations case Some(oldObserverRows) => - oldObserverRows.get(subject) match { case None => if (status == Reachable && oldObserverRows.forall { case (_, r) => r.status == Reachable }) { @@ -145,7 
+144,9 @@ private[cluster] class Reachability private ( if (oldRecord.status == Terminated || oldRecord.status == status) this else { - if (status == Reachable && oldObserverRows.forall { case (_, r) => r.status == Reachable || r.subject == subject }) { + if (status == Reachable && oldObserverRows.forall { + case (_, r) => r.status == Reachable || r.subject == subject + }) { // all Reachable, prune by removing the records of the observer, and bump the version new Reachability(records.filterNot(_.observer == observer), newVersions) } else { @@ -161,12 +162,12 @@ private[cluster] class Reachability private ( val recordBuilder = new immutable.VectorBuilder[Record] recordBuilder.sizeHint(math.max(this.records.size, other.records.size)) var newVersions = versions - allowed foreach { observer => + allowed.foreach { observer => val observerVersion1 = this.currentVersion(observer) val observerVersion2 = other.currentVersion(observer) (this.observerRows(observer), other.observerRows(observer)) match { - case (None, None) => + case (None, None) => case (Some(rows1), Some(rows2)) => // We throw away a complete set of records based on the version here. Couldn't we lose records here? No, // because the observer gossips always the complete set of records. 
(That's hard to see in the model, because @@ -213,10 +214,11 @@ private[cluster] class Reachability private ( def status(observer: UniqueAddress, subject: UniqueAddress): ReachabilityStatus = observerRows(observer) match { case None => Reachable - case Some(observerRows) => observerRows.get(subject) match { - case None => Reachable - case Some(record) => record.status - } + case Some(observerRows) => + observerRows.get(subject) match { + case None => Reachable + case Some(record) => record.status + } } def status(node: UniqueAddress): ReachabilityStatus = @@ -262,9 +264,11 @@ private[cluster] class Reachability private ( observerRows(observer) match { case None => Set.empty case Some(observerRows) => - observerRows.iterator.collect { - case (subject, record) if record.status == Unreachable => subject - }.to(immutable.Set) + observerRows.iterator + .collect { + case (subject, record) if record.status == Unreachable => subject + } + .to(immutable.Set) } def observersGroupedByUnreachable: Map[UniqueAddress, Set[UniqueAddress]] = { @@ -292,7 +296,7 @@ private[cluster] class Reachability private ( override def equals(obj: Any): Boolean = obj match { case other: Reachability => records.size == other.records.size && versions == other.versions && - cache.observerRowsMap == other.cache.observerRowsMap + cache.observerRowsMap == other.cache.observerRowsMap case _ => false } @@ -313,4 +317,3 @@ private[cluster] class Reachability private ( } } - diff --git a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala index 0e047c2893..00669654a8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala +++ b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala @@ -28,8 +28,10 @@ private[cluster] object VectorClock { private def hash(name: String): String = { val digester = MessageDigest.getInstance("MD5") - digester update name.getBytes("UTF-8") - digester.digest.map { h => "%02x".format(0xFF & h) 
}.mkString + digester.update(name.getBytes("UTF-8")) + digester.digest.map { h => + "%02x".format(0xFF & h) + }.mkString } } @@ -43,6 +45,7 @@ private[cluster] object VectorClock { case object Before extends Ordering case object Same extends Ordering case object Concurrent extends Ordering + /** * Marker to ensure that we do a full order comparison instead of bailing out early. */ @@ -66,8 +69,7 @@ private[cluster] object VectorClock { * Based on code from the 'vlock' VectorClock library by Coda Hale. */ @SerialVersionUID(1L) -final case class VectorClock( - versions: TreeMap[VectorClock.Node, Long] = TreeMap.empty[VectorClock.Node, Long]) { +final case class VectorClock(versions: TreeMap[VectorClock.Node, Long] = TreeMap.empty[VectorClock.Node, Long]) { import VectorClock._ @@ -119,12 +121,15 @@ final case class VectorClock( if ((requestedOrder ne FullOrder) && (currentOrder ne Same) && (currentOrder ne requestedOrder)) currentOrder else if ((nt1 eq cmpEndMarker) && (nt2 eq cmpEndMarker)) currentOrder // i1 is empty but i2 is not, so i1 can only be Before - else if (nt1 eq cmpEndMarker) { if (currentOrder eq After) Concurrent else Before } + else if (nt1 eq cmpEndMarker) { + if (currentOrder eq After) Concurrent else Before + } // i2 is empty but i1 is not, so i1 can only be After - else if (nt2 eq cmpEndMarker) { if (currentOrder eq Before) Concurrent else After } - else { + else if (nt2 eq cmpEndMarker) { + if (currentOrder eq Before) Concurrent else After + } else { // compare the nodes - val nc = nt1._1 compareTo nt2._1 + val nc = nt1._1.compareTo(nt2._1) if (nc == 0) { // both nodes exist compare the timestamps // same timestamp so just continue with the next nodes diff --git a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala index 51d06b451c..80cc9a45ca 100644 --- a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala 
+++ b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala @@ -54,7 +54,9 @@ private[akka] object ClusterMessageSerializer { /** * Protobuf serializer of cluster messages. */ -final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer { +final class ClusterMessageSerializer(val system: ExtendedActorSystem) + extends SerializerWithStringManifest + with BaseSerializer { import ClusterMessageSerializer._ private lazy val serialization = SerializationExtension(system) @@ -154,7 +156,8 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se private def addressToProtoByteArray(address: Address): Array[Byte] = addressToProto(address).build.toByteArray private def uniqueAddressToProto(uniqueAddress: UniqueAddress): cm.UniqueAddress.Builder = { - cm.UniqueAddress.newBuilder() + cm.UniqueAddress + .newBuilder() .setAddress(addressToProto(uniqueAddress.address)) .setUid(uniqueAddress.longUid.toInt) .setUid2((uniqueAddress.longUid >> 32).toInt) @@ -173,8 +176,7 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se private def poolToProto(pool: Pool): cm.Pool = { val builder = cm.Pool.newBuilder() val serializer = serialization.findSerializerFor(pool) - builder.setSerializerId(serializer.identifier) - .setData(ByteString.copyFrom(serializer.toBinary(pool))) + builder.setSerializerId(serializer.identifier).setData(ByteString.copyFrom(serializer.toBinary(pool))) val manifest = Serializers.manifestFor(serializer, pool) builder.setManifest(manifest) builder.build() @@ -182,7 +184,8 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se private def clusterRouterPoolSettingsToProto(settings: ClusterRouterPoolSettings): cm.ClusterRouterPoolSettings = { val builder = cm.ClusterRouterPoolSettings.newBuilder() - builder.setAllowLocalRoutees(settings.allowLocalRoutees) + builder + 
.setAllowLocalRoutees(settings.allowLocalRoutees) .setMaxInstancesPerNode(settings.maxInstancesPerNode) .setTotalInstances(settings.totalInstances) .addAllUseRoles(settings.useRoles.asJava) @@ -222,11 +225,9 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se private def deserializeJoin(bytes: Array[Byte]): InternalClusterAction.Join = { val m = cm.Join.parseFrom(bytes) val roles = Set.empty[String] ++ m.getRolesList.asScala - InternalClusterAction.Join( - uniqueAddressFromProto(m.getNode), - if (roles.exists(_.startsWith(ClusterSettings.DcRolePrefix))) roles - else roles + (ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter) - ) + InternalClusterAction.Join(uniqueAddressFromProto(m.getNode), + if (roles.exists(_.startsWith(ClusterSettings.DcRolePrefix))) roles + else roles + (ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter)) } private def deserializeWelcome(bytes: Array[Byte]): InternalClusterAction.Welcome = { @@ -255,7 +256,8 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se val i = cm.InitJoinAck.parseFrom(bytes) val configCheck = i.getConfigCheck.getType match { - case cm.ConfigCheck.Type.CompatibleConfig => CompatibleConfig(ConfigFactory.parseString(i.getConfigCheck.getClusterConfig)) + case cm.ConfigCheck.Type.CompatibleConfig => + CompatibleConfig(ConfigFactory.parseString(i.getConfigCheck.getClusterConfig)) case cm.ConfigCheck.Type.IncompatibleConfig => IncompatibleConfig case cm.ConfigCheck.Type.UncheckedConfig => UncheckedConfig } @@ -289,15 +291,13 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se private def uniqueAddressFromProto(uniqueAddress: cm.UniqueAddress): UniqueAddress = { - UniqueAddress( - addressFromProto(uniqueAddress.getAddress), - if (uniqueAddress.hasUid2) { - // new remote node join the two parts of the long uid back - (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0xFFFFFFFFL) - } else { - 
// old remote node - uniqueAddress.getUid.toLong - }) + UniqueAddress(addressFromProto(uniqueAddress.getAddress), if (uniqueAddress.hasUid2) { + // new remote node join the two parts of the long uid back + (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0XFFFFFFFFL) + } else { + // old remote node + uniqueAddress.getUid.toLong + }) } private val memberStatusToInt = scala.collection.immutable.HashMap[MemberStatus, Int]( @@ -327,9 +327,7 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se cm.Join.newBuilder().setNode(uniqueAddressToProto(node)).addAllRoles(roles.asJava).build() private def initJoinToProto(currentConfig: Config): cm.InitJoin = { - cm.InitJoin.newBuilder() - .setCurrentConfig(currentConfig.root.render(ConfigRenderOptions.concise)) - .build() + cm.InitJoin.newBuilder().setCurrentConfig(currentConfig.root.render(ConfigRenderOptions.concise)).build() } private def initJoinAckToByteArray(address: Address, configCheck: ConfigCheck): Array[Byte] = { @@ -359,10 +357,7 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se throw new IllegalStateException("Unexpected ConfigCheckUnsupportedByJoiningNode") } - cm.InitJoinAck.newBuilder(). - setAddress(addressToProto(address)). - setConfigCheck(configCheckBuilder.build()). 
- build() + cm.InitJoinAck.newBuilder().setAddress(addressToProto(address)).setConfigCheck(configCheckBuilder.build()).build() } private def welcomeToProto(from: UniqueAddress, gossip: Gossip): cm.Welcome = @@ -372,47 +367,57 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se val allMembers = gossip.members.toVector val allAddresses: Vector[UniqueAddress] = allMembers.map(_.uniqueAddress) ++ gossip.tombstones.keys val addressMapping = allAddresses.zipWithIndex.toMap - val allRoles = allMembers.foldLeft(Set.empty[String])((acc, m) => acc union m.roles).to(Vector) + val allRoles = allMembers.foldLeft(Set.empty[String])((acc, m) => acc.union(m.roles)).to(Vector) val roleMapping = allRoles.zipWithIndex.toMap val allHashes = gossip.version.versions.keys.to(Vector) val hashMapping = allHashes.zipWithIndex.toMap - def mapUniqueAddress(uniqueAddress: UniqueAddress): Integer = mapWithErrorMessage(addressMapping, uniqueAddress, "address") + def mapUniqueAddress(uniqueAddress: UniqueAddress): Integer = + mapWithErrorMessage(addressMapping, uniqueAddress, "address") def mapRole(role: String): Integer = mapWithErrorMessage(roleMapping, role, "role") def memberToProto(member: Member) = - cm.Member.newBuilder.setAddressIndex(mapUniqueAddress(member.uniqueAddress)).setUpNumber(member.upNumber). - setStatus(cm.MemberStatus.valueOf(memberStatusToInt(member.status))). 
- addAllRolesIndexes(member.roles.map(mapRole).asJava) + cm.Member.newBuilder + .setAddressIndex(mapUniqueAddress(member.uniqueAddress)) + .setUpNumber(member.upNumber) + .setStatus(cm.MemberStatus.valueOf(memberStatusToInt(member.status))) + .addAllRolesIndexes(member.roles.map(mapRole).asJava) def reachabilityToProto(reachability: Reachability): Iterable[cm.ObserverReachability.Builder] = { reachability.versions.map { case (observer, version) => - val subjectReachability = reachability.recordsFrom(observer).map(r => - cm.SubjectReachability.newBuilder().setAddressIndex(mapUniqueAddress(r.subject)). - setStatus(cm.ReachabilityStatus.valueOf(reachabilityStatusToInt(r.status))). - setVersion(r.version)) - cm.ObserverReachability.newBuilder().setAddressIndex(mapUniqueAddress(observer)).setVersion(version). - addAllSubjectReachability(subjectReachability.map(_.build).asJava) + val subjectReachability = reachability + .recordsFrom(observer) + .map( + r => + cm.SubjectReachability + .newBuilder() + .setAddressIndex(mapUniqueAddress(r.subject)) + .setStatus(cm.ReachabilityStatus.valueOf(reachabilityStatusToInt(r.status))) + .setVersion(r.version)) + cm.ObserverReachability + .newBuilder() + .setAddressIndex(mapUniqueAddress(observer)) + .setVersion(version) + .addAllSubjectReachability(subjectReachability.map(_.build).asJava) } } def tombstoneToProto(t: (UniqueAddress, Long)): cm.Tombstone = - cm.Tombstone.newBuilder() - .setAddressIndex(mapUniqueAddress(t._1)) - .setTimestamp(t._2) - .build() + cm.Tombstone.newBuilder().setAddressIndex(mapUniqueAddress(t._1)).setTimestamp(t._2).build() val reachability = reachabilityToProto(gossip.overview.reachability) val members = gossip.members.unsorted.map(memberToProto _) val seen = gossip.overview.seen.map(mapUniqueAddress) - val overview = cm.GossipOverview.newBuilder.addAllSeen(seen.asJava). 
- addAllObserverReachability(reachability.map(_.build).asJava) + val overview = + cm.GossipOverview.newBuilder.addAllSeen(seen.asJava).addAllObserverReachability(reachability.map(_.build).asJava) - cm.Gossip.newBuilder().addAllAllAddresses(allAddresses.map(uniqueAddressToProto(_).build).asJava). - addAllAllRoles(allRoles.asJava) + cm.Gossip + .newBuilder() + .addAllAllAddresses(allAddresses.map(uniqueAddressToProto(_).build).asJava) + .addAllAllRoles(allRoles.asJava) .addAllAllHashes(allHashes.asJava) .addAllMembers(members.map(_.build).asJava) .setOverview(overview) @@ -422,24 +427,29 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se private def vectorClockToProto(version: VectorClock, hashMapping: Map[String, Int]): cm.VectorClock.Builder = { val versions: Iterable[cm.VectorClock.Version.Builder] = version.versions.map { - case (n, t) => cm.VectorClock.Version.newBuilder().setHashIndex(mapWithErrorMessage(hashMapping, n, "hash")). - setTimestamp(t) + case (n, t) => + cm.VectorClock.Version.newBuilder().setHashIndex(mapWithErrorMessage(hashMapping, n, "hash")).setTimestamp(t) } cm.VectorClock.newBuilder().setTimestamp(0).addAllVersions(versions.map(_.build).asJava) } private def gossipEnvelopeToProto(envelope: GossipEnvelope): cm.GossipEnvelope = - cm.GossipEnvelope.newBuilder(). - setFrom(uniqueAddressToProto(envelope.from)). - setTo(uniqueAddressToProto(envelope.to)). - setSerializedGossip(ByteString.copyFrom(compress(gossipToProto(envelope.gossip).build))). 
- build + cm.GossipEnvelope + .newBuilder() + .setFrom(uniqueAddressToProto(envelope.from)) + .setTo(uniqueAddressToProto(envelope.to)) + .setSerializedGossip(ByteString.copyFrom(compress(gossipToProto(envelope.gossip).build))) + .build private def gossipStatusToProto(status: GossipStatus): cm.GossipStatus = { val allHashes = status.version.versions.keys.toVector val hashMapping = allHashes.zipWithIndex.toMap - cm.GossipStatus.newBuilder().setFrom(uniqueAddressToProto(status.from)).addAllAllHashes(allHashes.asJava). - setVersion(vectorClockToProto(status.version, hashMapping)).build() + cm.GossipStatus + .newBuilder() + .setFrom(uniqueAddressToProto(status.from)) + .addAllAllHashes(allHashes.asJava) + .setVersion(vectorClockToProto(status.version, hashMapping)) + .build() } private def deserializeGossipEnvelope(bytes: Array[Byte]): GossipEnvelope = @@ -462,7 +472,8 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se versionsBuilder += ((observer, o.getVersion)) for (s <- o.getSubjectReachabilityList.asScala) { val subject = addressMapping(s.getAddressIndex) - val record = Reachability.Record(observer, subject, reachabilityStatusFromInt(s.getStatus.getNumber), s.getVersion) + val record = + Reachability.Record(observer, subject, reachabilityStatusFromInt(s.getStatus.getNumber), s.getVersion) recordBuilder += record } } @@ -471,8 +482,10 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se } def memberFromProto(member: cm.Member) = - new Member(addressMapping(member.getAddressIndex), member.getUpNumber, memberStatusFromInt(member.getStatus.getNumber), - rolesFromProto(member.getRolesIndexesList.asScala.toSeq)) + new Member(addressMapping(member.getAddressIndex), + member.getUpNumber, + memberStatusFromInt(member.getStatus.getNumber), + rolesFromProto(member.getRolesIndexesList.asScala.toSeq)) def rolesFromProto(roleIndexes: Seq[Integer]): Set[String] = { var containsDc = false @@ -493,10 +506,12 @@ final 
class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se def tombstoneFromProto(tombstone: cm.Tombstone): (UniqueAddress, Long) = (addressMapping(tombstone.getAddressIndex), tombstone.getTimestamp) - val members: immutable.SortedSet[Member] = gossip.getMembersList.asScala.iterator.map(memberFromProto).to(immutable.SortedSet) + val members: immutable.SortedSet[Member] = + gossip.getMembersList.asScala.iterator.map(memberFromProto).to(immutable.SortedSet) val reachability = reachabilityFromProto(gossip.getOverview.getObserverReachabilityList.asScala) - val seen: Set[UniqueAddress] = gossip.getOverview.getSeenList.asScala.iterator.map(addressMapping(_)).to(immutable.Set) + val seen: Set[UniqueAddress] = + gossip.getOverview.getSeenList.asScala.iterator.map(addressMapping(_)).to(immutable.Set) val overview = GossipOverview(seen, reachability) val tombstones: Map[UniqueAddress, Long] = gossip.getTombstonesList.asScala.iterator.map(tombstoneFromProto).toMap @@ -504,28 +519,26 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se } private def vectorClockFromProto(version: cm.VectorClock, hashMapping: immutable.Seq[String]) = { - VectorClock(scala.collection.immutable.TreeMap.from(version.getVersionsList.asScala.iterator.map( - v => (VectorClock.Node.fromHash(hashMapping(v.getHashIndex)), v.getTimestamp)))) + VectorClock(scala.collection.immutable.TreeMap.from(version.getVersionsList.asScala.iterator.map(v => + (VectorClock.Node.fromHash(hashMapping(v.getHashIndex)), v.getTimestamp)))) } private def gossipEnvelopeFromProto(envelope: cm.GossipEnvelope): GossipEnvelope = { val serializedGossip = envelope.getSerializedGossip - GossipEnvelope(uniqueAddressFromProto(envelope.getFrom), uniqueAddressFromProto(envelope.getTo), - Deadline.now + GossipTimeToLive, () => gossipFromProto(cm.Gossip.parseFrom(decompress(serializedGossip.toByteArray)))) + GossipEnvelope(uniqueAddressFromProto(envelope.getFrom), + 
uniqueAddressFromProto(envelope.getTo), + Deadline.now + GossipTimeToLive, + () => gossipFromProto(cm.Gossip.parseFrom(decompress(serializedGossip.toByteArray)))) } private def gossipStatusFromProto(status: cm.GossipStatus): GossipStatus = - GossipStatus(uniqueAddressFromProto(status.getFrom), vectorClockFromProto( - status.getVersion, - status.getAllHashesList.asScala.toVector)) + GossipStatus(uniqueAddressFromProto(status.getFrom), + vectorClockFromProto(status.getVersion, status.getAllHashesList.asScala.toVector)) def deserializeClusterRouterPool(bytes: Array[Byte]): ClusterRouterPool = { val crp = cm.ClusterRouterPool.parseFrom(bytes) - ClusterRouterPool( - poolFromProto(crp.getPool), - clusterRouterPoolSettingsFromProto(crp.getSettings) - ) + ClusterRouterPool(poolFromProto(crp.getPool), clusterRouterPoolSettingsFromProto(crp.getSettings)) } private def poolFromProto(pool: cm.Pool): Pool = { @@ -534,16 +547,14 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) extends Se private def clusterRouterPoolSettingsFromProto(crps: cm.ClusterRouterPoolSettings): ClusterRouterPoolSettings = { // For backwards compatibility, useRoles is the combination of getUseRole and getUseRolesList - ClusterRouterPoolSettings( - totalInstances = crps.getTotalInstances, - maxInstancesPerNode = crps.getMaxInstancesPerNode, - allowLocalRoutees = crps.getAllowLocalRoutees, - useRoles = if (crps.hasUseRole) { - crps.getUseRolesList.asScala.toSet + crps.getUseRole - } else { - crps.getUseRolesList.asScala.toSet - } - ) + ClusterRouterPoolSettings(totalInstances = crps.getTotalInstances, + maxInstancesPerNode = crps.getMaxInstancesPerNode, + allowLocalRoutees = crps.getAllowLocalRoutees, + useRoles = if (crps.hasUseRole) { + crps.getUseRolesList.asScala.toSet + crps.getUseRole + } else { + crps.getUseRolesList.asScala.toSet + }) } } diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala 
b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala index 769e2e0beb..afd6619b11 100644 --- a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala +++ b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala @@ -33,38 +33,50 @@ import scala.collection.JavaConverters._ object ClusterRouterGroupSettings { @deprecated("useRole has been replaced with useRoles", since = "2.5.4") - def apply(totalInstances: Int, routeesPaths: immutable.Seq[String], allowLocalRoutees: Boolean, useRole: Option[String]): ClusterRouterGroupSettings = + def apply(totalInstances: Int, + routeesPaths: immutable.Seq[String], + allowLocalRoutees: Boolean, + useRole: Option[String]): ClusterRouterGroupSettings = ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRole.toSet) @varargs - def apply(totalInstances: Int, routeesPaths: immutable.Seq[String], allowLocalRoutees: Boolean, useRoles: String*): ClusterRouterGroupSettings = + def apply(totalInstances: Int, + routeesPaths: immutable.Seq[String], + allowLocalRoutees: Boolean, + useRoles: String*): ClusterRouterGroupSettings = ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles.toSet) // For backwards compatibility, useRoles is the combination of use-roles and use-role def fromConfig(config: Config): ClusterRouterGroupSettings = - ClusterRouterGroupSettings( - totalInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config), - routeesPaths = immutableSeq(config.getStringList("routees.paths")), - allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"), - useRoles = config.getStringList("cluster.use-roles").asScala.toSet ++ ClusterRouterSettingsBase.useRoleOption(config.getString("cluster.use-role"))) + ClusterRouterGroupSettings(totalInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config), + routeesPaths = immutableSeq(config.getStringList("routees.paths")), + allowLocalRoutees = 
config.getBoolean("cluster.allow-local-routees"), + useRoles = config + .getStringList("cluster.use-roles") + .asScala + .toSet ++ ClusterRouterSettingsBase.useRoleOption( + config.getString("cluster.use-role"))) } /** * `totalInstances` of cluster router must be > 0 */ @SerialVersionUID(1L) -final case class ClusterRouterGroupSettings( - totalInstances: Int, - routeesPaths: immutable.Seq[String], - allowLocalRoutees: Boolean, - useRoles: Set[String]) extends ClusterRouterSettingsBase { +final case class ClusterRouterGroupSettings(totalInstances: Int, + routeesPaths: immutable.Seq[String], + allowLocalRoutees: Boolean, + useRoles: Set[String]) + extends ClusterRouterSettingsBase { // For binary compatibility @deprecated("useRole has been replaced with useRoles", since = "2.5.4") def useRole: Option[String] = useRoles.headOption @deprecated("useRole has been replaced with useRoles", since = "2.5.4") - def this(totalInstances: Int, routeesPaths: immutable.Seq[String], allowLocalRoutees: Boolean, useRole: Option[String]) = + def this(totalInstances: Int, + routeesPaths: immutable.Seq[String], + allowLocalRoutees: Boolean, + useRole: Option[String]) = this(totalInstances, routeesPaths, allowLocalRoutees, useRole.toSet) /** @@ -77,12 +89,18 @@ final case class ClusterRouterGroupSettings( /** * Java API */ - def this(totalInstances: Int, routeesPaths: java.lang.Iterable[String], allowLocalRoutees: Boolean, useRoles: java.util.Set[String]) = + def this(totalInstances: Int, + routeesPaths: java.lang.Iterable[String], + allowLocalRoutees: Boolean, + useRoles: java.util.Set[String]) = this(totalInstances, immutableSeq(routeesPaths), allowLocalRoutees, useRoles.asScala.toSet) // For binary compatibility @deprecated("Use constructor with useRoles instead", since = "2.5.4") - def copy(totalInstances: Int = totalInstances, routeesPaths: immutable.Seq[String] = routeesPaths, allowLocalRoutees: Boolean = allowLocalRoutees, useRole: Option[String] = useRole): 
ClusterRouterGroupSettings = + def copy(totalInstances: Int = totalInstances, + routeesPaths: immutable.Seq[String] = routeesPaths, + allowLocalRoutees: Boolean = allowLocalRoutees, + useRole: Option[String] = useRole): ClusterRouterGroupSettings = new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRole) if (totalInstances <= 0) throw new IllegalArgumentException("totalInstances of cluster router must be > 0") @@ -95,33 +113,45 @@ final case class ClusterRouterGroupSettings( throw new IllegalArgumentException(s"routeesPaths [$p] is not a valid actor path without address information") } - def withUseRoles(useRoles: Set[String]): ClusterRouterGroupSettings = new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles) + def withUseRoles(useRoles: Set[String]): ClusterRouterGroupSettings = + new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles) @varargs - def withUseRoles(useRoles: String*): ClusterRouterGroupSettings = new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles.toSet) + def withUseRoles(useRoles: String*): ClusterRouterGroupSettings = + new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles.toSet) /** * Java API */ - def withUseRoles(useRoles: java.util.Set[String]): ClusterRouterGroupSettings = new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles.asScala.toSet) + def withUseRoles(useRoles: java.util.Set[String]): ClusterRouterGroupSettings = + new ClusterRouterGroupSettings(totalInstances, routeesPaths, allowLocalRoutees, useRoles.asScala.toSet) } object ClusterRouterPoolSettings { @deprecated("useRole has been replaced with useRoles", since = "2.5.4") - def apply(totalInstances: Int, maxInstancesPerNode: Int, allowLocalRoutees: Boolean, useRole: Option[String]): ClusterRouterPoolSettings = + def apply(totalInstances: Int, + maxInstancesPerNode: Int, + 
allowLocalRoutees: Boolean, + useRole: Option[String]): ClusterRouterPoolSettings = ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRole.toSet) @varargs - def apply(totalInstances: Int, maxInstancesPerNode: Int, allowLocalRoutees: Boolean, useRoles: String*): ClusterRouterPoolSettings = + def apply(totalInstances: Int, + maxInstancesPerNode: Int, + allowLocalRoutees: Boolean, + useRoles: String*): ClusterRouterPoolSettings = ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.toSet) // For backwards compatibility, useRoles is the combination of use-roles and use-role def fromConfig(config: Config): ClusterRouterPoolSettings = - ClusterRouterPoolSettings( - totalInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config), - maxInstancesPerNode = config.getInt("cluster.max-nr-of-instances-per-node"), - allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"), - useRoles = config.getStringList("cluster.use-roles").asScala.toSet ++ ClusterRouterSettingsBase.useRoleOption(config.getString("cluster.use-role"))) + ClusterRouterPoolSettings(totalInstances = ClusterRouterSettingsBase.getMaxTotalNrOfInstances(config), + maxInstancesPerNode = config.getInt("cluster.max-nr-of-instances-per-node"), + allowLocalRoutees = config.getBoolean("cluster.allow-local-routees"), + useRoles = config + .getStringList("cluster.use-roles") + .asScala + .toSet ++ ClusterRouterSettingsBase.useRoleOption( + config.getString("cluster.use-role"))) } /** @@ -130,11 +160,11 @@ object ClusterRouterPoolSettings { * `maxInstancesPerNode` of cluster router must be 1 when routeesPath is defined */ @SerialVersionUID(1L) -final case class ClusterRouterPoolSettings( - totalInstances: Int, - maxInstancesPerNode: Int, - allowLocalRoutees: Boolean, - useRoles: Set[String]) extends ClusterRouterSettingsBase { +final case class ClusterRouterPoolSettings(totalInstances: Int, + maxInstancesPerNode: Int, + 
allowLocalRoutees: Boolean, + useRoles: Set[String]) + extends ClusterRouterSettingsBase { // For binary compatibility @deprecated("useRole has been replaced with useRoles", since = "2.5.4") @@ -159,20 +189,27 @@ final case class ClusterRouterPoolSettings( // For binary compatibility @deprecated("Use copy with useRoles instead", since = "2.5.4") - def copy(totalInstances: Int = totalInstances, maxInstancesPerNode: Int = maxInstancesPerNode, allowLocalRoutees: Boolean = allowLocalRoutees, useRole: Option[String] = useRole): ClusterRouterPoolSettings = + def copy(totalInstances: Int = totalInstances, + maxInstancesPerNode: Int = maxInstancesPerNode, + allowLocalRoutees: Boolean = allowLocalRoutees, + useRole: Option[String] = useRole): ClusterRouterPoolSettings = new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRole) - if (maxInstancesPerNode <= 0) throw new IllegalArgumentException("maxInstancesPerNode of cluster pool router must be > 0") + if (maxInstancesPerNode <= 0) + throw new IllegalArgumentException("maxInstancesPerNode of cluster pool router must be > 0") - def withUseRoles(useRoles: Set[String]): ClusterRouterPoolSettings = new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles) + def withUseRoles(useRoles: Set[String]): ClusterRouterPoolSettings = + new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles) @varargs - def withUseRoles(useRoles: String*): ClusterRouterPoolSettings = new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.toSet) + def withUseRoles(useRoles: String*): ClusterRouterPoolSettings = + new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.toSet) /** * Java API */ - def withUseRoles(useRoles: java.util.Set[String]): ClusterRouterPoolSettings = new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, 
useRoles.asScala.toSet) + def withUseRoles(useRoles: java.util.Set[String]): ClusterRouterPoolSettings = + new ClusterRouterPoolSettings(totalInstances, maxInstancesPerNode, allowLocalRoutees, useRoles.asScala.toSet) } /** @@ -217,7 +254,9 @@ private[akka] trait ClusterRouterSettingsBase { * [[akka.routing.RoundRobinGroup]] or custom routers. */ @SerialVersionUID(1L) -final case class ClusterRouterGroup(local: Group, settings: ClusterRouterGroupSettings) extends Group with ClusterRouterConfigBase { +final case class ClusterRouterGroup(local: Group, settings: ClusterRouterGroupSettings) + extends Group + with ClusterRouterConfigBase { override def paths(system: ActorSystem): immutable.Iterable[String] = if (settings.allowLocalRoutees && settings.useRoles.nonEmpty) { @@ -234,8 +273,8 @@ final case class ClusterRouterGroup(local: Group, settings: ClusterRouterGroupSe override private[akka] def createRouterActor(): RouterActor = new ClusterRouterGroupActor(settings) override def withFallback(other: RouterConfig): RouterConfig = other match { - case ClusterRouterGroup(_: ClusterRouterGroup, _) => throw new IllegalStateException( - "ClusterRouterGroup is not allowed to wrap a ClusterRouterGroup") + case ClusterRouterGroup(_: ClusterRouterGroup, _) => + throw new IllegalStateException("ClusterRouterGroup is not allowed to wrap a ClusterRouterGroup") case ClusterRouterGroup(otherLocal, _) => copy(local = this.local.withFallback(otherLocal).asInstanceOf[Group]) case _ => @@ -251,7 +290,9 @@ final case class ClusterRouterGroup(local: Group, settings: ClusterRouterGroupSe * [[akka.routing.RoundRobinGroup]] or custom routers. 
*/ @SerialVersionUID(1L) -final case class ClusterRouterPool(local: Pool, settings: ClusterRouterPoolSettings) extends Pool with ClusterRouterConfigBase { +final case class ClusterRouterPool(local: Pool, settings: ClusterRouterPoolSettings) + extends Pool + with ClusterRouterConfigBase { require(local.resizer.isEmpty, "Resizer can't be used together with cluster router") @@ -262,8 +303,9 @@ final case class ClusterRouterPool(local: Pool, settings: ClusterRouterPoolSetti */ override private[akka] def newRoutee(routeeProps: Props, context: ActorContext): Routee = { val name = "c" + childNameCounter.incrementAndGet - val ref = context.asInstanceOf[ActorCell].attachChild( - local.enrichWithPoolDispatcher(routeeProps, context), name, systemService = false) + val ref = context + .asInstanceOf[ActorCell] + .attachChild(local.enrichWithPoolDispatcher(routeeProps, context), name, systemService = false) ActorRefRoutee(ref) } @@ -284,7 +326,8 @@ final case class ClusterRouterPool(local: Pool, settings: ClusterRouterPoolSetti /** * INTERNAL API */ - override private[akka] def createRouterActor(): RouterActor = new ClusterRouterPoolActor(local.supervisorStrategy, settings) + override private[akka] def createRouterActor(): RouterActor = + new ClusterRouterPoolActor(local.supervisorStrategy, settings) override def supervisorStrategy: SupervisorStrategy = local.supervisorStrategy @@ -319,11 +362,12 @@ private[akka] trait ClusterRouterConfigBase extends RouterConfig { /** * INTERNAL API */ -private[akka] class ClusterRouterPoolActor( - supervisorStrategy: SupervisorStrategy, val settings: ClusterRouterPoolSettings) - extends RouterPoolActor(supervisorStrategy) with ClusterRouterActor { +private[akka] class ClusterRouterPoolActor(supervisorStrategy: SupervisorStrategy, + val settings: ClusterRouterPoolSettings) + extends RouterPoolActor(supervisorStrategy) + with ClusterRouterActor { - override def receive = clusterReceive orElse super.receive + override def receive = 
clusterReceive.orElse(super.receive) /** * Adds routees based on totalInstances and maxInstancesPerNode settings @@ -334,8 +378,8 @@ private[akka] class ClusterRouterPoolActor( case None => // done case Some(target) => val routeeProps = cell.routeeProps - val deploy = Deploy(config = ConfigFactory.empty(), routerConfig = routeeProps.routerConfig, - scope = RemoteScope(target)) + val deploy = + Deploy(config = ConfigFactory.empty(), routerConfig = routeeProps.routerConfig, scope = RemoteScope(target)) val routee = pool.newRoutee(routeeProps.withDeploy(deploy), context) // must register each one, since registered routees are used in selectDeploymentTarget cell.addRoutee(routee) @@ -373,7 +417,8 @@ private[akka] class ClusterRouterPoolActor( * INTERNAL API */ private[akka] class ClusterRouterGroupActor(val settings: ClusterRouterGroupSettings) - extends RouterActor with ClusterRouterActor { + extends RouterActor + with ClusterRouterActor { val group = cell.routerConfig match { case x: Group => x @@ -381,7 +426,7 @@ private[akka] class ClusterRouterGroupActor(val settings: ClusterRouterGroupSett throw ActorInitializationException("ClusterRouterGroupActor can only be used with group, not " + other.getClass) } - override def receive = clusterReceive orElse super.receive + override def receive = clusterReceive.orElse(super.receive) var usedRouteePaths: Map[Address, Set[String]] = if (settings.allowLocalRoutees) @@ -416,7 +461,7 @@ private[akka] class ClusterRouterGroupActor(val settings: ClusterRouterGroupSett None } else { // find the node with least routees - val unusedNodes = currentNodes filterNot usedRouteePaths.contains + val unusedNodes = currentNodes.filterNot(usedRouteePaths.contains) if (unusedNodes.nonEmpty) { Some((unusedNodes.head, settings.routeesPaths.head)) @@ -444,7 +489,8 @@ private[akka] trait ClusterRouterActor { this: RouterActor => def settings: ClusterRouterSettingsBase if (!cell.routerConfig.isInstanceOf[Pool] && 
!cell.routerConfig.isInstanceOf[Group]) - throw ActorInitializationException("Cluster router actor can only be used with Pool or Group, not with " + + throw ActorInitializationException( + "Cluster router actor can only be used with Pool or Group, not with " + cell.routerConfig.getClass) def cluster: Cluster = Cluster(context.system) @@ -464,8 +510,8 @@ private[akka] trait ClusterRouterActor { this: RouterActor => def isAvailable(m: Member): Boolean = (m.status == MemberStatus.Up || m.status == MemberStatus.WeaklyUp) && - satisfiesRoles(m.roles) && - (settings.allowLocalRoutees || m.address != cluster.selfAddress) + satisfiesRoles(m.roles) && + (settings.allowLocalRoutees || m.address != cluster.selfAddress) private def satisfiesRoles(memberRoles: Set[String]): Boolean = settings.useRoles.subsetOf(memberRoles) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/AttemptSysMsgRedeliverySpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/AttemptSysMsgRedeliverySpec.scala index a821156408..d4283dbbe0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/AttemptSysMsgRedeliverySpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/AttemptSysMsgRedeliverySpec.scala @@ -36,8 +36,11 @@ class AttemptSysMsgRedeliveryMultiJvmNode1 extends AttemptSysMsgRedeliverySpec class AttemptSysMsgRedeliveryMultiJvmNode2 extends AttemptSysMsgRedeliverySpec class AttemptSysMsgRedeliveryMultiJvmNode3 extends AttemptSysMsgRedeliverySpec -class AttemptSysMsgRedeliverySpec extends MultiNodeSpec(AttemptSysMsgRedeliveryMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with DefaultTimeout { +class AttemptSysMsgRedeliverySpec + extends MultiNodeSpec(AttemptSysMsgRedeliveryMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender + with DefaultTimeout { import AttemptSysMsgRedeliveryMultiJvmSpec._ "AttemptSysMsgRedelivery" must { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 3f93a9c819..e80fabe985 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -8,7 +8,8 @@ import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -final case class ClientDowningNodeThatIsUnreachableMultiNodeConfig(failureDetectorPuppet: Boolean) extends MultiNodeConfig { +final case class ClientDowningNodeThatIsUnreachableMultiNodeConfig(failureDetectorPuppet: Boolean) + extends MultiNodeConfig { val first = role("first") val second = role("second") val third = role("third") @@ -17,21 +18,31 @@ final case class ClientDowningNodeThatIsUnreachableMultiNodeConfig(failureDetect commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) } -class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) -class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) -class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) -class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) +class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode1 + extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) +class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode2 + extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) +class 
ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode3 + extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) +class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode4 + extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) -class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) -class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) -class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) -class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode1 + extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode2 + extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode3 + extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode4 + extends ClientDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) -abstract class ClientDowningNodeThatIsUnreachableSpec(multiNodeConfig: ClientDowningNodeThatIsUnreachableMultiNodeConfig) - extends MultiNodeSpec(multiNodeConfig) - with MultiNodeClusterSpec { +abstract class ClientDowningNodeThatIsUnreachableSpec( + multiNodeConfig: ClientDowningNodeThatIsUnreachableMultiNodeConfig) + extends MultiNodeSpec(multiNodeConfig) + with MultiNodeClusterSpec { - def 
this(failureDetectorPuppet: Boolean) = this(ClientDowningNodeThatIsUnreachableMultiNodeConfig(failureDetectorPuppet)) + def this(failureDetectorPuppet: Boolean) = + this(ClientDowningNodeThatIsUnreachableMultiNodeConfig(failureDetectorPuppet)) import multiNodeConfig._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index cb1cea6ffb..7600653190 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -17,19 +17,27 @@ final case class ClientDowningNodeThatIsUpMultiNodeConfig(failureDetectorPuppet: commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) } -class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = true) -class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = true) -class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = true) -class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = true) +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode1 + extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = true) +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode2 + extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = true) +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode3 + extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = true) +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode4 + extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = true) -class 
ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = false) -class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = false) -class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = false) -class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = false) +class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode1 + extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = false) +class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode2 + extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = false) +class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode3 + extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = false) +class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode4 + extends ClientDowningNodeThatIsUpSpec(failureDetectorPuppet = false) abstract class ClientDowningNodeThatIsUpSpec(multiNodeConfig: ClientDowningNodeThatIsUpMultiNodeConfig) - extends MultiNodeSpec(multiNodeConfig) - with MultiNodeClusterSpec { + extends MultiNodeSpec(multiNodeConfig) + with MultiNodeClusterSpec { def this(failureDetectorPuppet: Boolean) = this(ClientDowningNodeThatIsUpMultiNodeConfig(failureDetectorPuppet)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala index 676be1355f..fe7abaf408 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala @@ -16,9 +16,10 @@ object ClusterAccrualFailureDetectorMultiJvmSpec extends MultiNodeConfig { val second 
= role("second") val third = role("third") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString("akka.cluster.failure-detector.threshold = 4")). - withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString("akka.cluster.failure-detector.threshold = 4")) + .withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) } @@ -28,8 +29,8 @@ class ClusterAccrualFailureDetectorMultiJvmNode2 extends ClusterAccrualFailureDe class ClusterAccrualFailureDetectorMultiJvmNode3 extends ClusterAccrualFailureDetectorSpec abstract class ClusterAccrualFailureDetectorSpec - extends MultiNodeSpec(ClusterAccrualFailureDetectorMultiJvmSpec) - with MultiNodeClusterSpec { + extends MultiNodeSpec(ClusterAccrualFailureDetectorMultiJvmSpec) + with MultiNodeClusterSpec { import ClusterAccrualFailureDetectorMultiJvmSpec._ @@ -49,46 +50,46 @@ abstract class ClusterAccrualFailureDetectorSpec } "mark node as 'unavailable' when network partition and then back to 'available' when partition is healed" taggedAs - LongRunningTest in { - runOn(first) { - testConductor.blackhole(first, second, Direction.Both).await - } - - enterBarrier("broken") - - runOn(first) { - // detect failure... - awaitCond(!cluster.failureDetector.isAvailable(second), 15.seconds) - // other connections still ok - cluster.failureDetector.isAvailable(third) should ===(true) - } - - runOn(second) { - // detect failure... 
- awaitCond(!cluster.failureDetector.isAvailable(first), 15.seconds) - // other connections still ok - cluster.failureDetector.isAvailable(third) should ===(true) - } - - enterBarrier("partitioned") - - runOn(first) { - testConductor.passThrough(first, second, Direction.Both).await - } - - enterBarrier("repaired") - - runOn(first, third) { - awaitCond(cluster.failureDetector.isAvailable(second), 15.seconds) - } - - runOn(second) { - awaitCond(cluster.failureDetector.isAvailable(first), 15.seconds) - } - - enterBarrier("after-2") + LongRunningTest in { + runOn(first) { + testConductor.blackhole(first, second, Direction.Both).await } + enterBarrier("broken") + + runOn(first) { + // detect failure... + awaitCond(!cluster.failureDetector.isAvailable(second), 15.seconds) + // other connections still ok + cluster.failureDetector.isAvailable(third) should ===(true) + } + + runOn(second) { + // detect failure... + awaitCond(!cluster.failureDetector.isAvailable(first), 15.seconds) + // other connections still ok + cluster.failureDetector.isAvailable(third) should ===(true) + } + + enterBarrier("partitioned") + + runOn(first) { + testConductor.passThrough(first, second, Direction.Both).await + } + + enterBarrier("repaired") + + runOn(first, third) { + awaitCond(cluster.failureDetector.isAvailable(second), 15.seconds) + } + + runOn(second) { + awaitCond(cluster.failureDetector.isAvailable(first), 15.seconds) + } + + enterBarrier("after-2") + } + "mark node as 'unavailable' if a node in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { runOn(first) { testConductor.exit(third, 0).await diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala index 00560acedf..9ade5c5c54 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala @@ 
-41,8 +41,9 @@ class ClusterDeathWatchMultiJvmNode4 extends ClusterDeathWatchSpec class ClusterDeathWatchMultiJvmNode5 extends ClusterDeathWatchSpec abstract class ClusterDeathWatchSpec - extends MultiNodeSpec(ClusterDeathWatchMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender { + extends MultiNodeSpec(ClusterDeathWatchMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender { import ClusterDeathWatchMultiJvmSpec._ @@ -105,7 +106,8 @@ abstract class ClusterDeathWatchSpec } runOn(second, third, fourth) { - system.actorOf(Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local), name = "subject") + system.actorOf(Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local), + name = "subject") enterBarrier("subjected-started") enterBarrier("watch-established") runOn(third) { @@ -152,9 +154,11 @@ abstract class ClusterDeathWatchSpec enterBarrier("after-2") } - "be able to watch actor before node joins cluster, ClusterRemoteWatcher takes over from RemoteWatcher" in within(20 seconds) { + "be able to watch actor before node joins cluster, ClusterRemoteWatcher takes over from RemoteWatcher" in within( + 20 seconds) { runOn(fifth) { - system.actorOf(Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local), name = "subject5") + system.actorOf(Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local), + name = "subject5") } enterBarrier("subjected-started") @@ -234,10 +238,13 @@ abstract class ClusterDeathWatchSpec enterBarrier("first-unavailable") val timeout = remainingOrDefault - try Await.ready(system.whenTerminated, timeout) catch { + try Await.ready(system.whenTerminated, timeout) + catch { case _: TimeoutException => - fail("Failed to stop [%s] within [%s] \n%s".format(system.name, timeout, - system.asInstanceOf[ActorSystemImpl].printTree)) + fail( + "Failed to stop [%s] within [%s] \n%s".format(system.name, + timeout, + 
system.asInstanceOf[ActorSystemImpl].printTree)) } // signal to the first node that fourth is done diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index 6fc404bbaf..e25ec41745 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -19,12 +19,11 @@ final case class ConvergenceMultiNodeConfig(failureDetectorPuppet: Boolean) exte val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.cluster.failure-detector.threshold = 4 akka.cluster.allow-weakly-up-members = off - """)). - withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) + """)).withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) } class ConvergenceWithFailureDetectorPuppetMultiJvmNode1 extends ConvergenceSpec(failureDetectorPuppet = true) @@ -38,8 +37,8 @@ class ConvergenceWithAccrualFailureDetectorMultiJvmNode3 extends ConvergenceSpec class ConvergenceWithAccrualFailureDetectorMultiJvmNode4 extends ConvergenceSpec(failureDetectorPuppet = false) abstract class ConvergenceSpec(multiNodeConfig: ConvergenceMultiNodeConfig) - extends MultiNodeSpec(multiNodeConfig) - with MultiNodeClusterSpec { + extends MultiNodeSpec(multiNodeConfig) + with MultiNodeClusterSpec { def this(failureDetectorPuppet: Boolean) = this(ConvergenceMultiNodeConfig(failureDetectorPuppet)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala index c17457e3dc..028dc95e12 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/DeterministicOldestWhenJoiningSpec.scala @@ -19,11 +19,14 @@ object DeterministicOldestWhenJoiningMultiJvmSpec extends MultiNodeConfig { val seed2 = role("seed2") val seed3 = role("seed3") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" # not too quick to trigger problematic scenario more often akka.cluster.leader-actions-interval = 2000 ms akka.cluster.gossip-interval = 500 ms - """)).withFallback(MultiNodeClusterSpec.clusterConfig)) + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } class DeterministicOldestWhenJoiningMultiJvmNode1 extends DeterministicOldestWhenJoiningSpec @@ -31,8 +34,8 @@ class DeterministicOldestWhenJoiningMultiJvmNode2 extends DeterministicOldestWhe class DeterministicOldestWhenJoiningMultiJvmNode3 extends DeterministicOldestWhenJoiningSpec abstract class DeterministicOldestWhenJoiningSpec - extends MultiNodeSpec(DeterministicOldestWhenJoiningMultiJvmSpec) - with MultiNodeClusterSpec { + extends MultiNodeSpec(DeterministicOldestWhenJoiningMultiJvmSpec) + with MultiNodeClusterSpec { import DeterministicOldestWhenJoiningMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/DisallowJoinOfTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/DisallowJoinOfTwoClustersSpec.scala index 60558d49be..5c964ce7cc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/DisallowJoinOfTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/DisallowJoinOfTwoClustersSpec.scala @@ -25,8 +25,8 @@ class DisallowJoinOfTwoClustersMultiJvmNode4 extends DisallowJoinOfTwoClustersSp class DisallowJoinOfTwoClustersMultiJvmNode5 extends DisallowJoinOfTwoClustersSpec abstract class DisallowJoinOfTwoClustersSpec - extends MultiNodeSpec(DisallowJoinOfTwoClustersMultiJvmSpec) - with MultiNodeClusterSpec { + extends 
MultiNodeSpec(DisallowJoinOfTwoClustersMultiJvmSpec) + with MultiNodeClusterSpec { import DisallowJoinOfTwoClustersMultiJvmSpec._ @@ -65,7 +65,7 @@ abstract class DisallowJoinOfTwoClustersSpec } // no change expected - 1 to 5 foreach { _ => + (1 to 5).foreach { _ => clusterView.members.size should ===(expectedSize) Thread.sleep(1000) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala index 75d39348eb..ca7294bbbe 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala @@ -18,10 +18,9 @@ object InitialHeartbeatMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" - akka.cluster.failure-detector.threshold = 4""")). - withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig( + debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + akka.cluster.failure-detector.threshold = 4""")).withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) } @@ -30,9 +29,7 @@ class InitialHeartbeatMultiJvmNode1 extends InitialHeartbeatSpec class InitialHeartbeatMultiJvmNode2 extends InitialHeartbeatSpec class InitialHeartbeatMultiJvmNode3 extends InitialHeartbeatSpec -abstract class InitialHeartbeatSpec - extends MultiNodeSpec(InitialHeartbeatMultiJvmSpec) - with MultiNodeClusterSpec { +abstract class InitialHeartbeatSpec extends MultiNodeSpec(InitialHeartbeatMultiJvmSpec) with MultiNodeClusterSpec { import InitialHeartbeatMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala index d9491eb5ec..59706f5b93 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialMembersOfNewDcSpec.scala @@ -11,8 +11,7 @@ import com.typesafe.config.ConfigFactory import scala.concurrent.duration._ object InitialMembersOfNewDcSpec extends MultiNodeConfig { - commonConfig(ConfigFactory.parseString( - s""" + commonConfig(ConfigFactory.parseString(s""" akka.actor.provider = cluster akka.actor.warn-about-java-serializer-usage = off akka.coordinated-shutdown.terminate-actor-system = off @@ -52,7 +51,10 @@ class InitialMembersOfNewDcSpecMultiJvmNode3 extends InitialMembersOfNewDcSpec class InitialMembersOfNewDcSpecMultiJvmNode4 extends InitialMembersOfNewDcSpec class InitialMembersOfNewDcSpecMultiJvmNode5 extends InitialMembersOfNewDcSpec -abstract class InitialMembersOfNewDcSpec extends MultiNodeSpec(InitialMembersOfNewDcSpec) with STMultiNodeSpec with ImplicitSender { +abstract class InitialMembersOfNewDcSpec + extends MultiNodeSpec(InitialMembersOfNewDcSpec) + with STMultiNodeSpec + with ImplicitSender { import InitialMembersOfNewDcSpec._ @@ -106,4 +108,3 @@ abstract class InitialMembersOfNewDcSpec extends MultiNodeSpec(InitialMembersOfN } } } - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala index 2cb2448cbf..aaaa0f2dc2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala @@ -15,8 +15,7 @@ object JoinInProgressMultiJvmSpec extends MultiNodeConfig { val second = role("second") commonConfig( - debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.cluster { # simulate delay in gossip by turning it off gossip-interval = 300 s @@ -24,16 +23,13 @@ object JoinInProgressMultiJvmSpec extends MultiNodeConfig { 
threshold = 4 acceptable-heartbeat-pause = 1 second } - }""") - .withFallback(MultiNodeClusterSpec.clusterConfig))) + }""").withFallback(MultiNodeClusterSpec.clusterConfig))) } class JoinInProgressMultiJvmNode1 extends JoinInProgressSpec class JoinInProgressMultiJvmNode2 extends JoinInProgressSpec -abstract class JoinInProgressSpec - extends MultiNodeSpec(JoinInProgressMultiJvmSpec) - with MultiNodeClusterSpec { +abstract class JoinInProgressSpec extends MultiNodeSpec(JoinInProgressMultiJvmSpec) with MultiNodeClusterSpec { import JoinInProgressMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala index 67e1224ef5..dfb088d27d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala @@ -17,8 +17,7 @@ object JoinSeedNodeMultiJvmSpec extends MultiNodeConfig { val ordinary1 = role("ordinary1") val ordinary2 = role("ordinary2") - commonConfig(debugConfig(on = false). 
- withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } class JoinSeedNodeMultiJvmNode1 extends JoinSeedNodeSpec @@ -27,9 +26,7 @@ class JoinSeedNodeMultiJvmNode3 extends JoinSeedNodeSpec class JoinSeedNodeMultiJvmNode4 extends JoinSeedNodeSpec class JoinSeedNodeMultiJvmNode5 extends JoinSeedNodeSpec -abstract class JoinSeedNodeSpec - extends MultiNodeSpec(JoinSeedNodeMultiJvmSpec) - with MultiNodeClusterSpec { +abstract class JoinSeedNodeSpec extends MultiNodeSpec(JoinSeedNodeMultiJvmSpec) with MultiNodeClusterSpec { import JoinSeedNodeMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala index 48bf538129..4efdf51255 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala @@ -26,8 +26,7 @@ object LargeMessageClusterMultiJvmSpec extends MultiNodeConfig { // Note that this test uses default configuration, // not MultiNodeClusterSpec.clusterConfig - commonConfig(ConfigFactory.parseString( - s""" + commonConfig(ConfigFactory.parseString(s""" akka { cluster.debug.verbose-heartbeat-logging = on loggers = ["akka.testkit.TestEventListener"] @@ -82,8 +81,10 @@ class LargeMessageClusterMultiJvmNode1 extends LargeMessageClusterSpec class LargeMessageClusterMultiJvmNode2 extends LargeMessageClusterSpec class LargeMessageClusterMultiJvmNode3 extends LargeMessageClusterSpec -abstract class LargeMessageClusterSpec extends MultiNodeSpec(LargeMessageClusterMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender { +abstract class LargeMessageClusterSpec + extends MultiNodeSpec(LargeMessageClusterMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender { import LargeMessageClusterMultiJvmSpec._ override def expectedTestDuration: FiniteDuration = 
3.minutes @@ -97,8 +98,7 @@ abstract class LargeMessageClusterSpec extends MultiNodeSpec(LargeMessageCluster "Artery Cluster with large messages" must { "init cluster" taggedAs LongRunningTest in { - Cluster(system).subscribe(unreachableProbe.ref, ClusterEvent.InitialStateAsEvents, - classOf[UnreachableMember]) + Cluster(system).subscribe(unreachableProbe.ref, ClusterEvent.InitialStateAsEvents, classOf[UnreachableMember]) awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningAllOtherNodesSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningAllOtherNodesSpec.scala index 11f30a83e4..151fe95038 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningAllOtherNodesSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningAllOtherNodesSpec.scala @@ -18,12 +18,13 @@ object LeaderDowningAllOtherNodesMultiJvmSpec extends MultiNodeConfig { val fifth = role("fifth") val sixth = role("sixth") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" akka.cluster.failure-detector.monitored-by-nr-of-members = 2 akka.cluster.auto-down-unreachable-after = 1s - """)). 
- withFallback(MultiNodeClusterSpec.clusterConfig)) + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } class LeaderDowningAllOtherNodesMultiJvmNode1 extends LeaderDowningAllOtherNodesSpec @@ -34,8 +35,8 @@ class LeaderDowningAllOtherNodesMultiJvmNode5 extends LeaderDowningAllOtherNodes class LeaderDowningAllOtherNodesMultiJvmNode6 extends LeaderDowningAllOtherNodesSpec abstract class LeaderDowningAllOtherNodesSpec - extends MultiNodeSpec(LeaderDowningAllOtherNodesMultiJvmSpec) - with MultiNodeClusterSpec { + extends MultiNodeSpec(LeaderDowningAllOtherNodesMultiJvmSpec) + with MultiNodeClusterSpec { import LeaderDowningAllOtherNodesMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index f4f1c8fcbd..764a8b336e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -12,32 +12,44 @@ import akka.remote.testkit.MultiNodeSpec import akka.testkit._ import scala.concurrent.duration._ -final case class LeaderDowningNodeThatIsUnreachableMultiNodeConfig(failureDetectorPuppet: Boolean) extends MultiNodeConfig { +final case class LeaderDowningNodeThatIsUnreachableMultiNodeConfig(failureDetectorPuppet: Boolean) + extends MultiNodeConfig { val first = role("first") val second = role("second") val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString("akka.cluster.auto-down-unreachable-after = 2s")). 
- withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString("akka.cluster.auto-down-unreachable-after = 2s")) + .withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) } -class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode1 extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) -class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode2 extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) -class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode3 extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) -class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode4 extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode1 + extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode2 + extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode3 + extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode4 + extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = true) -class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode1 extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) -class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode2 extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) -class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode3 extends 
LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) -class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode4 extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode1 + extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode2 + extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode3 + extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode4 + extends LeaderDowningNodeThatIsUnreachableSpec(failureDetectorPuppet = false) -abstract class LeaderDowningNodeThatIsUnreachableSpec(multiNodeConfig: LeaderDowningNodeThatIsUnreachableMultiNodeConfig) - extends MultiNodeSpec(multiNodeConfig) - with MultiNodeClusterSpec { +abstract class LeaderDowningNodeThatIsUnreachableSpec( + multiNodeConfig: LeaderDowningNodeThatIsUnreachableMultiNodeConfig) + extends MultiNodeSpec(multiNodeConfig) + with MultiNodeClusterSpec { - def this(failureDetectorPuppet: Boolean) = this(LeaderDowningNodeThatIsUnreachableMultiNodeConfig(failureDetectorPuppet)) + def this(failureDetectorPuppet: Boolean) = + this(LeaderDowningNodeThatIsUnreachableMultiNodeConfig(failureDetectorPuppet)) import multiNodeConfig._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 64309440ac..2e15efba16 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -33,8 +33,8 @@ class LeaderElectionWithAccrualFailureDetectorMultiJvmNode4 extends LeaderElecti class 
LeaderElectionWithAccrualFailureDetectorMultiJvmNode5 extends LeaderElectionSpec(failureDetectorPuppet = false) abstract class LeaderElectionSpec(multiNodeConfig: LeaderElectionMultiNodeConfig) - extends MultiNodeSpec(multiNodeConfig) - with MultiNodeClusterSpec { + extends MultiNodeSpec(multiNodeConfig) + with MultiNodeClusterSpec { def this(failureDetectorPuppet: Boolean) = this(LeaderElectionMultiNodeConfig(failureDetectorPuppet)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index c0d6faf0f1..c33d86ffb1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -19,18 +19,17 @@ object LeaderLeavingMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString("akka.cluster.auto-down-unreachable-after = 0s")). 
- withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString("akka.cluster.auto-down-unreachable-after = 0s")) + .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) } class LeaderLeavingMultiJvmNode1 extends LeaderLeavingSpec class LeaderLeavingMultiJvmNode2 extends LeaderLeavingSpec class LeaderLeavingMultiJvmNode3 extends LeaderLeavingSpec -abstract class LeaderLeavingSpec - extends MultiNodeSpec(LeaderLeavingMultiJvmSpec) - with MultiNodeClusterSpec { +abstract class LeaderLeavingSpec extends MultiNodeSpec(LeaderLeavingMultiJvmSpec) with MultiNodeClusterSpec { import LeaderLeavingMultiJvmSpec._ import ClusterEvent._ @@ -66,7 +65,7 @@ abstract class LeaderLeavingSpec if (state.members.exists(m => m.address == oldLeaderAddress && m.status == Exiting)) exitingLatch.countDown() case MemberExited(m) if m.address == oldLeaderAddress => exitingLatch.countDown() - case _ => // ignore + case _ => // ignore } }).withDeploy(Deploy.local)), classOf[MemberEvent]) enterBarrier("registered-listener") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala index dc18cdfe28..f9960d6221 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala @@ -32,9 +32,7 @@ class MBeanMultiJvmNode2 extends MBeanSpec class MBeanMultiJvmNode3 extends MBeanSpec class MBeanMultiJvmNode4 extends MBeanSpec -abstract class MBeanSpec - extends MultiNodeSpec(MBeanMultiJvmSpec) - with MultiNodeClusterSpec { +abstract class MBeanSpec extends MultiNodeSpec(MBeanMultiJvmSpec) with MultiNodeClusterSpec { import MBeanMultiJvmSpec._ @@ -44,15 +42,14 @@ abstract class MBeanSpec "Cluster MBean" must { "expose attributes" taggedAs LongRunningTest in { val info = mbeanServer.getMBeanInfo(mbeanName) - 
info.getAttributes.map(_.getName).toSet should ===(Set( - "ClusterStatus", "Members", "Unreachable", "MemberStatus", "Leader", "Singleton", "Available")) + info.getAttributes.map(_.getName).toSet should ===( + Set("ClusterStatus", "Members", "Unreachable", "MemberStatus", "Leader", "Singleton", "Available")) enterBarrier("after-1") } "expose operations" taggedAs LongRunningTest in { val info = mbeanServer.getMBeanInfo(mbeanName) - info.getOperations.map(_.getName).toSet should ===(Set( - "join", "leave", "down")) + info.getOperations.map(_.getName).toSet should ===(Set("join", "leave", "down")) enterBarrier("after-2") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MemberWeaklyUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MemberWeaklyUpSpec.scala index fe2ffa4c75..9e5c7de834 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MemberWeaklyUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MemberWeaklyUpSpec.scala @@ -20,12 +20,10 @@ object MemberWeaklyUpSpec extends MultiNodeConfig { val fourth = role("fourth") val fifth = role("fifth") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.remote.retry-gate-closed-for = 3 s akka.cluster.allow-weakly-up-members = on - """)). 
- withFallback(MultiNodeClusterSpec.clusterConfig)) + """)).withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) } @@ -36,9 +34,7 @@ class MemberWeaklyUpMultiJvmNode3 extends MemberWeaklyUpSpec class MemberWeaklyUpMultiJvmNode4 extends MemberWeaklyUpSpec class MemberWeaklyUpMultiJvmNode5 extends MemberWeaklyUpSpec -abstract class MemberWeaklyUpSpec - extends MultiNodeSpec(MemberWeaklyUpSpec) - with MultiNodeClusterSpec { +abstract class MemberWeaklyUpSpec extends MultiNodeSpec(MemberWeaklyUpSpec) with MultiNodeClusterSpec { import MemberWeaklyUpSpec._ @@ -55,7 +51,8 @@ abstract class MemberWeaklyUpSpec enterBarrier("after-1") } - "detect network partition and mark nodes on other side as unreachable" taggedAs LongRunningTest in within(20 seconds) { + "detect network partition and mark nodes on other side as unreachable" taggedAs LongRunningTest in within( + 20 seconds) { runOn(first) { // split the cluster in two parts (first, second) / (third, fourth, fifth) for (role1 <- side1; role2 <- side2) { @@ -87,14 +84,18 @@ abstract class MemberWeaklyUpSpec runOn(side1: _*) { awaitAssert { clusterView.members.size should be(4) - clusterView.members.exists { m => m.address == address(second) && m.status == WeaklyUp } should be(true) + clusterView.members.exists { m => + m.address == address(second) && m.status == WeaklyUp + } should be(true) } } runOn(side2: _*) { awaitAssert { clusterView.members.size should be(4) - clusterView.members.exists { m => m.address == address(fifth) && m.status == WeaklyUp } should be(true) + clusterView.members.exists { m => + m.address == address(fifth) && m.status == WeaklyUp + } should be(true) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index 9c9aeca855..e9bdd2ff92 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -25,8 +25,8 @@ class MembershipChangeListenerExitingMultiJvmNode2 extends MembershipChangeListe class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec abstract class MembershipChangeListenerExitingSpec - extends MultiNodeSpec(MembershipChangeListenerExitingMultiJvmSpec) - with MultiNodeClusterSpec { + extends MultiNodeSpec(MembershipChangeListenerExitingMultiJvmSpec) + with MultiNodeClusterSpec { import MembershipChangeListenerExitingMultiJvmSpec._ import ClusterEvent._ @@ -45,18 +45,20 @@ abstract class MembershipChangeListenerExitingSpec val exitingLatch = TestLatch() val removedLatch = TestLatch() val secondAddress = address(second) - cluster.subscribe(system.actorOf(Props(new Actor { - def receive = { - case state: CurrentClusterState => - if (state.members.exists(m => m.address == secondAddress && m.status == Exiting)) + cluster.subscribe( + system.actorOf(Props(new Actor { + def receive = { + case state: CurrentClusterState => + if (state.members.exists(m => m.address == secondAddress && m.status == Exiting)) + exitingLatch.countDown() + case MemberExited(m) if m.address == secondAddress => exitingLatch.countDown() - case MemberExited(m) if m.address == secondAddress => - exitingLatch.countDown() - case MemberRemoved(m, Exiting) if m.address == secondAddress => - removedLatch.countDown() - case _ => // ignore - } - }).withDeploy(Deploy.local)), classOf[MemberEvent]) + case MemberRemoved(m, Exiting) if m.address == secondAddress => + removedLatch.countDown() + case _ => // ignore + } + }).withDeploy(Deploy.local)), + classOf[MemberEvent]) enterBarrier("registered-listener") exitingLatch.await removedLatch.await @@ -65,16 +67,18 @@ abstract class MembershipChangeListenerExitingSpec runOn(third) { val exitingLatch = TestLatch() val secondAddress = address(second) - cluster.subscribe(system.actorOf(Props(new Actor { - def receive = 
{ - case state: CurrentClusterState => - if (state.members.exists(m => m.address == secondAddress && m.status == Exiting)) + cluster.subscribe( + system.actorOf(Props(new Actor { + def receive = { + case state: CurrentClusterState => + if (state.members.exists(m => m.address == secondAddress && m.status == Exiting)) + exitingLatch.countDown() + case MemberExited(m) if m.address == secondAddress => exitingLatch.countDown() - case MemberExited(m) if m.address == secondAddress => - exitingLatch.countDown() - case _ => // ignore - } - }).withDeploy(Deploy.local)), classOf[MemberEvent]) + case _ => // ignore + } + }).withDeploy(Deploy.local)), + classOf[MemberEvent]) enterBarrier("registered-listener") exitingLatch.await } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index 4741d86bae..ace0881706 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -24,8 +24,8 @@ class MembershipChangeListenerUpMultiJvmNode2 extends MembershipChangeListenerUp class MembershipChangeListenerUpMultiJvmNode3 extends MembershipChangeListenerUpSpec abstract class MembershipChangeListenerUpSpec - extends MultiNodeSpec(MembershipChangeListenerUpMultiJvmSpec) - with MultiNodeClusterSpec { + extends MultiNodeSpec(MembershipChangeListenerUpMultiJvmSpec) + with MultiNodeClusterSpec { import MembershipChangeListenerUpMultiJvmSpec._ import ClusterEvent._ @@ -38,7 +38,7 @@ abstract class MembershipChangeListenerUpSpec runOn(first, second) { val latch = TestLatch() - val expectedAddresses = Set(first, second) map address + val expectedAddresses = Set(first, second).map(address) cluster.subscribe(system.actorOf(Props(new Actor { var members = Set.empty[Member] def receive = { @@ -65,7 +65,7 @@ abstract class MembershipChangeListenerUpSpec 
"(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { val latch = TestLatch() - val expectedAddresses = Set(first, second, third) map address + val expectedAddresses = Set(first, second, third).map(address) cluster.subscribe(system.actorOf(Props(new Actor { var members = Set.empty[Member] def receive = { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala index de4bf5851b..3e8cb6de2c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala @@ -17,9 +17,10 @@ object MinMembersBeforeUpMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString( - "akka.cluster.min-nr-of-members = 3")). - withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString("akka.cluster.min-nr-of-members = 3")) + .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) } object MinMembersBeforeUpWithWeaklyUpMultiJvmSpec extends MultiNodeConfig { @@ -27,10 +28,12 @@ object MinMembersBeforeUpWithWeaklyUpMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" akka.cluster.min-nr-of-members = 3 - akka.cluster.allow-weakly-up-members = on""")). 
- withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) + akka.cluster.allow-weakly-up-members = on""")) + .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) } object MinMembersOfRoleBeforeUpMultiJvmSpec extends MultiNodeConfig { @@ -38,15 +41,14 @@ object MinMembersOfRoleBeforeUpMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString( - "akka.cluster.role.backend.min-nr-of-members = 2")). - withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString("akka.cluster.role.backend.min-nr-of-members = 2")) + .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) - nodeConfig(first)( - ConfigFactory.parseString("akka.cluster.roles =[frontend]")) + nodeConfig(first)(ConfigFactory.parseString("akka.cluster.roles =[frontend]")) - nodeConfig(second, third)( - ConfigFactory.parseString("akka.cluster.roles =[backend]")) + nodeConfig(second, third)(ConfigFactory.parseString("akka.cluster.roles =[backend]")) } class MinMembersBeforeUpMultiJvmNode1 extends MinMembersBeforeUpSpec @@ -101,8 +103,8 @@ abstract class MinMembersOfRoleBeforeUpSpec extends MinMembersBeforeUpBase(MinMe } abstract class MinMembersBeforeUpBase(multiNodeConfig: MultiNodeConfig) - extends MultiNodeSpec(multiNodeConfig) - with MultiNodeClusterSpec { + extends MultiNodeSpec(multiNodeConfig) + with MultiNodeClusterSpec { def first: RoleName def second: RoleName @@ -113,7 +115,7 @@ abstract class MinMembersBeforeUpBase(multiNodeConfig: MultiNodeConfig) cluster.registerOnMemberUp(onUpLatch.countDown()) runOn(first) { - cluster join myself + cluster.join(myself) awaitAssert { clusterView.refreshCurrentState() clusterView.status should ===(Joining) @@ -127,14 +129,14 @@ abstract class MinMembersBeforeUpBase(multiNodeConfig: 
MultiNodeConfig) cluster.join(first) } runOn(first, second) { - val expectedAddresses = Set(first, second) map address + val expectedAddresses = Set(first, second).map(address) awaitAssert { clusterView.refreshCurrentState() clusterView.members.map(_.address) should ===(expectedAddresses) } clusterView.members.unsorted.map(_.status) should ===(Set(Joining)) // and it should not change - 1 to 5 foreach { _ => + (1 to 5).foreach { _ => Thread.sleep(1000) clusterView.members.map(_.address) should ===(expectedAddresses) clusterView.members.unsorted.map(_.status) should ===(Set(Joining)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala index 4d876bd262..0004060276 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcClusterSpec.scala @@ -18,18 +18,15 @@ class MultiDcSpecConfig(crossDcConnections: Int = 5) extends MultiNodeConfig { val fourth = role("fourth") val fifth = role("fifth") - commonConfig(ConfigFactory.parseString( - s""" + commonConfig(ConfigFactory.parseString(s""" akka.cluster.multi-data-center.cross-data-center-connections = $crossDcConnections """).withFallback(MultiNodeClusterSpec.clusterConfig)) - nodeConfig(first, second)(ConfigFactory.parseString( - """ + nodeConfig(first, second)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc1" """)) - nodeConfig(third, fourth, fifth)(ConfigFactory.parseString( - """ + nodeConfig(third, fourth, fifth)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc2" """)) @@ -52,9 +49,7 @@ class MultiDcFewCrossDcMultiJvmNode3 extends MultiDcSpec(MultiDcFewCrossDcConnec class MultiDcFewCrossDcMultiJvmNode4 extends MultiDcSpec(MultiDcFewCrossDcConnectionsConfig) class MultiDcFewCrossDcMultiJvmNode5 extends MultiDcSpec(MultiDcFewCrossDcConnectionsConfig) -abstract class 
MultiDcSpec(config: MultiDcSpecConfig) - extends MultiNodeSpec(config) - with MultiNodeClusterSpec { +abstract class MultiDcSpec(config: MultiDcSpecConfig) extends MultiNodeSpec(config) with MultiNodeClusterSpec { import config._ @@ -127,7 +122,8 @@ abstract class MultiDcSpec(config: MultiDcSpecConfig) enterBarrier("inter-data-center unreachability end") } - "be able to have data center member changes while there is unreachability in another data center" in within(20.seconds) { + "be able to have data center member changes while there is unreachability in another data center" in within( + 20.seconds) { runOn(first) { testConductor.blackhole(first, second, Direction.Both).await } @@ -141,7 +137,8 @@ abstract class MultiDcSpec(config: MultiDcSpecConfig) cluster.leave(fourth) awaitAssert(clusterView.members.map(_.address) should not contain address(fourth)) - awaitAssert(clusterView.members.collect { case m if m.status == Up => m.address } should contain(address(fifth))) + awaitAssert( + clusterView.members.collect { case m if m.status == Up => m.address } should contain(address(fifth))) } enterBarrier("other-data-center-internal-unreachable changed") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala index e60293d591..b88bff5029 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala @@ -24,22 +24,19 @@ object MultiDcHeartbeatTakingOverSpecMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") // beta val fifth = role("fifth") // beta - nodeConfig(first, second, third)(ConfigFactory.parseString( - """ + nodeConfig(first, second, third)(ConfigFactory.parseString(""" akka { cluster.multi-data-center.self-data-center = alpha } """)) - nodeConfig(fourth, fifth)(ConfigFactory.parseString( - """ + 
nodeConfig(fourth, fifth)(ConfigFactory.parseString(""" akka { cluster.multi-data-center.self-data-center = beta } """)) - commonConfig(ConfigFactory.parseString( - """ + commonConfig(ConfigFactory.parseString(""" akka { actor.provider = cluster @@ -66,8 +63,9 @@ class MultiDcHeartbeatTakingOverSpecMultiJvmNode3 extends MultiDcHeartbeatTaking class MultiDcHeartbeatTakingOverSpecMultiJvmNode4 extends MultiDcHeartbeatTakingOverSpec class MultiDcHeartbeatTakingOverSpecMultiJvmNode5 extends MultiDcHeartbeatTakingOverSpec -abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeSpec(MultiDcHeartbeatTakingOverSpecMultiJvmSpec) - with MultiNodeClusterSpec { +abstract class MultiDcHeartbeatTakingOverSpec + extends MultiNodeSpec(MultiDcHeartbeatTakingOverSpecMultiJvmSpec) + with MultiNodeClusterSpec { "A 2-dc cluster" must { @@ -93,7 +91,8 @@ abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeSpec(MultiDcHeart expectedBetaHeartbeaterNodes = takeNOldestMembers(dataCenter = "beta", 2) expectedBetaHeartbeaterRoles = membersAsRoles(expectedBetaHeartbeaterNodes) - expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles union expectedBetaHeartbeaterRoles) + expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles.union( + expectedBetaHeartbeaterRoles)) } "collect information on oldest nodes" taggedAs LongRunningTest in { @@ -141,7 +140,8 @@ abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeSpec(MultiDcHeart // we leave one of the current oldest nodes of the `alpha` DC, // since it has 3 members the "not yet oldest" one becomes oldest and should start monitoring across datacenter val preLeaveOldestAlphaRole = expectedAlphaHeartbeaterRoles.head - val preLeaveOldestAlphaAddress = expectedAlphaHeartbeaterNodes.find(_.address.port.get == preLeaveOldestAlphaRole.port.get).get.address + val preLeaveOldestAlphaAddress = + expectedAlphaHeartbeaterNodes.find(_.address.port.get == 
preLeaveOldestAlphaRole.port.get).get.address runOn(preLeaveOldestAlphaRole) { info(s"Leaving: ${preLeaveOldestAlphaAddress}") cluster.leave(cluster.selfAddress) @@ -156,20 +156,25 @@ abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeSpec(MultiDcHeart enterBarrier("after-alpha-monitoring-node-left") implicit val sender = observer.ref - val expectedAlphaMonitoringNodesAfterLeaving = (takeNOldestMembers(dataCenter = "alpha", 3).filterNot(_.status == MemberStatus.Exiting)) + val expectedAlphaMonitoringNodesAfterLeaving = + (takeNOldestMembers(dataCenter = "alpha", 3).filterNot(_.status == MemberStatus.Exiting)) runOn(membersAsRoles(expectedAlphaMonitoringNodesAfterLeaving).toList: _*) { - awaitAssert({ + awaitAssert( + { - selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() + selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() - try { - observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) - info(s"Got confirmation from ${observer.lastSender} that it is actively monitoring now") - } catch { - case ex: Throwable => - throw new AssertionError(s"Monitoring was Dormant on ${cluster.selfAddress}, where we expected it to be active!", ex) - } - }, 20.seconds) + try { + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) + info(s"Got confirmation from ${observer.lastSender} that it is actively monitoring now") + } catch { + case ex: Throwable => + throw new AssertionError( + s"Monitoring was Dormant on ${cluster.selfAddress}, where we expected it to be active!", + ex) + } + }, + 20.seconds) } enterBarrier("confirmed-heartbeating-take-over") } @@ -183,9 +188,12 @@ abstract class MultiDcHeartbeatTakingOverSpec extends MultiNodeSpec(MultiDcHeart * (since marking that transition is a Leader action). 
*/ private def membersByAge(dataCenter: ClusterSettings.DataCenter): immutable.SortedSet[Member] = - SortedSet.empty(Member.ageOrdering) - .union(cluster.state.members.filter(m => m.dataCenter == dataCenter && - m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp)) + SortedSet + .empty(Member.ageOrdering) + .union( + cluster.state.members.filter(m => + m.dataCenter == dataCenter && + m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp)) /** INTERNAL API */ @InternalApi diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcLastNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcLastNodeSpec.scala index b4db10aec2..b82bd56a62 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcLastNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcLastNodeSpec.scala @@ -14,18 +14,15 @@ object MultiDcLastNodeSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(ConfigFactory.parseString( - s""" + commonConfig(ConfigFactory.parseString(s""" akka.loglevel = INFO """).withFallback(MultiNodeClusterSpec.clusterConfig)) - nodeConfig(first, second)(ConfigFactory.parseString( - """ + nodeConfig(first, second)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc1" """)) - nodeConfig(third)(ConfigFactory.parseString( - """ + nodeConfig(third)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc2" """)) @@ -35,8 +32,7 @@ class MultiDcLastNodeMultiJvmNode1 extends MultiDcLastNodeSpec class MultiDcLastNodeMultiJvmNode2 extends MultiDcLastNodeSpec class MultiDcLastNodeMultiJvmNode3 extends MultiDcLastNodeSpec -abstract class MultiDcLastNodeSpec extends MultiNodeSpec(MultiDcLastNodeSpec) - with MultiNodeClusterSpec { +abstract class MultiDcLastNodeSpec extends MultiNodeSpec(MultiDcLastNodeSpec) with MultiNodeClusterSpec { import MultiDcLastNodeSpec._ diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala index 6cd84bb6f3..c5cff78299 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala @@ -22,8 +22,8 @@ object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") val fifth = role("fifth") - commonConfig(ConfigFactory.parseString( - """ + commonConfig( + ConfigFactory.parseString(""" akka.loglevel = DEBUG # issue #24955 akka.cluster.debug.verbose-heartbeat-logging = on akka.cluster.debug.verbose-gossip-logging = on @@ -42,13 +42,11 @@ object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig { } """).withFallback(MultiNodeClusterSpec.clusterConfig)) - nodeConfig(first, second)(ConfigFactory.parseString( - """ + nodeConfig(first, second)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc1" """)) - nodeConfig(third, fourth, fifth)(ConfigFactory.parseString( - """ + nodeConfig(third, fourth, fifth)(ConfigFactory.parseString(""" akka.cluster.multi-data-center.self-data-center = "dc2" """)) @@ -61,9 +59,7 @@ class MultiDcSplitBrainMultiJvmNode3 extends MultiDcSplitBrainSpec class MultiDcSplitBrainMultiJvmNode4 extends MultiDcSplitBrainSpec class MultiDcSplitBrainMultiJvmNode5 extends MultiDcSplitBrainSpec -abstract class MultiDcSplitBrainSpec - extends MultiNodeSpec(MultiDcSplitBrainMultiJvmSpec) - with MultiNodeClusterSpec { +abstract class MultiDcSplitBrainSpec extends MultiNodeSpec(MultiDcSplitBrainMultiJvmSpec) with MultiNodeClusterSpec { import MultiDcSplitBrainMultiJvmSpec._ @@ -196,7 +192,8 @@ abstract class MultiDcSplitBrainSpec // forth has left the cluster, fifth is still not a member - "be able to have data center member restart (same host:port) while there is inter data center split" in within(60.seconds) { + "be able to have data 
center member restart (same host:port) while there is inter data center split" in within( + 60.seconds) { val subscribeProbe = TestProbe() runOn(first, second, third, fifth) { Cluster(system).subscribe(subscribeProbe.ref, InitialStateAsSnapshot, classOf[MemberUp], classOf[MemberRemoved]) @@ -212,7 +209,9 @@ abstract class MultiDcSplitBrainSpec awaitAssert(clusterView.members.collect { case m if m.dataCenter == "dc2" && m.status == MemberStatus.Up => m.address } should ===(Set(address(third), address(fifth)))) - fifthOriginalUniqueAddress = clusterView.members.collectFirst { case m if m.address == address(fifth) => m.uniqueAddress } + fifthOriginalUniqueAddress = clusterView.members.collectFirst { + case m if m.address == address(fifth) => m.uniqueAddress + } } enterBarrier("fifth-joined") @@ -238,10 +237,8 @@ abstract class MultiDcSplitBrainSpec Await.ready(system.whenTerminated, remaining) val port = Cluster(system).selfAddress.port.get - val restartedSystem = ActorSystem( - system.name, - ConfigFactory.parseString( - s""" + val restartedSystem = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.netty.tcp.port = $port akka.remote.artery.canonical.port = $port akka.coordinated-shutdown.terminate-actor-system = on diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala index 475347e81e..9bf78ced27 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala @@ -21,22 +21,19 @@ object MultiDcSunnyWeatherMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") val fifth = role("fifth") - nodeConfig(first, second, third)(ConfigFactory.parseString( - """ + nodeConfig(first, second, third)(ConfigFactory.parseString(""" akka { cluster.multi-data-center.self-data-center = alpha } """)) - nodeConfig(fourth, 
fifth)(ConfigFactory.parseString( - """ + nodeConfig(fourth, fifth)(ConfigFactory.parseString(""" akka { cluster.multi-data-center.self-data-center = beta } """)) - commonConfig(ConfigFactory.parseString( - """ + commonConfig(ConfigFactory.parseString(""" akka { actor.provider = cluster @@ -63,8 +60,9 @@ class MultiDcSunnyWeatherMultiJvmNode3 extends MultiDcSunnyWeatherSpec class MultiDcSunnyWeatherMultiJvmNode4 extends MultiDcSunnyWeatherSpec class MultiDcSunnyWeatherMultiJvmNode5 extends MultiDcSunnyWeatherSpec -abstract class MultiDcSunnyWeatherSpec extends MultiNodeSpec(MultiDcSunnyWeatherMultiJvmSpec) - with MultiNodeClusterSpec { +abstract class MultiDcSunnyWeatherSpec + extends MultiNodeSpec(MultiDcSunnyWeatherMultiJvmSpec) + with MultiNodeClusterSpec { "A normal cluster" must { "be healthy" taggedAs LongRunningTest in { @@ -83,7 +81,8 @@ abstract class MultiDcSunnyWeatherSpec extends MultiNodeSpec(MultiDcSunnyWeather val expectedBetaHeartbeaterNodes = takeNOldestMembers(dataCenter = "beta", 2) val expectedBetaHeartbeaterRoles = membersAsRoles(expectedBetaHeartbeaterNodes) - val expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles union expectedBetaHeartbeaterRoles) + val expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles.union( + expectedBetaHeartbeaterRoles)) enterBarrier("found-expectations") @@ -123,15 +122,14 @@ abstract class MultiDcSunnyWeatherSpec extends MultiNodeSpec(MultiDcSunnyWeather implicit val sender = observer.ref selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() observer.expectMsgType[CrossDcHeartbeatSender.MonitoringStateReport](5.seconds) match { - case CrossDcHeartbeatSender.MonitoringDormant() => // ok ... + case CrossDcHeartbeatSender.MonitoringDormant() => // ok ... 
case CrossDcHeartbeatSender.MonitoringActive(state) => - // must not heartbeat myself state.activeReceivers should not contain cluster.selfUniqueAddress // not any of the members in the same datacenter; it's "cross-dc" after all val myDataCenterMembers = state.state.getOrElse(cluster.selfDataCenter, Set.empty) - myDataCenterMembers foreach { myDcMember => + myDataCenterMembers.foreach { myDcMember => state.activeReceivers should not contain myDcMember.uniqueAddress } @@ -149,9 +147,12 @@ abstract class MultiDcSunnyWeatherSpec extends MultiNodeSpec(MultiDcSunnyWeather * (since marking that transition is a Leader action). */ private def membersByAge(dataCenter: ClusterSettings.DataCenter): immutable.SortedSet[Member] = - SortedSet.empty(Member.ageOrdering) - .union(cluster.state.members.filter(m => m.dataCenter == dataCenter && - m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp)) + SortedSet + .empty(Member.ageOrdering) + .union( + cluster.state.members.filter(m => + m.dataCenter == dataCenter && + m.status != MemberStatus.Joining && m.status != MemberStatus.WeaklyUp)) /** INTERNAL API */ @InternalApi diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 7af09e95e7..ea5db00b7b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -32,8 +32,9 @@ import scala.concurrent.Await object MultiNodeClusterSpec { def clusterConfigWithFailureDetectorPuppet: Config = - ConfigFactory.parseString("akka.cluster.failure-detector.implementation-class = akka.cluster.FailureDetectorPuppet"). 
- withFallback(clusterConfig) + ConfigFactory + .parseString("akka.cluster.failure-detector.implementation-class = akka.cluster.FailureDetectorPuppet") + .withFallback(clusterConfig) def clusterConfig(failureDetectorPuppet: Boolean): Config = if (failureDetectorPuppet) clusterConfigWithFailureDetectorPuppet else clusterConfig @@ -84,19 +85,20 @@ object MultiNodeClusterSpec { import EndActor._ def receive: Receive = { case SendEnd => - target foreach { t => + target.foreach { t => context.actorSelection(RootActorPath(t) / self.path.elements) ! End } case End => - testActor forward End + testActor.forward(End) sender() ! EndAck case EndAck => - testActor forward EndAck + testActor.forward(EndAck) } } } -trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoroner with FlightRecordingSupport { self: MultiNodeSpec => +trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoroner with FlightRecordingSupport { + self: MultiNodeSpec => override def initialParticipants = roles.size @@ -121,28 +123,26 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro def muteLog(sys: ActorSystem = system): Unit = { if (!sys.log.isDebugEnabled) { - Seq( - ".*Cluster Node.* - registered cluster JMX MBean.*", - ".*Cluster Node.* - is starting up.*", - ".*Shutting down cluster Node.*", - ".*Cluster node successfully shut down.*", - ".*Using a dedicated scheduler for cluster.*") foreach { s => - sys.eventStream.publish(Mute(EventFilter.info(pattern = s))) - } + Seq(".*Cluster Node.* - registered cluster JMX MBean.*", + ".*Cluster Node.* - is starting up.*", + ".*Shutting down cluster Node.*", + ".*Cluster node successfully shut down.*", + ".*Using a dedicated scheduler for cluster.*").foreach { s => + sys.eventStream.publish(Mute(EventFilter.info(pattern = s))) + } - muteDeadLetters( - classOf[ClusterHeartbeatSender.Heartbeat], - classOf[ClusterHeartbeatSender.HeartbeatRsp], - classOf[GossipEnvelope], - 
classOf[GossipStatus], - classOf[InternalClusterAction.Tick], - classOf[akka.actor.PoisonPill], - classOf[akka.dispatch.sysmsg.DeathWatchNotification], - classOf[akka.remote.transport.AssociationHandle.Disassociated], - // akka.remote.transport.AssociationHandle.Disassociated.getClass, - classOf[akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying], - // akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass, - classOf[akka.remote.transport.AssociationHandle.InboundPayload])(sys) + muteDeadLetters(classOf[ClusterHeartbeatSender.Heartbeat], + classOf[ClusterHeartbeatSender.HeartbeatRsp], + classOf[GossipEnvelope], + classOf[GossipStatus], + classOf[InternalClusterAction.Tick], + classOf[akka.actor.PoisonPill], + classOf[akka.dispatch.sysmsg.DeathWatchNotification], + classOf[akka.remote.transport.AssociationHandle.Disassociated], + // akka.remote.transport.AssociationHandle.Disassociated.getClass, + classOf[akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying], + // akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass, + classOf[akka.remote.transport.AssociationHandle.InboundPayload])(sys) } } @@ -208,7 +208,7 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro */ def startClusterNode(): Unit = { if (clusterView.members.isEmpty) { - cluster join myself + cluster.join(myself) awaitAssert(clusterView.members.map(_.address) should contain(address(myself))) } else clusterView.self @@ -241,19 +241,24 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro */ def joinWithin(joinNode: RoleName, max: Duration = remainingOrDefault, interval: Duration = 1.second): Unit = { def memberInState(member: Address, status: Seq[MemberStatus]): Boolean = - clusterView.members.exists { m => (m.address == member) && status.contains(m.status) } + clusterView.members.exists { m => + (m.address == member) && status.contains(m.status) + } cluster.join(joinNode) 
- awaitCond({ - clusterView.refreshCurrentState() - if (memberInState(joinNode, List(MemberStatus.up)) && - memberInState(myself, List(MemberStatus.Joining, MemberStatus.Up))) - true - else { - cluster.join(joinNode) - false - } - }, max, interval) + awaitCond( + { + clusterView.refreshCurrentState() + if (memberInState(joinNode, List(MemberStatus.up)) && + memberInState(myself, List(MemberStatus.Joining, MemberStatus.Up))) + true + else { + cluster.join(joinNode) + false + } + }, + max, + interval) } /** @@ -294,23 +299,21 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro val expectedLeader = roleOfLeader(nodesInCluster) val leader = clusterView.leader val isLeader = leader == Some(clusterView.selfAddress) - assert( - isLeader == isNode(expectedLeader), - "expectedLeader [%s], got leader [%s], members [%s]".format(expectedLeader, leader, clusterView.members)) - clusterView.status should (be(MemberStatus.Up) or be(MemberStatus.Leaving)) + assert(isLeader == isNode(expectedLeader), + "expectedLeader [%s], got leader [%s], members [%s]".format(expectedLeader, leader, clusterView.members)) + clusterView.status should (be(MemberStatus.Up).or(be(MemberStatus.Leaving))) } /** * Wait until the expected number of members has status Up has been reached. * Also asserts that nodes in the 'canNotBePartOfMemberRing' are *not* part of the cluster ring. 
*/ - def awaitMembersUp( - numberOfMembers: Int, - canNotBePartOfMemberRing: Set[Address] = Set.empty, - timeout: FiniteDuration = 25.seconds): Unit = { + def awaitMembersUp(numberOfMembers: Int, + canNotBePartOfMemberRing: Set[Address] = Set.empty, + timeout: FiniteDuration = 25.seconds): Unit = { within(timeout) { if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set - awaitAssert(canNotBePartOfMemberRing foreach (a => clusterView.members.map(_.address) should not contain (a))) + awaitAssert(canNotBePartOfMemberRing.foreach(a => clusterView.members.map(_.address) should not contain (a))) awaitAssert(clusterView.members.size should ===(numberOfMembers)) awaitAssert(clusterView.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))) // clusterView.leader is updated by LeaderChanged, await that to be updated also @@ -348,7 +351,8 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro enterBarrier("member-left") // verify that the member is EXITING - try Await.result(exitingLatch, timeout) catch { + try Await.result(exitingLatch, timeout) + catch { case cause: Exception => throw new AssertionError(s"Member ${toBeRemovedAddress} was not removed within ${timeout}!", cause) } @@ -368,7 +372,7 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro * Wait until the specified nodes have seen the same gossip overview. */ def awaitSeenSameState(addresses: Address*): Unit = - awaitAssert((addresses.toSet diff clusterView.seenBy) should ===(Set.empty)) + awaitAssert((addresses.toSet.diff(clusterView.seenBy)) should ===(Set.empty)) /** * Leader according to the address ordering of the roles. @@ -399,7 +403,7 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro * failure detector. 
*/ def markNodeAsAvailable(address: Address): Unit = - failureDetectorPuppet(address) foreach (_.markNodeAsAvailable()) + failureDetectorPuppet(address).foreach(_.markNodeAsAvailable()) /** * Marks a node as unavailable in the failure detector if @@ -411,7 +415,7 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro // before marking it as unavailable there should be at least one heartbeat // to create the FailureDetectorPuppet in the FailureDetectorRegistry cluster.failureDetector.heartbeat(address) - failureDetectorPuppet(address) foreach (_.markNodeAsUnavailable()) + failureDetectorPuppet(address).foreach(_.markNodeAsUnavailable()) } } @@ -421,9 +425,8 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro private def failureDetectorPuppet(address: Address): Option[FailureDetectorPuppet] = cluster.failureDetector match { case reg: DefaultFailureDetectorRegistry[Address] => - reg.failureDetector(address) collect { case p: FailureDetectorPuppet => p } + reg.failureDetector(address).collect { case p: FailureDetectorPuppet => p } case _ => None } } - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala index fc92909fa4..1d53ff7cf8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala @@ -20,8 +20,8 @@ object NodeChurnMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false). 
- withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.cluster.auto-down-unreachable-after = 1s akka.cluster.prune-gossip-tombstones-after = 1s akka.remote.log-frame-size-exceeding = 1200b @@ -30,8 +30,7 @@ object NodeChurnMultiJvmSpec extends MultiNodeConfig { embedded-media-driver = off aeron-dir = "target/aeron-NodeChurnSpec" } - """)). - withFallback(MultiNodeClusterSpec.clusterConfig)) + """)).withFallback(MultiNodeClusterSpec.clusterConfig)) class LogListener(testActor: ActorRef) extends Actor { def receive = { @@ -47,11 +46,13 @@ class NodeChurnMultiJvmNode2 extends NodeChurnSpec class NodeChurnMultiJvmNode3 extends NodeChurnSpec abstract class NodeChurnSpec - extends MultiNodeSpec({ - // Aeron media driver must be started before ActorSystem - SharedMediaDriverSupport.startMediaDriver(NodeChurnMultiJvmSpec) - NodeChurnMultiJvmSpec - }) with MultiNodeClusterSpec with ImplicitSender { + extends MultiNodeSpec({ + // Aeron media driver must be started before ActorSystem + SharedMediaDriverSupport.startMediaDriver(NodeChurnMultiJvmSpec) + NodeChurnMultiJvmSpec + }) + with MultiNodeClusterSpec + with ImplicitSender { import NodeChurnMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeDowningAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeDowningAndBeingRemovedSpec.scala index 1159506b06..a7dd89432b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeDowningAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeDowningAndBeingRemovedSpec.scala @@ -15,8 +15,11 @@ object NodeDowningAndBeingRemovedMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString( - "akka.cluster.auto-down-unreachable-after = off").withFallback(MultiNodeClusterSpec.clusterConfig))) + commonConfig( + 
debugConfig(on = false).withFallback( + ConfigFactory + .parseString("akka.cluster.auto-down-unreachable-after = off") + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class NodeDowningAndBeingRemovedMultiJvmNode1 extends NodeDowningAndBeingRemovedSpec @@ -24,8 +27,8 @@ class NodeDowningAndBeingRemovedMultiJvmNode2 extends NodeDowningAndBeingRemoved class NodeDowningAndBeingRemovedMultiJvmNode3 extends NodeDowningAndBeingRemovedSpec abstract class NodeDowningAndBeingRemovedSpec - extends MultiNodeSpec(NodeDowningAndBeingRemovedMultiJvmSpec) - with MultiNodeClusterSpec { + extends MultiNodeSpec(NodeDowningAndBeingRemovedMultiJvmSpec) + with MultiNodeClusterSpec { import NodeDowningAndBeingRemovedMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 9a03a3325a..8613a79b50 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -14,8 +14,7 @@ object NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec extends MultiNodeConfig val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false) - .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) } class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode1 extends NodeLeavingAndExitingAndBeingRemovedSpec @@ -23,8 +22,8 @@ class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndEx class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec abstract class NodeLeavingAndExitingAndBeingRemovedSpec - extends MultiNodeSpec(NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec) - with 
MultiNodeClusterSpec { + extends MultiNodeSpec(NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec) + with MultiNodeClusterSpec { import NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 63e068f4eb..a8395c53c8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -17,8 +17,7 @@ object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false). - withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) } class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec @@ -26,8 +25,8 @@ class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec abstract class NodeLeavingAndExitingSpec - extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) - with MultiNodeClusterSpec { + extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) + with MultiNodeClusterSpec { import NodeLeavingAndExitingMultiJvmSpec._ import ClusterEvent._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 3344be5ba9..56bc0575c3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -21,9 +21,7 @@ class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec class NodeMembershipMultiJvmNode3 extends 
NodeMembershipSpec -abstract class NodeMembershipSpec - extends MultiNodeSpec(NodeMembershipMultiJvmSpec) - with MultiNodeClusterSpec { +abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec { import NodeMembershipMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 605b44b859..fb97e948c8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -23,9 +23,7 @@ object NodeUpMultiJvmSpec extends MultiNodeConfig { class NodeUpMultiJvmNode1 extends NodeUpSpec class NodeUpMultiJvmNode2 extends NodeUpSpec -abstract class NodeUpSpec - extends MultiNodeSpec(NodeUpMultiJvmSpec) - with MultiNodeClusterSpec { +abstract class NodeUpSpec extends MultiNodeSpec(NodeUpMultiJvmSpec) with MultiNodeClusterSpec { import NodeUpMultiJvmSpec._ import ClusterEvent._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala index 49bd44cc1c..0f6e416785 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/QuickRestartSpec.scala @@ -20,12 +20,11 @@ object QuickRestartMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.cluster.auto-down-unreachable-after = off akka.cluster.allow-weakly-up-members = off - """)). 
- withFallback(MultiNodeClusterSpec.clusterConfig)) + """)).withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -34,8 +33,9 @@ class QuickRestartMultiJvmNode2 extends QuickRestartSpec class QuickRestartMultiJvmNode3 extends QuickRestartSpec abstract class QuickRestartSpec - extends MultiNodeSpec(QuickRestartMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender { + extends MultiNodeSpec(QuickRestartMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender { import QuickRestartMultiJvmSpec._ @@ -62,14 +62,11 @@ abstract class QuickRestartSpec if (restartingSystem == null) ActorSystem( system.name, - ConfigFactory.parseString(s"akka.cluster.roles = [round-$n]") - .withFallback(system.settings.config)) + ConfigFactory.parseString(s"akka.cluster.roles = [round-$n]").withFallback(system.settings.config)) else - ActorSystem( - system.name, - // use the same port - ConfigFactory.parseString( - s""" + ActorSystem(system.name, + // use the same port + ConfigFactory.parseString(s""" akka.cluster.roles = [round-$n] akka.remote.netty.tcp.port = ${Cluster(restartingSystem).selfAddress.port.get} akka.remote.artery.canonical.port = ${Cluster(restartingSystem).selfAddress.port.get} @@ -90,7 +87,8 @@ abstract class QuickRestartSpec Cluster(system).state.members.size should ===(totalNumberOfNodes) Cluster(system).state.members.map(_.status == MemberStatus.Up) // use the role to test that it is the new incarnation that joined, sneaky - Cluster(system).state.members.flatMap(_.roles) should ===(Set(s"round-$n", ClusterSettings.DcRolePrefix + "default")) + Cluster(system).state.members.flatMap(_.roles) should ===( + Set(s"round-$n", ClusterSettings.DcRolePrefix + "default")) } } enterBarrier("members-up-" + n) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala index ed60bd6fb5..1ca2fb2d7f 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala @@ -26,13 +26,12 @@ object RestartFirstSeedNodeMultiJvmSpec extends MultiNodeConfig { val seed2 = role("seed2") val seed3 = role("seed3") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.cluster.auto-down-unreachable-after = off akka.cluster.retry-unsuccessful-join-after = 3s akka.cluster.allow-weakly-up-members = off - """)). - withFallback(MultiNodeClusterSpec.clusterConfig)) + """)).withFallback(MultiNodeClusterSpec.clusterConfig)) } class RestartFirstSeedNodeMultiJvmNode1 extends RestartFirstSeedNodeSpec @@ -40,8 +39,9 @@ class RestartFirstSeedNodeMultiJvmNode2 extends RestartFirstSeedNodeSpec class RestartFirstSeedNodeMultiJvmNode3 extends RestartFirstSeedNodeSpec abstract class RestartFirstSeedNodeSpec - extends MultiNodeSpec(RestartFirstSeedNodeMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender { + extends MultiNodeSpec(RestartFirstSeedNodeMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender { import RestartFirstSeedNodeMultiJvmSpec._ @@ -53,18 +53,15 @@ abstract class RestartFirstSeedNodeSpec def missingSeed = address(seed3).copy(port = Some(61313)) def seedNodes: immutable.IndexedSeq[Address] = Vector(seedNode1Address, seed2, seed3, missingSeed) - lazy val restartedSeed1System = ActorSystem( - system.name, - ConfigFactory.parseString( - s""" + lazy val restartedSeed1System = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.netty.tcp.port = ${seedNodes.head.port.get} akka.remote.artery.canonical.port = ${seedNodes.head.port.get} """).withFallback(system.settings.config)) override def afterAll(): Unit = { runOn(seed1) { - shutdown( - if (seed1System.whenTerminated.isCompleted) restartedSeed1System else seed1System) + shutdown(if 
(seed1System.whenTerminated.isCompleted) restartedSeed1System else seed1System) } super.afterAll() } @@ -87,7 +84,7 @@ abstract class RestartFirstSeedNodeSpec runOn(seed1) { enterBarrier("seed1-address-receiver-ready") seedNode1Address = Cluster(seed1System).selfAddress - List(seed2, seed3) foreach { r => + List(seed2, seed3).foreach { r => system.actorSelection(RootActorPath(r) / "user" / "address-receiver") ! seedNode1Address expectMsg(5 seconds, "ok") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala index 6117de961e..22c3d1e073 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala @@ -24,15 +24,14 @@ object RestartNode2SpecMultiJvmSpec extends MultiNodeConfig { val seed1 = role("seed1") val seed2 = role("seed2") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.cluster.auto-down-unreachable-after = 2s akka.cluster.retry-unsuccessful-join-after = 3s akka.cluster.allow-weakly-up-members = off akka.remote.retry-gate-closed-for = 45s akka.remote.log-remote-lifecycle-events = INFO - """)). 
- withFallback(MultiNodeClusterSpec.clusterConfig)) + """)).withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -40,8 +39,9 @@ class RestartNode2SpecMultiJvmNode1 extends RestartNode2SpecSpec class RestartNode2SpecMultiJvmNode2 extends RestartNode2SpecSpec abstract class RestartNode2SpecSpec - extends MultiNodeSpec(RestartNode2SpecMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender { + extends MultiNodeSpec(RestartNode2SpecMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender { import RestartNode2SpecMultiJvmSpec._ @@ -53,10 +53,8 @@ abstract class RestartNode2SpecSpec def seedNodes: immutable.IndexedSeq[Address] = Vector(seedNode1Address, seed2) // this is the node that will attempt to re-join, keep gate times low so it can retry quickly - lazy val restartedSeed1System = ActorSystem( - system.name, - ConfigFactory.parseString( - s""" + lazy val restartedSeed1System = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.netty.tcp.port = ${seedNodes.head.port.get} akka.remote.artery.canonical.port = ${seedNodes.head.port.get} #akka.remote.retry-gate-closed-for = 1s @@ -64,8 +62,7 @@ abstract class RestartNode2SpecSpec override def afterAll(): Unit = { runOn(seed1) { - shutdown( - if (seed1System.whenTerminated.isCompleted) restartedSeed1System else seed1System) + shutdown(if (seed1System.whenTerminated.isCompleted) restartedSeed1System else seed1System) } super.afterAll() } @@ -88,7 +85,7 @@ abstract class RestartNode2SpecSpec runOn(seed1) { enterBarrier("seed1-address-receiver-ready") seedNode1Address = Cluster(seed1System).selfAddress - List(seed2) foreach { r => + List(seed2).foreach { r => system.actorSelection(RootActorPath(r) / "user" / "address-receiver") ! 
seedNode1Address expectMsg(5.seconds, "ok") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala index 00e12a59e1..5674c89a96 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala @@ -25,12 +25,11 @@ object RestartNode3MultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.cluster.auto-down-unreachable-after = off akka.cluster.allow-weakly-up-members = off - """)). - withFallback(MultiNodeClusterSpec.clusterConfig)) + """)).withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) } @@ -40,8 +39,9 @@ class RestartNode3MultiJvmNode2 extends RestartNode3Spec class RestartNode3MultiJvmNode3 extends RestartNode3Spec abstract class RestartNode3Spec - extends MultiNodeSpec(RestartNode3MultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender { + extends MultiNodeSpec(RestartNode3MultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender { import RestartNode3MultiJvmSpec._ @@ -52,10 +52,8 @@ abstract class RestartNode3Spec def seedNodes: immutable.IndexedSeq[Address] = Vector(first) - lazy val restartedSecondSystem = ActorSystem( - system.name, - ConfigFactory.parseString( - s""" + lazy val restartedSecondSystem = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.artery.canonical.port = ${secondUniqueAddress.address.port.get} akka.remote.netty.tcp.port = ${secondUniqueAddress.address.port.get} """).withFallback(system.settings.config)) @@ -90,7 +88,7 @@ abstract class RestartNode3Spec runOn(second) { enterBarrier("second-address-receiver-ready") secondUniqueAddress = Cluster(secondSystem).selfUniqueAddress - 
List(first, third) foreach { r => + List(first, third).foreach { r => system.actorSelection(RootActorPath(r) / "user" / "address-receiver") ! secondUniqueAddress expectMsg(5.seconds, "ok") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala index 140ca2685e..5708e1f48b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala @@ -30,13 +30,12 @@ object RestartNodeMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.cluster.auto-down-unreachable-after = 5s akka.cluster.allow-weakly-up-members = off #akka.remote.use-passive-connections = off - """)). - withFallback(MultiNodeClusterSpec.clusterConfig)) + """)).withFallback(MultiNodeClusterSpec.clusterConfig)) /** * This was used together with sleep in EndpointReader before deliverAndAck @@ -60,8 +59,9 @@ class RestartNodeMultiJvmNode2 extends RestartNodeSpec class RestartNodeMultiJvmNode3 extends RestartNodeSpec abstract class RestartNodeSpec - extends MultiNodeSpec(RestartNodeMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender { + extends MultiNodeSpec(RestartNodeMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender { import RestartNodeMultiJvmSpec._ @@ -72,9 +72,8 @@ abstract class RestartNodeSpec def seedNodes: immutable.IndexedSeq[Address] = Vector(first, secondUniqueAddress.address, third) - lazy val restartedSecondSystem = ActorSystem( - system.name, - ConfigFactory.parseString(s""" + lazy val restartedSecondSystem = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.netty.tcp.port = ${secondUniqueAddress.address.port.get} akka.remote.artery.canonical.port = 
${secondUniqueAddress.address.port.get} """).withFallback(system.settings.config)) @@ -107,7 +106,7 @@ abstract class RestartNodeSpec runOn(second) { enterBarrier("second-address-receiver-ready") secondUniqueAddress = Cluster(secondSystem).selfUniqueAddress - List(first, third) foreach { r => + List(first, third).foreach { r => system.actorSelection(RootActorPath(r) / "user" / "address-receiver") ! secondUniqueAddress expectMsg(5.seconds, "ok") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala index 4bf526b9c2..47fd6c21f8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SharedMediaDriverSupport.scala @@ -45,7 +45,8 @@ object SharedMediaDriverSupport { override def accept(msg: String): Unit = { println(msg) } - }) catch { + }) + catch { case NonFatal(e) => println(e.getMessage) false @@ -103,7 +104,7 @@ object SharedMediaDriverSupport { case NonFatal(e) => println( s"Couldn't delete Aeron embedded media driver files in [${driver.aeronDirectoryName}] " + - s"due to [${e.getMessage}]") + s"due to [${e.getMessage}]") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala index 46bd4b9008..01a63eb140 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala @@ -16,26 +16,26 @@ final case class SingletonClusterMultiNodeConfig(failureDetectorPuppet: Boolean) val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false). 
- withFallback(ConfigFactory.parseString(""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.cluster { auto-down-unreachable-after = 0s failure-detector.threshold = 4 } - """)). - withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) + """)).withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) } class SingletonClusterWithFailureDetectorPuppetMultiJvmNode1 extends SingletonClusterSpec(failureDetectorPuppet = true) class SingletonClusterWithFailureDetectorPuppetMultiJvmNode2 extends SingletonClusterSpec(failureDetectorPuppet = true) -class SingletonClusterWithAccrualFailureDetectorMultiJvmNode1 extends SingletonClusterSpec(failureDetectorPuppet = false) -class SingletonClusterWithAccrualFailureDetectorMultiJvmNode2 extends SingletonClusterSpec(failureDetectorPuppet = false) +class SingletonClusterWithAccrualFailureDetectorMultiJvmNode1 + extends SingletonClusterSpec(failureDetectorPuppet = false) +class SingletonClusterWithAccrualFailureDetectorMultiJvmNode2 + extends SingletonClusterSpec(failureDetectorPuppet = false) abstract class SingletonClusterSpec(multiNodeConfig: SingletonClusterMultiNodeConfig) - extends MultiNodeSpec(multiNodeConfig) - with MultiNodeClusterSpec { + extends MultiNodeSpec(multiNodeConfig) + with MultiNodeClusterSpec { def this(failureDetectorPuppet: Boolean) = this(SingletonClusterMultiNodeConfig(failureDetectorPuppet)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala index f47067f8fe..354be99d93 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala @@ -21,14 +21,12 @@ final case class SplitBrainMultiNodeConfig(failureDetectorPuppet: Boolean) exten val fourth = role("fourth") val fifth = role("fifth") - commonConfig(debugConfig(on = false). 
- withFallback(ConfigFactory.parseString(""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.remote.retry-gate-closed-for = 3 s akka.cluster { auto-down-unreachable-after = 1s failure-detector.threshold = 4 - }""")). - withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) + }""")).withFallback(MultiNodeClusterSpec.clusterConfig(failureDetectorPuppet))) testTransport(on = true) } @@ -46,8 +44,8 @@ class SplitBrainWithAccrualFailureDetectorMultiJvmNode4 extends SplitBrainSpec(f class SplitBrainWithAccrualFailureDetectorMultiJvmNode5 extends SplitBrainSpec(failureDetectorPuppet = false) abstract class SplitBrainSpec(multiNodeConfig: SplitBrainMultiNodeConfig) - extends MultiNodeSpec(multiNodeConfig) - with MultiNodeClusterSpec { + extends MultiNodeSpec(multiNodeConfig) + with MultiNodeClusterSpec { def this(failureDetectorPuppet: Boolean) = this(SplitBrainMultiNodeConfig(failureDetectorPuppet)) @@ -66,7 +64,8 @@ abstract class SplitBrainSpec(multiNodeConfig: SplitBrainMultiNodeConfig) enterBarrier("after-1") } - "detect network partition and mark nodes on other side as unreachable and form new cluster" taggedAs LongRunningTest in within(30 seconds) { + "detect network partition and mark nodes on other side as unreachable and form new cluster" taggedAs LongRunningTest in within( + 30 seconds) { enterBarrier("before-split") runOn(first) { @@ -80,14 +79,14 @@ abstract class SplitBrainSpec(multiNodeConfig: SplitBrainMultiNodeConfig) runOn(side1: _*) { for (role <- side2) markNodeAsUnavailable(role) // auto-down - awaitMembersUp(side1.size, side2.toSet map address) + awaitMembersUp(side1.size, side2.toSet.map(address)) assertLeader(side1: _*) } runOn(side2: _*) { for (role <- side1) markNodeAsUnavailable(role) // auto-down - awaitMembersUp(side2.size, side1.toSet map address) + awaitMembersUp(side2.size, side1.toSet.map(address)) assertLeader(side2: _*) } diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala index 183815fbc0..7496db4c18 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StreamRefSpec.scala @@ -37,12 +37,10 @@ object StreamRefSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.cluster { auto-down-unreachable-after = 1s - }""")). - withFallback(MultiNodeClusterSpec.clusterConfig)) + }""")).withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) @@ -62,7 +60,8 @@ object StreamRefSpec extends MultiNodeConfig { case RequestLogs(streamId) => // materialize the SourceRef: val (done: Future[Done], ref: Future[SourceRef[String]]) = - Source.fromIterator(() => Iterator.from(1)) + Source + .fromIterator(() => Iterator.from(1)) .map(n => s"elem-$n") .watchTermination()(Keep.right) .toMat(StreamRefs.sourceRef())(Keep.both) @@ -81,7 +80,7 @@ object StreamRefSpec extends MultiNodeConfig { val reply: Future[LogsOffer] = ref.map(LogsOffer(streamId, _)) // reply to sender - reply pipeTo sender() + reply.pipeTo(sender()) } } @@ -101,10 +100,10 @@ object StreamRefSpec extends MultiNodeConfig { def receive = { case PrepareUpload(nodeId) => - // materialize the SinkRef (the remote is like a source of data for us): val (ref: Future[SinkRef[String]], done: Future[Done]) = - StreamRefs.sinkRef[String]() + StreamRefs + .sinkRef[String]() .throttle(1, 1.second) .toMat(Sink.ignore)(Keep.both) .mapMaterializedValue { m => @@ -122,7 +121,7 @@ object StreamRefSpec extends MultiNodeConfig { val reply: Future[MeasurementsSinkReady] = ref.map(MeasurementsSinkReady(nodeId, _)) // reply to sender - reply pipeTo sender() + reply.pipeTo(sender()) } } @@ 
-133,8 +132,7 @@ class StreamRefMultiJvmNode1 extends StreamRefSpec class StreamRefMultiJvmNode2 extends StreamRefSpec class StreamRefMultiJvmNode3 extends StreamRefSpec -abstract class StreamRefSpec extends MultiNodeSpec(StreamRefSpec) - with MultiNodeClusterSpec with ImplicitSender { +abstract class StreamRefSpec extends MultiNodeSpec(StreamRefSpec) with MultiNodeClusterSpec with ImplicitSender { import StreamRefSpec._ private implicit val mat: ActorMaterializer = ActorMaterializer() @@ -163,11 +161,7 @@ abstract class StreamRefSpec extends MultiNodeSpec(StreamRefSpec) ref ! RequestLogs(1337) val dataSourceRef = expectMsgType[LogsOffer].sourceRef destinationForSource = dataSourceRef.runWith(TestSink.probe) - destinationForSource - .request(3) - .expectNext("elem-1") - .expectNext("elem-2") - .expectNext("elem-3") + destinationForSource.request(3).expectNext("elem-1").expectNext("elem-2").expectNext("elem-3") } runOn(second) { dataSourceLifecycle.expectMsg("started-1337") @@ -182,10 +176,10 @@ abstract class StreamRefSpec extends MultiNodeSpec(StreamRefSpec) // auto-down runOn(first, third) { - awaitMembersUp(2, Set(second) map address) + awaitMembersUp(2, Set(second).map(address)) } runOn(second) { - awaitMembersUp(1, Set(first, third) map address) + awaitMembersUp(1, Set(first, third).map(address)) } enterBarrier("members-removed") @@ -215,7 +209,8 @@ abstract class StreamRefSpec extends MultiNodeSpec(StreamRefSpec) ref ! 
PrepareUpload("system-42-tmp") val ready = expectMsgType[MeasurementsSinkReady] - Source.fromIterator(() => Iterator.from(1)) + Source + .fromIterator(() => Iterator.from(1)) .map(n => s"elem-$n") .watchTermination()(Keep.right) .to(ready.sinkRef) @@ -237,10 +232,10 @@ abstract class StreamRefSpec extends MultiNodeSpec(StreamRefSpec) // auto-down runOn(first) { - awaitMembersUp(1, Set(third) map address) + awaitMembersUp(1, Set(third).map(address)) } runOn(third) { - awaitMembersUp(1, Set(first) map address) + awaitMembersUp(1, Set(first).map(address)) } enterBarrier("members-removed") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala index c492a3d6ce..b24374450e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala @@ -66,7 +66,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { val totalNumberOfNodes = System.getProperty("MultiJvm.akka.cluster.Stress.nrOfNodes") match { case null => 13 - case value => value.toInt requiring (_ >= 10, "nrOfNodes should be >= 10") + case value => value.toInt.requiring(_ >= 10, "nrOfNodes should be >= 10") } for (n <- 1 to totalNumberOfNodes) role("node-" + n) @@ -180,9 +180,9 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { val numberOfNodesJoiningToOneNode = getInt("nr-of-nodes-joining-to-one") * nFactor // remaining will join to seed nodes val numberOfNodesJoiningToSeedNodes = (totalNumberOfNodes - numberOfSeedNodes - - numberOfNodesJoiningToSeedNodesInitially - numberOfNodesJoiningOneByOneSmall - - numberOfNodesJoiningOneByOneLarge - numberOfNodesJoiningToOneNode) requiring (_ >= 0, - s"too many configured nr-of-nodes-joining-*, total should be <= ${totalNumberOfNodes}") + numberOfNodesJoiningToSeedNodesInitially - numberOfNodesJoiningOneByOneSmall - + numberOfNodesJoiningOneByOneLarge - 
numberOfNodesJoiningToOneNode) + .requiring(_ >= 0, s"too many configured nr-of-nodes-joining-*, total should be <= ${totalNumberOfNodes}") val numberOfNodesLeavingOneByOneSmall = getInt("nr-of-nodes-leaving-one-by-one-small") * nFactor val numberOfNodesLeavingOneByOneLarge = getInt("nr-of-nodes-leaving-one-by-one-large") * nFactor val numberOfNodesLeaving = getInt("nr-of-nodes-leaving") * nFactor @@ -209,16 +209,17 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { require( numberOfSeedNodes + numberOfNodesJoiningToSeedNodesInitially + numberOfNodesJoiningOneByOneSmall + - numberOfNodesJoiningOneByOneLarge + numberOfNodesJoiningToOneNode + numberOfNodesJoiningToSeedNodes <= totalNumberOfNodes, + numberOfNodesJoiningOneByOneLarge + numberOfNodesJoiningToOneNode + numberOfNodesJoiningToSeedNodes <= totalNumberOfNodes, s"specified number of joining nodes <= ${totalNumberOfNodes}") // don't shutdown the 3 nodes hosting the master actors require( numberOfNodesLeavingOneByOneSmall + numberOfNodesLeavingOneByOneLarge + numberOfNodesLeaving + - numberOfNodesShutdownOneByOneSmall + numberOfNodesShutdownOneByOneLarge + numberOfNodesShutdown <= totalNumberOfNodes - 3, + numberOfNodesShutdownOneByOneSmall + numberOfNodesShutdownOneByOneLarge + numberOfNodesShutdown <= totalNumberOfNodes - 3, s"specified number of leaving/shutdown nodes <= ${totalNumberOfNodes - 3}") - require(numberOfNodesJoinRemove <= totalNumberOfNodes, s"nr-of-nodes-join-remove should be <= ${totalNumberOfNodes}") + require(numberOfNodesJoinRemove <= totalNumberOfNodes, + s"nr-of-nodes-join-remove should be <= ${totalNumberOfNodes}") override def toString: String = { testConfig.withFallback(ConfigFactory.parseString(s"nrOfNodes=${totalNumberOfNodes}")).root.render @@ -229,10 +230,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { def form: String = d.formatted("%.2f") } - final case class ClusterResult( - address: Address, - duration: Duration, - clusterStats: 
GossipStats) + final case class ClusterResult(address: Address, duration: Duration, clusterStats: GossipStats) final case class AggregatedClusterResult(title: String, duration: Duration, clusterStats: GossipStats) @@ -242,7 +240,9 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { * expected results has been collected. It shuts down * itself when expected results has been collected. */ - class ClusterResultAggregator(title: String, expectedResults: Int, settings: Settings) extends Actor with ActorLogging { + class ClusterResultAggregator(title: String, expectedResults: Int, settings: Settings) + extends Actor + with ActorLogging { import settings.infolog private val cluster = Cluster(context.system) private var reportTo: Option[ActorRef] = None @@ -267,9 +267,10 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { if (results.size == expectedResults) { val aggregated = AggregatedClusterResult(title, maxDuration, totalGossipStats) if (infolog) - log.info(s"[${title}] completed in [${aggregated.duration.toMillis}] ms\n${aggregated.clusterStats}\n\n${formatPhi}\n\n${formatStats}") - reportTo foreach { _ ! aggregated } - context stop self + log.info( + s"[${title}] completed in [${aggregated.duration.toMillis}] ms\n${aggregated.clusterStats}\n\n${formatPhi}\n\n${formatStats}") + reportTo.foreach { _ ! aggregated } + context.stop(self) } case _: CurrentClusterState => case ReportTo(ref) => reportTo = ref @@ -308,8 +309,9 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { import stats.vclockStats._ s"ClusterStats($receivedGossipCount, $mergeCount, $sameCount, $newerCount, $olderCount, $versionSize, $seenLatest)" } - (clusterStatsObservedByNode map { case (monitor, stats) => s"${monitor}\t${f(stats)}" }). 
- mkString("ClusterStats(gossip, merge, same, newer, older, vclockSize, seenLatest)\n", "\n", "") + clusterStatsObservedByNode + .map { case (monitor, stats) => s"${monitor}\t${f(stats)}" } + .mkString("ClusterStats(gossip, merge, same, newer, older, vclockSize, seenLatest)\n", "\n", "") } } @@ -329,7 +331,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { } def formatHistory: String = - (formatHistoryHeader +: (history map formatHistoryLine)).mkString("\n") + (formatHistoryHeader +: (history.map(formatHistoryLine))).mkString("\n") def formatHistoryHeader: String = "[Title]\t[Duration (ms)]\t[GossipStats(gossip, merge, same, newer, older)]" @@ -345,21 +347,22 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { class PhiObserver extends Actor with ActorLogging { val cluster = Cluster(context.system) var reportTo: Option[ActorRef] = None - val emptyPhiByNode: Map[Address, PhiValue] = Map.empty[Address, PhiValue].withDefault(address => PhiValue(address, 0, 0, 0.0)) + val emptyPhiByNode: Map[Address, PhiValue] = + Map.empty[Address, PhiValue].withDefault(address => PhiValue(address, 0, 0, 0.0)) var phiByNode = emptyPhiByNode var nodes = Set.empty[Address] def phi(address: Address): Double = cluster.failureDetector match { - case reg: DefaultFailureDetectorRegistry[Address] => reg.failureDetector(address) match { - case Some(fd: PhiAccrualFailureDetector) => fd.phi - case _ => 0.0 - } + case reg: DefaultFailureDetectorRegistry[Address] => + reg.failureDetector(address) match { + case Some(fd: PhiAccrualFailureDetector) => fd.phi + case _ => 0.0 + } case _ => 0.0 } import context.dispatcher - val checkPhiTask = context.system.scheduler.schedule( - 1.second, 1.second, self, PhiTick) + val checkPhiTask = context.system.scheduler.schedule(1.second, 1.second, self, PhiTick) // subscribe to MemberEvent, re-subscribe when restart override def preStart(): Unit = cluster.subscribe(self, classOf[MemberEvent]) @@ -371,23 +374,25 @@ 
private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { def receive = { case PhiTick => - nodes foreach { node => + nodes.foreach { node => val previous = phiByNode(node) val φ = phi(node) if (φ > 0 || cluster.failureDetector.isMonitoring(node)) { val aboveOne = if (!φ.isInfinite && φ > 1.0) 1 else 0 - phiByNode += node -> PhiValue(node, previous.countAboveOne + aboveOne, previous.count + 1, - math.max(previous.max, φ)) + phiByNode += node -> PhiValue(node, + previous.countAboveOne + aboveOne, + previous.count + 1, + math.max(previous.max, φ)) } } val phiSet = immutable.SortedSet.empty[PhiValue] ++ phiByNode.values - reportTo foreach { _ ! PhiResult(cluster.selfAddress, phiSet) } + reportTo.foreach { _ ! PhiResult(cluster.selfAddress, phiSet) } case state: CurrentClusterState => nodes = state.members.map(_.address) case memberEvent: MemberEvent => nodes += memberEvent.member.address case ReportTo(ref) => - reportTo foreach context.unwatch + reportTo.foreach(context.unwatch) reportTo = ref - reportTo foreach context.watch + reportTo.foreach(context.watch) case Terminated(ref) => reportTo match { case Some(`ref`) => reportTo = None @@ -417,11 +422,11 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { case Some(start) => gossipStats :- start } val res = StatsResult(cluster.selfAddress, CurrentInternalStats(diff, vclockStats)) - reportTo foreach { _ ! res } + reportTo.foreach { _ ! 
res } case ReportTo(ref) => - reportTo foreach context.unwatch + reportTo.foreach(context.unwatch) reportTo = ref - reportTo foreach context.watch + reportTo.foreach(context.watch) case Terminated(ref) => reportTo match { case Some(`ref`) => reportTo = None @@ -453,7 +458,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { val workers = context.actorOf(FromConfig.props(Props[Worker]), "workers") val payload = Array.fill(settings.payloadSize)(ThreadLocalRandom.current.nextInt(127).toByte) val retryTimeout = 5.seconds.dilated(context.system) - val idCounter = Iterator from 0 + val idCounter = Iterator.from(0) var sendCounter = 0L var ackCounter = 0L var outstanding = Map.empty[JobId, JobState] @@ -502,23 +507,27 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { if (outstanding.isEmpty) { val duration = (System.nanoTime - startTime).nanos replyTo ! WorkResult(duration, sendCounter, ackCounter) - context stop self + context.stop(self) } def sendJobs(): Unit = { - 0 until settings.workBatchSize foreach { _ => + (0 until settings.workBatchSize).foreach { _ => send(createJob()) } } def createJob(): Job = { - if (tree) TreeJob(idCounter.next(), payload, ThreadLocalRandom.current.nextInt(settings.treeWidth), - settings.treeLevels, settings.treeWidth) + if (tree) + TreeJob(idCounter.next(), + payload, + ThreadLocalRandom.current.nextInt(settings.treeWidth), + settings.treeLevels, + settings.treeWidth) else SimpleJob(idCounter.next(), payload) } def resend(): Unit = { - outstanding.values foreach { jobState => + outstanding.values.foreach { jobState => if (jobState.deadline.isOverdue) send(jobState.job) } @@ -536,22 +545,23 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { */ class Worker extends Actor with ActorLogging { def receive = { - case SimpleJob(id, payload) => sender() ! Ack(id) + case SimpleJob(id, payload) => sender() ! 
Ack(id) case TreeJob(id, payload, idx, levels, width) => // create the actors when first TreeJob message is received val totalActors = ((width * math.pow(width, levels) - 1) / (width - 1)).toInt - log.debug( - "Creating [{}] actors in a tree structure of [{}] levels and each actor has [{}] children", - totalActors, levels, width) + log.debug("Creating [{}] actors in a tree structure of [{}] levels and each actor has [{}] children", + totalActors, + levels, + width) val tree = context.actorOf(Props(classOf[TreeNode], levels, width), "tree") - tree forward ((idx, SimpleJob(id, payload))) + tree.forward((idx, SimpleJob(id, payload))) context.become(treeWorker(tree)) } def treeWorker(tree: ActorRef): Receive = { case SimpleJob(id, payload) => sender() ! Ack(id) case TreeJob(id, payload, idx, _, _) => - tree forward ((idx, SimpleJob(id, payload))) + tree.forward((idx, SimpleJob(id, payload))) } } @@ -559,10 +569,12 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { require(level >= 1) def createChild(): Actor = if (level == 1) new Leaf else new TreeNode(level - 1, width) val indexedChildren = - 0 until width map { i => context.actorOf(Props(createChild()).withDeploy(Deploy.local), name = i.toString) } toVector + (0 until width).map { i => + context.actorOf(Props(createChild()).withDeploy(Deploy.local), name = i.toString) + } toVector def receive = { - case (idx: Int, job: SimpleJob) if idx < width => indexedChildren(idx) forward ((idx, job)) + case (idx: Int, job: SimpleJob) if idx < width => indexedChildren(idx).forward((idx, job)) } } @@ -595,12 +607,11 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { def receive = { case props: Props => context.actorOf(props) - case e: Exception => context.children foreach { _ ! e } + case e: Exception => context.children.foreach { _ ! e } case GetChildrenCount => sender() ! 
ChildrenCount(context.children.size, restartCount) case Reset => - require( - context.children.isEmpty, - s"ResetChildrenCount not allowed when children exists, [${context.children.size}]") + require(context.children.isEmpty, + s"ResetChildrenCount not allowed when children exists, [${context.children.size}]") restartCount = 0 } } @@ -661,11 +672,14 @@ class StressMultiJvmNode12 extends StressSpec class StressMultiJvmNode13 extends StressSpec abstract class StressSpec - extends MultiNodeSpec({ - // Aeron media driver must be started before ActorSystem - SharedMediaDriverSupport.startMediaDriver(StressMultiJvmSpec) - StressMultiJvmSpec - }) with MultiNodeClusterSpec with BeforeAndAfterEach with ImplicitSender { + extends MultiNodeSpec({ + // Aeron media driver must be started before ActorSystem + SharedMediaDriverSupport.startMediaDriver(StressMultiJvmSpec) + StressMultiJvmSpec + }) + with MultiNodeClusterSpec + with BeforeAndAfterEach + with ImplicitSender { import StressMultiJvmSpec._ @@ -686,8 +700,12 @@ abstract class StressSpec override def muteLog(sys: ActorSystem = system): Unit = { super.muteLog(sys) sys.eventStream.publish(Mute(EventFilter[RuntimeException](pattern = ".*Simulated exception.*"))) - muteDeadLetters(classOf[SimpleJob], classOf[AggregatedClusterResult], SendBatch.getClass, - classOf[StatsResult], classOf[PhiResult], RetryTick.getClass)(sys) + muteDeadLetters(classOf[SimpleJob], + classOf[AggregatedClusterResult], + SendBatch.getClass, + classOf[StatsResult], + classOf[PhiResult], + RetryTick.getClass)(sys) } override protected def afterTermination(): Unit = { @@ -713,22 +731,38 @@ abstract class StressSpec val sb = new StringBuilder - sb.append("Operating system: ").append(os.getName).append(", ").append(os.getArch).append(", ").append(os.getVersion) + sb.append("Operating system: ") + .append(os.getName) + .append(", ") + .append(os.getArch) + .append(", ") + .append(os.getVersion) sb.append("\n") - sb.append("JVM: 
").append(runtime.getVmName).append(" ").append(runtime.getVmVendor). - append(" ").append(runtime.getVmVersion) + sb.append("JVM: ") + .append(runtime.getVmName) + .append(" ") + .append(runtime.getVmVendor) + .append(" ") + .append(runtime.getVmVersion) sb.append("\n") sb.append("Processors: ").append(os.getAvailableProcessors) sb.append("\n") sb.append("Load average: ").append(os.getSystemLoadAverage) sb.append("\n") - sb.append("Thread count: ").append(threads.getThreadCount).append(" (").append(threads.getPeakThreadCount).append(")") + sb.append("Thread count: ") + .append(threads.getThreadCount) + .append(" (") + .append(threads.getPeakThreadCount) + .append(")") sb.append("\n") - sb.append("Heap: ").append((heap.getUsed.toDouble / 1024 / 1024).form). - append(" (").append((heap.getInit.toDouble / 1024 / 1024).form). - append(" - "). - append((heap.getMax.toDouble / 1024 / 1024).form). - append(")").append(" MB") + sb.append("Heap: ") + .append((heap.getUsed.toDouble / 1024 / 1024).form) + .append(" (") + .append((heap.getInit.toDouble / 1024 / 1024).form) + .append(" - ") + .append((heap.getMax.toDouble / 1024 / 1024).form) + .append(")") + .append(" MB") sb.append("\n") import scala.collection.JavaConverters._ @@ -794,7 +828,7 @@ abstract class StressSpec } def joinOneByOne(numberOfNodes: Int): Unit = { - 0 until numberOfNodes foreach { _ => + (0 until numberOfNodes).foreach { _ => joinOne() nbrUsedRoles += 1 step += 1 @@ -825,12 +859,13 @@ abstract class StressSpec within(10.seconds + convergenceWithin(3.seconds, nbrUsedRoles + numberOfNodes)) { val currentRoles = roles.take(nbrUsedRoles + numberOfNodes) val joiningRoles = currentRoles.takeRight(numberOfNodes) - val title = s"join ${numberOfNodes} to ${if (toSeedNodes) "seed nodes" else "one node"}, in ${nbrUsedRoles} nodes cluster" + val title = + s"join ${numberOfNodes} to ${if (toSeedNodes) "seed nodes" else "one node"}, in ${nbrUsedRoles} nodes cluster" createResultAggregator(title, expectedResults = 
currentRoles.size, includeInHistory = true) runOn(currentRoles: _*) { reportResult { runOn(joiningRoles: _*) { - if (toSeedNodes) cluster.joinSeedNodes(seedNodes.toIndexedSeq map address) + if (toSeedNodes) cluster.joinSeedNodes(seedNodes.toIndexedSeq.map(address)) else cluster.join(roles.head) } awaitMembersUp(currentRoles.size, timeout = remainingOrDefault) @@ -842,7 +877,7 @@ abstract class StressSpec } def removeOneByOne(numberOfNodes: Int, shutdown: Boolean): Unit = { - 0 until numberOfNodes foreach { _ => + (0 until numberOfNodes).foreach { _ => removeOne(shutdown) nbrUsedRoles -= 1 step += 1 @@ -925,7 +960,7 @@ abstract class StressSpec val returnValue = thunk - clusterResultAggregator foreach { + clusterResultAggregator.foreach { _ ! ClusterResult(cluster.selfAddress, (System.nanoTime - startTime).nanos, latestGossipStats :- startStats) } @@ -939,7 +974,9 @@ abstract class StressSpec val usedRoles = roles.take(nbrUsedRoles) val usedAddresses = usedRoles.map(address(_)).toSet - @tailrec def loop(counter: Int, previousAS: Option[ActorSystem], allPreviousAddresses: Set[Address]): Option[ActorSystem] = { + @tailrec def loop(counter: Int, + previousAS: Option[ActorSystem], + allPreviousAddresses: Set[Address]): Option[ActorSystem] = { if (counter > rounds) previousAS else { val t = title + " round " + counter @@ -952,20 +989,21 @@ abstract class StressSpec reportResult { val nextAS = if (activeRoles contains myself) { - previousAS foreach { as => TestKit.shutdownActorSystem(as) } + previousAS.foreach { as => + TestKit.shutdownActorSystem(as) + } val sys = ActorSystem(system.name, system.settings.config) muteLog(sys) - Cluster(sys).joinSeedNodes(seedNodes.toIndexedSeq map address) + Cluster(sys).joinSeedNodes(seedNodes.toIndexedSeq.map(address)) Some(sys) } else previousAS runOn(usedRoles: _*) { - awaitMembersUp( - nbrUsedRoles + activeRoles.size, - canNotBePartOfMemberRing = allPreviousAddresses, - timeout = remainingOrDefault) + awaitMembersUp(nbrUsedRoles + 
activeRoles.size, + canNotBePartOfMemberRing = allPreviousAddresses, + timeout = remainingOrDefault) awaitAllReachable() } - val nextAddresses = clusterView.members.map(_.address) diff usedAddresses + val nextAddresses = clusterView.members.map(_.address).diff(usedAddresses) runOn(usedRoles: _*) { nextAddresses.size should ===(numberOfNodesJoinRemove) } @@ -981,7 +1019,9 @@ abstract class StressSpec } } - loop(1, None, Set.empty) foreach { as => TestKit.shutdownActorSystem(as) } + loop(1, None, Set.empty).foreach { as => + TestKit.shutdownActorSystem(as) + } within(loopDuration) { runOn(usedRoles: _*) { awaitMembersUp(nbrUsedRoles, timeout = remainingOrDefault) @@ -1001,8 +1041,11 @@ abstract class StressSpec identifyProbe.expectMsgType[ActorIdentity].ref } - def exerciseRouters(title: String, duration: FiniteDuration, batchInterval: FiniteDuration, - expectDroppedMessages: Boolean, tree: Boolean): Unit = + def exerciseRouters(title: String, + duration: FiniteDuration, + batchInterval: FiniteDuration, + expectDroppedMessages: Boolean, + tree: Boolean): Unit = within(duration + 10.seconds) { nbrUsedRoles should ===(totalNumberOfNodes) createResultAggregator(title, expectedResults = nbrUsedRoles, includeInHistory = false) @@ -1010,9 +1053,8 @@ abstract class StressSpec val (masterRoles, otherRoles) = roles.take(nbrUsedRoles).splitAt(3) runOn(masterRoles: _*) { reportResult { - val m = system.actorOf( - Props(classOf[Master], settings, batchInterval, tree).withDeploy(Deploy.local), - name = masterName) + val m = system.actorOf(Props(classOf[Master], settings, batchInterval, tree).withDeploy(Deploy.local), + name = masterName) m ! 
Begin import system.dispatcher system.scheduler.scheduleOnce(duration) { @@ -1039,9 +1081,11 @@ abstract class StressSpec def awaitWorkResult(m: ActorRef): WorkResult = { val workResult = expectMsgType[WorkResult] if (settings.infolog) - log.info("{} result, [{}] jobs/s, retried [{}] of [{}] msg", masterName, - workResult.jobsPerSecond.form, - workResult.retryCount, workResult.sendCount) + log.info("{} result, [{}] jobs/s, retried [{}] of [{}] msg", + masterName, + workResult.jobsPerSecond.form, + workResult.retryCount, + workResult.sendCount) watch(m) expectTerminated(m) workResult @@ -1057,13 +1101,15 @@ abstract class StressSpec val (masterRoles, otherRoles) = roles.take(nbrUsedRoles).splitAt(3) runOn(masterRoles: _*) { reportResult { - roles.take(nbrUsedRoles) foreach { r => + roles.take(nbrUsedRoles).foreach { r => supervisor ! Props[RemoteChild].withDeploy(Deploy(scope = RemoteScope(address(r)))) } supervisor ! GetChildrenCount expectMsgType[ChildrenCount] should ===(ChildrenCount(nbrUsedRoles, 0)) - 1 to 5 foreach { _ => supervisor ! new RuntimeException("Simulated exception") } + (1 to 5).foreach { _ => + supervisor ! new RuntimeException("Simulated exception") + } awaitAssert { supervisor ! 
GetChildrenCount val c = expectMsgType[ChildrenCount] @@ -1123,14 +1169,15 @@ abstract class StressSpec "join seed nodes" taggedAs LongRunningTest in within(30 seconds) { - val otherNodesJoiningSeedNodes = roles.slice(numberOfSeedNodes, numberOfSeedNodes + numberOfNodesJoiningToSeedNodesInitially) + val otherNodesJoiningSeedNodes = + roles.slice(numberOfSeedNodes, numberOfSeedNodes + numberOfNodesJoiningToSeedNodesInitially) val size = seedNodes.size + otherNodesJoiningSeedNodes.size createResultAggregator("join seed nodes", expectedResults = size, includeInHistory = true) runOn((seedNodes ++ otherNodesJoiningSeedNodes): _*) { reportResult { - cluster.joinSeedNodes(seedNodes.toIndexedSeq map address) + cluster.joinSeedNodes(seedNodes.toIndexedSeq.map(address)) awaitMembersUp(size, timeout = remainingOrDefault) } } @@ -1143,9 +1190,8 @@ abstract class StressSpec "start routers that are running while nodes are joining" taggedAs LongRunningTest in { runOn(roles.take(3): _*) { - system.actorOf( - Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local), - name = masterName) ! Begin + system.actorOf(Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local), + name = masterName) ! 
Begin } } @@ -1192,32 +1238,44 @@ abstract class StressSpec "use routers with normal throughput" taggedAs LongRunningTest in { if (exerciseActors) { - exerciseRouters("use routers with normal throughput", normalThroughputDuration, - batchInterval = workBatchInterval, expectDroppedMessages = false, tree = false) + exerciseRouters("use routers with normal throughput", + normalThroughputDuration, + batchInterval = workBatchInterval, + expectDroppedMessages = false, + tree = false) } enterBarrier("after-" + step) } "use routers with high throughput" taggedAs LongRunningTest in { if (exerciseActors) { - exerciseRouters("use routers with high throughput", highThroughputDuration, - batchInterval = Duration.Zero, expectDroppedMessages = false, tree = false) + exerciseRouters("use routers with high throughput", + highThroughputDuration, + batchInterval = Duration.Zero, + expectDroppedMessages = false, + tree = false) } enterBarrier("after-" + step) } "use many actors with normal throughput" taggedAs LongRunningTest in { if (exerciseActors) { - exerciseRouters("use many actors with normal throughput", normalThroughputDuration, - batchInterval = workBatchInterval, expectDroppedMessages = false, tree = true) + exerciseRouters("use many actors with normal throughput", + normalThroughputDuration, + batchInterval = workBatchInterval, + expectDroppedMessages = false, + tree = true) } enterBarrier("after-" + step) } "use many actors with high throughput" taggedAs LongRunningTest in { if (exerciseActors) { - exerciseRouters("use many actors with high throughput", highThroughputDuration, - batchInterval = Duration.Zero, expectDroppedMessages = false, tree = true) + exerciseRouters("use many actors with high throughput", + highThroughputDuration, + batchInterval = Duration.Zero, + expectDroppedMessages = false, + tree = true) } enterBarrier("after-" + step) } @@ -1242,9 +1300,8 @@ abstract class StressSpec "start routers that are running while nodes are removed" taggedAs 
LongRunningTest in { if (exerciseActors) { runOn(roles.take(3): _*) { - system.actorOf( - Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local), - name = masterName) ! Begin + system.actorOf(Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local), + name = masterName) ! Begin } } enterBarrier("after-" + step) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index 62a87c9510..006c84cb00 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -22,8 +22,7 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { // Note that this test uses default configuration, // not MultiNodeClusterSpec.clusterConfig - commonConfig(ConfigFactory.parseString( - """ + commonConfig(ConfigFactory.parseString(""" akka { actor.provider = cluster loggers = ["akka.testkit.TestEventListener"] @@ -41,8 +40,7 @@ class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec -abstract class SunnyWeatherSpec extends MultiNodeSpec(SunnyWeatherMultiJvmSpec) - with MultiNodeClusterSpec { +abstract class SunnyWeatherSpec extends MultiNodeSpec(SunnyWeatherMultiJvmSpec) with MultiNodeClusterSpec { import SunnyWeatherMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala index bdb051f96b..9260a4e62a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SurviveNetworkInstabilitySpec.scala @@ -33,13 +33,14 @@ object SurviveNetworkInstabilityMultiJvmSpec extends MultiNodeConfig { val 
seventh = role("seventh") val eighth = role("eighth") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" akka.remote.system-message-buffer-size=100 akka.remote.artery.advanced.system-message-buffer-size=100 akka.remote.netty.tcp.connection-timeout = 10s - """)). - withFallback(MultiNodeClusterSpec.clusterConfig)) + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) testTransport(on = true) @@ -78,9 +79,9 @@ class SurviveNetworkInstabilityMultiJvmNode7 extends SurviveNetworkInstabilitySp class SurviveNetworkInstabilityMultiJvmNode8 extends SurviveNetworkInstabilitySpec abstract class SurviveNetworkInstabilitySpec - extends MultiNodeSpec(SurviveNetworkInstabilityMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender { + extends MultiNodeSpec(SurviveNetworkInstabilityMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender { import SurviveNetworkInstabilityMultiJvmSpec._ @@ -90,7 +91,7 @@ abstract class SurviveNetworkInstabilitySpec override def expectedTestDuration = 3.minutes def assertUnreachable(subjects: RoleName*): Unit = { - val expected = subjects.toSet map address + val expected = subjects.toSet.map(address) awaitAssert(clusterView.unreachableMembers.map(_.address) should ===(expected)) } @@ -265,8 +266,12 @@ abstract class SurviveNetworkInstabilitySpec enterBarrier("watcher-created") runOn(second) { - val sysMsgBufferSize = system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider]. - remoteSettings.SysMsgBufferSize + val sysMsgBufferSize = system + .asInstanceOf[ExtendedActorSystem] + .provider + .asInstanceOf[RemoteActorRefProvider] + .remoteSettings + .SysMsgBufferSize val refs = Vector.fill(sysMsgBufferSize + 1)(system.actorOf(Props[Echo])).toSet system.actorSelection(node(third) / "user" / "watcher") ! 
Targets(refs) expectMsg(TargetsRegistered) @@ -351,7 +356,7 @@ abstract class SurviveNetworkInstabilitySpec runOn(side1AfterJoin: _*) { // side2 removed - val expected = (side1AfterJoin map address).toSet + val expected = side1AfterJoin.map(address).toSet awaitAssert { // repeat the downing in case it was not successful, which may // happen if the removal was reverted due to gossip merge, see issue #18767 @@ -380,13 +385,13 @@ abstract class SurviveNetworkInstabilitySpec Thread.sleep(10000) runOn(side1AfterJoin: _*) { - val expected = (side1AfterJoin map address).toSet + val expected = side1AfterJoin.map(address).toSet clusterView.members.map(_.address) should ===(expected) } runOn(side2: _*) { // side2 comes back but stays unreachable - val expected = ((side2 ++ side1) map address).toSet + val expected = (side2 ++ side1).map(address).toSet clusterView.members.map(_.address) should ===(expected) assertUnreachable(side1: _*) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 8ade73b93f..8f3b9197f9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -20,12 +20,14 @@ object TransitionMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback( + ConfigFactory.parseString(""" akka.cluster.periodic-tasks-initial-delay = 300 s # turn off all periodic tasks akka.cluster.publish-stats-interval = 0 s # always, when it happens - """)). 
- withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) + """)) + .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) } class TransitionMultiJvmNode1 extends TransitionSpec @@ -33,9 +35,9 @@ class TransitionMultiJvmNode2 extends TransitionSpec class TransitionMultiJvmNode3 extends TransitionSpec abstract class TransitionSpec - extends MultiNodeSpec(TransitionMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender { + extends MultiNodeSpec(TransitionMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender { import TransitionMultiJvmSpec._ @@ -46,7 +48,7 @@ abstract class TransitionSpec def nonLeader(roles: RoleName*) = roles.toSeq.sorted.tail def memberStatus(address: Address): MemberStatus = { - val statusOption = (clusterView.members union clusterView.unreachableMembers).collectFirst { + val statusOption = clusterView.members.union(clusterView.unreachableMembers).collectFirst { case m if m.address == address => m.status } statusOption.getOrElse(Removed) @@ -56,10 +58,10 @@ abstract class TransitionSpec def members: Set[RoleName] = memberAddresses.flatMap(roleName(_)) - def seenLatestGossip: Set[RoleName] = clusterView.seenBy flatMap roleName + def seenLatestGossip: Set[RoleName] = clusterView.seenBy.flatMap(roleName) def awaitSeen(addresses: Address*): Unit = awaitAssert { - (seenLatestGossip map address) should ===(addresses.toSet) + (seenLatestGossip.map(address)) should ===(addresses.toSet) } def awaitMembers(addresses: Address*): Unit = awaitAssert { @@ -91,7 +93,7 @@ abstract class TransitionSpec clusterView.latestStats.gossipStats.receivedGossipCount != oldCount // received gossip } // gossip chat will synchronize the views - awaitCond((Set(fromRole, toRole) diff seenLatestGossip).isEmpty) + awaitCond(Set(fromRole, toRole).diff(seenLatestGossip).isEmpty) enterBarrier("after-gossip-" + gossipBarrierCounter) } runOn(fromRole) { @@ -99,7 +101,7 @@ abstract class TransitionSpec // send gossip 
cluster.clusterCore ! InternalClusterAction.SendGossipTo(toRole) // gossip chat will synchronize the views - awaitCond((Set(fromRole, toRole) diff seenLatestGossip).isEmpty) + awaitCond(Set(fromRole, toRole).diff(seenLatestGossip).isEmpty) enterBarrier("after-gossip-" + gossipBarrierCounter) } runOn(roles.filterNot(r => r == fromRole || r == toRole): _*) { @@ -114,7 +116,7 @@ abstract class TransitionSpec "start nodes as singleton clusters" taggedAs LongRunningTest in { runOn(first) { - cluster join myself + cluster.join(myself) // first joining itself will immediately be moved to Up awaitMemberStatus(myself, Up) awaitCond(clusterView.isSingletonCluster) @@ -144,7 +146,7 @@ abstract class TransitionSpec } enterBarrier("leader-actions-2") - first gossipTo second + first.gossipTo(second) runOn(first, second) { // gossip chat will synchronize the views awaitMemberStatus(second, Up) @@ -166,7 +168,7 @@ abstract class TransitionSpec } enterBarrier("third-joined-second") - second gossipTo first + second.gossipTo(first) runOn(first, second) { // gossip chat will synchronize the views awaitMembers(first, second, third) @@ -175,7 +177,7 @@ abstract class TransitionSpec awaitAssert(seenLatestGossip should ===(Set(first, second, third))) } - first gossipTo third + first.gossipTo(third) runOn(first, second, third) { awaitMembers(first, second, third) awaitMemberStatus(first, Up) @@ -197,14 +199,14 @@ abstract class TransitionSpec enterBarrier("leader-actions-3") // leader gossipTo first non-leader - leader12 gossipTo other1 + leader12.gossipTo(other1) runOn(other1) { awaitMemberStatus(third, Up) awaitAssert(seenLatestGossip should ===(Set(leader12, myself))) } // first non-leader gossipTo the other non-leader - other1 gossipTo other2 + other1.gossipTo(other2) runOn(other1) { // send gossip cluster.clusterCore ! 
InternalClusterAction.SendGossipTo(other2) @@ -215,7 +217,7 @@ abstract class TransitionSpec } // first non-leader gossipTo the leader - other1 gossipTo leader12 + other1.gossipTo(leader12) runOn(first, second, third) { awaitMemberStatus(first, Up) awaitMemberStatus(second, Up) @@ -236,7 +238,7 @@ abstract class TransitionSpec enterBarrier("after-second-unavailable") - third gossipTo first + third.gossipTo(first) runOn(first, third) { awaitAssert(clusterView.unreachableMembers.map(_.address) should contain(address(second))) @@ -248,7 +250,7 @@ abstract class TransitionSpec enterBarrier("after-second-down") - first gossipTo third + first.gossipTo(third) runOn(first, third) { awaitAssert(clusterView.unreachableMembers.map(_.address) should contain(address(second))) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala index e1caf377c2..721c79725e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala @@ -27,8 +27,7 @@ object UnreachableNodeJoinsAgainMultiNodeConfig extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(ConfigFactory.parseString( - """ + commonConfig(ConfigFactory.parseString(""" akka.remote.log-remote-lifecycle-events = off """).withFallback(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig))) @@ -42,8 +41,8 @@ class UnreachableNodeJoinsAgainMultiJvmNode3 extends UnreachableNodeJoinsAgainSp class UnreachableNodeJoinsAgainMultiJvmNode4 extends UnreachableNodeJoinsAgainSpec abstract class UnreachableNodeJoinsAgainSpec - extends MultiNodeSpec(UnreachableNodeJoinsAgainMultiNodeConfig) - with MultiNodeClusterSpec { + extends MultiNodeSpec(UnreachableNodeJoinsAgainMultiNodeConfig) + with MultiNodeClusterSpec { import 
UnreachableNodeJoinsAgainMultiNodeConfig._ @@ -93,7 +92,7 @@ abstract class UnreachableNodeJoinsAgainSpec val members = clusterView.members clusterView.unreachableMembers.size should ===(roles.size - 1) } - clusterView.unreachableMembers.map(_.address) should ===((allButVictim map address).toSet) + clusterView.unreachableMembers.map(_.address) should ===(allButVictim.map(address).toSet) } } @@ -105,7 +104,7 @@ abstract class UnreachableNodeJoinsAgainSpec val members = clusterView.members clusterView.unreachableMembers.size should ===(1) } - awaitSeenSameState(allButVictim map address: _*) + awaitSeenSameState(allButVictim.map(address): _*) // still one unreachable clusterView.unreachableMembers.size should ===(1) clusterView.unreachableMembers.head.address should ===(node(victim).address) @@ -118,7 +117,7 @@ abstract class UnreachableNodeJoinsAgainSpec "mark the node as DOWN" taggedAs LongRunningTest in { runOn(master) { - cluster down victim + cluster.down(victim) } val allButVictim = allBut(victim, roles) @@ -126,7 +125,7 @@ abstract class UnreachableNodeJoinsAgainSpec // eventually removed awaitMembersUp(roles.size - 1, Set(victim)) awaitAssert(clusterView.unreachableMembers should ===(Set.empty), 15 seconds) - awaitAssert(clusterView.members.map(_.address) should ===((allButVictim map address).toSet)) + awaitAssert(clusterView.members.map(_.address) should ===(allButVictim.map(address).toSet)) } @@ -161,20 +160,21 @@ abstract class UnreachableNodeJoinsAgainSpec runOn(victim) { val victimAddress = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress val freshConfig = - ConfigFactory.parseString( - if (RARP(system).provider.remoteSettings.Artery.Enabled) - s""" + ConfigFactory + .parseString( + if (RARP(system).provider.remoteSettings.Artery.Enabled) + s""" akka.remote.artery.canonical { hostname = ${victimAddress.host.get} port = ${victimAddress.port.get} } """ - else s""" + else s""" akka.remote.netty.tcp { hostname = ${victimAddress.host.get} 
port = ${victimAddress.port.get} - }""" - ).withFallback(system.settings.config) + }""") + .withFallback(system.settings.config) Await.ready(system.whenTerminated, 10 seconds) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala index 0a808318d0..a7e26869e6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala @@ -39,9 +39,11 @@ class ClusterConsistentHashingGroupMultiJvmNode1 extends ClusterConsistentHashin class ClusterConsistentHashingGroupMultiJvmNode2 extends ClusterConsistentHashingGroupSpec class ClusterConsistentHashingGroupMultiJvmNode3 extends ClusterConsistentHashingGroupSpec -abstract class ClusterConsistentHashingGroupSpec extends MultiNodeSpec(ClusterConsistentHashingGroupMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with DefaultTimeout { +abstract class ClusterConsistentHashingGroupSpec + extends MultiNodeSpec(ClusterConsistentHashingGroupMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender + with DefaultTimeout { import ClusterConsistentHashingGroupMultiJvmSpec._ /** diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala index cacd639264..100f638a62 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala @@ -35,8 +35,7 @@ object ClusterConsistentHashingRouterMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false). 
- withFallback(ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" common-router-settings = { router = consistent-hashing-pool cluster { @@ -51,8 +50,7 @@ object ClusterConsistentHashingRouterMultiJvmSpec extends MultiNodeConfig { /router3 = $${common-router-settings} /router4 = $${common-router-settings} } - """)). - withFallback(MultiNodeClusterSpec.clusterConfig)) + """)).withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -60,9 +58,11 @@ class ClusterConsistentHashingRouterMultiJvmNode1 extends ClusterConsistentHashi class ClusterConsistentHashingRouterMultiJvmNode2 extends ClusterConsistentHashingRouterSpec class ClusterConsistentHashingRouterMultiJvmNode3 extends ClusterConsistentHashingRouterSpec -abstract class ClusterConsistentHashingRouterSpec extends MultiNodeSpec(ClusterConsistentHashingRouterMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with DefaultTimeout { +abstract class ClusterConsistentHashingRouterSpec + extends MultiNodeSpec(ClusterConsistentHashingRouterMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender + with DefaultTimeout { import ClusterConsistentHashingRouterMultiJvmSpec._ lazy val router1 = system.actorOf(FromConfig.props(Props[Echo]), "router1") @@ -89,7 +89,8 @@ abstract class ClusterConsistentHashingRouterSpec extends MultiNodeSpec(ClusterC // it may take some time until router receives cluster member events awaitAssert { currentRoutees(router1).size should ===(4) } val routees = currentRoutees(router1) - routees.map { case ActorRefRoutee(ref) => fullAddress(ref) }.toSet should ===(Set(address(first), address(second))) + routees.map { case ActorRefRoutee(ref) => fullAddress(ref) }.toSet should ===( + Set(address(first), address(second))) } enterBarrier("after-2") } @@ -121,10 +122,10 @@ abstract class ClusterConsistentHashingRouterSpec extends MultiNodeSpec(ClusterC "deploy programatically defined routees to the member nodes in the cluster" 
in { runOn(first) { val router2 = system.actorOf( - ClusterRouterPool( - local = ConsistentHashingPool(nrOfInstances = 0), - settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 2, allowLocalRoutees = true)). - props(Props[Echo]), + ClusterRouterPool(local = ConsistentHashingPool(nrOfInstances = 0), + settings = ClusterRouterPoolSettings(totalInstances = 10, + maxInstancesPerNode = 2, + allowLocalRoutees = true)).props(Props[Echo]), "router2") // it may take some time until router receives cluster member events awaitAssert { currentRoutees(router2).size should ===(6) } @@ -141,7 +142,9 @@ abstract class ClusterConsistentHashingRouterSpec extends MultiNodeSpec(ClusterC case s: String => s } - val router3 = system.actorOf(ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping).props(Props[Echo]), "router3") + val router3 = + system.actorOf(ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping).props(Props[Echo]), + "router3") assertHashMapping(router3) } @@ -155,12 +158,13 @@ abstract class ClusterConsistentHashingRouterSpec extends MultiNodeSpec(ClusterC case s: String => s } - val router4 = system.actorOf( - ClusterRouterPool( - local = ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping), - settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true)). 
- props(Props[Echo]), - "router4") + val router4 = + system.actorOf(ClusterRouterPool(local = ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping), + settings = + ClusterRouterPoolSettings(totalInstances = 10, + maxInstancesPerNode = 1, + allowLocalRoutees = true)).props(Props[Echo]), + "router4") assertHashMapping(router4) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala index 86e16ad228..61ff8c65a5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala @@ -48,8 +48,10 @@ object ClusterRoundRobinMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback( + ConfigFactory.parseString(""" akka.actor { allow-java-serialization = off serialize-creators = off @@ -92,8 +94,8 @@ object ClusterRoundRobinMultiJvmSpec extends MultiNodeConfig { } } } - """)). 
- withFallback(MultiNodeClusterSpec.clusterConfig)) + """)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, second)(ConfigFactory.parseString("""akka.cluster.roles =["a", "c"]""")) nodeConfig(third)(ConfigFactory.parseString("""akka.cluster.roles =["b", "c"]""")) @@ -107,17 +109,18 @@ class ClusterRoundRobinMultiJvmNode2 extends ClusterRoundRobinSpec class ClusterRoundRobinMultiJvmNode3 extends ClusterRoundRobinSpec class ClusterRoundRobinMultiJvmNode4 extends ClusterRoundRobinSpec -abstract class ClusterRoundRobinSpec extends MultiNodeSpec(ClusterRoundRobinMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with DefaultTimeout { +abstract class ClusterRoundRobinSpec + extends MultiNodeSpec(ClusterRoundRobinMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender + with DefaultTimeout { import ClusterRoundRobinMultiJvmSpec._ lazy val router1 = system.actorOf(FromConfig.props(Props[SomeActor]), "router1") lazy val router2 = system.actorOf( - ClusterRouterPool( - RoundRobinPool(nrOfInstances = 0), - ClusterRouterPoolSettings(totalInstances = 3, maxInstancesPerNode = 1, allowLocalRoutees = true)). 
- props(Props[SomeActor]), + ClusterRouterPool(RoundRobinPool(nrOfInstances = 0), + ClusterRouterPoolSettings(totalInstances = 3, maxInstancesPerNode = 1, allowLocalRoutees = true)) + .props(Props[SomeActor]), "router2") lazy val router3 = system.actorOf(FromConfig.props(Props[SomeActor]), "router3") lazy val router4 = system.actorOf(FromConfig.props(), "router4") @@ -312,7 +315,7 @@ abstract class ClusterRoundRobinSpec extends MultiNodeSpec(ClusterRoundRobinMult // note that router2 has totalInstances = 3, maxInstancesPerNode = 1 val routees = currentRoutees(router2) - val routeeAddresses = routees map { case ActorRefRoutee(ref) => fullAddress(ref) } + val routeeAddresses = routees.map { case ActorRefRoutee(ref) => fullAddress(ref) } routeeAddresses.size should ===(3) replies.values.sum should ===(iterationCount) @@ -326,7 +329,7 @@ abstract class ClusterRoundRobinSpec extends MultiNodeSpec(ClusterRoundRobinMult // myservice is already running def routees = currentRoutees(router4) - def routeeAddresses = (routees map { case ActorSelectionRoutee(sel) => fullAddress(sel.anchor) }).toSet + def routeeAddresses = routees.map { case ActorSelectionRoutee(sel) => fullAddress(sel.anchor) }.toSet runOn(first) { // 4 nodes, 2 routees on each node @@ -351,10 +354,10 @@ abstract class ClusterRoundRobinSpec extends MultiNodeSpec(ClusterRoundRobinMult runOn(first) { def routees = currentRoutees(router2) - def routeeAddresses = (routees map { case ActorRefRoutee(ref) => fullAddress(ref) }).toSet + def routeeAddresses = routees.map { case ActorRefRoutee(ref) => fullAddress(ref) }.toSet - routees foreach { case ActorRefRoutee(ref) => watch(ref) } - val notUsedAddress = ((roles map address).toSet diff routeeAddresses).head + routees.foreach { case ActorRefRoutee(ref) => watch(ref) } + val notUsedAddress = roles.map(address).toSet.diff(routeeAddresses).head val downAddress = routeeAddresses.find(_ != address(first)).get val downRouteeRef = routees.collectFirst { case 
ActorRefRoutee(ref) if ref.path.address == downAddress => ref diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala index 119a8375d5..9314c6c27d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/UseRoleIgnoredSpec.scala @@ -46,8 +46,7 @@ object UseRoleIgnoredMultiJvmSpec extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false). - withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first)(ConfigFactory.parseString("""akka.cluster.roles =["a", "c"]""")) nodeConfig(second, third)(ConfigFactory.parseString("""akka.cluster.roles =["b", "c"]""")) @@ -58,9 +57,11 @@ class UseRoleIgnoredMultiJvmNode1 extends UseRoleIgnoredSpec class UseRoleIgnoredMultiJvmNode2 extends UseRoleIgnoredSpec class UseRoleIgnoredMultiJvmNode3 extends UseRoleIgnoredSpec -abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with DefaultTimeout { +abstract class UseRoleIgnoredSpec + extends MultiNodeSpec(UseRoleIgnoredMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender + with DefaultTimeout { import akka.cluster.routing.UseRoleIgnoredMultiJvmSpec._ def receiveReplies(routeeType: RouteeType, expectedReplies: Int): Map[Address, Int] = { @@ -103,10 +104,11 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp val roles = Set("b") val router = system.actorOf( - ClusterRouterPool( - RoundRobinPool(nrOfInstances = 6), - ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = false, useRoles = roles)). 
- props(Props[SomeActor]), + ClusterRouterPool(RoundRobinPool(nrOfInstances = 6), + ClusterRouterPoolSettings(totalInstances = 6, + maxInstancesPerNode = 2, + allowLocalRoutees = false, + useRoles = roles)).props(Props[SomeActor]), "router-2") awaitAssert(currentRoutees(router).size should ===(4)) @@ -132,12 +134,13 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp runOn(first) { val roles = Set("b") - val router = system.actorOf( - ClusterRouterGroup( - RoundRobinGroup(paths = Nil), - ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"), - allowLocalRoutees = false, useRoles = roles)).props, - "router-2b") + val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil), + ClusterRouterGroupSettings(totalInstances = 6, + routeesPaths = + List("/user/foo", "/user/bar"), + allowLocalRoutees = false, + useRoles = roles)).props, + "router-2b") awaitAssert(currentRoutees(router).size should ===(4)) @@ -163,10 +166,11 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp val roles = Set("b") val router = system.actorOf( - ClusterRouterPool( - RoundRobinPool(nrOfInstances = 6), - ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRoles = roles)). 
- props(Props[SomeActor]), + ClusterRouterPool(RoundRobinPool(nrOfInstances = 6), + ClusterRouterPoolSettings(totalInstances = 6, + maxInstancesPerNode = 2, + allowLocalRoutees = true, + useRoles = roles)).props(Props[SomeActor]), "router-3") awaitAssert(currentRoutees(router).size should ===(4)) @@ -192,12 +196,13 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp runOn(first) { val roles = Set("b") - val router = system.actorOf( - ClusterRouterGroup( - RoundRobinGroup(paths = Nil), - ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"), - allowLocalRoutees = true, useRoles = roles)).props, - "router-3b") + val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil), + ClusterRouterGroupSettings(totalInstances = 6, + routeesPaths = + List("/user/foo", "/user/bar"), + allowLocalRoutees = true, + useRoles = roles)).props, + "router-3b") awaitAssert(currentRoutees(router).size should ===(4)) @@ -223,10 +228,11 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp val roles = Set("a") val router = system.actorOf( - ClusterRouterPool( - RoundRobinPool(nrOfInstances = 6), - ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRoles = roles)). 
- props(Props[SomeActor]), + ClusterRouterPool(RoundRobinPool(nrOfInstances = 6), + ClusterRouterPoolSettings(totalInstances = 6, + maxInstancesPerNode = 2, + allowLocalRoutees = true, + useRoles = roles)).props(Props[SomeActor]), "router-4") awaitAssert(currentRoutees(router).size should ===(2)) @@ -252,12 +258,13 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp runOn(first) { val roles = Set("a") - val router = system.actorOf( - ClusterRouterGroup( - RoundRobinGroup(paths = Nil), - ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"), - allowLocalRoutees = true, useRoles = roles)).props, - "router-4b") + val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil), + ClusterRouterGroupSettings(totalInstances = 6, + routeesPaths = + List("/user/foo", "/user/bar"), + allowLocalRoutees = true, + useRoles = roles)).props, + "router-4b") awaitAssert(currentRoutees(router).size should ===(2)) @@ -283,10 +290,11 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp val roles = Set("c") val router = system.actorOf( - ClusterRouterPool( - RoundRobinPool(nrOfInstances = 6), - ClusterRouterPoolSettings(totalInstances = 6, maxInstancesPerNode = 2, allowLocalRoutees = true, useRoles = roles)). 
- props(Props[SomeActor]), + ClusterRouterPool(RoundRobinPool(nrOfInstances = 6), + ClusterRouterPoolSettings(totalInstances = 6, + maxInstancesPerNode = 2, + allowLocalRoutees = true, + useRoles = roles)).props(Props[SomeActor]), "router-5") awaitAssert(currentRoutees(router).size should ===(6)) @@ -312,12 +320,13 @@ abstract class UseRoleIgnoredSpec extends MultiNodeSpec(UseRoleIgnoredMultiJvmSp runOn(first) { val roles = Set("c") - val router = system.actorOf( - ClusterRouterGroup( - RoundRobinGroup(paths = Nil), - ClusterRouterGroupSettings(totalInstances = 6, routeesPaths = List("/user/foo", "/user/bar"), - allowLocalRoutees = true, useRoles = roles)).props, - "router-5b") + val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil), + ClusterRouterGroupSettings(totalInstances = 6, + routeesPaths = + List("/user/foo", "/user/bar"), + allowLocalRoutees = true, + useRoles = roles)).props, + "router-5b") awaitAssert(currentRoutees(router).size should ===(6)) diff --git a/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala index 081b30ced9..4a03d1998b 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala @@ -18,11 +18,8 @@ import akka.testkit.TimingTest object AutoDownSpec { final case class DownCalled(address: Address) - class AutoDownTestActor( - memberA: Member, - autoDownUnreachableAfter: FiniteDuration, - probe: ActorRef) - extends AutoDownBase(autoDownUnreachableAfter) { + class AutoDownTestActor(memberA: Member, autoDownUnreachableAfter: FiniteDuration, probe: ActorRef) + extends AutoDownBase(autoDownUnreachableAfter) { override def selfAddress = memberA.address override def scheduler: Scheduler = context.system.scheduler diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 
1454b8b1d4..fca05365fd 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -57,15 +57,15 @@ class ClusterConfigSpec extends AkkaSpec { } "be able to parse non-default cluster config elements" in { - val settings = new ClusterSettings(ConfigFactory.parseString( - """ + val settings = new ClusterSettings(ConfigFactory.parseString(""" |akka { | cluster { | roles = [ "hamlet" ] | multi-data-center.self-data-center = "blue" | } |} - """.stripMargin).withFallback(ConfigFactory.load()), system.name) + """.stripMargin).withFallback(ConfigFactory.load()), + system.name) import settings._ Roles should ===(Set("hamlet", ClusterSettings.DcRolePrefix + "blue")) SelfDataCenter should ===("blue") diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala index 5940c861af..f9fc25ec3f 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala @@ -36,7 +36,8 @@ object ClusterDeployerSpec { } akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 - """, ConfigParseOptions.defaults) + """, + ConfigParseOptions.defaults) class RecipeActor extends Actor { def receive = { case _ => } @@ -53,15 +54,16 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) { val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) deployment should not be (None) - deployment should ===(Some( - Deploy( - service, - deployment.get.config, - ClusterRouterPool(RoundRobinPool(20), ClusterRouterPoolSettings( - totalInstances = 20, maxInstancesPerNode = 3, allowLocalRoutees = false)), - ClusterScope, - Deploy.NoDispatcherGiven, - Deploy.NoMailboxGiven))) + deployment should ===( + Some(Deploy(service, + deployment.get.config, + 
ClusterRouterPool(RoundRobinPool(20), + ClusterRouterPoolSettings(totalInstances = 20, + maxInstancesPerNode = 3, + allowLocalRoutees = false)), + ClusterScope, + Deploy.NoDispatcherGiven, + Deploy.NoMailboxGiven))) } "be able to parse 'akka.actor.deployment._' with specified cluster group" in { @@ -69,15 +71,16 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) { val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) deployment should not be (None) - deployment should ===(Some( - Deploy( - service, - deployment.get.config, - ClusterRouterGroup(RoundRobinGroup(List("/user/myservice")), ClusterRouterGroupSettings( - totalInstances = 20, routeesPaths = List("/user/myservice"), allowLocalRoutees = false)), - ClusterScope, - "mydispatcher", - "mymailbox"))) + deployment should ===( + Some(Deploy(service, + deployment.get.config, + ClusterRouterGroup(RoundRobinGroup(List("/user/myservice")), + ClusterRouterGroupSettings(totalInstances = 20, + routeesPaths = List("/user/myservice"), + allowLocalRoutees = false)), + ClusterScope, + "mydispatcher", + "mymailbox"))) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala index 2875c2aea0..adec870a7c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala @@ -29,8 +29,10 @@ object ClusterDomainEventPublisherSpec { """ } -class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublisherSpec.config) - with BeforeAndAfterEach with ImplicitSender { +class ClusterDomainEventPublisherSpec + extends AkkaSpec(ClusterDomainEventPublisherSpec.config) + with BeforeAndAfterEach + with ImplicitSender { val protocol = if (RARP(system).provider.remoteSettings.Artery.Enabled) "akka" @@ -68,20 +70,27 @@ 
class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish val state3 = state(g3, aUp.uniqueAddress, DefaultDataCenter) val g4 = Gossip(members = SortedSet(a51Up, aUp, bExiting, cUp)).seen(aUp.uniqueAddress) val state4 = state(g4, aUp.uniqueAddress, DefaultDataCenter) - val g5 = Gossip(members = SortedSet(a51Up, aUp, bExiting, cUp)).seen(aUp.uniqueAddress).seen(bExiting.uniqueAddress).seen(cUp.uniqueAddress).seen(a51Up.uniqueAddress) + val g5 = Gossip(members = SortedSet(a51Up, aUp, bExiting, cUp)) + .seen(aUp.uniqueAddress) + .seen(bExiting.uniqueAddress) + .seen(cUp.uniqueAddress) + .seen(a51Up.uniqueAddress) val state5 = state(g5, aUp.uniqueAddress, DefaultDataCenter) val g6 = Gossip(members = SortedSet(aLeaving, bExiting, cUp)).seen(aUp.uniqueAddress) val state6 = state(g6, aUp.uniqueAddress, DefaultDataCenter) val g7 = Gossip(members = SortedSet(aExiting, bExiting, cUp)).seen(aUp.uniqueAddress) val state7 = state(g7, aUp.uniqueAddress, DefaultDataCenter) - val g8 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp), overview = GossipOverview(reachability = - Reachability.empty.unreachable(aUp.uniqueAddress, dUp.uniqueAddress))).seen(aUp.uniqueAddress) + val g8 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp), + overview = + GossipOverview(reachability = Reachability.empty.unreachable(aUp.uniqueAddress, dUp.uniqueAddress))) + .seen(aUp.uniqueAddress) val state8 = state(g8, aUp.uniqueAddress, DefaultDataCenter) - val g9 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp, eUp), overview = GossipOverview(reachability = - Reachability.empty.unreachable(aUp.uniqueAddress, eUp.uniqueAddress))) + val g9 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp, eUp), + overview = + GossipOverview(reachability = Reachability.empty.unreachable(aUp.uniqueAddress, eUp.uniqueAddress))) val state9 = state(g9, aUp.uniqueAddress, DefaultDataCenter) - val g10 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp, eUp), overview = 
GossipOverview(reachability = - Reachability.empty)) + val g10 = Gossip(members = SortedSet(aUp, bExiting, cUp, dUp, eUp), + overview = GossipOverview(reachability = Reachability.empty)) val state10 = state(g10, aUp.uniqueAddress, DefaultDataCenter) // created in beforeEach @@ -155,10 +164,11 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish val subscriber = TestProbe() publisher ! Subscribe(subscriber.ref, InitialStateAsSnapshot, Set(classOf[RoleLeaderChanged])) subscriber.expectMsgType[CurrentClusterState] - publisher ! PublishChanges(state(Gossip(members = SortedSet(cJoining, dUp)), dUp.uniqueAddress, DefaultDataCenter)) - subscriber.expectMsgAllOf( - RoleLeaderChanged("GRP", Some(dUp.address)), - RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(dUp.address))) + publisher ! PublishChanges( + state(Gossip(members = SortedSet(cJoining, dUp)), dUp.uniqueAddress, DefaultDataCenter)) + subscriber.expectMsgAllOf(RoleLeaderChanged("GRP", Some(dUp.address)), + RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, + Some(dUp.address))) publisher ! 
PublishChanges(state(Gossip(members = SortedSet(cUp, dUp)), dUp.uniqueAddress, DefaultDataCenter)) subscriber.expectMsg(RoleLeaderChanged("GRP", Some(cUp.address))) } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala index 5fc9c2c185..64b9e8d98a 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala @@ -37,7 +37,9 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { val selfDummyAddress = UniqueAddress(Address("akka.tcp", "sys", "selfDummy", 2552), 17L) private[cluster] def converge(gossip: Gossip): (Gossip, Set[UniqueAddress]) = - gossip.members.foldLeft((gossip, Set.empty[UniqueAddress])) { case ((gs, as), m) => (gs.seen(m.uniqueAddress), as + m.uniqueAddress) } + gossip.members.foldLeft((gossip, Set.empty[UniqueAddress])) { + case ((gs, as), m) => (gs.seen(m.uniqueAddress), as + m.uniqueAddress) + } private def state(g: Gossip): MembershipState = state(g, selfDummyAddress) @@ -66,26 +68,25 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { val (g1, _) = converge(Gossip(members = SortedSet(aJoining, bUp, cUp))) val (g2, s2) = converge(Gossip(members = SortedSet(aUp, bUp, cLeaving, eJoining))) - diffMemberEvents(state(g1), state(g2)) should ===(Seq(MemberUp(aUp), MemberLeft(cLeaving), MemberJoined(eJoining))) + diffMemberEvents(state(g1), state(g2)) should ===( + Seq(MemberUp(aUp), MemberLeft(cLeaving), MemberJoined(eJoining))) diffUnreachable(state(g1), state(g2)) should ===(Seq.empty) diffSeen(state(g1), state(g2)) should ===(Seq(SeenChanged(convergence = true, seenBy = s2.map(_.address)))) } "be produced for members in unreachable" in { - val reachability1 = Reachability.empty. - unreachable(aUp.uniqueAddress, cUp.uniqueAddress). 
- unreachable(aUp.uniqueAddress, eUp.uniqueAddress) + val reachability1 = Reachability.empty + .unreachable(aUp.uniqueAddress, cUp.uniqueAddress) + .unreachable(aUp.uniqueAddress, eUp.uniqueAddress) val g1 = Gossip(members = SortedSet(aUp, bUp, cUp, eUp), overview = GossipOverview(reachability = reachability1)) - val reachability2 = reachability1. - unreachable(aUp.uniqueAddress, bDown.uniqueAddress) - val g2 = Gossip(members = SortedSet(aUp, cUp, bDown, eDown), overview = GossipOverview(reachability = reachability2)) + val reachability2 = reachability1.unreachable(aUp.uniqueAddress, bDown.uniqueAddress) + val g2 = + Gossip(members = SortedSet(aUp, cUp, bDown, eDown), overview = GossipOverview(reachability = reachability2)) diffUnreachable(state(g1), state(g2)) should ===(Seq(UnreachableMember(bDown))) // never include self member in unreachable - diffUnreachable( - state(g1, bDown.uniqueAddress), - state(g2, bDown.uniqueAddress)) should ===(Seq()) + diffUnreachable(state(g1, bDown.uniqueAddress), state(g2, bDown.uniqueAddress)) should ===(Seq()) diffSeen(state(g1), state(g2)) should ===(Seq.empty) } @@ -98,25 +99,27 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { val dc3BMemberUp = TestMember(Address("akka.tcp", "sys", "dc3B", 2552), Up, Set.empty[String], "dc3") val reachability1 = Reachability.empty - val g1 = Gossip(members = SortedSet(aUp, bUp, dc2AMemberUp, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp), overview = GossipOverview(reachability = reachability1)) + val g1 = Gossip(members = SortedSet(aUp, bUp, dc2AMemberUp, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp), + overview = GossipOverview(reachability = reachability1)) val reachability2 = reachability1 .unreachable(aUp.uniqueAddress, dc2AMemberDown.uniqueAddress) .unreachable(dc2BMemberUp.uniqueAddress, dc2AMemberDown.uniqueAddress) - val g2 = Gossip(members = SortedSet(aUp, bUp, dc2AMemberDown, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp), overview = GossipOverview(reachability = reachability2)) + 
val g2 = Gossip(members = SortedSet(aUp, bUp, dc2AMemberDown, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp), + overview = GossipOverview(reachability = reachability2)) Set(aUp, bUp, dc2AMemberUp, dc2BMemberUp, dc3AMemberUp, dc3BMemberUp).foreach { member => val otherDc = if (member.dataCenter == ClusterSettings.DefaultDataCenter) Seq("dc2") else Seq() - diffUnreachableDataCenter( - MembershipState(g1, member.uniqueAddress, member.dataCenter, crossDcConnections = 5), - MembershipState(g2, member.uniqueAddress, member.dataCenter, crossDcConnections = 5)) should ===(otherDc.map(UnreachableDataCenter)) + diffUnreachableDataCenter(MembershipState(g1, member.uniqueAddress, member.dataCenter, crossDcConnections = 5), + MembershipState(g2, member.uniqueAddress, member.dataCenter, crossDcConnections = 5)) should ===( + otherDc.map(UnreachableDataCenter)) - diffReachableDataCenter( - MembershipState(g2, member.uniqueAddress, member.dataCenter, crossDcConnections = 5), - MembershipState(g1, member.uniqueAddress, member.dataCenter, crossDcConnections = 5)) should ===(otherDc.map(ReachableDataCenter)) + diffReachableDataCenter(MembershipState(g2, member.uniqueAddress, member.dataCenter, crossDcConnections = 5), + MembershipState(g1, member.uniqueAddress, member.dataCenter, crossDcConnections = 5)) should ===( + otherDc.map(ReachableDataCenter)) } } @@ -127,8 +130,7 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { val reachability1 = Reachability.empty val g1 = Gossip(members = SortedSet(aUp, dc2AMemberUp), overview = GossipOverview(reachability = reachability1)) - val reachability2 = reachability1 - .unreachable(aUp.uniqueAddress, dc2AMemberDown.uniqueAddress) + val reachability2 = reachability1.unreachable(aUp.uniqueAddress, dc2AMemberDown.uniqueAddress) val g2 = Gossip(members = SortedSet(aUp, dc2AMemberDown), overview = GossipOverview(reachability = reachability2)) diffUnreachableDataCenter( @@ -149,26 +151,22 @@ class ClusterDomainEventSpec extends WordSpec with 
Matchers { } "be produced for members becoming reachable after unreachable" in { - val reachability1 = Reachability.empty. - unreachable(aUp.uniqueAddress, cUp.uniqueAddress).reachable(aUp.uniqueAddress, cUp.uniqueAddress). - unreachable(aUp.uniqueAddress, eUp.uniqueAddress). - unreachable(aUp.uniqueAddress, bUp.uniqueAddress) + val reachability1 = Reachability.empty + .unreachable(aUp.uniqueAddress, cUp.uniqueAddress) + .reachable(aUp.uniqueAddress, cUp.uniqueAddress) + .unreachable(aUp.uniqueAddress, eUp.uniqueAddress) + .unreachable(aUp.uniqueAddress, bUp.uniqueAddress) val g1 = Gossip(members = SortedSet(aUp, bUp, cUp, eUp), overview = GossipOverview(reachability = reachability1)) - val reachability2 = reachability1. - unreachable(aUp.uniqueAddress, cUp.uniqueAddress). - reachable(aUp.uniqueAddress, bUp.uniqueAddress) + val reachability2 = + reachability1.unreachable(aUp.uniqueAddress, cUp.uniqueAddress).reachable(aUp.uniqueAddress, bUp.uniqueAddress) val g2 = Gossip(members = SortedSet(aUp, cUp, bUp, eUp), overview = GossipOverview(reachability = reachability2)) diffUnreachable(state(g1), state(g2)) should ===(Seq(UnreachableMember(cUp))) // never include self member in unreachable - diffUnreachable( - state(g1, cUp.uniqueAddress), - state(g2, cUp.uniqueAddress)) should ===(Seq()) + diffUnreachable(state(g1, cUp.uniqueAddress), state(g2, cUp.uniqueAddress)) should ===(Seq()) diffReachable(state(g1), state(g2)) should ===(Seq(ReachableMember(bUp))) // never include self member in reachable - diffReachable( - state(g1, bUp.uniqueAddress), - state(g2, bUp.uniqueAddress)) should ===(Seq()) + diffReachable(state(g1, bUp.uniqueAddress), state(g2, bUp.uniqueAddress)) should ===(Seq()) } "be produced for downed members" in { @@ -191,27 +189,33 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { "be produced for removed and rejoined member in another data center" in { val bUpDc2 = TestMember(bUp.address, Up, bRoles, dataCenter = "dc2") val bUpDc2Removed = 
TestMember(bUpDc2.address, Removed, bRoles, dataCenter = "dc2") - val bUpDc2Restarted = TestMember.withUniqueAddress(UniqueAddress(bUpDc2.address, 2L), Up, bRoles, dataCenter = "dc2") + val bUpDc2Restarted = + TestMember.withUniqueAddress(UniqueAddress(bUpDc2.address, 2L), Up, bRoles, dataCenter = "dc2") val g1 = Gossip(members = SortedSet(aUp, bUpDc2)) val g2 = g1 .remove(bUpDc2.uniqueAddress, System.currentTimeMillis()) // adds tombstone .copy(members = SortedSet(aUp, bUpDc2Restarted)) .merge(g1) - diffMemberEvents(state(g1), state(g2)) should ===(Seq( - MemberRemoved(bUpDc2Removed, Up), MemberUp(bUpDc2Restarted))) + diffMemberEvents(state(g1), state(g2)) should ===( + Seq(MemberRemoved(bUpDc2Removed, Up), MemberUp(bUpDc2Restarted))) } "be produced for convergence changes" in { - val g1 = Gossip(members = SortedSet(aUp, bUp, eJoining)).seen(aUp.uniqueAddress).seen(bUp.uniqueAddress).seen(eJoining.uniqueAddress) + val g1 = Gossip(members = SortedSet(aUp, bUp, eJoining)) + .seen(aUp.uniqueAddress) + .seen(bUp.uniqueAddress) + .seen(eJoining.uniqueAddress) val g2 = Gossip(members = SortedSet(aUp, bUp, eJoining)).seen(aUp.uniqueAddress).seen(bUp.uniqueAddress) diffMemberEvents(state(g1), state(g2)) should ===(Seq.empty) diffUnreachable(state(g1), state(g2)) should ===(Seq.empty) - diffSeen(state(g1), state(g2)) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address)))) + diffSeen(state(g1), state(g2)) should ===( + Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address)))) diffMemberEvents(state(g2), state(g1)) should ===(Seq.empty) diffUnreachable(state(g2), state(g1)) should ===(Seq.empty) - diffSeen(state(g2), state(g1)) should ===(Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address, eJoining.address)))) + diffSeen(state(g2), state(g1)) should ===( + Seq(SeenChanged(convergence = true, seenBy = Set(aUp.address, bUp.address, eJoining.address)))) } "be produced for leader changes" in { @@ -230,20 
+234,19 @@ class ClusterDomainEventSpec extends WordSpec with Matchers { val g2 = Gossip(members = SortedSet(bUp, cUp, dExiting, eJoining)) diffRolesLeader(state(g0), state(g1)) should ===( Set( - // since this role is implicitly added - RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(aUp.address)), - RoleLeaderChanged("AA", Some(aUp.address)), - RoleLeaderChanged("AB", Some(aUp.address)), - RoleLeaderChanged("BB", Some(bUp.address)), - RoleLeaderChanged("DD", Some(dLeaving.address)), - RoleLeaderChanged("DE", Some(dLeaving.address)), - RoleLeaderChanged("EE", Some(eUp.address)))) + // since this role is implicitly added + RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(aUp.address)), + RoleLeaderChanged("AA", Some(aUp.address)), + RoleLeaderChanged("AB", Some(aUp.address)), + RoleLeaderChanged("BB", Some(bUp.address)), + RoleLeaderChanged("DD", Some(dLeaving.address)), + RoleLeaderChanged("DE", Some(dLeaving.address)), + RoleLeaderChanged("EE", Some(eUp.address)))) diffRolesLeader(state(g1), state(g2)) should ===( - Set( - RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(bUp.address)), - RoleLeaderChanged("AA", None), - RoleLeaderChanged("AB", Some(bUp.address)), - RoleLeaderChanged("DE", Some(eJoining.address)))) + Set(RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(bUp.address)), + RoleLeaderChanged("AA", None), + RoleLeaderChanged("AB", Some(bUp.address)), + RoleLeaderChanged("DE", Some(eJoining.address)))) } "not be produced for role leader changes in other data centers" in { diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala index 31be6735be..ff062acebf 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala +++ 
b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala @@ -48,14 +48,18 @@ class ClusterHeartbeatSenderStateSpec extends WordSpec with Matchers { private def emptyState: ClusterHeartbeatSenderState = emptyState(aa) - private def emptyState(selfUniqueAddress: UniqueAddress) = ClusterHeartbeatSenderState( - ring = HeartbeatNodeRing(selfUniqueAddress, Set(selfUniqueAddress), Set.empty, monitoredByNrOfMembers = 3), - oldReceiversNowUnreachable = Set.empty[UniqueAddress], - failureDetector = new DefaultFailureDetectorRegistry[Address](() => new FailureDetectorStub)) + private def emptyState(selfUniqueAddress: UniqueAddress) = + ClusterHeartbeatSenderState( + ring = HeartbeatNodeRing(selfUniqueAddress, Set(selfUniqueAddress), Set.empty, monitoredByNrOfMembers = 3), + oldReceiversNowUnreachable = Set.empty[UniqueAddress], + failureDetector = new DefaultFailureDetectorRegistry[Address](() => new FailureDetectorStub)) private def fd(state: ClusterHeartbeatSenderState, node: UniqueAddress): FailureDetectorStub = - state.failureDetector.asInstanceOf[DefaultFailureDetectorRegistry[Address]].failureDetector(node.address). 
- get.asInstanceOf[FailureDetectorStub] + state.failureDetector + .asInstanceOf[DefaultFailureDetectorRegistry[Address]] + .failureDetector(node.address) + .get + .asInstanceOf[FailureDetectorStub] "A ClusterHeartbeatSenderState" must { @@ -94,7 +98,12 @@ class ClusterHeartbeatSenderStateSpec extends WordSpec with Matchers { "use specified number of members + unreachable" in { // they are sorted by the hash (uid) of the UniqueAddress - emptyState.addMember(cc).addMember(dd).addMember(bb).addMember(ee).unreachableMember(cc) + emptyState + .addMember(cc) + .addMember(dd) + .addMember(bb) + .addMember(ee) + .unreachableMember(cc) .activeReceivers should ===(Set(bb, cc, dd, ee)) } @@ -142,8 +151,9 @@ class ClusterHeartbeatSenderStateSpec extends WordSpec with Matchers { "behave correctly for random operations" in { val rnd = ThreadLocalRandom.current - val nodes = (1 to rnd.nextInt(10, 200)).map(n => - UniqueAddress(Address("akka.tcp", "sys", "n" + n, 2552), n.toLong)).toVector + val nodes = (1 to rnd.nextInt(10, 200)) + .map(n => UniqueAddress(Address("akka.tcp", "sys", "n" + n, 2552), n.toLong)) + .toVector def rndNode() = nodes(rnd.nextInt(0, nodes.size)) val selfUniqueAddress = rndNode() var state = emptyState(selfUniqueAddress) @@ -161,7 +171,7 @@ class ClusterHeartbeatSenderStateSpec extends WordSpec with Matchers { val oldUnreachable = state.oldReceiversNowUnreachable state = state.addMember(node) // keep unreachable - (oldUnreachable diff state.activeReceivers) should ===(Set.empty) + (oldUnreachable.diff(state.activeReceivers)) should ===(Set.empty) state.failureDetector.isMonitoring(node.address) should ===(false) state.failureDetector.isAvailable(node.address) should ===(true) } @@ -171,10 +181,9 @@ class ClusterHeartbeatSenderStateSpec extends WordSpec with Matchers { val oldUnreachable = state.oldReceiversNowUnreachable state = state.removeMember(node) // keep unreachable, unless it was the removed - if (oldUnreachable(node)) - (oldUnreachable diff 
state.activeReceivers) should ===(Set(node)) + if (oldUnreachable(node))(oldUnreachable.diff(state.activeReceivers)) should ===(Set(node)) else - (oldUnreachable diff state.activeReceivers) should ===(Set.empty) + (oldUnreachable.diff(state.activeReceivers)) should ===(Set.empty) state.failureDetector.isMonitoring(node.address) should ===(false) state.failureDetector.isAvailable(node.address) should ===(true) @@ -214,7 +223,8 @@ class ClusterHeartbeatSenderStateSpec extends WordSpec with Matchers { } } catch { case e: Throwable => - println(s"Failure context: i=$i, node=$node, op=$operation, " + + println( + s"Failure context: i=$i, node=$node, op=$operation, " + s"oldReceiversNowUnreachable=${state.oldReceiversNowUnreachable}, " + s"ringReceivers=${state.ring.myReceivers}, ringNodes=${state.ring.nodes}") throw e @@ -225,4 +235,3 @@ class ClusterHeartbeatSenderStateSpec extends WordSpec with Matchers { } } - diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterLogSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterLogSpec.scala index a6534805e7..6516168aa5 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterLogSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterLogSpec.scala @@ -48,15 +48,11 @@ abstract class ClusterLogSpec(config: Config) extends AkkaSpec(config) with Impl /** The expected log info pattern to intercept after a `cluster.join`. */ protected def join(expected: String): Unit = - EventFilter. - info(occurrences = 1, pattern = expected). - intercept(cluster.join(selfAddress)) + EventFilter.info(occurrences = 1, pattern = expected).intercept(cluster.join(selfAddress)) /** The expected log info pattern to intercept after a `cluster.down`. */ protected def down(expected: String): Unit = - EventFilter. - info(occurrences = 1, pattern = expected). 
- intercept(cluster.down(selfAddress)) + EventFilter.info(occurrences = 1, pattern = expected).intercept(cluster.down(selfAddress)) } class ClusterLogDefaultSpec extends ClusterLogSpec(ClusterLogSpec.config) { @@ -73,8 +69,7 @@ class ClusterLogDefaultSpec extends ClusterLogSpec(ClusterLogSpec.config) { } } -class ClusterLogVerboseDefaultSpec extends ClusterLogSpec( - ConfigFactory.parseString(ClusterLogSpec.config)) { +class ClusterLogVerboseDefaultSpec extends ClusterLogSpec(ConfigFactory.parseString(ClusterLogSpec.config)) { "A Cluster" must { @@ -87,9 +82,11 @@ class ClusterLogVerboseDefaultSpec extends ClusterLogSpec( } } -class ClusterLogVerboseEnabledSpec extends ClusterLogSpec( - ConfigFactory.parseString("akka.cluster.log-info-verbose = on"). - withFallback(ConfigFactory.parseString(ClusterLogSpec.config))) { +class ClusterLogVerboseEnabledSpec + extends ClusterLogSpec( + ConfigFactory + .parseString("akka.cluster.log-info-verbose = on") + .withFallback(ConfigFactory.parseString(ClusterLogSpec.config))) { "A Cluster" must { diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index e0b66178d0..d40d765e3b 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -114,7 +114,8 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { } "allow join and leave with local address" in { - val sys2 = ActorSystem("ClusterSpec2", ConfigFactory.parseString(""" + val sys2 = ActorSystem("ClusterSpec2", + ConfigFactory.parseString(""" akka.actor.provider = "cluster" akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 @@ -148,7 +149,8 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { } "leave via CoordinatedShutdown.run" in { - val sys2 = ActorSystem("ClusterSpec2", ConfigFactory.parseString(""" + val sys2 = ActorSystem("ClusterSpec2", + 
ConfigFactory.parseString(""" akka.actor.provider = "cluster" akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 @@ -163,10 +165,12 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { CoordinatedShutdown(sys2).run(CoordinatedShutdown.UnknownReason) probe.expectMsgType[MemberLeft] // MemberExited might not be published before MemberRemoved - val removed = probe.fishForMessage() { - case _: MemberExited => false - case _: MemberRemoved => true - }.asInstanceOf[MemberRemoved] + val removed = probe + .fishForMessage() { + case _: MemberExited => false + case _: MemberRemoved => true + } + .asInstanceOf[MemberRemoved] removed.previousStatus should ===(MemberStatus.Exiting) } finally { shutdown(sys2) @@ -174,7 +178,8 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { } "terminate ActorSystem via CoordinatedShutdown.run when a stream involving StreamRefs is running" in { - val sys2 = ActorSystem("ClusterSpec2", ConfigFactory.parseString(""" + val sys2 = ActorSystem("ClusterSpec2", + ConfigFactory.parseString(""" akka.actor.provider = "cluster" akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 @@ -193,10 +198,12 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { CoordinatedShutdown(sys2).run(CoordinatedShutdown.UnknownReason) probe.expectMsgType[MemberLeft] // MemberExited might not be published before MemberRemoved - val removed = probe.fishForMessage() { - case _: MemberExited => false - case _: MemberRemoved => true - }.asInstanceOf[MemberRemoved] + val removed = probe + .fishForMessage() { + case _: MemberExited => false + case _: MemberRemoved => true + } + .asInstanceOf[MemberRemoved] removed.previousStatus should ===(MemberStatus.Exiting) Await.result(sys2.whenTerminated, 10.seconds) @@ -208,7 +215,8 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { } "leave via CoordinatedShutdown.run when member status is Joining" 
in { - val sys2 = ActorSystem("ClusterSpec2", ConfigFactory.parseString(""" + val sys2 = ActorSystem("ClusterSpec2", + ConfigFactory.parseString(""" akka.actor.provider = "cluster" akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 @@ -224,10 +232,12 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { CoordinatedShutdown(sys2).run(CoordinatedShutdown.UnknownReason) probe.expectMsgType[MemberLeft] // MemberExited might not be published before MemberRemoved - val removed = probe.fishForMessage() { - case _: MemberExited => false - case _: MemberRemoved => true - }.asInstanceOf[MemberRemoved] + val removed = probe + .fishForMessage() { + case _: MemberExited => false + case _: MemberRemoved => true + } + .asInstanceOf[MemberRemoved] removed.previousStatus should ===(MemberStatus.Exiting) } finally { shutdown(sys2) @@ -235,7 +245,8 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { } "terminate ActorSystem via leave (CoordinatedShutdown)" in { - val sys2 = ActorSystem("ClusterSpec2", ConfigFactory.parseString(""" + val sys2 = ActorSystem("ClusterSpec2", + ConfigFactory.parseString(""" akka.actor.provider = "cluster" akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 @@ -251,10 +262,12 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { Cluster(sys2).leave(Cluster(sys2).selfAddress) probe.expectMsgType[MemberLeft] // MemberExited might not be published before MemberRemoved - val removed = probe.fishForMessage() { - case _: MemberExited => false - case _: MemberRemoved => true - }.asInstanceOf[MemberRemoved] + val removed = probe + .fishForMessage() { + case _: MemberExited => false + case _: MemberRemoved => true + } + .asInstanceOf[MemberRemoved] removed.previousStatus should ===(MemberStatus.Exiting) Await.result(sys2.whenTerminated, 10.seconds) Cluster(sys2).isTerminated should ===(true) @@ -265,7 +278,8 @@ class ClusterSpec extends 
AkkaSpec(ClusterSpec.config) with ImplicitSender { } "terminate ActorSystem via down (CoordinatedShutdown)" in { - val sys3 = ActorSystem("ClusterSpec3", ConfigFactory.parseString(""" + val sys3 = ActorSystem("ClusterSpec3", + ConfigFactory.parseString(""" akka.actor.provider = "cluster" akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 @@ -291,13 +305,11 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { } "register multiple cluster JMX MBeans with akka.cluster.jmx.multi-mbeans-in-same-jvm = on" in { - def getConfig = (port: Int) => ConfigFactory.parseString( - s""" + def getConfig = (port: Int) => ConfigFactory.parseString(s""" akka.cluster.jmx.multi-mbeans-in-same-jvm = on akka.remote.netty.tcp.port = ${port} akka.remote.artery.canonical.port = ${port} - """ - ).withFallback(ConfigFactory.parseString(ClusterSpec.config)) + """).withFallback(ConfigFactory.parseString(ClusterSpec.config)) val sys1 = ActorSystem("ClusterSpec4", getConfig(2552)) val sys2 = ActorSystem("ClusterSpec4", getConfig(2553)) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala index 586bfd3fa3..6bcc958306 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterTestKit.scala @@ -116,7 +116,9 @@ trait ClusterTestKit extends TestKitBase { // remove old before starting the new one cluster.leave(cluster.readView.selfAddress) - awaitCond(cluster.readView.status == Removed, message = s"awaiting node [${cluster.readView.selfAddress}] to be 'Removed'. Current status: [${cluster.readView.status}]") + awaitCond(cluster.readView.status == Removed, + message = + s"awaiting node [${cluster.readView.selfAddress}] to be 'Removed'. 
Current status: [${cluster.readView.status}]") shutdown(actorSystem) awaitCond(cluster.isTerminated) @@ -124,12 +126,10 @@ trait ClusterTestKit extends TestKitBase { // remove from internal list actorSystems = actorSystems.filterNot(_ == actorSystem) - val newConfig = ConfigFactory.parseString( - s""" + val newConfig = ConfigFactory.parseString(s""" akka.remote.netty.tcp.port = $port akka.remote.artery.canonical.port = $port - """ - ).withFallback(config) + """).withFallback(config) if (firstSeedNode) newActorSystemAsFirst(newConfig) else newActorSystem(newConfig) @@ -182,14 +182,13 @@ abstract class RollingUpgradeClusterSpec(config: Config) extends AkkaSpec(config * @param enforced toggle `akka.cluster.configuration-compatibility-check.enforce-on-join` on or off * @param shouldRejoin the condition being tested on attempted re-join: members up or terminated */ - def upgradeCluster( - clusterSize: Int, - baseConfig: Config, - upgradeConfig: Config, - timeout: FiniteDuration, - awaitAll: FiniteDuration, - enforced: Boolean, - shouldRejoin: Boolean): Unit = { + def upgradeCluster(clusterSize: Int, + baseConfig: Config, + upgradeConfig: Config, + timeout: FiniteDuration, + awaitAll: FiniteDuration, + enforced: Boolean, + shouldRejoin: Boolean): Unit = { require(clusterSize > 1, s"'clusterSize' must be > 1 but was $clusterSize") val util = new ClusterTestUtil(system.name) diff --git a/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala b/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala index 454403419d..5c753cb464 100644 --- a/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala @@ -33,8 +33,7 @@ class DummyDowningProvider(system: ActorSystem) extends DowningProvider { class DowningProviderSpec extends WordSpec with Matchers { - val baseConf = ConfigFactory.parseString( - """ + val baseConf = ConfigFactory.parseString(""" akka { loglevel = WARNING 
actor.provider = "cluster" @@ -56,8 +55,8 @@ class DowningProviderSpec extends WordSpec with Matchers { } "use akka.cluster.AutoDowning if 'auto-down-unreachable-after' is configured" in { - val system = ActorSystem("auto-downing", ConfigFactory.parseString( - """ + val system = ActorSystem("auto-downing", + ConfigFactory.parseString(""" akka.cluster.auto-down-unreachable-after = 18d """).withFallback(baseConf)) Cluster(system).downingProvider shouldBe an[AutoDowning] @@ -65,10 +64,12 @@ class DowningProviderSpec extends WordSpec with Matchers { } "use the specified downing provider" in { - val system = ActorSystem("auto-downing", ConfigFactory.parseString( - """ + val system = ActorSystem("auto-downing", + ConfigFactory + .parseString(""" akka.cluster.downing-provider-class="akka.cluster.DummyDowningProvider" - """).withFallback(baseConf)) + """) + .withFallback(baseConf)) Cluster(system).downingProvider shouldBe a[DummyDowningProvider] awaitCond(Cluster(system).downingProvider.asInstanceOf[DummyDowningProvider].actorPropsAccessed.get(), 3.seconds) @@ -76,10 +77,12 @@ class DowningProviderSpec extends WordSpec with Matchers { } "stop the cluster if the downing provider throws exception in props method" in { - val system = ActorSystem("auto-downing", ConfigFactory.parseString( - """ + val system = ActorSystem("auto-downing", + ConfigFactory + .parseString(""" akka.cluster.downing-provider-class="akka.cluster.FailingDowningProvider" - """).withFallback(baseConf)) + """) + .withFallback(baseConf)) val cluster = Cluster(system) cluster.join(cluster.selfAddress) diff --git a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala index 6f936cb032..c48b04a949 100644 --- a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala +++ b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala @@ -35,4 +35,3 @@ class FailureDetectorPuppet(config: Config, ev: 
EventStream) extends FailureDete override def heartbeat(): Unit = status.compareAndSet(Unknown, Up) } - diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 4fa6f8de1d..f91d9b93eb 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -34,7 +34,8 @@ class GossipSpec extends WordSpec with Matchers { val dc2d1 = TestMember(Address("akka.tcp", "sys", "d", 2552), Up, Set.empty, dataCenter = "dc2") val dc2d2 = TestMember(dc2d1.address, status = Down, roles = Set.empty, dataCenter = dc2d1.dataCenter) // restarted with another uid - val dc2d3 = TestMember.withUniqueAddress(UniqueAddress(dc2d1.address, longUid = 3L), Up, Set.empty, dataCenter = "dc2") + val dc2d3 = + TestMember.withUniqueAddress(UniqueAddress(dc2d1.address, longUid = 3L), Up, Set.empty, dataCenter = "dc2") private def state(g: Gossip, selfMember: Member = a1): MembershipState = MembershipState(g, selfMember.uniqueAddress, selfMember.dataCenter, crossDcConnections = 5) @@ -42,8 +43,7 @@ class GossipSpec extends WordSpec with Matchers { "A Gossip" must { "have correct test setup" in { - List(a1, a2, b1, b2, c1, c2, c3, d1, e1, e2, e3).foreach(m => - m.dataCenter should ===(DefaultDataCenter)) + List(a1, a2, b1, b2, c1, c2, c3, d1, e1, e2, e3).foreach(m => m.dataCenter should ===(DefaultDataCenter)) } "reach convergence when it's empty" in { @@ -87,14 +87,16 @@ class GossipSpec extends WordSpec with Matchers { // c1 is Leaving val r1 = Reachability.empty.unreachable(b1.uniqueAddress, c1.uniqueAddress) val g1 = Gossip(members = SortedSet(a1, b1, c1), overview = GossipOverview(reachability = r1)) - .seen(a1.uniqueAddress).seen(b1.uniqueAddress) + .seen(a1.uniqueAddress) + .seen(b1.uniqueAddress) state(g1).convergence(Set(c1.uniqueAddress)) should ===(true) } "not reach convergence when unreachable" in { val r1 = 
Reachability.empty.unreachable(b1.uniqueAddress, a1.uniqueAddress) val g1 = (Gossip(members = SortedSet(a1, b1), overview = GossipOverview(reachability = r1))) - .seen(a1.uniqueAddress).seen(b1.uniqueAddress) + .seen(a1.uniqueAddress) + .seen(b1.uniqueAddress) state(g1, b1).convergence(Set.empty) should ===(false) // but from a1's point of view (it knows that itself is not unreachable) state(g1).convergence(Set.empty) should ===(true) @@ -104,7 +106,9 @@ class GossipSpec extends WordSpec with Matchers { // e3 is Down val r1 = Reachability.empty.unreachable(e3.uniqueAddress, a1.uniqueAddress) val g1 = (Gossip(members = SortedSet(a1, b1, e3), overview = GossipOverview(reachability = r1))) - .seen(a1.uniqueAddress).seen(b1.uniqueAddress).seen(e3.uniqueAddress) + .seen(a1.uniqueAddress) + .seen(b1.uniqueAddress) + .seen(e3.uniqueAddress) state(g1, b1).convergence(Set.empty) should ===(true) } @@ -112,26 +116,28 @@ class GossipSpec extends WordSpec with Matchers { val g1 = Gossip(members = SortedSet(a1, c1, e1)) val g2 = Gossip(members = SortedSet(a2, c2, e2)) - val merged1 = g1 merge g2 + val merged1 = g1.merge(g2) merged1.members should ===(SortedSet(a2, c1, e1)) merged1.members.toSeq.map(_.status) should ===(Seq(Up, Leaving, Up)) - val merged2 = g2 merge g1 + val merged2 = g2.merge(g1) merged2.members should ===(SortedSet(a2, c1, e1)) merged2.members.toSeq.map(_.status) should ===(Seq(Up, Leaving, Up)) } "merge unreachable" in { - val r1 = Reachability.empty.unreachable(b1.uniqueAddress, a1.uniqueAddress).unreachable(b1.uniqueAddress, c1.uniqueAddress) + val r1 = Reachability.empty + .unreachable(b1.uniqueAddress, a1.uniqueAddress) + .unreachable(b1.uniqueAddress, c1.uniqueAddress) val g1 = Gossip(members = SortedSet(a1, b1, c1), overview = GossipOverview(reachability = r1)) val r2 = Reachability.empty.unreachable(a1.uniqueAddress, d1.uniqueAddress) val g2 = Gossip(members = SortedSet(a1, b1, c1, d1), overview = GossipOverview(reachability = r2)) - val merged1 = g1 
merge g2 + val merged1 = g1.merge(g2) merged1.overview.reachability.allUnreachable should ===(Set(a1.uniqueAddress, c1.uniqueAddress, d1.uniqueAddress)) - val merged2 = g2 merge g1 + val merged2 = g2.merge(g1) merged2.overview.reachability.allUnreachable should ===(merged1.overview.reachability.allUnreachable) } @@ -142,11 +148,11 @@ class GossipSpec extends WordSpec with Matchers { val r2 = r1.unreachable(b1.uniqueAddress, c3.uniqueAddress) val g2 = Gossip(members = SortedSet(a1, b1, c3), overview = GossipOverview(reachability = r2)) - val merged1 = g1 merge g2 + val merged1 = g1.merge(g2) merged1.members should ===(SortedSet(a1, b1)) merged1.overview.reachability.allUnreachable should ===(Set(a1.uniqueAddress)) - val merged2 = g2 merge g1 + val merged2 = g2.merge(g1) merged2.overview.reachability.allUnreachable should ===(merged1.overview.reachability.allUnreachable) merged2.members should ===(merged1.members) } @@ -185,30 +191,35 @@ class GossipSpec extends WordSpec with Matchers { val vclockNode = VectorClock.Node("something") val g1 = (Gossip(members = SortedSet(a1, b1, c1, d1)) :+ vclockNode).seen(a1.uniqueAddress).seen(b1.uniqueAddress) val g2 = (Gossip(members = SortedSet(a1, b1, c1, d1)) :+ vclockNode).seen(a1.uniqueAddress).seen(c1.uniqueAddress) - val g3 = (g1 copy (version = g2.version)).seen(d1.uniqueAddress) + val g3 = g1.copy(version = g2.version).seen(d1.uniqueAddress) def checkMerged(merged: Gossip): Unit = { val seen = merged.overview.seen.toSeq seen.length should ===(0) - merged seenByNode (a1.uniqueAddress) should ===(false) - merged seenByNode (b1.uniqueAddress) should ===(false) - merged seenByNode (c1.uniqueAddress) should ===(false) - merged seenByNode (d1.uniqueAddress) should ===(false) - merged seenByNode (e1.uniqueAddress) should ===(false) + merged.seenByNode(a1.uniqueAddress) should ===(false) + merged.seenByNode(b1.uniqueAddress) should ===(false) + merged.seenByNode(c1.uniqueAddress) should ===(false) + 
merged.seenByNode(d1.uniqueAddress) should ===(false) + merged.seenByNode(e1.uniqueAddress) should ===(false) } - checkMerged(g3 merge g2) - checkMerged(g2 merge g3) + checkMerged(g3.merge(g2)) + checkMerged(g2.merge(g3)) } "know who is youngest" in { // a2 and e1 is Joining - val g1 = Gossip(members = SortedSet(a2, b1.copyUp(3), e1), overview = GossipOverview(reachability = - Reachability.empty.unreachable(a2.uniqueAddress, e1.uniqueAddress))) + val g1 = + Gossip(members = SortedSet(a2, b1.copyUp(3), e1), + overview = + GossipOverview(reachability = Reachability.empty.unreachable(a2.uniqueAddress, e1.uniqueAddress))) state(g1).youngestMember should ===(b1) - val g2 = Gossip(members = SortedSet(a2, b1.copyUp(3), e1), overview = GossipOverview(reachability = - Reachability.empty.unreachable(a2.uniqueAddress, b1.uniqueAddress).unreachable(a2.uniqueAddress, e1.uniqueAddress))) + val g2 = Gossip(members = SortedSet(a2, b1.copyUp(3), e1), + overview = GossipOverview( + reachability = Reachability.empty + .unreachable(a2.uniqueAddress, b1.uniqueAddress) + .unreachable(a2.uniqueAddress, e1.uniqueAddress))) state(g2).youngestMember should ===(b1) val g3 = Gossip(members = SortedSet(a2, b1.copyUp(3), e2.copyUp(4))) state(g3).youngestMember should ===(e2) @@ -341,12 +352,11 @@ class GossipSpec extends WordSpec with Matchers { // TODO test coverage for when leaderOf returns None - I have not been able to figure it out "clear out a bunch of stuff when removing a node" in { - val g = Gossip( - members = SortedSet(dc1a1, dc1b1, dc2d2), - overview = GossipOverview(reachability = - Reachability.empty - .unreachable(dc1b1.uniqueAddress, dc2d2.uniqueAddress) - .unreachable(dc2d2.uniqueAddress, dc1b1.uniqueAddress))) + val g = Gossip(members = SortedSet(dc1a1, dc1b1, dc2d2), + overview = GossipOverview( + reachability = Reachability.empty + .unreachable(dc1b1.uniqueAddress, dc2d2.uniqueAddress) + .unreachable(dc2d2.uniqueAddress, dc1b1.uniqueAddress))) 
.:+(VectorClock.Node(Gossip.vclockName(dc1b1.uniqueAddress))) .:+(VectorClock.Node(Gossip.vclockName(dc2d2.uniqueAddress))) .remove(dc1b1.uniqueAddress, System.currentTimeMillis()) @@ -375,16 +385,18 @@ class GossipSpec extends WordSpec with Matchers { gdc2.tombstones.keys should contain(dc2d1.uniqueAddress) gdc2.members should not contain (dc2d1) - gdc2.overview.reachability.records.filter(r => r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) + gdc2.overview.reachability.records.filter(r => + r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) gdc2.overview.reachability.versions.keys should not contain (dc2d1.uniqueAddress) // when we merge the two, it should not be reintroduced - val merged1 = gdc2 merge gdc1 + val merged1 = gdc2.merge(gdc1) merged1.members should ===(SortedSet(dc1a1, dc1b1, dc2c1)) merged1.tombstones.keys should contain(dc2d1.uniqueAddress) merged1.members should not contain (dc2d1) - merged1.overview.reachability.records.filter(r => r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) + merged1.overview.reachability.records.filter(r => + r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) merged1.overview.reachability.versions.keys should not contain (dc2d1.uniqueAddress) merged1.version.versions.keys should not contain (VectorClock.Node(vclockName(dc2d1.uniqueAddress))) } @@ -407,14 +419,15 @@ class GossipSpec extends WordSpec with Matchers { gdc2.members.map(_.uniqueAddress) should contain(dc2d3.uniqueAddress) // when we merge the two, it should replace the old with new - val merged1 = gdc2 merge gdc1 + val merged1 = gdc2.merge(gdc1) merged1.members should ===(SortedSet(dc1a1, dc1b1, dc2c1, dc2d3)) merged1.members.map(_.uniqueAddress) should not contain (dc2d1.uniqueAddress) merged1.members.map(_.uniqueAddress) should contain(dc2d3.uniqueAddress) merged1.tombstones.keys should 
contain(dc2d1.uniqueAddress) merged1.tombstones.keys should not contain (dc2d3.uniqueAddress) - merged1.overview.reachability.records.filter(r => r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) + merged1.overview.reachability.records.filter(r => + r.subject == dc2d1.uniqueAddress || r.observer == dc2d1.uniqueAddress) should be(empty) merged1.overview.reachability.versions.keys should not contain (dc2d1.uniqueAddress) merged1.version.versions.keys should not contain (VectorClock.Node(vclockName(dc2d1.uniqueAddress))) } @@ -440,18 +453,16 @@ class GossipSpec extends WordSpec with Matchers { gdc2.version.versions.keySet should not contain (VectorClock.Node(vclockName(dc2c1.uniqueAddress))) // when we merge the two, the nodes should not be reintroduced - val merged1 = gdc2 merge gdc1 + val merged1 = gdc2.merge(gdc1) merged1.members should ===(SortedSet(dc1a1, dc2d1)) - merged1.version.versions.keySet should ===(Set( - VectorClock.Node(vclockName(dc1a1.uniqueAddress)), - VectorClock.Node(vclockName(dc2d1.uniqueAddress)))) + merged1.version.versions.keySet should ===( + Set(VectorClock.Node(vclockName(dc1a1.uniqueAddress)), VectorClock.Node(vclockName(dc2d1.uniqueAddress)))) } "prune old tombstones" in { val timestamp = 352684800 - val g = Gossip(members = SortedSet(dc1a1, dc1b1)) - .remove(dc1b1.uniqueAddress, timestamp) + val g = Gossip(members = SortedSet(dc1a1, dc1b1)).remove(dc1b1.uniqueAddress, timestamp) g.tombstones.keys should contain(dc1b1.uniqueAddress) @@ -462,10 +473,8 @@ class GossipSpec extends WordSpec with Matchers { } "mark a node as down" in { - val g = Gossip(members = SortedSet(dc1a1, dc1b1)) - .seen(dc1a1.uniqueAddress) - .seen(dc1b1.uniqueAddress) - .markAsDown(dc1b1) + val g = + Gossip(members = SortedSet(dc1a1, dc1b1)).seen(dc1a1.uniqueAddress).seen(dc1b1.uniqueAddress).markAsDown(dc1b1) g.member(dc1b1.uniqueAddress).status should ===(MemberStatus.Down) g.overview.seen should not contain 
(dc1b1.uniqueAddress) diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala index ce7b176e7b..237e34d98c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipTargetSelectorSpec.scala @@ -25,10 +25,8 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { val iDc4 = TestMember(Address("akka.tcp", "sys", "i", 2552), Up, Set.empty, dataCenter = "dc4") - val defaultSelector = new GossipTargetSelector( - reduceGossipDifferentViewProbability = 400, - crossDcGossipProbability = 0.2 - ) + val defaultSelector = + new GossipTargetSelector(reduceGossipDifferentViewProbability = 400, crossDcGossipProbability = 0.2) "The gossip target selection" should { @@ -46,7 +44,8 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { override protected def selectDcLocalNodes(s: MembershipState): Boolean = true } - val state = MembershipState(Gossip(SortedSet(aDc1, bDc1, eDc2, fDc2)), aDc1, aDc1.dataCenter, crossDcConnections = 5) + val state = + MembershipState(Gossip(SortedSet(aDc1, bDc1, eDc2, fDc2)), aDc1, aDc1.dataCenter, crossDcConnections = 5) val gossipTo = alwaysLocalSelector.gossipTargets(state) // only one other local node @@ -58,11 +57,12 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { override protected def selectDcLocalNodes(s: MembershipState): Boolean = false } - val state = MembershipState(Gossip(SortedSet(aDc1, bDc1, eDc2, fDc2)), aDc1, aDc1.dataCenter, crossDcConnections = 5) + val state = + MembershipState(Gossip(SortedSet(aDc1, bDc1, eDc2, fDc2)), aDc1, aDc1.dataCenter, crossDcConnections = 5) val gossipTo = alwaysCrossDcSelector.gossipTargets(state) // only one other local node - gossipTo should (contain(eDc2.uniqueAddress) or contain(fDc2.uniqueAddress)) + gossipTo should (contain(eDc2.uniqueAddress).or(contain(fDc2.uniqueAddress))) } 
"select local nodes that hasn't seen the gossip when chance says so" in { @@ -70,12 +70,8 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { override protected def preferNodesWithDifferentView(s: MembershipState): Boolean = true } - val state = MembershipState( - Gossip(SortedSet(aDc1, bDc1, cDc1)).seen(bDc1), - aDc1, - aDc1.dataCenter, - crossDcConnections = 5 - ) + val state = + MembershipState(Gossip(SortedSet(aDc1, bDc1, cDc1)).seen(bDc1), aDc1, aDc1.dataCenter, crossDcConnections = 5) val gossipTo = alwaysLocalSelector.gossipTargets(state) // a1 is self, b1 has seen so only option is c1 @@ -87,12 +83,8 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { override protected def preferNodesWithDifferentView(s: MembershipState): Boolean = false } - val state = MembershipState( - Gossip(SortedSet(aDc1, bDc1, cDc1)).seen(bDc1), - aDc1, - aDc1.dataCenter, - crossDcConnections = 5 - ) + val state = + MembershipState(Gossip(SortedSet(aDc1, bDc1, cDc1)).seen(bDc1), aDc1, aDc1.dataCenter, crossDcConnections = 5) val gossipTo = alwaysLocalSelector.gossipTargets(state) // a1 is self, b1 is the only that has seen @@ -105,10 +97,8 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { } val state = MembershipState( - Gossip( - members = SortedSet(aDc1, bDc1, cDc1), - overview = GossipOverview( - reachability = Reachability.empty.unreachable(aDc1, bDc1))), + Gossip(members = SortedSet(aDc1, bDc1, cDc1), + overview = GossipOverview(reachability = Reachability.empty.unreachable(aDc1, bDc1))), aDc1, aDc1.dataCenter, crossDcConnections = 5) @@ -124,10 +114,9 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { } val state = MembershipState( - Gossip( - members = SortedSet(aDc1, bDc1, cDc1), - overview = GossipOverview( - reachability = Reachability.empty.unreachable(aDc1, bDc1).unreachable(bDc1, cDc1))), + Gossip(members = SortedSet(aDc1, bDc1, cDc1), + overview = + GossipOverview(reachability = 
Reachability.empty.unreachable(aDc1, bDc1).unreachable(bDc1, cDc1))), aDc1, aDc1.dataCenter, crossDcConnections = 5) @@ -144,16 +133,13 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { override protected def dcsInRandomOrder(dcs: List[DataCenter]): List[DataCenter] = dcs.sorted // sort on name } - val state = MembershipState( - Gossip( - members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3), - overview = GossipOverview( - reachability = Reachability.empty - .unreachable(aDc1, eDc2) - .unreachable(aDc1, fDc2))), - aDc1, - aDc1.dataCenter, - crossDcConnections = 5) + val state = MembershipState(Gossip(members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3), + overview = GossipOverview( + reachability = + Reachability.empty.unreachable(aDc1, eDc2).unreachable(aDc1, fDc2))), + aDc1, + aDc1.dataCenter, + crossDcConnections = 5) val gossipTo = selector.gossipTargets(state) gossipTo should ===(Vector[UniqueAddress](gDc3, hDc3)) } @@ -164,13 +150,10 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { override protected def dcsInRandomOrder(dcs: List[DataCenter]): List[DataCenter] = dcs.sorted // sort on name } - val state = MembershipState( - Gossip( - members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3) - ).seen(fDc2).seen(hDc3), - aDc1, - aDc1.dataCenter, - crossDcConnections = 5) + val state = MembershipState(Gossip(members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3)).seen(fDc2).seen(hDc3), + aDc1, + aDc1.dataCenter, + crossDcConnections = 5) val gossipTo = selector.gossipTargets(state) gossipTo should ===(Vector[UniqueAddress](eDc2, fDc2)) } @@ -181,23 +164,19 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { override protected def dcsInRandomOrder(dcs: List[DataCenter]): List[DataCenter] = dcs.sorted // sort on name } - val state = MembershipState( - Gossip( - members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3)), - aDc1, - aDc1.dataCenter, - crossDcConnections = 1) + val state = 
MembershipState(Gossip(members = SortedSet(aDc1, bDc1, eDc2, fDc2, gDc3, hDc3)), + aDc1, + aDc1.dataCenter, + crossDcConnections = 1) val gossipTo = selector.gossipTargets(state) gossipTo should ===(Vector[UniqueAddress](eDc2)) } "select N random local nodes when single dc" in { - val state = MembershipState( - Gossip( - members = SortedSet(aDc1, bDc1, cDc1)), - aDc1, - aDc1.dataCenter, - crossDcConnections = 1) // means only a e and g are oldest + val state = MembershipState(Gossip(members = SortedSet(aDc1, bDc1, cDc1)), + aDc1, + aDc1.dataCenter, + crossDcConnections = 1) // means only a e and g are oldest val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3) @@ -205,12 +184,10 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { } "select N random local nodes when not self among oldest" in { - val state = MembershipState( - Gossip( - members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2, gDc3, hDc3)), - bDc1, - bDc1.dataCenter, - crossDcConnections = 1) // means only a, e and g are oldest + val state = MembershipState(Gossip(members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2, gDc3, hDc3)), + bDc1, + bDc1.dataCenter, + crossDcConnections = 1) // means only a, e and g are oldest val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3) @@ -218,12 +195,10 @@ class GossipTargetSelectorSpec extends WordSpec with Matchers { } "select N-1 random local nodes plus one cross dc oldest node when self among oldest" in { - val state = MembershipState( - Gossip( - members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2)), - aDc1, - aDc1.dataCenter, - crossDcConnections = 1) // means only a and e are oldest + val state = MembershipState(Gossip(members = SortedSet(aDc1, bDc1, cDc1, eDc2, fDc2)), + aDc1, + aDc1.dataCenter, + crossDcConnections = 1) // means only a and e are oldest val randomNodes = defaultSelector.randomNodesForFullGossip(state, 3) diff --git a/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala 
b/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala index d999321d2a..39764df1d1 100644 --- a/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala @@ -25,7 +25,7 @@ class HeartbeatNodeRingSpec extends WordSpec with Matchers { val ring = HeartbeatNodeRing(cc, nodes, Set.empty, 3) ring.myReceivers should ===(ring.receivers(cc)) - nodes foreach { n => + nodes.foreach { n => val receivers = ring.receivers(n) receivers.size should ===(3) receivers should not contain (n) diff --git a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala index db34b2ec17..9da7731b65 100644 --- a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerRollingUpdateSpec.scala @@ -12,8 +12,7 @@ import com.typesafe.config.{ Config, ConfigFactory } object JoinConfigCompatCheckerRollingUpdateSpec { - val baseConfig = ConfigFactory.parseString( - s""" + val baseConfig = ConfigFactory.parseString(s""" akka.log-dead-letters = off akka.log-dead-letters-during-shutdown = off akka.remote.log-remote-lifecycle-events = off @@ -24,13 +23,11 @@ object JoinConfigCompatCheckerRollingUpdateSpec { akka.cluster.periodic-tasks-initial-delay = 100 ms akka.cluster.publish-stats-interval = 0 s failure-detector.heartbeat-interval = 100 ms - """) - .withFallback(JoinConfigCompatCheckerSpec.baseConfig) + """).withFallback(JoinConfigCompatCheckerSpec.baseConfig) val v1Config: Config = baseConfig.withFallback(JoinConfigCompatCheckerSpec.configWithChecker) - private val v2 = ConfigFactory.parseString( - """ + private val v2 = ConfigFactory.parseString(""" akka.cluster.new-configuration = "v2" akka.cluster.configuration-compatibility-check.checkers { rolling-upgrade-test = 
"akka.cluster.JoinConfigCompatRollingUpdateChecker" @@ -43,8 +40,8 @@ object JoinConfigCompatCheckerRollingUpdateSpec { } -class JoinConfigCompatCheckerRollingUpdateSpec extends RollingUpgradeClusterSpec( - JoinConfigCompatCheckerRollingUpdateSpec.v1Config) { +class JoinConfigCompatCheckerRollingUpdateSpec + extends RollingUpgradeClusterSpec(JoinConfigCompatCheckerRollingUpdateSpec.v1Config) { import JoinConfigCompatCheckerRollingUpdateSpec._ diff --git a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerSpec.scala index 5c1e2d729a..f018a1e07e 100644 --- a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatCheckerSpec.scala @@ -13,19 +13,16 @@ import scala.collection.{ immutable => im } object JoinConfigCompatCheckerSpec { val baseConfig: Config = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.actor.provider = "cluster" akka.coordinated-shutdown.terminate-actor-system = on akka.remote.netty.tcp.port = 0 akka.remote.artery.canonical.port = 0 akka.cluster.jmx.multi-mbeans-in-same-jvm = on - """ - ) + """) val configWithChecker: Config = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { config-compat-test = "test" sensitive.properties { @@ -43,8 +40,7 @@ object JoinConfigCompatCheckerSpec { } } } - """ - ).withFallback(baseConfig) + """).withFallback(baseConfig) } class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { @@ -71,8 +67,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { "NOT be allowed to join a cluster when its configuration is incompatible" taggedAs LongRunningTest in { // this config is NOT compatible with the cluster config val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { # this config is incompatible @@ -84,8 +79,7 @@ class 
JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { } } } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -113,8 +107,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // because there is one missing required configuration property. // This test verifies that cluster config are being sent back and checked on joining node as well val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { # this config is not available on cluster side @@ -128,8 +121,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { } } } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -156,8 +148,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // because there is one missing required configuration property. // This test verifies that cluster config are being sent back and checked on joining node as well val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { # this config is required on cluster side @@ -167,8 +158,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { enforce-on-join = on } } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -195,8 +185,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // because there is one missing required configuration property. 
// This test verifies that validation on joining side takes 'configuration-compatibility-check.enforce-on-join' in consideration val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { # this config is not available on cluster side @@ -210,8 +199,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { } } } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -233,8 +221,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // this config is NOT compatible with the cluster config, // but node will ignore the the config check and join anyway val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { configuration-compatibility-check { @@ -247,8 +234,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { # this config is incompatible config-compat-test = "test2" } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -269,8 +255,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { "NOT be allowed to join a cluster using a different value for akka.cluster.downing-provider-class" taggedAs LongRunningTest in { val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { # using explicit downing provider class @@ -280,8 +265,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { enforce-on-join = on } } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -335,8 +319,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { "NOT be allowed to re-join a cluster when its configuration is incompatible" taggedAs LongRunningTest in { // this config is NOT compatible with the cluster config val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { # this config 
is incompatible @@ -348,8 +331,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { } } } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -383,8 +365,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // because there is one missing required configuration property. // This test verifies that cluster config are being sent back and checked on joining node as well val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { # this config is not available on cluster side @@ -398,8 +379,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { } } } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -433,8 +413,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // because there is one missing required configuration property. // This test verifies that cluster config are being sent back and checked on joining node as well val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { # this config is required on cluster side @@ -444,8 +423,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { enforce-on-join = on } } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -479,8 +457,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // because there is one missing required configuration property. 
// This test verifies that validation on joining side takes 'configuration-compatibility-check.enforce-on-join' in consideration val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { # this config is not available on cluster side @@ -494,8 +471,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { } } } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -527,8 +503,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // this config is NOT compatible with the cluster config, // but node will ignore the the config check and join anyway val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { configuration-compatibility-check { @@ -541,8 +516,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { # this config is incompatible config-compat-test = "test2" } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -578,8 +552,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { // the cluster will ignore them, because they are on the sensitive-config-path // the cluster won't let it be leaked back to the joining node neither which will fail the join attempt. 
val joinNodeConfig = - ConfigFactory.parseString( - """ + ConfigFactory.parseString(""" akka.cluster { # these config are compatible, @@ -604,8 +577,7 @@ class JoinConfigCompatCheckerSpec extends AkkaSpec with ClusterTestKit { } } } - """ - ) + """) val clusterTestUtil = new ClusterTestUtil(system.name) // first node @@ -643,7 +615,8 @@ class JoinConfigCompatCheckerExtraTest extends JoinConfigCompatChecker { /** Rogue checker that tries to leak sensitive information */ class RogueJoinConfigCompatCheckerTest extends JoinConfigCompatChecker { - override def requiredKeys = im.Seq("akka.cluster.sensitive.properties.password", "akka.cluster.sensitive.properties.username") + override def requiredKeys = + im.Seq("akka.cluster.sensitive.properties.password", "akka.cluster.sensitive.properties.username") /** this check always returns Valid. The goal is to try to make the cluster leak those properties */ override def check(toValidate: Config, actualConfig: Config): ConfigValidation = diff --git a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatPreDefinedChecksSpec.scala b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatPreDefinedChecksSpec.scala index 512b6775c2..8c821e7820 100644 --- a/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatPreDefinedChecksSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/JoinConfigCompatPreDefinedChecksSpec.scala @@ -14,26 +14,21 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers { // Test for some of the pre-build helpers we offer "JoinConfigCompatChecker.exists" must { - val requiredKeys = im.Seq( - "akka.cluster.min-nr-of-members", - "akka.cluster.retry-unsuccessful-join-after", - "akka.cluster.allow-weakly-up-members" - ) + val requiredKeys = im.Seq("akka.cluster.min-nr-of-members", + "akka.cluster.retry-unsuccessful-join-after", + "akka.cluster.allow-weakly-up-members") "pass when all required keys are provided" in { val result = - JoinConfigCompatChecker.exists( - requiredKeys, - 
config( - """ + JoinConfigCompatChecker.exists(requiredKeys, + config(""" |{ | akka.cluster.min-nr-of-members = 1 | akka.cluster.retry-unsuccessful-join-after = 10s | akka.cluster.allow-weakly-up-members = on |} - """.stripMargin) - ) + """.stripMargin)) result shouldBe Valid } @@ -41,15 +36,12 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers { "fail when some required keys are NOT provided" in { val Invalid(incompatibleKeys) = - JoinConfigCompatChecker.exists( - requiredKeys, - config( - """ + JoinConfigCompatChecker.exists(requiredKeys, + config(""" |{ | akka.cluster.min-nr-of-members = 1 |} - """.stripMargin) - ) + """.stripMargin)) incompatibleKeys should have size 2 incompatibleKeys should contain("akka.cluster.retry-unsuccessful-join-after is missing") @@ -59,15 +51,12 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers { "JoinConfigCompatChecker.fullMatch" must { - val requiredKeys = im.Seq( - "akka.cluster.min-nr-of-members", - "akka.cluster.retry-unsuccessful-join-after", - "akka.cluster.allow-weakly-up-members" - ) + val requiredKeys = im.Seq("akka.cluster.min-nr-of-members", + "akka.cluster.retry-unsuccessful-join-after", + "akka.cluster.allow-weakly-up-members") val clusterConfig = - config( - """ + config(""" |{ | akka.cluster.min-nr-of-members = 1 | akka.cluster.retry-unsuccessful-join-after = 10s @@ -78,18 +67,15 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers { "pass when all required keys are provided and all match cluster config" in { val result = - JoinConfigCompatChecker.fullMatch( - requiredKeys, - config( - """ + JoinConfigCompatChecker.fullMatch(requiredKeys, + config(""" |{ | akka.cluster.min-nr-of-members = 1 | akka.cluster.retry-unsuccessful-join-after = 10s | akka.cluster.allow-weakly-up-members = on |} """.stripMargin), - clusterConfig - ) + clusterConfig) result shouldBe Valid } @@ -97,16 +83,13 @@ class JoinConfigCompatPreDefinedChecksSpec extends 
WordSpec with Matchers { "fail when some required keys are NOT provided" in { val Invalid(incompatibleKeys) = - JoinConfigCompatChecker.fullMatch( - requiredKeys, - config( - """ + JoinConfigCompatChecker.fullMatch(requiredKeys, + config(""" |{ | akka.cluster.min-nr-of-members = 1 |} """.stripMargin), - clusterConfig - ) + clusterConfig) incompatibleKeys should have size 2 incompatibleKeys should contain("akka.cluster.retry-unsuccessful-join-after is missing") @@ -116,18 +99,15 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers { "fail when all required keys are passed, but some values don't match cluster config" in { val Invalid(incompatibleKeys) = - JoinConfigCompatChecker.fullMatch( - requiredKeys, - config( - """ + JoinConfigCompatChecker.fullMatch(requiredKeys, + config(""" |{ | akka.cluster.min-nr-of-members = 1 | akka.cluster.retry-unsuccessful-join-after = 15s | akka.cluster.allow-weakly-up-members = off |} """.stripMargin), - clusterConfig - ) + clusterConfig) incompatibleKeys should have size 2 incompatibleKeys should contain("akka.cluster.retry-unsuccessful-join-after is incompatible") @@ -137,17 +117,14 @@ class JoinConfigCompatPreDefinedChecksSpec extends WordSpec with Matchers { "fail when all required keys are passed, but some are missing and others don't match cluster config" in { val Invalid(incompatibleKeys) = - JoinConfigCompatChecker.fullMatch( - requiredKeys, - config( - """ + JoinConfigCompatChecker.fullMatch(requiredKeys, + config(""" |{ | akka.cluster.min-nr-of-members = 1 | akka.cluster.allow-weakly-up-members = off |} """.stripMargin), - clusterConfig - ) + clusterConfig) incompatibleKeys should have size 2 incompatibleKeys should contain("akka.cluster.retry-unsuccessful-join-after is missing") diff --git a/akka-cluster/src/test/scala/akka/cluster/MembershipStateSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MembershipStateSpec.scala index 12e33274b7..2e105416d5 100644 --- 
a/akka-cluster/src/test/scala/akka/cluster/MembershipStateSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MembershipStateSpec.scala @@ -28,28 +28,16 @@ class MembershipStateSpec extends WordSpec with Matchers { "Membership state" must { "sort by upNumber for oldest top members" in { val gossip = Gossip(SortedSet(a1, a2, a3, a4, b1, b2, b3, bOldest)) - val membershipState = MembershipState( - gossip, - a1.uniqueAddress, - "dc-a", - 2 - ) + val membershipState = MembershipState(gossip, a1.uniqueAddress, "dc-a", 2) - membershipState.ageSortedTopOldestMembersPerDc should equal(Map( - "dc-a" -> SortedSet(a1, a2), - "dc-b" -> SortedSet(bOldest, b1) - )) + membershipState.ageSortedTopOldestMembersPerDc should equal( + Map("dc-a" -> SortedSet(a1, a2), "dc-b" -> SortedSet(bOldest, b1))) } "find two oldest as targets for Exiting change" in { val a1Exiting = a1.copy(MemberStatus.Leaving).copy(MemberStatus.Exiting) val gossip = Gossip(SortedSet(a1Exiting, a2, a3, a4)) - val membershipState = MembershipState( - gossip, - a1.uniqueAddress, - "dc-a", - 2 - ) + val membershipState = MembershipState(gossip, a1.uniqueAddress, "dc-a", 2) membershipState.gossipTargetsForExitingMembers(Set(a1Exiting)) should ===(Set(a1Exiting, a2)) } @@ -57,32 +45,50 @@ class MembershipStateSpec extends WordSpec with Matchers { "find two oldest in DC as targets for Exiting change" in { val a4Exiting = a4.copy(MemberStatus.Leaving).copy(MemberStatus.Exiting) val gossip = Gossip(SortedSet(a2, a3, a4Exiting, b1, b2)) - val membershipState = MembershipState( - gossip, - a1.uniqueAddress, - "dc-a", - 2 - ) + val membershipState = MembershipState(gossip, a1.uniqueAddress, "dc-a", 2) membershipState.gossipTargetsForExitingMembers(Set(a4Exiting)) should ===(Set(a2, a3)) } "find two oldest per role as targets for Exiting change" in { - val a5 = TestMember(Address("akka.tcp", "sys", "a5", 2552), MemberStatus.Exiting, roles = Set("role1", "role2"), upNumber = 5, dataCenter = "dc-a") - val a6 = 
TestMember(Address("akka.tcp", "sys", "a6", 2552), MemberStatus.Exiting, roles = Set("role1", "role3"), upNumber = 6, dataCenter = "dc-a") - val a7 = TestMember(Address("akka.tcp", "sys", "a7", 2552), MemberStatus.Exiting, roles = Set("role1"), upNumber = 7, dataCenter = "dc-a") - val a8 = TestMember(Address("akka.tcp", "sys", "a8", 2552), MemberStatus.Exiting, roles = Set("role1"), upNumber = 8, dataCenter = "dc-a") - val a9 = TestMember(Address("akka.tcp", "sys", "a9", 2552), MemberStatus.Exiting, roles = Set("role2"), upNumber = 9, dataCenter = "dc-a") - val b5 = TestMember(Address("akka.tcp", "sys", "b5", 2552), MemberStatus.Exiting, roles = Set("role1"), upNumber = 5, dataCenter = "dc-b") - val b6 = TestMember(Address("akka.tcp", "sys", "b6", 2552), MemberStatus.Exiting, roles = Set("role2"), upNumber = 6, dataCenter = "dc-b") + val a5 = TestMember(Address("akka.tcp", "sys", "a5", 2552), + MemberStatus.Exiting, + roles = Set("role1", "role2"), + upNumber = 5, + dataCenter = "dc-a") + val a6 = TestMember(Address("akka.tcp", "sys", "a6", 2552), + MemberStatus.Exiting, + roles = Set("role1", "role3"), + upNumber = 6, + dataCenter = "dc-a") + val a7 = TestMember(Address("akka.tcp", "sys", "a7", 2552), + MemberStatus.Exiting, + roles = Set("role1"), + upNumber = 7, + dataCenter = "dc-a") + val a8 = TestMember(Address("akka.tcp", "sys", "a8", 2552), + MemberStatus.Exiting, + roles = Set("role1"), + upNumber = 8, + dataCenter = "dc-a") + val a9 = TestMember(Address("akka.tcp", "sys", "a9", 2552), + MemberStatus.Exiting, + roles = Set("role2"), + upNumber = 9, + dataCenter = "dc-a") + val b5 = TestMember(Address("akka.tcp", "sys", "b5", 2552), + MemberStatus.Exiting, + roles = Set("role1"), + upNumber = 5, + dataCenter = "dc-b") + val b6 = TestMember(Address("akka.tcp", "sys", "b6", 2552), + MemberStatus.Exiting, + roles = Set("role2"), + upNumber = 6, + dataCenter = "dc-b") val theExiting = Set(a5, a6) val gossip = Gossip(SortedSet(a1, a2, a3, a4, a5, a6, a7, a8, a9, 
b1, b2, b3, b5, b6)) - val membershipState = MembershipState( - gossip, - a1.uniqueAddress, - "dc-a", - 2 - ) + val membershipState = MembershipState(gossip, a1.uniqueAddress, "dc-a", 2) membershipState.gossipTargetsForExitingMembers(theExiting) should ===(Set(a1, a2, a5, a6, a9)) } diff --git a/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala index e8b74e7cc7..6462030529 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala @@ -40,7 +40,10 @@ class ReachabilityPerfSpec extends WordSpec with Matchers { val reachability3 = addUnreachable(reachability1, nodesSize / 2) val allowed = reachability1.versions.keySet - private def checkThunkFor(r1: Reachability, r2: Reachability, thunk: (Reachability, Reachability) => Unit, times: Int): Unit = { + private def checkThunkFor(r1: Reachability, + r2: Reachability, + thunk: (Reachability, Reachability) => Unit, + times: Int): Unit = { for (i <- 1 to times) { thunk(Reachability(r1.records, r1.versions), Reachability(r2.records, r2.versions)) } diff --git a/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala b/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala index 31ab77e4b3..559f51ba98 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ReachabilitySpec.scala @@ -10,7 +10,7 @@ import akka.actor.Address class ReachabilitySpec extends WordSpec with Matchers { - import Reachability.{ Reachable, Unreachable, Terminated, Record } + import Reachability.{ Reachable, Record, Terminated, Unreachable } val nodeA = UniqueAddress(Address("akka.tcp", "sys", "a", 2552), 1L) val nodeB = UniqueAddress(Address("akka.tcp", "sys", "b", 2552), 2L) @@ -42,13 +42,13 @@ class ReachabilitySpec extends WordSpec with Matchers { "not change terminated entry" in { val r = 
Reachability.empty.terminated(nodeB, nodeA) - r.reachable(nodeB, nodeA) should be theSameInstanceAs (r) - r.unreachable(nodeB, nodeA) should be theSameInstanceAs (r) + (r.reachable(nodeB, nodeA) should be).theSameInstanceAs(r) + (r.unreachable(nodeB, nodeA) should be).theSameInstanceAs(r) } "not change when same status" in { val r = Reachability.empty.unreachable(nodeB, nodeA) - r.unreachable(nodeB, nodeA) should be theSameInstanceAs (r) + (r.unreachable(nodeB, nodeA) should be).theSameInstanceAs(r) } "be unreachable when some observed unreachable and others reachable" in { @@ -57,17 +57,23 @@ class ReachabilitySpec extends WordSpec with Matchers { } "be reachable when all observed reachable again" in { - val r = Reachability.empty.unreachable(nodeB, nodeA).unreachable(nodeC, nodeA). - reachable(nodeB, nodeA).reachable(nodeC, nodeA). - unreachable(nodeB, nodeC).unreachable(nodeC, nodeB) + val r = Reachability.empty + .unreachable(nodeB, nodeA) + .unreachable(nodeC, nodeA) + .reachable(nodeB, nodeA) + .reachable(nodeC, nodeA) + .unreachable(nodeB, nodeC) + .unreachable(nodeC, nodeB) r.isReachable(nodeA) should ===(true) } "exclude observations from specific (downed) nodes" in { - val r = Reachability.empty. - unreachable(nodeC, nodeA).reachable(nodeC, nodeA). - unreachable(nodeC, nodeB). - unreachable(nodeB, nodeA).unreachable(nodeB, nodeC) + val r = Reachability.empty + .unreachable(nodeC, nodeA) + .reachable(nodeC, nodeA) + .unreachable(nodeC, nodeB) + .unreachable(nodeB, nodeA) + .unreachable(nodeB, nodeC) r.isReachable(nodeA) should ===(false) r.isReachable(nodeB) should ===(false) @@ -77,27 +83,28 @@ class ReachabilitySpec extends WordSpec with Matchers { } "be pruned when all records of an observer are Reachable" in { - val r = Reachability.empty. - unreachable(nodeB, nodeA).unreachable(nodeB, nodeC). - unreachable(nodeD, nodeC). 
- reachable(nodeB, nodeA).reachable(nodeB, nodeC) + val r = Reachability.empty + .unreachable(nodeB, nodeA) + .unreachable(nodeB, nodeC) + .unreachable(nodeD, nodeC) + .reachable(nodeB, nodeA) + .reachable(nodeB, nodeC) r.isReachable(nodeA) should ===(true) r.isReachable(nodeC) should ===(false) r.records should ===(Vector(Record(nodeD, nodeC, Unreachable, 1L))) val r2 = r.unreachable(nodeB, nodeD).unreachable(nodeB, nodeE) - r2.records.toSet should ===(Set( - Record(nodeD, nodeC, Unreachable, 1L), - Record(nodeB, nodeD, Unreachable, 5L), - Record(nodeB, nodeE, Unreachable, 6L))) + r2.records.toSet should ===( + Set(Record(nodeD, nodeC, Unreachable, 1L), + Record(nodeB, nodeD, Unreachable, 5L), + Record(nodeB, nodeE, Unreachable, 6L))) } "have correct aggregated status" in { - val records = Vector( - Reachability.Record(nodeA, nodeB, Reachable, 2), - Reachability.Record(nodeC, nodeB, Unreachable, 2), - Reachability.Record(nodeA, nodeD, Unreachable, 3), - Reachability.Record(nodeD, nodeB, Terminated, 4)) + val records = Vector(Reachability.Record(nodeA, nodeB, Reachable, 2), + Reachability.Record(nodeC, nodeB, Unreachable, 2), + Reachability.Record(nodeA, nodeD, Unreachable, 3), + Reachability.Record(nodeD, nodeB, Terminated, 4)) val versions = Map(nodeA -> 3L, nodeC -> 3L, nodeD -> 4L) val r = Reachability(records, versions) r.status(nodeA) should ===(Reachable) @@ -106,12 +113,18 @@ class ReachabilitySpec extends WordSpec with Matchers { } "have correct status for a mix of nodes" in { - val r = Reachability.empty. - unreachable(nodeB, nodeA).unreachable(nodeC, nodeA).unreachable(nodeD, nodeA). - unreachable(nodeC, nodeB).reachable(nodeC, nodeB).unreachable(nodeD, nodeB). - unreachable(nodeD, nodeC).reachable(nodeD, nodeC). - reachable(nodeE, nodeD). 
- unreachable(nodeA, nodeE).terminated(nodeB, nodeE) + val r = Reachability.empty + .unreachable(nodeB, nodeA) + .unreachable(nodeC, nodeA) + .unreachable(nodeD, nodeA) + .unreachable(nodeC, nodeB) + .reachable(nodeC, nodeB) + .unreachable(nodeD, nodeB) + .unreachable(nodeD, nodeC) + .reachable(nodeD, nodeC) + .reachable(nodeE, nodeD) + .unreachable(nodeA, nodeE) + .terminated(nodeB, nodeE) r.status(nodeB, nodeA) should ===(Unreachable) r.status(nodeC, nodeA) should ===(Unreachable) @@ -135,10 +148,8 @@ class ReachabilitySpec extends WordSpec with Matchers { r.allUnreachableFrom(nodeC) should ===(Set(nodeA)) r.allUnreachableFrom(nodeD) should ===(Set(nodeA, nodeB)) - r.observersGroupedByUnreachable should ===(Map( - nodeA -> Set(nodeB, nodeC, nodeD), - nodeB -> Set(nodeD), - nodeE -> Set(nodeA))) + r.observersGroupedByUnreachable should ===( + Map(nodeA -> Set(nodeB, nodeC, nodeD), nodeB -> Set(nodeD), nodeE -> Set(nodeA))) } "merge by picking latest version of each record" in { @@ -190,9 +201,7 @@ class ReachabilitySpec extends WordSpec with Matchers { val r3 = r1.reachable(nodeB, nodeA) // nodeB pruned val merged = r2.merge(Set(nodeA, nodeB, nodeC, nodeD, nodeE), r3) - merged.records.toSet should ===(Set( - Record(nodeA, nodeE, Unreachable, 1), - Record(nodeC, nodeD, Unreachable, 1))) + merged.records.toSet should ===(Set(Record(nodeA, nodeE, Unreachable, 1), Record(nodeC, nodeD, Unreachable, 1))) val merged3 = r3.merge(Set(nodeA, nodeB, nodeC, nodeD, nodeE), r2) merged3.records.toSet should ===(merged.records.toSet) @@ -211,12 +220,12 @@ class ReachabilitySpec extends WordSpec with Matchers { } "remove node" in { - val r = Reachability.empty. - unreachable(nodeB, nodeA). - unreachable(nodeC, nodeD). - unreachable(nodeB, nodeC). - unreachable(nodeB, nodeE). 
- remove(Set(nodeA, nodeB)) + val r = Reachability.empty + .unreachable(nodeB, nodeA) + .unreachable(nodeC, nodeD) + .unreachable(nodeB, nodeC) + .unreachable(nodeB, nodeE) + .remove(Set(nodeA, nodeB)) r.status(nodeB, nodeA) should ===(Reachable) r.status(nodeC, nodeD) should ===(Unreachable) @@ -225,10 +234,12 @@ class ReachabilitySpec extends WordSpec with Matchers { } "remove correctly after pruning" in { - val r = Reachability.empty. - unreachable(nodeB, nodeA).unreachable(nodeB, nodeC). - unreachable(nodeD, nodeC). - reachable(nodeB, nodeA).reachable(nodeB, nodeC) + val r = Reachability.empty + .unreachable(nodeB, nodeA) + .unreachable(nodeB, nodeC) + .unreachable(nodeD, nodeC) + .reachable(nodeB, nodeA) + .reachable(nodeB, nodeC) r.records should ===(Vector(Record(nodeD, nodeC, Unreachable, 1L))) val r2 = r.remove(List(nodeB)) r2.allObservers should ===(Set(nodeD)) @@ -236,10 +247,7 @@ class ReachabilitySpec extends WordSpec with Matchers { } "be able to filter records" in { - val r = Reachability.empty - .unreachable(nodeC, nodeB) - .unreachable(nodeB, nodeA) - .unreachable(nodeB, nodeC) + val r = Reachability.empty.unreachable(nodeC, nodeB).unreachable(nodeB, nodeA).unreachable(nodeB, nodeC) val filtered1 = r.filterRecords(record => record.observer != nodeC) filtered1.isReachable(nodeB) should ===(true) diff --git a/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala index dbad2a7505..45c21b20e0 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ResetSystemMessageSeqNrSpec.scala @@ -32,8 +32,8 @@ class ResetSystemMessageSeqNrSpec extends ArteryMultiNodeSpec(""" Cluster(sys2).join(Cluster(system).selfAddress) within(10.seconds) { awaitAssert { - Cluster(system).state.members.map(_.uniqueAddress) should ===(Set( - Cluster(system).selfUniqueAddress, 
Cluster(sys2).selfUniqueAddress)) + Cluster(system).state.members.map(_.uniqueAddress) should ===( + Set(Cluster(system).selfUniqueAddress, Cluster(sys2).selfUniqueAddress)) } } @@ -59,14 +59,14 @@ class ResetSystemMessageSeqNrSpec extends ArteryMultiNodeSpec(""" expectTerminated(echo1) shutdown(sys2) - val sys3 = newRemoteSystem( - name = Some(system.name), - extraConfig = Some(s"akka.remote.artery.canonical.port=${Cluster(sys2).selfAddress.port.get}")) + val sys3 = newRemoteSystem(name = Some(system.name), + extraConfig = + Some(s"akka.remote.artery.canonical.port=${Cluster(sys2).selfAddress.port.get}")) Cluster(sys3).join(Cluster(system).selfAddress) within(10.seconds) { awaitAssert { - Cluster(system).state.members.map(_.uniqueAddress) should ===(Set( - Cluster(system).selfUniqueAddress, Cluster(sys3).selfUniqueAddress)) + Cluster(system).state.members.map(_.uniqueAddress) should ===( + Set(Cluster(system).selfUniqueAddress, Cluster(sys3).selfUniqueAddress)) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/ShutdownAfterJoinSeedNodesSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ShutdownAfterJoinSeedNodesSpec.scala index f32f8ab2ca..e8bdd6e048 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ShutdownAfterJoinSeedNodesSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ShutdownAfterJoinSeedNodesSpec.scala @@ -49,7 +49,8 @@ class ShutdownAfterJoinSeedNodesSpec extends AkkaSpec(ShutdownAfterJoinSeedNodes Cluster(oridinary1).joinSeedNodes(seedNodes) Await.result(seed2.whenTerminated, Cluster(seed2).settings.ShutdownAfterUnsuccessfulJoinSeedNodes + 10.second) - Await.result(oridinary1.whenTerminated, Cluster(seed2).settings.ShutdownAfterUnsuccessfulJoinSeedNodes + 10.second) + Await.result(oridinary1.whenTerminated, + Cluster(seed2).settings.ShutdownAfterUnsuccessfulJoinSeedNodes + 10.second) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala 
b/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala index 232f467858..dae8edbf90 100644 --- a/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala @@ -29,13 +29,14 @@ object StartupWithOneThreadSpec { final case class GossipTo(address: Address) - def testProps = Props(new Actor with ActorLogging { - val cluster = Cluster(context.system) - log.debug(s"started ${cluster.selfAddress} ${Thread.currentThread().getName}") - def receive = { - case msg => sender() ! msg - } - }) + def testProps = + Props(new Actor with ActorLogging { + val cluster = Cluster(context.system) + log.debug(s"started ${cluster.selfAddress} ${Thread.currentThread().getName}") + def receive = { + case msg => sender() ! msg + } + }) } class StartupWithOneThreadSpec(startTime: Long) extends AkkaSpec(StartupWithOneThreadSpec.config) with ImplicitSender { @@ -54,14 +55,14 @@ class StartupWithOneThreadSpec(startTime: Long) extends AkkaSpec(StartupWithOneT // Note that the Cluster extension is started via ClusterActorRefProvider // before ActorSystem.apply returns, i.e. in the constructor of AkkaSpec. (System.nanoTime - startTime).nanos.toMillis should be < - (system.settings.CreationTimeout.duration - 2.second).toMillis + (system.settings.CreationTimeout.duration - 2.second).toMillis system.actorOf(testProps) ! "hello" system.actorOf(testProps) ! "hello" system.actorOf(testProps) ! 
"hello" val cluster = Cluster(system) (System.nanoTime - startTime).nanos.toMillis should be < - (system.settings.CreationTimeout.duration - 2.second).toMillis + (system.settings.CreationTimeout.duration - 2.second).toMillis expectMsg("hello") expectMsg("hello") diff --git a/akka-cluster/src/test/scala/akka/cluster/TestMember.scala b/akka-cluster/src/test/scala/akka/cluster/TestMember.scala index c64ca7223e..4e50e2a568 100644 --- a/akka-cluster/src/test/scala/akka/cluster/TestMember.scala +++ b/akka-cluster/src/test/scala/akka/cluster/TestMember.scala @@ -13,9 +13,17 @@ object TestMember { def apply(address: Address, status: MemberStatus, upNumber: Int, dc: ClusterSettings.DataCenter): Member = apply(address, status, Set.empty, dc, upNumber) - def apply(address: Address, status: MemberStatus, roles: Set[String], dataCenter: ClusterSettings.DataCenter = ClusterSettings.DefaultDataCenter, upNumber: Int = Int.MaxValue): Member = + def apply(address: Address, + status: MemberStatus, + roles: Set[String], + dataCenter: ClusterSettings.DataCenter = ClusterSettings.DefaultDataCenter, + upNumber: Int = Int.MaxValue): Member = withUniqueAddress(UniqueAddress(address, 0L), status, roles, dataCenter, upNumber) - def withUniqueAddress(uniqueAddress: UniqueAddress, status: MemberStatus, roles: Set[String], dataCenter: ClusterSettings.DataCenter, upNumber: Int = Int.MaxValue): Member = + def withUniqueAddress(uniqueAddress: UniqueAddress, + status: MemberStatus, + roles: Set[String], + dataCenter: ClusterSettings.DataCenter, + upNumber: Int = Int.MaxValue): Member = new Member(uniqueAddress, upNumber, status, roles + (ClusterSettings.DcRolePrefix + dataCenter)) } diff --git a/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala b/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala index e63d7a5a58..63ca48dd02 100644 --- a/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala +++ 
b/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala @@ -4,7 +4,7 @@ package akka.cluster -import scala.collection.immutable.{ TreeMap, SortedSet } +import scala.collection.immutable.{ SortedSet, TreeMap } import org.scalatest.WordSpec import org.scalatest.Matchers @@ -55,7 +55,7 @@ class VectorClockPerfSpec extends WordSpec with Matchers { } def compareTo(order: Ordering)(vc1: VectorClock, vc2: VectorClock): Unit = { - vc1 compareTo vc2 should ===(order) + vc1.compareTo(vc2) should ===(order) } def !==(vc1: VectorClock, vc2: VectorClock): Unit = { diff --git a/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala index 76fa9e27e5..eaf0b27b6e 100644 --- a/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala @@ -148,13 +148,13 @@ class VectorClockSpec extends AkkaSpec { val clock2_2 = clock1_2 :+ node2 val clock3_2 = clock2_2 :+ node2 - val merged1 = clock3_2 merge clock5_1 + val merged1 = clock3_2.merge(clock5_1) merged1.versions.size should ===(3) merged1.versions.contains(node1) should ===(true) merged1.versions.contains(node2) should ===(true) merged1.versions.contains(node3) should ===(true) - val merged2 = clock5_1 merge clock3_2 + val merged2 = clock5_1.merge(clock3_2) merged2.versions.size should ===(3) merged2.versions.contains(node1) should ===(true) merged2.versions.contains(node2) should ===(true) @@ -185,14 +185,14 @@ class VectorClockSpec extends AkkaSpec { val clock2_2 = clock1_2 :+ node4 val clock3_2 = clock2_2 :+ node4 - val merged1 = clock3_2 merge clock5_1 + val merged1 = clock3_2.merge(clock5_1) merged1.versions.size should ===(4) merged1.versions.contains(node1) should ===(true) merged1.versions.contains(node2) should ===(true) merged1.versions.contains(node3) should ===(true) merged1.versions.contains(node4) should ===(true) - val merged2 = clock5_1 merge clock3_2 + val merged2 
= clock5_1.merge(clock3_2) merged2.versions.size should ===(4) merged2.versions.contains(node1) should ===(true) merged2.versions.contains(node2) should ===(true) @@ -263,7 +263,7 @@ class VectorClockSpec extends AkkaSpec { c1.versions.contains(node1) should be(false) (c1 <> c) should be(true) - (c.prune(node1) merge c1).versions.contains(node1) should be(false) + c.prune(node1).merge(c1).versions.contains(node1) should be(false) val c2 = c :+ node2 (c1 <> c2) should be(true) diff --git a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala index cfcb74e4fe..c9f346bea7 100644 --- a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala @@ -14,8 +14,7 @@ import collection.immutable.SortedSet import akka.testkit.{ AkkaSpec, TestKit } import com.typesafe.config.ConfigFactory -class ClusterMessageSerializerSpec extends AkkaSpec( - "akka.actor.provider = cluster") { +class ClusterMessageSerializerSpec extends AkkaSpec("akka.actor.provider = cluster") { val serializer = new ClusterMessageSerializer(system.asInstanceOf[ExtendedActorSystem]) @@ -69,8 +68,11 @@ class ClusterMessageSerializerSpec extends AkkaSpec( val node4 = VectorClock.Node("node4") val g1 = (Gossip(SortedSet(a1, b1, c1, d1)) :+ node1 :+ node2).seen(a1.uniqueAddress).seen(b1.uniqueAddress) val g2 = (g1 :+ node3 :+ node4).seen(a1.uniqueAddress).seen(c1.uniqueAddress) - val reachability3 = Reachability.empty.unreachable(a1.uniqueAddress, e1.uniqueAddress).unreachable(b1.uniqueAddress, e1.uniqueAddress) - val g3 = g2.copy(members = SortedSet(a1, b1, c1, d1, e1), overview = g2.overview.copy(reachability = reachability3)) + val reachability3 = Reachability.empty + .unreachable(a1.uniqueAddress, e1.uniqueAddress) + .unreachable(b1.uniqueAddress, e1.uniqueAddress) + val g3 = + 
g2.copy(members = SortedSet(a1, b1, c1, d1, e1), overview = g2.overview.copy(reachability = reachability3)) val g4 = g1.remove(d1.uniqueAddress, 352684800) checkSerialization(GossipEnvelope(a1.uniqueAddress, uniqueAddress2, g1)) checkSerialization(GossipEnvelope(a1.uniqueAddress, uniqueAddress2, g2)) @@ -99,19 +101,18 @@ class ClusterMessageSerializerSpec extends AkkaSpec( "deserialize from wire format of version 2.5.9 (using serialized address for InitJoinAck)" in { // we must use the old singleton class name so that the other side will see an InitJoin // but discard the config as it does not know about the config check - val initJoinAck = InternalClusterAction.InitJoinAck( - Address("akka.tcp", "cluster", "127.0.0.1", 2552), - InternalClusterAction.UncheckedConfig) + val initJoinAck = InternalClusterAction.InitJoinAck(Address("akka.tcp", "cluster", "127.0.0.1", 2552), + InternalClusterAction.UncheckedConfig) val serializedInitJoinAckPre2510 = serializer.addressToProto(initJoinAck.address).build().toByteArray - val deserialized = serializer.fromBinary(serializedInitJoinAckPre2510, ClusterMessageSerializer.InitJoinAckManifest) + val deserialized = + serializer.fromBinary(serializedInitJoinAckPre2510, ClusterMessageSerializer.InitJoinAckManifest) deserialized shouldEqual initJoinAck } "serialize to wire format of version 2.5.9 (using serialized address for InitJoinAck)" in { - val initJoinAck = InternalClusterAction.InitJoinAck( - Address("akka.tcp", "cluster", "127.0.0.1", 2552), - InternalClusterAction.ConfigCheckUnsupportedByJoiningNode) + val initJoinAck = InternalClusterAction.InitJoinAck(Address("akka.tcp", "cluster", "127.0.0.1", 2552), + InternalClusterAction.ConfigCheckUnsupportedByJoiningNode) val bytes = serializer.toBinary(initJoinAck) val expectedSerializedInitJoinAckPre2510 = serializer.addressToProto(initJoinAck.address).build().toByteArray @@ -131,7 +132,7 @@ class ClusterMessageSerializerSpec extends AkkaSpec( val bytes = serializer.toBinary( 
ClusterRouterPool(RoundRobinPool(nrOfInstances = 4), ClusterRouterPoolSettings(123, 345, true, Some("role ABC")))) println(String.valueOf(encodeHex(bytes))) - */ + */ val oldBytesHex = "0a0f08101205524f5252501a04080418001211087b10d90218012208726f6c6520414243" @@ -166,43 +167,29 @@ class ClusterMessageSerializerSpec extends AkkaSpec( } "Cluster router pool" must { "be serializable with no role" in { - checkSerialization(ClusterRouterPool( - RoundRobinPool( - nrOfInstances = 4 - ), - ClusterRouterPoolSettings( - totalInstances = 2, - maxInstancesPerNode = 5, - allowLocalRoutees = true - ) - )) + checkSerialization( + ClusterRouterPool( + RoundRobinPool(nrOfInstances = 4), + ClusterRouterPoolSettings(totalInstances = 2, maxInstancesPerNode = 5, allowLocalRoutees = true))) } "be serializable with one role" in { - checkSerialization(ClusterRouterPool( - RoundRobinPool( - nrOfInstances = 4 - ), - ClusterRouterPoolSettings( - totalInstances = 2, - maxInstancesPerNode = 5, - allowLocalRoutees = true, - useRoles = Set("Richard, Duke of Gloucester") - ) - )) + checkSerialization( + ClusterRouterPool(RoundRobinPool(nrOfInstances = 4), + ClusterRouterPoolSettings(totalInstances = 2, + maxInstancesPerNode = 5, + allowLocalRoutees = true, + useRoles = Set("Richard, Duke of Gloucester")))) } "be serializable with many roles" in { - checkSerialization(ClusterRouterPool( - RoundRobinPool( - nrOfInstances = 4), - ClusterRouterPoolSettings( - totalInstances = 2, - maxInstancesPerNode = 5, - allowLocalRoutees = true, - useRoles = Set("Richard, Duke of Gloucester", "Hongzhi Emperor", "Red Rackham") - ) - )) + checkSerialization( + ClusterRouterPool(RoundRobinPool(nrOfInstances = 4), + ClusterRouterPoolSettings( + totalInstances = 2, + maxInstancesPerNode = 5, + allowLocalRoutees = true, + useRoles = Set("Richard, Duke of Gloucester", "Hongzhi Emperor", "Red Rackham")))) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala 
b/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala index 38986d668d..99457c6b79 100644 --- a/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala @@ -34,16 +34,15 @@ class ClusterRouterSupervisorSpec extends AkkaSpec(""" "use provided supervisor strategy" in { val router = system.actorOf( - ClusterRouterPool(RoundRobinPool(nrOfInstances = 1, supervisorStrategy = - OneForOneStrategy(loggingEnabled = false) { + ClusterRouterPool( + RoundRobinPool(nrOfInstances = 1, supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { case _ => testActor ! "supervised" SupervisorStrategy.Stop - }), ClusterRouterPoolSettings( - totalInstances = 1, - maxInstancesPerNode = 1, - allowLocalRoutees = true)). - props(Props(classOf[KillableActor], testActor)), name = "therouter") + }), + ClusterRouterPoolSettings(totalInstances = 1, maxInstancesPerNode = 1, allowLocalRoutees = true)) + .props(Props(classOf[KillableActor], testActor)), + name = "therouter") router ! "go away" expectMsg("supervised") diff --git a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala index ac7ed6ef56..50a9acdef6 100644 --- a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala +++ b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/CircuitBreakerProxy.scala @@ -41,15 +41,21 @@ object CircuitBreakerProxy { * @param failureMap function to map a failure into a response message. 
The failing response message is wrapped * into a [[akka.contrib.circuitbreaker.CircuitBreakerProxy.CircuitOpenFailure]] object */ - def props( - target: ActorRef, - maxFailures: Int, - callTimeout: Timeout, - resetTimeout: Timeout, - circuitEventListener: Option[ActorRef], - failureDetector: Any => Boolean, - failureMap: CircuitOpenFailure => Any) = - Props(new CircuitBreakerProxy(target, maxFailures, callTimeout, resetTimeout, circuitEventListener, failureDetector, failureMap)) + def props(target: ActorRef, + maxFailures: Int, + callTimeout: Timeout, + resetTimeout: Timeout, + circuitEventListener: Option[ActorRef], + failureDetector: Any => Boolean, + failureMap: CircuitOpenFailure => Any) = + Props( + new CircuitBreakerProxy(target, + maxFailures, + callTimeout, + resetTimeout, + circuitEventListener, + failureDetector, + failureMap)) sealed trait CircuitBreakerCommand @@ -71,11 +77,14 @@ object CircuitBreakerProxy { final case class CircuitBreakerStateData(failureCount: Int = 0, firstHalfOpenMessageSent: Boolean = false) - final case class CircuitBreakerPropsBuilder( - maxFailures: Int, callTimeout: Timeout, resetTimeout: Timeout, - circuitEventListener: Option[ActorRef] = None, - failureDetector: Any => Boolean = { _ => false }, - openCircuitFailureConverter: CircuitOpenFailure => Any = identity) { + final case class CircuitBreakerPropsBuilder(maxFailures: Int, + callTimeout: Timeout, + resetTimeout: Timeout, + circuitEventListener: Option[ActorRef] = None, + failureDetector: Any => Boolean = { _ => + false + }, + openCircuitFailureConverter: CircuitOpenFailure => Any = identity) { def withMaxFailures(value: Int) = copy(maxFailures = value) def withCallTimeout(value: Timeout) = copy(callTimeout = value) @@ -89,7 +98,14 @@ object CircuitBreakerProxy { * * @param target the target actor ref */ - def props(target: ActorRef) = CircuitBreakerProxy.props(target, maxFailures, callTimeout, resetTimeout, circuitEventListener, failureDetector, 
openCircuitFailureConverter) + def props(target: ActorRef) = + CircuitBreakerProxy.props(target, + maxFailures, + callTimeout, + resetTimeout, + circuitEventListener, + failureDetector, + openCircuitFailureConverter) } @@ -103,31 +119,35 @@ object CircuitBreakerProxy { import akka.contrib.circuitbreaker.CircuitBreakerProxy._ @deprecated("Use akka.pattern.CircuitBreaker + ask instead", "2.5.0") -final class CircuitBreakerProxy( - target: ActorRef, - maxFailures: Int, - callTimeout: Timeout, - resetTimeout: Timeout, - circuitEventListener: Option[ActorRef], - failureDetector: Any => Boolean, - failureMap: CircuitOpenFailure => Any) extends Actor with ActorLogging with FSM[CircuitBreakerState, CircuitBreakerStateData] { +final class CircuitBreakerProxy(target: ActorRef, + maxFailures: Int, + callTimeout: Timeout, + resetTimeout: Timeout, + circuitEventListener: Option[ActorRef], + failureDetector: Any => Boolean, + failureMap: CircuitOpenFailure => Any) + extends Actor + with ActorLogging + with FSM[CircuitBreakerState, CircuitBreakerStateData] { import CircuitBreakerInternalEvents._ import FSM.`->` - context watch target + context.watch(target) startWith(Closed, CircuitBreakerStateData(failureCount = 0)) def callSucceededHandling: StateFunction = { case Event(CallSucceeded, state) => log.debug("Received call succeeded notification in state {} resetting counter", state) - goto(Closed) using CircuitBreakerStateData(failureCount = 0, firstHalfOpenMessageSent = false) + goto(Closed).using(CircuitBreakerStateData(failureCount = 0, firstHalfOpenMessageSent = false)) } def passthroughHandling: StateFunction = { case Event(Passthrough(message), state) => - log.debug("Received a passthrough message in state {}, forwarding the message to the target actor without altering current state", state) + log.debug( + "Received a passthrough message in state {}, forwarding the message to the target actor without altering current state", + state) target ! 
message stay } @@ -138,10 +158,12 @@ final class CircuitBreakerProxy( stop } - def commonStateHandling: StateFunction = { callSucceededHandling orElse passthroughHandling orElse targetTerminationHandling } + def commonStateHandling: StateFunction = { + callSucceededHandling.orElse(passthroughHandling).orElse(targetTerminationHandling) + } when(Closed) { - commonStateHandling orElse { + commonStateHandling.orElse { case Event(TellOnly(message), _) => log.debug("Closed: Sending message {} without expecting any response", message) target ! message @@ -151,9 +173,9 @@ final class CircuitBreakerProxy( log.debug("Received call failed notification in state {} incrementing counter", state) val newState = state.copy(failureCount = state.failureCount + 1) if (newState.failureCount < maxFailures) { - stay using newState + stay.using(newState) } else { - goto(Open) using newState + goto(Open).using(newState) } case Event(message, state) => @@ -166,22 +188,27 @@ final class CircuitBreakerProxy( } when(Open, stateTimeout = resetTimeout.duration) { - commonStateHandling orElse { + commonStateHandling.orElse { case Event(StateTimeout, state) => log.debug("Timeout expired for state OPEN, going to half open") - goto(HalfOpen) using state.copy(firstHalfOpenMessageSent = false) + goto(HalfOpen).using(state.copy(firstHalfOpenMessageSent = false)) case Event(CallFailed, state) => - log.debug("Open: Call received a further call failed notification, probably from a previous timed out event, ignoring") + log.debug( + "Open: Call received a further call failed notification, probably from a previous timed out event, ignoring") stay case Event(openNotification @ CircuitOpenFailure(_), _) => - log.warning("Unexpected circuit open notification {} sent to myself. Please report this as a bug.", openNotification) + log.warning("Unexpected circuit open notification {} sent to myself. 
Please report this as a bug.", + openNotification) stay case Event(message, state) => val failureNotification = failureMap(CircuitOpenFailure(message)) - log.debug("OPEN: Failing request for message {}, sending failure notification {} to sender {}", message, failureNotification, sender) + log.debug("OPEN: Failing request for message {}, sending failure notification {} to sender {}", + message, + failureNotification, + sender) sender ! failureNotification stay @@ -189,7 +216,7 @@ final class CircuitBreakerProxy( } when(HalfOpen) { - commonStateHandling orElse { + commonStateHandling.orElse { case Event(TellOnly(message), _) => log.debug("HalfOpen: Dropping TellOnly request for message {}", message) stay @@ -199,17 +226,21 @@ final class CircuitBreakerProxy( goto(Open) case Event(CallFailed, CircuitBreakerStateData(_, false)) => - log.debug("HalfOpen: Call received a further call failed notification, probably from a previous timed out event, ignoring") + log.debug( + "HalfOpen: Call received a further call failed notification, probably from a previous timed out event, ignoring") stay case Event(message, state @ CircuitBreakerStateData(_, false)) => log.debug("HalfOpen: First message {} received, forwarding it to target {}", message, target) forwardRequest(message, sender, state, log) - stay using state.copy(firstHalfOpenMessageSent = true) + stay.using(state.copy(firstHalfOpenMessageSent = true)) case Event(message, CircuitBreakerStateData(_, true)) => val failureNotification = failureMap(CircuitOpenFailure(message)) - log.debug("HALF-OPEN: Failing request for message {}, sending failure notification {} to sender {}", message, failureNotification, sender) + log.debug("HALF-OPEN: Failing request for message {}, sending failure notification {} to sender {}", + message, + failureNotification, + sender) sender ! 
failureNotification stay } @@ -220,7 +251,9 @@ final class CircuitBreakerProxy( target.ask(message)(callTimeout).onComplete { case Success(response) => - log.debug("Request '{}' has been replied to with response {}, forwarding to original sender {}", message, currentSender) + log.debug("Request '{}' has been replied to with response {}, forwarding to original sender {}", + message, + currentSender) currentSender ! response @@ -229,14 +262,18 @@ final class CircuitBreakerProxy( if (isFailure) { log.debug( "Response '{}' is considered as failure sending self-message to ask incrementing failure count (origin state was {})", - response, state) + response, + state) self ! CallFailed } else { log.debug( "Request '{}' succeeded with response {}, returning response to sender {} and sending message to ask to reset failure count (origin state was {})", - message, response, currentSender, state) + message, + response, + currentSender, + state) self ! CallSucceeded } @@ -244,7 +281,10 @@ final class CircuitBreakerProxy( case Failure(reason) => log.debug( "Request '{}' to target {} failed with exception {}, sending self-message to ask incrementing failure count (origin state was {})", - message, target, reason, state) + message, + target, + reason, + state) self ! CallFailed } @@ -253,15 +293,15 @@ final class CircuitBreakerProxy( onTransition { case from -> Closed => log.debug("Moving from state {} to state CLOSED", from) - circuitEventListener foreach { _ ! CircuitClosed(self) } + circuitEventListener.foreach { _ ! CircuitClosed(self) } case from -> HalfOpen => log.debug("Moving from state {} to state HALF OPEN", from) - circuitEventListener foreach { _ ! CircuitHalfOpen(self) } + circuitEventListener.foreach { _ ! CircuitHalfOpen(self) } case from -> Open => log.debug("Moving from state {} to state OPEN", from) - circuitEventListener foreach { _ ! CircuitOpen(self) } + circuitEventListener.foreach { _ ! 
CircuitOpen(self) } } } diff --git a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/askExtensions.scala b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/askExtensions.scala index 868d174949..c6ef612b9c 100644 --- a/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/askExtensions.scala +++ b/akka-contrib/src/main/scala/akka/contrib/circuitbreaker/askExtensions.scala @@ -4,7 +4,7 @@ package akka.contrib.circuitbreaker -import akka.actor.{ ActorSelection, Actor, ActorRef } +import akka.actor.{ Actor, ActorRef, ActorSelection } import akka.contrib.circuitbreaker.CircuitBreakerProxy.CircuitOpenFailure import akka.util.Timeout import scala.language.implicitConversions @@ -13,7 +13,8 @@ import scala.concurrent.{ ExecutionContext, Future } @deprecated("Use akka.pattern.CircuitBreaker + ask instead", "2.5.0") sealed class OpenCircuitException(message: String) extends RuntimeException(message) -private[circuitbreaker] final object OpenCircuitException extends OpenCircuitException("Unable to complete operation since the Circuit Breaker Actor Proxy is in Open State") +private[circuitbreaker] final object OpenCircuitException + extends OpenCircuitException("Unable to complete operation since the Circuit Breaker Actor Proxy is in Open State") /** * Convenience implicit conversions to provide circuit-breaker aware management of the ask pattern, @@ -23,6 +24,7 @@ private[circuitbreaker] final object OpenCircuitException extends OpenCircuitExc */ @deprecated("Use akka.pattern.CircuitBreaker + ask instead", "2.5.0") object Implicits { + /** * Import this implicit to enable the methods `failForOpenCircuit` and `failForOpenCircuitWith` * to [[scala.concurrent.Future]] converting @@ -45,16 +47,21 @@ object Implicits { * [[akka.contrib.circuitbreaker.CircuitBreakerProxy.CircuitOpenFailure]] responses into a failure response caused * by an [[akka.contrib.circuitbreaker.OpenCircuitException]] */ - @throws[akka.contrib.circuitbreaker.OpenCircuitException]("if the 
call failed because the circuit breaker proxy state was OPEN") - def askWithCircuitBreaker(circuitBreakerProxy: ActorRef, message: Any)(implicit executionContext: ExecutionContext, timeout: Timeout): Future[Any] = + @throws[akka.contrib.circuitbreaker.OpenCircuitException]( + "if the call failed because the circuit breaker proxy state was OPEN") + def askWithCircuitBreaker(circuitBreakerProxy: ActorRef, message: Any)(implicit executionContext: ExecutionContext, + timeout: Timeout): Future[Any] = circuitBreakerProxy.internalAskWithCircuitBreaker(message, timeout, ActorRef.noSender) /** * Wraps the `ask` method in [[akka.pattern.AskSupport]] method to convert failures connected to the circuit * breaker being in open state */ - @throws[akka.contrib.circuitbreaker.OpenCircuitException]("if the call failed because the circuit breaker proxy state was OPEN") - def askWithCircuitBreaker(circuitBreakerProxy: ActorRef, message: Any, sender: ActorRef)(implicit executionContext: ExecutionContext, timeout: Timeout): Future[Any] = + @throws[akka.contrib.circuitbreaker.OpenCircuitException]( + "if the call failed because the circuit breaker proxy state was OPEN") + def askWithCircuitBreaker(circuitBreakerProxy: ActorRef, message: Any, sender: ActorRef)( + implicit executionContext: ExecutionContext, + timeout: Timeout): Future[Any] = circuitBreakerProxy.internalAskWithCircuitBreaker(message, timeout, sender) } @@ -67,7 +74,8 @@ object Implicits { @deprecated("Use akka.pattern.CircuitBreaker + ask instead", "2.5.0") final class CircuitBreakerAwareFuture(val future: Future[Any]) extends AnyVal { @throws[OpenCircuitException] - def failForOpenCircuit(implicit executionContext: ExecutionContext): Future[Any] = failForOpenCircuitWith(OpenCircuitException) + def failForOpenCircuit(implicit executionContext: ExecutionContext): Future[Any] = + failForOpenCircuitWith(OpenCircuitException) def failForOpenCircuitWith(throwing: => Throwable)(implicit executionContext: ExecutionContext): 
Future[Any] = { future.flatMap { @@ -81,11 +89,14 @@ final class CircuitBreakerAwareFuture(val future: Future[Any]) extends AnyVal { @deprecated("Use akka.pattern.CircuitBreaker + ask instead", "2.5.0") final class AskeableWithCircuitBreakerActor(val actorRef: ActorRef) extends AnyVal { - def askWithCircuitBreaker(message: Any)(implicit executionContext: ExecutionContext, timeout: Timeout, sender: ActorRef = Actor.noSender): Future[Any] = + def askWithCircuitBreaker(message: Any)(implicit executionContext: ExecutionContext, + timeout: Timeout, + sender: ActorRef = Actor.noSender): Future[Any] = internalAskWithCircuitBreaker(message, timeout, sender) @throws[OpenCircuitException] - private[circuitbreaker] def internalAskWithCircuitBreaker(message: Any, timeout: Timeout, sender: ActorRef)(implicit executionContext: ExecutionContext) = { + private[circuitbreaker] def internalAskWithCircuitBreaker(message: Any, timeout: Timeout, sender: ActorRef)( + implicit executionContext: ExecutionContext) = { import akka.pattern.ask import Implicits.futureExtensions @@ -95,10 +106,13 @@ final class AskeableWithCircuitBreakerActor(val actorRef: ActorRef) extends AnyV @deprecated("Use akka.pattern.CircuitBreaker + ask instead", "2.5.0") final class AskeableWithCircuitBreakerActorSelection(val actorSelection: ActorSelection) extends AnyVal { - def askWithCircuitBreaker(message: Any)(implicit executionContext: ExecutionContext, timeout: Timeout, sender: ActorRef = Actor.noSender): Future[Any] = + def askWithCircuitBreaker(message: Any)(implicit executionContext: ExecutionContext, + timeout: Timeout, + sender: ActorRef = Actor.noSender): Future[Any] = internalAskWithCircuitBreaker(message, timeout, sender) - private[circuitbreaker] def internalAskWithCircuitBreaker(message: Any, timeout: Timeout, sender: ActorRef)(implicit executionContext: ExecutionContext) = { + private[circuitbreaker] def internalAskWithCircuitBreaker(message: Any, timeout: Timeout, sender: ActorRef)( + implicit 
executionContext: ExecutionContext) = { import akka.pattern.ask import Implicits.futureExtensions diff --git a/akka-contrib/src/main/scala/akka/contrib/jul/JavaLogger.scala b/akka-contrib/src/main/scala/akka/contrib/jul/JavaLogger.scala index 924522a1bd..76378a854e 100644 --- a/akka-contrib/src/main/scala/akka/contrib/jul/JavaLogger.scala +++ b/akka-contrib/src/main/scala/akka/contrib/jul/JavaLogger.scala @@ -106,13 +106,12 @@ trait JavaLoggingAdapter extends LoggingAdapter { // it is unfortunate that this workaround is needed private def updateSource(record: logging.LogRecord): Unit = { val stack = Thread.currentThread.getStackTrace - val source = stack.find { - frame => - val cname = frame.getClassName - !cname.startsWith("akka.contrib.jul.") && - !cname.startsWith("akka.event.LoggingAdapter") && - !cname.startsWith("java.lang.reflect.") && - !cname.startsWith("sun.reflect.") + val source = stack.find { frame => + val cname = frame.getClassName + !cname.startsWith("akka.contrib.jul.") && + !cname.startsWith("akka.event.LoggingAdapter") && + !cname.startsWith("java.lang.reflect.") && + !cname.startsWith("sun.reflect.") } if (source.isDefined) { record.setSourceClassName(source.get.getClassName) diff --git a/akka-contrib/src/main/scala/akka/contrib/mailbox/PeekMailbox.scala b/akka-contrib/src/main/scala/akka/contrib/mailbox/PeekMailbox.scala index fa312a5432..8a228d3d91 100644 --- a/akka-contrib/src/main/scala/akka/contrib/mailbox/PeekMailbox.scala +++ b/akka-contrib/src/main/scala/akka/contrib/mailbox/PeekMailbox.scala @@ -8,7 +8,15 @@ import java.util.concurrent.{ ConcurrentHashMap, ConcurrentLinkedQueue } import com.typesafe.config.Config -import akka.actor.{ ActorContext, ActorRef, ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider } +import akka.actor.{ + ActorContext, + ActorRef, + ActorSystem, + ExtendedActorSystem, + Extension, + ExtensionId, + ExtensionIdProvider +} import akka.dispatch.{ Envelope, MailboxType, MessageQueue, 
UnboundedQueueBasedMessageQueue } @deprecated("Use an explicit supervisor or proxy actor instead", "2.5.0") @@ -57,8 +65,7 @@ class PeekMailboxType(settings: ActorSystem.Settings, config: Config) extends Ma } @deprecated("Use an explicit supervisor or proxy actor instead", "2.5.0") -class PeekMailbox(owner: ActorRef, system: ActorSystem, maxRetries: Int) - extends UnboundedQueueBasedMessageQueue { +class PeekMailbox(owner: ActorRef, system: ActorSystem, maxRetries: Int) extends UnboundedQueueBasedMessageQueue { final val queue = new ConcurrentLinkedQueue[Envelope]() /* @@ -104,4 +111,3 @@ class PeekMailbox(owner: ActorRef, system: ActorSystem, maxRetries: Int) PeekMailboxExtension(system).unregister(owner) } } - diff --git a/akka-contrib/src/main/scala/akka/contrib/pattern/Aggregator.scala b/akka-contrib/src/main/scala/akka/contrib/pattern/Aggregator.scala index be24a289b6..a4dd08b5bb 100644 --- a/akka-contrib/src/main/scala/akka/contrib/pattern/Aggregator.scala +++ b/akka-contrib/src/main/scala/akka/contrib/pattern/Aggregator.scala @@ -46,8 +46,8 @@ trait Aggregator { * @return True if the partial function is removed, false if not found. 
*/ def unexpect(fn: Actor.Receive): Boolean = { - if (expectList remove fn) true - else if (processing && (addBuffer remove fn)) true + if (expectList.remove(fn)) true + else if (processing && (addBuffer.remove(fn))) true else false } @@ -66,14 +66,14 @@ trait Aggregator { def handleMessage(msg: Any): Boolean = { processing = true try { - expectList process { fn => + expectList.process { fn => var processed = true fn.applyOrElse(msg, (_: Any) => processed = false) processed } } finally { processing = false - expectList addAll addBuffer + expectList.addAll(addBuffer) addBuffer.removeAll() } } diff --git a/akka-contrib/src/main/scala/akka/contrib/pattern/ReceivePipeline.scala b/akka-contrib/src/main/scala/akka/contrib/pattern/ReceivePipeline.scala index 4d1a8a066e..ac105ff681 100644 --- a/akka-contrib/src/main/scala/akka/contrib/pattern/ReceivePipeline.scala +++ b/akka-contrib/src/main/scala/akka/contrib/pattern/ReceivePipeline.scala @@ -8,12 +8,14 @@ import akka.actor.Actor @deprecated("Feel free to copy", "2.5.0") object ReceivePipeline { + /** * Result returned by an interceptor PF to determine what/whether to delegate to the next inner interceptor */ sealed trait Delegation case class Inner(transformedMsg: Any) extends Delegation { + /** * Add a block of code to be executed after the message (which may be further transformed and processed by * inner interceptors) is handled by the actor's receive. 
@@ -63,6 +65,7 @@ trait ReceivePipeline extends Actor { pipeline :+= withDefault(interceptor) decoratorCache = None } + /** * Adds an outer interceptor, it will be applied firstly, far from Actor's original behavior * @param interceptor an interceptor @@ -81,9 +84,11 @@ trait ReceivePipeline extends Actor { val zipped = pipeline.foldRight(innerReceiveHandler) { (outerInterceptor, innerHandler) => outerInterceptor.andThen { - case Inner(msg) => innerHandler(msg) - case InnerAndAfter(msg, after) => try innerHandler(msg) finally after(()) - case HandledCompletely => Done + case Inner(msg) => innerHandler(msg) + case InnerAndAfter(msg, after) => + try innerHandler(msg) + finally after(()) + case HandledCompletely => Done } } diff --git a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala index 9b7c8ba9ec..17f7430fb2 100644 --- a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala +++ b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala @@ -12,11 +12,14 @@ import java.util.concurrent.TimeUnit @deprecated("Use AtLeastOnceDelivery instead", "2.5.0") object ReliableProxy { + /** * Scala API Props. Arguments are detailed in the [[akka.contrib.pattern.ReliableProxy]] * constructor. */ - def props(targetPath: ActorPath, retryAfter: FiniteDuration, reconnectAfter: Option[FiniteDuration], + def props(targetPath: ActorPath, + retryAfter: FiniteDuration, + reconnectAfter: Option[FiniteDuration], maxReconnects: Option[Int]): Props = { Props(new ReliableProxy(targetPath, retryAfter, reconnectAfter, maxReconnects)) } @@ -25,7 +28,9 @@ object ReliableProxy { * Java API Props. Arguments are detailed in the [[akka.contrib.pattern.ReliableProxy]] * constructor. 
*/ - def props(targetPath: ActorPath, retryAfter: FiniteDuration, reconnectAfter: FiniteDuration, + def props(targetPath: ActorPath, + retryAfter: FiniteDuration, + reconnectAfter: FiniteDuration, maxReconnects: Int): Props = { props(targetPath, retryAfter, Option(reconnectAfter), if (maxReconnects > 0) Some(maxReconnects) else None) } @@ -62,7 +67,7 @@ object ReliableProxy { } else { logDebug("Received message from {} with wrong serial: {}", snd, msg) } - case Terminated(`target`) => context stop self + case Terminated(`target`) => context.stop(self) } } @@ -118,7 +123,7 @@ object ReliableProxy { */ private[akka] trait ReliableProxyDebugLogging extends ActorLogging { this: Actor => val debug: Boolean = - Try(context.system.settings.config.getBoolean("akka.reliable-proxy.debug")) getOrElse false + Try(context.system.settings.config.getBoolean("akka.reliable-proxy.debug")).getOrElse(false) def enabled: Boolean = debug && log.isDebugEnabled @@ -226,9 +231,13 @@ import ReliableProxy._ * target actor. Use `None` for no limit. If `reconnectAfter` is `None` this value is ignored. */ @deprecated("Use AtLeastOnceDelivery instead", "2.5.0") -class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration, - reconnectAfter: Option[FiniteDuration], maxConnectAttempts: Option[Int]) - extends Actor with LoggingFSM[State, Vector[Message]] with ReliableProxyDebugLogging { +class ReliableProxy(targetPath: ActorPath, + retryAfter: FiniteDuration, + reconnectAfter: Option[FiniteDuration], + maxConnectAttempts: Option[Int]) + extends Actor + with LoggingFSM[State, Vector[Message]] + with ReliableProxyDebugLogging { import FSM.`->` var tunnel: ActorRef = _ @@ -241,12 +250,16 @@ class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration, val reconnectTimer = "reconnect" val retryGateClosedFor = - Try(context.system.settings.config.getDuration("akka.remote.retry-gate-closed-for", TimeUnit.MILLISECONDS)). 
- map(_.longValue).getOrElse(5000L) + Try(context.system.settings.config.getDuration("akka.remote.retry-gate-closed-for", TimeUnit.MILLISECONDS)) + .map(_.longValue) + .getOrElse(5000L) val defaultConnectInterval = - Try(context.system.settings.config.getDuration("akka.reliable-proxy.default-connect-interval", TimeUnit.MILLISECONDS)). - map(_.longValue).getOrElse(retryGateClosedFor).millis + Try( + context.system.settings.config.getDuration("akka.reliable-proxy.default-connect-interval", TimeUnit.MILLISECONDS)) + .map(_.longValue) + .getOrElse(retryGateClosedFor) + .millis val initialState = Connecting @@ -254,8 +267,9 @@ class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration, def createTunnel(target: ActorRef): Unit = { logDebug("Creating new tunnel for {}", target) - tunnel = context.actorOf(receiver(target, lastAckSerial). - withDeploy(Deploy(scope = RemoteScope(target.path.address))), "tunnel") + tunnel = context.actorOf( + receiver(target, lastAckSerial).withDeploy(Deploy(scope = RemoteScope(target.path.address))), + "tunnel") context.watch(tunnel) currentTarget = target @@ -282,8 +296,8 @@ class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration, when(Idle) { case Event(Terminated(_), _) => terminated() case Event(Ack(_), _) => stay() - case Event(Unsent(msgs), _) => goto(Active) using resend(updateSerial(msgs)) - case Event(msg, _) => goto(Active) using Vector(send(msg, sender())) + case Event(Unsent(msgs), _) => goto(Active).using(resend(updateSerial(msgs))) + case Event(msg, _) => goto(Active).using(Vector(send(msg, sender()))) } onTransition { @@ -296,20 +310,20 @@ class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration, case Event(Terminated(_), _) => terminated() case Event(Ack(serial), queue) => - val q = queue dropWhile (m => compare(m.serial, serial) <= 0) + val q = queue.dropWhile(m => compare(m.serial, serial) <= 0) if (compare(serial, lastAckSerial) > 0) lastAckSerial = serial scheduleTick() - if 
(q.isEmpty) goto(Idle) using Vector.empty - else stay using q + if (q.isEmpty) goto(Idle).using(Vector.empty) + else stay.using(q) case Event(Tick, queue) => logResend(queue.size) - queue foreach { tunnel ! _ } + queue.foreach { tunnel ! _ } scheduleTick() stay() case Event(Unsent(msgs), queue) => - stay using queue ++ resend(updateSerial(msgs)) + stay.using(queue ++ resend(updateSerial(msgs))) case Event(msg, queue) => - stay using (queue :+ send(msg, sender())) + stay.using(queue :+ send(msg, sender())) } when(Connecting) { @@ -320,11 +334,11 @@ class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration, cancelTimer(reconnectTimer) createTunnel(actor) if (currentTarget != curr) gossip(TargetChanged(currentTarget)) - if (queue.isEmpty) goto(Idle) else goto(Active) using resend(queue) + if (queue.isEmpty) goto(Idle) else goto(Active).using(resend(queue)) case Event(ActorIdentity(_, None), _) => stay() case Event(ReconnectTick, _) => - if (maxConnectAttempts exists (_ == attemptedReconnects)) { + if (maxConnectAttempts.exists(_ == attemptedReconnects)) { logDebug("Failed to reconnect after {}", attemptedReconnects) stop() } else { @@ -335,9 +349,9 @@ class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration, stay() } case Event(Unsent(msgs), queue) => - stay using queue ++ updateSerial(msgs) + stay.using(queue ++ updateSerial(msgs)) case Event(msg, queue) => - stay using (queue :+ Message(msg, sender(), nextSerial())) + stay.using(queue :+ Message(msg, sender(), nextSerial())) } def scheduleTick(): Unit = setTimer(resendTimer, Tick, retryAfter, repeat = false) @@ -353,11 +367,11 @@ class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration, m } - def updateSerial(q: Vector[Message]) = q map (_.copy(serial = nextSerial())) + def updateSerial(q: Vector[Message]) = q.map(_.copy(serial = nextSerial())) def resend(q: Vector[Message]): Vector[Message] = { logResend(q.size) - q foreach { tunnel ! _ } + q.foreach { tunnel ! 
_ } q } @@ -386,5 +400,5 @@ class ReliableProxy(targetPath: ActorPath, retryAfter: FiniteDuration, /** * Returns the next retry interval duration. By default each interval is the same, reconnectAfter. */ - def nextBackoff(): FiniteDuration = reconnectAfter getOrElse defaultConnectInterval + def nextBackoff(): FiniteDuration = reconnectAfter.getOrElse(defaultConnectInterval) } diff --git a/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala b/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala index 3ef3638cee..31591a883c 100644 --- a/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala +++ b/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala @@ -6,7 +6,7 @@ package akka.contrib.throttle import scala.concurrent.duration.{ Duration, FiniteDuration } import scala.collection.immutable.{ Queue => Q } -import akka.actor.{ ActorRef, Actor, FSM } +import akka.actor.{ Actor, ActorRef, FSM } import Throttler._ import TimerBasedThrottler._ import java.util.concurrent.TimeUnit @@ -19,6 +19,7 @@ import java.util.concurrent.TimeUnit */ @deprecated("Use streams, see migration guide", "2.5.0") object Throttler { + /** * A rate used for throttling. * @@ -40,6 +41,7 @@ object Throttler { * @see [[akka.contrib.throttle.Throttler]] */ final case class Rate(val numberOfCalls: Int, val duration: FiniteDuration) { + /** * The duration in milliseconds. */ @@ -60,6 +62,7 @@ object Throttler { * as well as any messages received in the future will be delivered to the new target at a rate not exceeding the current throttler's rate. 
*/ final case class SetTarget(target: Option[ActorRef]) { + /** * Java API: * @param target if `target` is `null`, the throttler will stop delivering messages and the messages already received @@ -110,10 +113,7 @@ private[throttle] object TimerBasedThrottler { final case class Message(message: Any, sender: ActorRef) // The data of the FSM - final case class Data( - target: Option[ActorRef], - callsLeftInThisPeriod: Int, - queue: Q[Message]) + final case class Data(target: Option[ActorRef], callsLeftInThisPeriod: Int, queue: Q[Message]) } /** @@ -228,19 +228,19 @@ class TimerBasedThrottler(var rate: Rate) extends Actor with FSM[State, Data] { // Set the rate case Event(SetRate(newRate), d) => this.rate = normalizedRate(newRate) - stay using d.copy(callsLeftInThisPeriod = rate.numberOfCalls) + stay.using(d.copy(callsLeftInThisPeriod = rate.numberOfCalls)) // Set the target case Event(SetTarget(t @ Some(_)), d) if !d.queue.isEmpty => - goto(Active) using deliverMessages(d.copy(target = t)) + goto(Active).using(deliverMessages(d.copy(target = t))) case Event(SetTarget(t), d) => - stay using d.copy(target = t) + stay.using(d.copy(target = t)) // Queuing case Event(msg, d @ Data(None, _, queue)) => - stay using d.copy(queue = queue.enqueue(Message(msg, context.sender()))) + stay.using(d.copy(queue = queue.enqueue(Message(msg, context.sender())))) case Event(msg, d @ Data(Some(_), _, Seq())) => - goto(Active) using deliverMessages(d.copy(queue = Q(Message(msg, context.sender())))) + goto(Active).using(deliverMessages(d.copy(queue = Q(Message(msg, context.sender()))))) // Note: The case Event(msg, t @ Data(Some(_), _, _, Seq(_*))) should never happen here. 
} @@ -251,36 +251,36 @@ class TimerBasedThrottler(var rate: Rate) extends Actor with FSM[State, Data] { // Note: this should be improved (see "Known issues" in class comments) stopTimer() startTimer(rate) - stay using d.copy(callsLeftInThisPeriod = rate.numberOfCalls) + stay.using(d.copy(callsLeftInThisPeriod = rate.numberOfCalls)) // Set the target (when the new target is None) case Event(SetTarget(None), d) => // Note: We do not yet switch to state `Inactive` because we need the timer to tick once more before - stay using d.copy(target = None) + stay.using(d.copy(target = None)) // Set the target (when the new target is not None) case Event(SetTarget(t @ Some(_)), d) => - stay using d.copy(target = t) + stay.using(d.copy(target = t)) // Tick after a `SetTarget(None)`: take the additional permits and go to `Idle` case Event(Tick, d @ Data(None, _, _)) => - goto(Idle) using d.copy(callsLeftInThisPeriod = rate.numberOfCalls) + goto(Idle).using(d.copy(callsLeftInThisPeriod = rate.numberOfCalls)) // Period ends and we have no more messages: take the additional permits and go to `Idle` case Event(Tick, d @ Data(_, _, Seq())) => - goto(Idle) using d.copy(callsLeftInThisPeriod = rate.numberOfCalls) + goto(Idle).using(d.copy(callsLeftInThisPeriod = rate.numberOfCalls)) // Period ends and we get more occasions to send messages case Event(Tick, d @ Data(_, _, _)) => - stay using deliverMessages(d.copy(callsLeftInThisPeriod = rate.numberOfCalls)) + stay.using(deliverMessages(d.copy(callsLeftInThisPeriod = rate.numberOfCalls))) // Queue a message (when we cannot send messages in the current period anymore) case Event(msg, d @ Data(_, 0, queue)) => - stay using d.copy(queue = queue.enqueue(Message(msg, context.sender()))) + stay.using(d.copy(queue = queue.enqueue(Message(msg, context.sender())))) // Queue a message (when we can send some more messages in the current period) case Event(msg, d @ Data(_, _, queue)) => - stay using deliverMessages(d.copy(queue = 
queue.enqueue(Message(msg, context.sender())))) + stay.using(deliverMessages(d.copy(queue = queue.enqueue(Message(msg, context.sender()))))) } onTransition { diff --git a/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala b/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala index 8ac9c659e8..0013ff53d3 100644 --- a/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala +++ b/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala @@ -30,7 +30,11 @@ object ReliableProxySpec extends MultiNodeConfig { class ReliableProxyMultiJvmNode1 extends ReliableProxySpec class ReliableProxyMultiJvmNode2 extends ReliableProxySpec -class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNodeSpec with BeforeAndAfterEach with ImplicitSender { +class ReliableProxySpec + extends MultiNodeSpec(ReliableProxySpec) + with STMultiNodeSpec + with BeforeAndAfterEach + with ImplicitSender { import ReliableProxySpec._ import ReliableProxy._ @@ -70,8 +74,10 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod def expectTransition(s1: State, s2: State) = expectMsg(FSM.Transition(proxy, s1, s2)) def expectTransition(max: FiniteDuration, s1: State, s2: State) = expectMsg(max, FSM.Transition(proxy, s1, s2)) - def sendN(n: Int) = (1 to n) foreach (proxy ! _) - def expectN(n: Int) = (1 to n) foreach { n => expectMsg(n); lastSender should ===(target) } + def sendN(n: Int) = (1 to n).foreach(proxy ! 
_) + def expectN(n: Int) = (1 to n).foreach { n => + expectMsg(n); lastSender should ===(target) + } // avoid too long timeout for expectNoMsg when using dilated timeouts, because // blackhole will trigger failure detection @@ -244,7 +250,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod "reconnect to target" in { runOn(remote) { // Stop the target - system stop target + system.stop(target) } runOn(local) { @@ -304,7 +310,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod runOn(remote) { // Stop the target, this will cause the proxy to stop - system stop target + system.stop(target) } runOn(local) { @@ -347,7 +353,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod runOn(remote) { // Stop target - system stop target + system.stop(target) } runOn(local) { @@ -365,7 +371,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod within(5 * 2.seconds) { val proxyTerm = expectMsgType[ProxyTerminated] // Validate that the unsent messages are 50 ints - val unsentInts = proxyTerm.outstanding.queue collect { case Message(i: Int, _, _) if i > 0 && i <= 50 => i } + val unsentInts = proxyTerm.outstanding.queue.collect { case Message(i: Int, _, _) if i > 0 && i <= 50 => i } unsentInts should have size 50 expectTerminated(proxy) } diff --git a/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala b/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala index 94d0c0f350..45c2648ff8 100644 --- a/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala +++ b/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/CircuitBreakerProxySpec.scala @@ -16,13 +16,9 @@ import scala.language.postfixOps class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen { val baseCircuitBreakerPropsBuilder = - CircuitBreakerPropsBuilder( - maxFailures = 2, - callTimeout 
= 200 millis, - resetTimeout = 1 second, - failureDetector = { - _ == "FAILURE" - }) + CircuitBreakerPropsBuilder(maxFailures = 2, callTimeout = 200 millis, resetTimeout = 1 second, failureDetector = { + _ == "FAILURE" + }) trait CircuitBreakerScenario { val sender = TestProbe() @@ -161,7 +157,7 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen { val circuitBreaker = defaultCircuitBreaker When("A number of consecutive request equal to the maxFailures configuration of the circuit breaker is failing") - (1 to baseCircuitBreakerPropsBuilder.maxFailures) foreach { index => + (1 to baseCircuitBreakerPropsBuilder.maxFailures).foreach { index => receiverRespondsWithFailureToRequest(s"request$index") } @@ -177,7 +173,7 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen { val circuitBreaker = defaultCircuitBreaker When("A number of consecutive request equal to the maxFailures configuration of the circuit breaker is failing") - (1 to baseCircuitBreakerPropsBuilder.maxFailures) foreach { index => + (1 to baseCircuitBreakerPropsBuilder.maxFailures).foreach { index => receiverRespondsWithFailureToRequest(s"request$index") } @@ -189,14 +185,17 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen { } "respond with the converted CircuitOpenFailure if a converter is provided" in new CircuitBreakerScenario { - Given("A circuit breaker proxy pointing to a target actor built with a function to convert CircuitOpenFailure response into a String response") + Given( + "A circuit breaker proxy pointing to a target actor built with a function to convert CircuitOpenFailure response into a String response") val circuitBreaker = system.actorOf( baseCircuitBreakerPropsBuilder - .copy(openCircuitFailureConverter = { failureMsg => s"NOT SENT: ${failureMsg.failedMsg}" }) + .copy(openCircuitFailureConverter = { failureMsg => + s"NOT SENT: ${failureMsg.failedMsg}" + }) .props(receiver.ref)) When("A number of consecutive request equal to the 
maxFailures configuration of the circuit breaker is failing") - (1 to baseCircuitBreakerPropsBuilder.maxFailures) foreach { index => + (1 to baseCircuitBreakerPropsBuilder.maxFailures).foreach { index => receiverRespondsWithFailureToRequest(s"request$index") } @@ -211,7 +210,8 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen { Given("A circuit breaker actor proxying a test probe") val circuitBreaker = defaultCircuitBreaker - When("A number of request equal to the timed-out responses threashold is done without receiving response within the configured timeout") + When( + "A number of request equal to the timed-out responses threashold is done without receiving response within the configured timeout") sender.send(circuitBreaker, "request1") sender.send(circuitBreaker, "request2") @@ -269,7 +269,8 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen { resetTimeoutExpires() And("Receiving a successful response") - receiverRespondsToRequestWith("First message in half-open state, should be forwarded", "This should close the circuit") + receiverRespondsToRequestWith("First message in half-open state, should be forwarded", + "This should close the circuit") circuitBreakerReceivesSelfNotificationMessage() @@ -327,7 +328,8 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen { eventListener.expectMsg(CircuitHalfOpen(circuitBreaker)) When("Entering CLOSED state") - receiverRespondsToRequestWith("First message in half-open state, should be forwarded", "This should close the circuit") + receiverRespondsToRequestWith("First message in half-open state, should be forwarded", + "This should close the circuit") Then("An event is sent") eventListener.expectMsg(CircuitClosed(circuitBreaker)) @@ -336,18 +338,16 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen { "stop if the target actor terminates itself" in new CircuitBreakerScenario { Given("An actor that will terminate when receiving a message") import 
akka.actor.ActorDSL._ - val suicidalActor = actor( - new Act { - become { - case anyMessage => - sender() ! "dying now" - context stop self - } - }) + val suicidalActor = actor(new Act { + become { + case anyMessage => + sender() ! "dying now" + context.stop(self) + } + }) And("A circuit breaker actor proxying another actor") - val circuitBreaker = system.actorOf( - baseCircuitBreakerPropsBuilder.props(target = suicidalActor)) + val circuitBreaker = system.actorOf(baseCircuitBreakerPropsBuilder.props(target = suicidalActor)) val suicidalActorWatch = TestProbe() suicidalActorWatch.watch(suicidalActor) @@ -386,7 +386,8 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen { Given("A circuit breaker actor proxying a test probe") val circuitBreaker = defaultCircuitBreaker - When("A number of request equal to the timed-out responses wrapped in a TellOnly threashold is done without receiving response within the configured timeout") + When( + "A number of request equal to the timed-out responses wrapped in a TellOnly threashold is done without receiving response within the configured timeout") sender.send(circuitBreaker, TellOnly("Fire and forget 1")) sender.send(circuitBreaker, TellOnly("Fire and forget 2")) receiver.expectMsg("Fire and forget 1") @@ -405,7 +406,7 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen { val circuitBreaker = defaultCircuitBreaker When("Circuit enters OPEN state") - (1 to baseCircuitBreakerPropsBuilder.maxFailures) foreach { index => + (1 to baseCircuitBreakerPropsBuilder.maxFailures).foreach { index => receiverRespondsWithFailureToRequest(s"request$index") } @@ -421,7 +422,7 @@ class CircuitBreakerProxySpec extends AkkaSpec() with GivenWhenThen { val circuitBreaker = defaultCircuitBreaker When("Circuit enters OPEN state") - (1 to baseCircuitBreakerPropsBuilder.maxFailures) foreach { index => + (1 to baseCircuitBreakerPropsBuilder.maxFailures).foreach { index => 
receiverRespondsWithFailureToRequest(s"request$index") } diff --git a/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/sample/CircuitBreaker.scala b/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/sample/CircuitBreaker.scala index 3164727292..cb6a35f2c4 100644 --- a/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/sample/CircuitBreaker.scala +++ b/akka-contrib/src/test/scala/akka/contrib/circuitbreaker/sample/CircuitBreaker.scala @@ -10,7 +10,7 @@ import akka.contrib.circuitbreaker.sample.CircuitBreaker.AskFor import akka.util.Timeout import scala.concurrent.duration._ -import scala.util.{ Failure, Success, Random } +import scala.util.{ Failure, Random, Success } //#simple-service object SimpleService { @@ -63,13 +63,12 @@ class CircuitBreaker(potentiallyFailingService: ActorRef) extends Actor with Act val serviceCircuitBreaker = context.actorOf( CircuitBreakerPropsBuilder(maxFailures = 3, callTimeout = 2.seconds, resetTimeout = 30.seconds) - .copy( - failureDetector = { - _ match { - case Response(Left(_)) => true - case _ => false - } - }) + .copy(failureDetector = { + _ match { + case Response(Left(_)) => true + case _ => false + } + }) .props(potentiallyFailingService), "serviceCircuitBreaker") @@ -105,17 +104,15 @@ class CircuitBreakerAsk(potentiallyFailingService: ActorRef) extends Actor with val serviceCircuitBreaker = context.actorOf( CircuitBreakerPropsBuilder(maxFailures = 3, callTimeout = askTimeout, resetTimeout = 30.seconds) - .copy( - failureDetector = { - _ match { - case Response(Left(_)) => true - case _ => false - } - }) - .copy( - openCircuitFailureConverter = { failure => - Left(s"Circuit open when processing ${failure.failedMsg}") - }) + .copy(failureDetector = { + _ match { + case Response(Left(_)) => true + case _ => false + } + }) + .copy(openCircuitFailureConverter = { failure => + Left(s"Circuit open when processing ${failure.failedMsg}") + }) .props(potentiallyFailingService), "serviceCircuitBreaker") diff --git 
a/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala b/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala index 1594ce6935..4760dd0b1e 100644 --- a/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala +++ b/akka-contrib/src/test/scala/akka/contrib/mailbox/PeekMailboxSpec.scala @@ -6,7 +6,7 @@ package akka.contrib.mailbox import com.typesafe.config.ConfigFactory -import akka.actor.{ Actor, ActorSystem, DeadLetter, PoisonPill, Props, actorRef2Scala } +import akka.actor.{ actorRef2Scala, Actor, ActorSystem, DeadLetter, PoisonPill, Props } import akka.testkit.{ AkkaSpec, EventFilter, ImplicitSender } object PeekMailboxSpec { @@ -28,7 +28,7 @@ object PeekMailboxSpec { PeekMailboxExtension.ack() } override def preRestart(cause: Throwable, msg: Option[Any]): Unit = { - for (m <- msg if m == "DIE") context stop self // for testing the case of mailbox.cleanUp + for (m <- msg if m == "DIE") context.stop(self) // for testing the case of mailbox.cleanUp } } } @@ -48,7 +48,7 @@ class PeekMailboxSpec extends AkkaSpec(""" val a = system.actorOf(Props(classOf[PeekActor], 1).withDispatcher("peek-dispatcher")) a ! "hello" expectMsg("hello") - EventFilter[RuntimeException]("DONTWANNA", occurrences = 1) intercept { + EventFilter[RuntimeException]("DONTWANNA", occurrences = 1).intercept { a ! "world" } expectMsg("world") @@ -59,7 +59,7 @@ class PeekMailboxSpec extends AkkaSpec(""" "put a bound on retries" in { val a = system.actorOf(Props(classOf[PeekActor], 0).withDispatcher("peek-dispatcher")) - EventFilter[RuntimeException]("DONTWANNA", occurrences = 3) intercept { + EventFilter[RuntimeException]("DONTWANNA", occurrences = 3).intercept { a ! "hello" } a ! 
Check @@ -80,7 +80,7 @@ class PeekMailboxSpec extends AkkaSpec(""" system.eventStream.subscribe(testActor, classOf[DeadLetter]) val a = system.actorOf(Props(classOf[PeekActor], 0).withDispatcher("peek-dispatcher")) watch(a) - EventFilter[RuntimeException]("DONTWANNA", occurrences = 1) intercept { + EventFilter[RuntimeException]("DONTWANNA", occurrences = 1).intercept { a ! "DIE" // stays in the mailbox } expectMsg("DIE") @@ -115,16 +115,15 @@ class MyActor extends Actor { } object MyApp extends App { - val system = ActorSystem("MySystem", ConfigFactory.parseString(""" + val system = ActorSystem("MySystem", + ConfigFactory.parseString(""" peek-dispatcher { mailbox-type = "akka.contrib.mailbox.PeekMailboxType" max-retries = 2 } """)) - val myActor = system.actorOf( - Props[MyActor].withDispatcher("peek-dispatcher"), - name = "myActor") + val myActor = system.actorOf(Props[MyActor].withDispatcher("peek-dispatcher"), name = "myActor") myActor ! "Hello" myActor ! "World" diff --git a/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala b/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala index fc0d7b9abc..7b0b48ed08 100644 --- a/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala +++ b/akka-contrib/src/test/scala/akka/contrib/pattern/AggregatorSpec.scala @@ -13,12 +13,12 @@ import scala.concurrent.duration._ import scala.math.BigDecimal.int2bigDecimal import akka.actor._ import org.scalatest.BeforeAndAfterAll + /** * Sample and test code for the aggregator patter. 
* This is based on Jamie Allen's tutorial at * http://jaxenter.com/tutorial-asynchronous-programming-with-akka-actors-46220.html */ - sealed trait AccountType case object Checking extends AccountType case object Savings extends AccountType @@ -27,9 +27,7 @@ case object MoneyMarket extends AccountType final case class GetCustomerAccountBalances(id: Long, accountTypes: Set[AccountType]) final case class GetAccountBalances(id: Long) -final case class AccountBalances( - accountType: AccountType, - balance: Option[List[(Long, BigDecimal)]]) +final case class AccountBalances(accountType: AccountType, balance: Option[List[(Long, BigDecimal)]]) final case class CheckingAccountBalances(balances: Option[List[(Long, BigDecimal)]]) final case class SavingsAccountBalances(balances: Option[List[(Long, BigDecimal)]]) @@ -71,20 +69,17 @@ class AccountBalanceRetriever extends Actor with Aggregator { } //#initial-expect - class AccountAggregator( - originalSender: ActorRef, - id: Long, types: Set[AccountType]) { + class AccountAggregator(originalSender: ActorRef, id: Long, types: Set[AccountType]) { val results = mutable.ArrayBuffer.empty[(AccountType, Option[List[(Long, BigDecimal)]])] if (types.size > 0) - types foreach { + types.foreach { case Checking => fetchCheckingAccountsBalance() case Savings => fetchSavingsAccountsBalance() case MoneyMarket => fetchMoneyMarketAccountsBalance() - } - else collectBalances() // Empty type list yields empty response + } else collectBalances() // Empty type list yields empty response context.system.scheduler.scheduleOnce(1.second, self, TimedOut) //#expect-timeout @@ -181,14 +176,19 @@ class ChainingSample extends Actor with Aggregator { def processFinal(eval: List[Int]): Unit = { // Select only the entries coming back from eval - originalSender ! FinalResponse(eval map values) + originalSender ! 
FinalResponse(eval.map(values)) context.stop(self) } } } //#chain-sample -class AggregatorSpec extends TestKit(ActorSystem("AggregatorSpec")) with ImplicitSender with FunSuiteLike with Matchers with BeforeAndAfterAll { +class AggregatorSpec + extends TestKit(ActorSystem("AggregatorSpec")) + with ImplicitSender + with FunSuiteLike + with Matchers + with BeforeAndAfterAll { override def afterAll(): Unit = { shutdown() @@ -206,7 +206,7 @@ class AggregatorSpec extends TestKit(ActorSystem("AggregatorSpec")) with Implici test("Test request 3 account types") { system.actorOf(Props[AccountBalanceRetriever]) ! - GetCustomerAccountBalances(1, Set(Checking, Savings, MoneyMarket)) + GetCustomerAccountBalances(1, Set(Checking, Savings, MoneyMarket)) receiveOne(10.seconds) match { case result: List[_] => result should have size 3 @@ -226,7 +226,7 @@ class WorkListSpec extends FunSuiteLike { test("Processing empty WorkList") { // ProcessAndRemove something in the middle - val processed = workList process { + val processed = workList.process { case TestEntry(9) => true case _ => false } @@ -264,19 +264,19 @@ class WorkListSpec extends FunSuiteLike { test("Process temp entries") { // ProcessAndRemove something in the middle - assert(workList process { + assert(workList.process { case TestEntry(2) => true case _ => false }) // ProcessAndRemove the head - assert(workList process { + assert(workList.process { case TestEntry(0) => true case _ => false }) // ProcessAndRemove the tail - assert(workList process { + assert(workList.process { case TestEntry(3) => true case _ => false }) @@ -290,26 +290,26 @@ class WorkListSpec extends FunSuiteLike { } test("Process permanent entry") { - assert(workList process { + assert(workList.process { case TestEntry(4) => true case _ => false }) } test("Remove permanent entry") { - val removed = workList remove entry4 + val removed = workList.remove(entry4) assert(removed) } test("Remove temp entry already processed") { - val removed = workList remove 
entry2 + val removed = workList.remove(entry2) assert(!removed) } test("Process non-matching entries") { val processed = - workList process { + workList.process { case TestEntry(2) => true case _ => false } @@ -317,7 +317,7 @@ class WorkListSpec extends FunSuiteLike { assert(!processed) val processed2 = - workList process { + workList.process { case TestEntry(5) => true case _ => false } @@ -328,12 +328,16 @@ class WorkListSpec extends FunSuiteLike { test("Append two lists") { workList.removeAll() - 0 to 4 foreach { id => workList.add(TestEntry(id), permanent = false) } + (0 to 4).foreach { id => + workList.add(TestEntry(id), permanent = false) + } val l2 = new WorkList[TestEntry] - 5 to 9 foreach { id => l2.add(TestEntry(id), permanent = true) } + (5 to 9).foreach { id => + l2.add(TestEntry(id), permanent = true) + } - workList addAll l2 + workList.addAll(l2) @tailrec def checkEntries(id: Int, entry: WorkList.Entry[TestEntry]): Int = { @@ -357,9 +361,9 @@ class WorkListSpec extends FunSuiteLike { val fn1: PartialFunction[Any, Unit] = { case s: String => - val result1 = workList2 remove fn1 + val result1 = workList2.remove(fn1) assert(result1 === true, "First remove must return true") - val result2 = workList2 remove fn1 + val result2 = workList2.remove(fn1) assert(result2 === false, "Second remove must return false") } @@ -374,7 +378,7 @@ class WorkListSpec extends FunSuiteLike { assert(workList2.tail == workList2.head.next) // Processing inserted fn1, reentrant adding fn2 - workList2 process { fn => + workList2.process { fn => var processed = true fn.applyOrElse("Foo", (_: Any) => processed = false) processed @@ -383,7 +387,7 @@ class WorkListSpec extends FunSuiteLike { test("Reentrant delete") { // Processing inserted fn2, should delete itself - workList2 process { fn => + workList2.process { fn => var processed = true fn.applyOrElse("Foo", (_: Any) => processed = false) processed diff --git 
a/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala b/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala index f0d7021f24..219273c6eb 100644 --- a/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala +++ b/akka-contrib/src/test/scala/akka/contrib/pattern/ReceivePipelineSpec.scala @@ -122,34 +122,34 @@ class ReceivePipelineSpec extends AkkaSpec with ImplicitSender { } "support any number of interceptors" in { - val replier = system.actorOf(Props( - new ReplierActor with ListBuilderInterceptor with AdderInterceptor with ToStringInterceptor)) + val replier = system.actorOf( + Props(new ReplierActor with ListBuilderInterceptor with AdderInterceptor with ToStringInterceptor)) replier ! 8 expectMsg("List(18, 19, 20)") } "delegate messages unhandled by interceptors to the inner behavior" in { - val replier = system.actorOf(Props( - new ReplierActor with ListBuilderInterceptor with AdderInterceptor with ToStringInterceptor)) + val replier = system.actorOf( + Props(new ReplierActor with ListBuilderInterceptor with AdderInterceptor with ToStringInterceptor)) replier ! 8L // unhandled by all interceptors but still replied expectMsg(8L) - replier ! Set(8F) // unhandled by all but ToString Interceptor, so replied as String + replier ! Set(8f) // unhandled by all but ToString Interceptor, so replied as String expectMsg("Set(8.0)") } "let any interceptor to explicitly ignore some messages" in { - val replier = system.actorOf(Props( - new ReplierActor with ListBuilderInterceptor with AdderInterceptor with ToStringInterceptor)) + val replier = system.actorOf( + Props(new ReplierActor with ListBuilderInterceptor with AdderInterceptor with ToStringInterceptor)) replier ! "explicitly ignored" replier ! 
8L // unhandled by all interceptors but still replied expectMsg(8L) } "support changing behavior without losing the interceptions" in { - val replier = system.actorOf(Props( - new ReplierActor with ListBuilderInterceptor with AdderInterceptor with ToStringInterceptor)) + val replier = system.actorOf( + Props(new ReplierActor with ListBuilderInterceptor with AdderInterceptor with ToStringInterceptor)) replier ! 8 expectMsg("List(18, 19, 20)") replier ! "become" @@ -158,10 +158,8 @@ class ReceivePipelineSpec extends AkkaSpec with ImplicitSender { } "support swapping inner and outer interceptors mixin order" in { - val outerInnerReplier = system.actorOf(Props( - new ReplierActor with ListBuilderInterceptor with AdderInterceptor)) - val innerOuterReplier = system.actorOf(Props( - new ReplierActor with AdderInterceptor with ListBuilderInterceptor)) + val outerInnerReplier = system.actorOf(Props(new ReplierActor with ListBuilderInterceptor with AdderInterceptor)) + val innerOuterReplier = system.actorOf(Props(new ReplierActor with AdderInterceptor with ListBuilderInterceptor)) outerInnerReplier ! 4 expectMsg(IntList(List(14, 15, 16))) innerOuterReplier ! 
6 @@ -195,8 +193,7 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit import PersistentReceivePipelineSpec._ def this() { - this(ConfigFactory.parseString( - s""" + this(ConfigFactory.parseString(s""" |akka.persistence.journal.plugin = "akka.persistence.journal.inmem" |akka.persistence.journal.leveldb.dir = "target/journal-${getClass.getSimpleName}" """.stripMargin)) @@ -204,14 +201,13 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit "A PersistentActor with ReceivePipeline" must { "support any number of interceptors" in { - val replier = system.actorOf(Props( - new PersistentReplierActor with ListBuilderInterceptor with AdderInterceptor with ToStringInterceptor)) + val replier = system.actorOf( + Props(new PersistentReplierActor with ListBuilderInterceptor with AdderInterceptor with ToStringInterceptor)) replier ! 8 expectMsg("List(18, 19, 20)") } "allow messages explicitly passed on by interceptors to be handled by the actor" in { - val replier = system.actorOf(Props( - new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor)) + val replier = system.actorOf(Props(new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor)) // 6 -> 3 -> 6 replier ! 6 @@ -219,8 +215,7 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit } "allow messages not handled by some interceptors to be handled by the actor" in { - val replier = system.actorOf(Props( - new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor)) + val replier = system.actorOf(Props(new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor)) // 8 -> 4 ( -> not handled by OddDoublerInterceptor) replier ! 
8 @@ -231,10 +226,9 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit val probe = new TestProbe(system) val probeRef = probe.ref - val replier = system.actorOf(Props( - new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor { - override def unhandled(message: Any) = probeRef ! message - })) + val replier = system.actorOf(Props(new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor { + override def unhandled(message: Any) = probeRef ! message + })) // 22 -> 11 -> 22 but > 10 so not handled in main receive: falls back to unhandled implementation... replier ! 22 @@ -245,10 +239,9 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit val probe = new TestProbe(system) val probeRef = probe.ref - val replier = system.actorOf(Props( - new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor { - override def unhandled(message: Any) = probeRef ! message - })) + val replier = system.actorOf(Props(new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor { + override def unhandled(message: Any) = probeRef ! message + })) // 11 ( -> not handled by EvenHalverInterceptor) -> 22 but > 10 so not handled in main receive: // original message falls back to unhandled implementation... @@ -260,10 +253,9 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit val probe = new TestProbe(system) val probeRef = probe.ref - val replier = system.actorOf(Props( - new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor { - override def unhandled(message: Any) = probeRef ! message - })) + val replier = system.actorOf(Props(new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor { + override def unhandled(message: Any) = probeRef ! message + })) replier ! "hi there!" 
probe.expectMsg("hi there!") @@ -273,10 +265,9 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit val probe = new TestProbe(system) val probeRef = probe.ref - val replier = system.actorOf(Props( - new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor { - override def unhandled(message: Any) = probeRef ! message - })) + val replier = system.actorOf(Props(new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor { + override def unhandled(message: Any) = probeRef ! message + })) replier ! 4 expectMsg(2) @@ -287,10 +278,9 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit val probe = new TestProbe(system) val probeRef = probe.ref - val replier = system.actorOf(Props( - new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor { - override def unhandled(message: Any) = probeRef ! message - })) + val replier = system.actorOf(Props(new IntReplierActor(10) with EvenHalverInterceptor with OddDoublerInterceptor { + override def unhandled(message: Any) = probeRef ! message + })) replier ! "hi there!" replier ! 8 @@ -299,8 +289,7 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit } "call side-effecting receive code only once" in { - val totaller = system.actorOf(Props( - new TotallerActor with EvenHalverInterceptor with OddDoublerInterceptor)) + val totaller = system.actorOf(Props(new TotallerActor with EvenHalverInterceptor with OddDoublerInterceptor)) totaller ! 8 totaller ! 6 @@ -309,8 +298,7 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit } "not cache the result of the same message" in { - val totaller = system.actorOf(Props( - new TotallerActor with EvenHalverInterceptor with OddDoublerInterceptor)) + val totaller = system.actorOf(Props(new TotallerActor with EvenHalverInterceptor with OddDoublerInterceptor)) totaller ! 6 totaller ! 
6 @@ -322,10 +310,9 @@ class PersistentReceivePipelineSpec(config: Config) extends AkkaSpec(config) wit val probe = new TestProbe(system) val probeRef = probe.ref - val totaller = system.actorOf(Props( - new TotallerActor with Timer { - def notifyDuration(d: Long) = probeRef ! d - })) + val totaller = system.actorOf(Props(new TotallerActor with Timer { + def notifyDuration(d: Long) = probeRef ! d + })) totaller ! 6 totaller ! "get" @@ -412,11 +399,10 @@ object MixinSample extends App { val system = ActorSystem("pipeline") //#mixin-model - val texts = Map( - "that.rug_EN" -> "That rug really tied the room together.", - "your.opinion_EN" -> "Yeah, well, you know, that's just, like, your opinion, man.", - "that.rug_ES" -> "Esa alfombra realmente completaba la sala.", - "your.opinion_ES" -> "Sí, bueno, ya sabes, eso es solo, como, tu opinion, amigo.") + val texts = Map("that.rug_EN" -> "That rug really tied the room together.", + "your.opinion_EN" -> "Yeah, well, you know, that's just, like, your opinion, man.", + "that.rug_ES" -> "Esa alfombra realmente completaba la sala.", + "your.opinion_ES" -> "Sí, bueno, ya sabes, eso es solo, como, tu opinion, amigo.") case class I18nText(locale: String, key: String) case class Message(author: Option[String], text: Any) @@ -446,8 +432,7 @@ object MixinSample extends App { val printerActor = system.actorOf(Props[PrinterActor]()) //#mixin-actor - class PrinterActor extends Actor with ReceivePipeline - with I18nInterceptor with AuditInterceptor { + class PrinterActor extends Actor with ReceivePipeline with I18nInterceptor with AuditInterceptor { override def receive: Receive = { case Message(author, text) => diff --git a/akka-contrib/src/test/scala/akka/contrib/pattern/ReliableProxyDocSpec.scala b/akka-contrib/src/test/scala/akka/contrib/pattern/ReliableProxyDocSpec.scala index 3e53e76b0b..73849a5aaa 100644 --- a/akka-contrib/src/test/scala/akka/contrib/pattern/ReliableProxyDocSpec.scala +++ 
b/akka-contrib/src/test/scala/akka/contrib/pattern/ReliableProxyDocSpec.scala @@ -43,8 +43,8 @@ object ReliableProxyDocSpec { //#demo-transition class WatchingProxyParent(targetPath: ActorPath) extends Actor { - val proxy = context.watch(context.actorOf( - ReliableProxy.props(targetPath, 100.millis, reconnectAfter = 500.millis, maxReconnects = 3))) + val proxy = context.watch( + context.actorOf(ReliableProxy.props(targetPath, 100.millis, reconnectAfter = 500.millis, maxReconnects = 3))) var client: Option[ActorRef] = None @@ -53,7 +53,7 @@ object ReliableProxyDocSpec { proxy ! "world!" client = Some(sender()) case Terminated(`proxy`) => - client foreach { _ ! "terminated" } + client.foreach { _ ! "terminated" } } } } diff --git a/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottleTest.scala b/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottleTest.scala index 2dc1992755..ea351a2c92 100644 --- a/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottleTest.scala +++ b/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottleTest.scala @@ -14,10 +14,10 @@ import org.scalatest.WordSpecLike class TimerBasedThrottleTest extends TestKit(ActorSystem("TimerBasedThrottler")) with WordSpecLike { "A throttler" must { "normalize all rates to the highest precision (nanoseconds)" in { - val throttler = TestActorRef(new TimerBasedThrottler(1 msgsPer (1, SECONDS))) - val throttler2 = TestActorRef(new TimerBasedThrottler(5 msgsPer (1, SECONDS))) - val throttler3 = TestActorRef(new TimerBasedThrottler(10 msgsPer (10, MILLISECONDS))) - val throttler4 = TestActorRef(new TimerBasedThrottler(1 msgsPer (1, MINUTES))) + val throttler = TestActorRef(new TimerBasedThrottler(1.msgsPer(1, SECONDS))) + val throttler2 = TestActorRef(new TimerBasedThrottler(5.msgsPer(1, SECONDS))) + val throttler3 = TestActorRef(new TimerBasedThrottler(10.msgsPer(10, MILLISECONDS))) + val throttler4 = TestActorRef(new TimerBasedThrottler(1.msgsPer(1, 
MINUTES))) assert(throttler.underlyingActor.rate.duration.toNanos == 1e9) assert(throttler.underlyingActor.rate.numberOfCalls == 1) @@ -33,7 +33,7 @@ class TimerBasedThrottleTest extends TestKit(ActorSystem("TimerBasedThrottler")) } "handle zero number of calls gracefully" in { - val throttler = TestActorRef(new TimerBasedThrottler(0 msgsPer (1, SECONDS))) + val throttler = TestActorRef(new TimerBasedThrottler(0.msgsPer(1, SECONDS))) assert(throttler.underlyingActor.rate.duration.toSeconds == 1) assert(throttler.underlyingActor.rate.numberOfCalls == 0) diff --git a/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala b/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala index 0a24e0375a..afbe66a692 100644 --- a/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala +++ b/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala @@ -30,8 +30,12 @@ object TimerBasedThrottlerSpec { //#demo-code } -class TimerBasedThrottlerSpec extends TestKit(ActorSystem("TimerBasedThrottlerSpec")) with ImplicitSender - with WordSpecLike with Matchers with BeforeAndAfterAll { +class TimerBasedThrottlerSpec + extends TestKit(ActorSystem("TimerBasedThrottlerSpec")) + with ImplicitSender + with WordSpecLike + with Matchers + with BeforeAndAfterAll { import TimerBasedThrottlerSpec._ @@ -45,9 +49,7 @@ class TimerBasedThrottlerSpec extends TestKit(ActorSystem("TimerBasedThrottlerSp //#demo-code val printer = system.actorOf(Props[PrintActor]) // The throttler for this example, setting the rate - val throttler = system.actorOf(Props( - classOf[TimerBasedThrottler], - 3 msgsPer 1.second)) + val throttler = system.actorOf(Props(classOf[TimerBasedThrottler], 3.msgsPer(1.second))) // Set the target throttler ! 
SetTarget(Some(printer)) // These three messages will be sent to the target immediately @@ -62,20 +64,20 @@ class TimerBasedThrottlerSpec extends TestKit(ActorSystem("TimerBasedThrottlerSp "keep messages until a target is set" in { val echo = system.actorOf(TestActors.echoActorProps) - val throttler = system.actorOf(Props(classOf[TimerBasedThrottler], 3 msgsPer (1.second.dilated))) - 1 to 6 foreach { throttler ! _ } + val throttler = system.actorOf(Props(classOf[TimerBasedThrottler], 3.msgsPer(1.second.dilated))) + (1 to 6).foreach { throttler ! _ } expectNoMsg(1 second) throttler ! SetTarget(Some(echo)) within(2.5 seconds) { - 1 to 6 foreach { expectMsg(_) } + (1 to 6).foreach { expectMsg(_) } } } "send messages after a `SetTarget(None)` pause" in { val echo = system.actorOf(TestActors.echoActorProps) - val throttler = system.actorOf(Props(classOf[TimerBasedThrottler], 3 msgsPer (5.second.dilated))) + val throttler = system.actorOf(Props(classOf[TimerBasedThrottler], 3.msgsPer(5.second.dilated))) throttler ! SetTarget(Some(echo)) - 1 to 3 foreach { throttler ! _ } + (1 to 3).foreach { throttler ! _ } throttler ! SetTarget(None) within(1.7 second) { expectMsg(1) @@ -83,17 +85,17 @@ class TimerBasedThrottlerSpec extends TestKit(ActorSystem("TimerBasedThrottlerSp } expectNoMsg(1 second) throttler ! SetTarget(Some(echo)) - 4 to 7 foreach { throttler ! _ } + (4 to 7).foreach { throttler ! _ } within(10.5 seconds) { - 2 to 7 foreach { expectMsg(_) } + (2 to 7).foreach { expectMsg(_) } } } "keep messages when the target is set to None" in { val echo = system.actorOf(TestActors.echoActorProps) - val throttler = system.actorOf(Props(classOf[TimerBasedThrottler], 3 msgsPer (5.second.dilated))) + val throttler = system.actorOf(Props(classOf[TimerBasedThrottler], 3.msgsPer(5.second.dilated))) throttler ! SetTarget(Some(echo)) - 1 to 7 foreach { throttler ! _ } + (1 to 7).foreach { throttler ! _ } throttler ! 
SetTarget(None) within(1.7 second) { expectMsg(1) @@ -102,24 +104,24 @@ class TimerBasedThrottlerSpec extends TestKit(ActorSystem("TimerBasedThrottlerSp expectNoMsg(1 second) throttler ! SetTarget(Some(echo)) within(10.5 seconds) { - 2 to 7 foreach { expectMsg(_) } + (2 to 7).foreach { expectMsg(_) } } } "respect the rate (3 msg/s)" in within(1.5 seconds, 2.5 seconds) { val echo = system.actorOf(TestActors.echoActorProps) - val throttler = system.actorOf(Props(classOf[TimerBasedThrottler], 3 msgsPer (1.second.dilated))) + val throttler = system.actorOf(Props(classOf[TimerBasedThrottler], 3.msgsPer(1.second.dilated))) throttler ! SetTarget(Some(echo)) - 1 to 7 foreach { throttler ! _ } - 1 to 7 foreach { expectMsg(_) } + (1 to 7).foreach { throttler ! _ } + (1 to 7).foreach { expectMsg(_) } } "respect the rate (4 msg/s)" in within(1.5 seconds, 2.5 seconds) { val echo = system.actorOf(TestActors.echoActorProps) - val throttler = system.actorOf(Props(classOf[TimerBasedThrottler], 4 msgsPer (1.second.dilated))) + val throttler = system.actorOf(Props(classOf[TimerBasedThrottler], 4.msgsPer(1.second.dilated))) throttler ! SetTarget(Some(echo)) - 1 to 9 foreach { throttler ! _ } - 1 to 9 foreach { expectMsg(_) } + (1 to 9).foreach { throttler ! _ } + (1 to 9).foreach { expectMsg(_) } } } } diff --git a/akka-discovery/src/main/scala/akka/discovery/Discovery.scala b/akka-discovery/src/main/scala/akka/discovery/Discovery.scala index 8da83d8fa1..ab1ef0e2c5 100644 --- a/akka-discovery/src/main/scala/akka/discovery/Discovery.scala +++ b/akka-discovery/src/main/scala/akka/discovery/Discovery.scala @@ -26,8 +26,8 @@ final class Discovery(implicit system: ExtendedActorSystem) extends Extension { case "" => throw new IllegalArgumentException( "No default service discovery implementation configured in " + - "`akka.discovery.method`. 
Make sure to configure this setting to your preferred implementation such as " + - "'akka-dns' in your application.conf (from the akka-discovery module).") + "`akka.discovery.method`. Make sure to configure this setting to your preferred implementation such as " + + "'akka-dns' in your application.conf (from the akka-discovery module).") case method => method } @@ -61,7 +61,9 @@ final class Discovery(implicit system: ExtendedActorSystem) extends Extension { def classNameFromConfig(path: String): String = if (config.hasPath(path)) config.getString(path) - else throw new IllegalArgumentException(s"$path must contain field `class` that is a FQN of a `akka.discovery.ServiceDiscovery` implementation") + else + throw new IllegalArgumentException( + s"$path must contain field `class` that is a FQN of a `akka.discovery.ServiceDiscovery` implementation") def create(clazzName: String): Try[ServiceDiscovery] = { dynamic @@ -83,8 +85,9 @@ final class Discovery(implicit system: ExtendedActorSystem) extends Extension { case Failure(e @ (_: ClassNotFoundException | _: NoSuchMethodException)) => throw new IllegalArgumentException( s"Illegal [$configName] value or incompatible class! " + - "The implementation class MUST extend akka.discovery.ServiceDiscovery and take an " + - "ExtendedActorSystem as constructor argument.", e) + "The implementation class MUST extend akka.discovery.ServiceDiscovery and take an " + + "ExtendedActorSystem as constructor argument.", + e) case Failure(e) => throw e case Success(instance) => instance } @@ -109,7 +112,8 @@ object Discovery extends ExtensionId[Discovery] with ExtensionIdProvider { private[akka] def checkClassPathForOldDiscovery(system: ExtendedActorSystem): Unit = { try { system.dynamicAccess.getClassFor("akka.discovery.SimpleServiceDiscovery").get - throw new RuntimeException("Old version of Akka Discovery from Akka Management found on the classpath. 
Remove `com.lightbend.akka.discovery:akka-discovery` from the classpath..") + throw new RuntimeException( + "Old version of Akka Discovery from Akka Management found on the classpath. Remove `com.lightbend.akka.discovery:akka-discovery` from the classpath..") } catch { case _: ClassNotFoundException => // all good } diff --git a/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala b/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala index 3854e5f7e9..abf7aea694 100644 --- a/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala +++ b/akka-discovery/src/main/scala/akka/discovery/ServiceDiscovery.scala @@ -29,7 +29,7 @@ object ServiceDiscovery { /** Result of a successful resolve request */ final class Resolved(val serviceName: String, val addresses: immutable.Seq[ResolvedTarget]) - extends DeadLetterSuppression { + extends DeadLetterSuppression { /** * Java API @@ -80,11 +80,7 @@ object ServiceDiscovery { * @param port optional port number * @param address optional IP address of the target. This is used during cluster bootstap when available. 
*/ - final class ResolvedTarget( - val host: String, - val port: Option[Int], - val address: Option[InetAddress] - ) { + final class ResolvedTarget(val host: String, val port: Option[Int], val address: Option[InetAddress]) { /** * Java API @@ -125,10 +121,7 @@ object ServiceDiscovery { * * @throws IllegalArgumentException if [[serviceName]] is 'null' or an empty String */ -final class Lookup( - val serviceName: String, - val portName: Option[String], - val protocol: Option[String]) { +final class Lookup(val serviceName: String, val portName: Option[String], val protocol: Option[String]) { require(serviceName != null, "'serviceName' cannot be null") require(serviceName.trim.nonEmpty, "'serviceName' cannot be empty") @@ -157,10 +150,9 @@ final class Lookup( def getProtocol: Optional[String] = protocol.asJava - private def copy( - serviceName: String = serviceName, - portName: Option[String] = portName, - protocol: Option[String] = protocol): Lookup = + private def copy(serviceName: String = serviceName, + portName: Option[String] = portName, + protocol: Option[String] = protocol): Lookup = new Lookup(serviceName, portName, protocol) override def toString: String = s"Lookup($serviceName,$portName,$protocol)" @@ -228,8 +220,10 @@ case object Lookup { case SrvQuery(portName, protocol, serviceName) if validDomainName(serviceName) => Lookup(serviceName).withPortName(portName).withProtocol(protocol) - case null => throw new NullPointerException("Unable to create Lookup from passed SRV string. Passed value is 'null'") - case _ => throw new IllegalArgumentException(s"Unable to create Lookup from passed SRV string, invalid format: $str") + case null => + throw new NullPointerException("Unable to create Lookup from passed SRV string. 
Passed value is 'null'") + case _ => + throw new IllegalArgumentException(s"Unable to create Lookup from passed SRV string, invalid format: $str") } /** diff --git a/akka-discovery/src/main/scala/akka/discovery/config/ConfigServiceDiscovery.scala b/akka-discovery/src/main/scala/akka/discovery/config/ConfigServiceDiscovery.scala index a13b7b5527..37e76e3567 100644 --- a/akka-discovery/src/main/scala/akka/discovery/config/ConfigServiceDiscovery.scala +++ b/akka-discovery/src/main/scala/akka/discovery/config/ConfigServiceDiscovery.scala @@ -52,8 +52,7 @@ private[akka] class ConfigServiceDiscovery(system: ExtendedActorSystem) extends private val log = Logging(system, getClass) private val resolvedServices = ConfigServicesParser.parse( - system.settings.config.getConfig(system.settings.config.getString("akka.discovery.config.services-path")) - ) + system.settings.config.getConfig(system.settings.config.getString("akka.discovery.config.services-path"))) log.debug("Config discovery serving: {}", resolvedServices) diff --git a/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala b/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala index e27d7cc9c6..80e7ef6e15 100644 --- a/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala +++ b/akka-discovery/src/test/scala/akka/discovery/DiscoveryConfigurationSpec.scala @@ -29,7 +29,8 @@ class DiscoveryConfigurationSpec extends WordSpec with Matchers { "select implementation from config by config name (inside akka.discovery namespace)" in { val className = classOf[FakeTestDiscovery].getCanonicalName - val sys = ActorSystem("DiscoveryConfigurationSpec", ConfigFactory.parseString(s""" + val sys = ActorSystem("DiscoveryConfigurationSpec", + ConfigFactory.parseString(s""" akka.discovery { method = akka-mock-inside @@ -47,7 +48,8 @@ class DiscoveryConfigurationSpec extends WordSpec with Matchers { val className1 = classOf[FakeTestDiscovery].getCanonicalName val 
className2 = classOf[FakeTestDiscovery2].getCanonicalName - val sys = ActorSystem("DiscoveryConfigurationSpec", ConfigFactory.parseString(s""" + val sys = ActorSystem("DiscoveryConfigurationSpec", + ConfigFactory.parseString(s""" akka.discovery { method = mock1 @@ -70,7 +72,8 @@ class DiscoveryConfigurationSpec extends WordSpec with Matchers { val className1 = classOf[FakeTestDiscovery].getCanonicalName val className2 = classOf[FakeTestDiscovery2].getCanonicalName - val sys = ActorSystem("DiscoveryConfigurationSpec", ConfigFactory.parseString(s""" + val sys = ActorSystem("DiscoveryConfigurationSpec", + ConfigFactory.parseString(s""" akka.discovery { method = mock1 @@ -84,17 +87,18 @@ class DiscoveryConfigurationSpec extends WordSpec with Matchers { """).withFallback(ConfigFactory.load())) try { - Discovery(sys).loadServiceDiscovery("mock2") should be theSameInstanceAs Discovery(sys) - .loadServiceDiscovery("mock2") + (Discovery(sys).loadServiceDiscovery("mock2") should be) + .theSameInstanceAs(Discovery(sys).loadServiceDiscovery("mock2")) - Discovery(sys).discovery should be theSameInstanceAs Discovery(sys).loadServiceDiscovery("mock1") + (Discovery(sys).discovery should be).theSameInstanceAs(Discovery(sys).loadServiceDiscovery("mock1")) } finally TestKit.shutdownActorSystem(sys) } "throw a specific discovery method exception" in { val className = classOf[ExceptionThrowingDiscovery].getCanonicalName - val sys = ActorSystem("DiscoveryConfigurationSpec", ConfigFactory.parseString(s""" + val sys = ActorSystem("DiscoveryConfigurationSpec", + ConfigFactory.parseString(s""" akka.discovery { method = "mock1" mock1 { @@ -111,7 +115,8 @@ class DiscoveryConfigurationSpec extends WordSpec with Matchers { "throw an illegal argument exception for not existing method" in { val className = "className" - val sys = ActorSystem("DiscoveryConfigurationSpec", ConfigFactory.parseString(s""" + val sys = ActorSystem("DiscoveryConfigurationSpec", + ConfigFactory.parseString(s""" 
akka.discovery { method = "$className" } diff --git a/akka-discovery/src/test/scala/akka/discovery/LookupSpec.scala b/akka-discovery/src/test/scala/akka/discovery/LookupSpec.scala index 4ea760c848..0939990843 100644 --- a/akka-discovery/src/test/scala/akka/discovery/LookupSpec.scala +++ b/akka-discovery/src/test/scala/akka/discovery/LookupSpec.scala @@ -10,21 +10,19 @@ class LookupSpec extends WordSpec with Matchers with OptionValues { // SRV strings with invalid domain names // should fail to build lookups - val srvWithInvalidDomainNames = List( - "_portName._protocol.service_name.local", - "_portName._protocol.servicename,local", - "_portName._protocol.servicename.local-", - "_portName._protocol.-servicename.local") + val srvWithInvalidDomainNames = List("_portName._protocol.service_name.local", + "_portName._protocol.servicename,local", + "_portName._protocol.servicename.local-", + "_portName._protocol.-servicename.local") // No SRV that should result in simple A/AAAA lookups - val noSrvLookups = List( - "portName.protocol.serviceName.local", - "serviceName.local", - "_portName.serviceName", - "_serviceName.local", - "_serviceName,local", - "-serviceName.local", - "serviceName.local-") + val noSrvLookups = List("portName.protocol.serviceName.local", + "serviceName.local", + "_portName.serviceName", + "_serviceName.local", + "_serviceName,local", + "-serviceName.local", + "serviceName.local-") "Lookup.parseSrv" should { diff --git a/akka-discovery/src/test/scala/akka/discovery/aggregate/AggregateServiceDiscoverySpec.scala b/akka-discovery/src/test/scala/akka/discovery/aggregate/AggregateServiceDiscoverySpec.scala index 387c6a5803..21d0f6dd5a 100644 --- a/akka-discovery/src/test/scala/akka/discovery/aggregate/AggregateServiceDiscoverySpec.scala +++ b/akka-discovery/src/test/scala/akka/discovery/aggregate/AggregateServiceDiscoverySpec.scala @@ -6,7 +6,7 @@ package akka.discovery.aggregate import akka.actor.{ ActorSystem, ExtendedActorSystem } import 
akka.discovery.ServiceDiscovery.{ Resolved, ResolvedTarget } -import akka.discovery.{ Lookup, Discovery, ServiceDiscovery } +import akka.discovery.{ Discovery, Lookup, ServiceDiscovery } import akka.testkit.TestKit import com.typesafe.config.{ Config, ConfigFactory } import org.scalatest.concurrent.ScalaFutures @@ -20,11 +20,9 @@ class StubbedServiceDiscovery(system: ExtendedActorSystem) extends ServiceDiscov override def lookup(query: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = { if (query.serviceName == "stubbed") { - Future.successful(Resolved( - query.serviceName, - immutable.Seq( - ResolvedTarget(host = "stubbed1", port = Some(1234), address = None) - ))) + Future.successful( + Resolved(query.serviceName, + immutable.Seq(ResolvedTarget(host = "stubbed1", port = Some(1234), address = None)))) } else if (query.serviceName == "fail") { Future.failed(new RuntimeException("No resolving for you!")) } else { @@ -75,11 +73,11 @@ object AggregateServiceDiscoverySpec { } class AggregateServiceDiscoverySpec - extends TestKit(ActorSystem("AggregateDiscoverySpec", AggregateServiceDiscoverySpec.config)) - with WordSpecLike - with Matchers - with BeforeAndAfterAll - with ScalaFutures { + extends TestKit(ActorSystem("AggregateDiscoverySpec", AggregateServiceDiscoverySpec.config)) + with WordSpecLike + with Matchers + with BeforeAndAfterAll + with ScalaFutures { override protected def afterAll(): Unit = { TestKit.shutdownActorSystem(system) @@ -91,31 +89,22 @@ class AggregateServiceDiscoverySpec "only call first one if returns results" in { val results = discovery.lookup("stubbed", 100.millis).futureValue - results shouldEqual Resolved( - "stubbed", - immutable.Seq( - ResolvedTarget(host = "stubbed1", port = Some(1234), address = None) - )) + results shouldEqual Resolved("stubbed", + immutable.Seq(ResolvedTarget(host = "stubbed1", port = Some(1234), address = None))) } "move onto the next if no resolved targets" in { val results = discovery.lookup("config1", 
100.millis).futureValue - results shouldEqual Resolved( - "config1", - immutable.Seq( - ResolvedTarget(host = "cat", port = Some(1233), address = None), - ResolvedTarget(host = "dog", port = Some(1234), address = None) - )) + results shouldEqual Resolved("config1", + immutable.Seq(ResolvedTarget(host = "cat", port = Some(1233), address = None), + ResolvedTarget(host = "dog", port = Some(1234), address = None))) } "move onto next if fails" in { val results = discovery.lookup("fail", 100.millis).futureValue // Stub fails then result comes from config - results shouldEqual Resolved( - "fail", - immutable.Seq( - ResolvedTarget(host = "from-config", port = None, address = None) - )) + results shouldEqual Resolved("fail", + immutable.Seq(ResolvedTarget(host = "from-config", port = None, address = None))) } } diff --git a/akka-discovery/src/test/scala/akka/discovery/config/ConfigServiceDiscoverySpec.scala b/akka-discovery/src/test/scala/akka/discovery/config/ConfigServiceDiscoverySpec.scala index 30589f02f4..647cb0a72b 100644 --- a/akka-discovery/src/test/scala/akka/discovery/config/ConfigServiceDiscoverySpec.scala +++ b/akka-discovery/src/test/scala/akka/discovery/config/ConfigServiceDiscoverySpec.scala @@ -47,11 +47,11 @@ akka { } class ConfigServiceDiscoverySpec - extends TestKit(ActorSystem("ConfigDiscoverySpec", ConfigServiceDiscoverySpec.config)) - with WordSpecLike - with Matchers - with BeforeAndAfterAll - with ScalaFutures { + extends TestKit(ActorSystem("ConfigDiscoverySpec", ConfigServiceDiscoverySpec.config)) + with WordSpecLike + with Matchers + with BeforeAndAfterAll + with ScalaFutures { override protected def afterAll(): Unit = { TestKit.shutdownActorSystem(system) @@ -63,10 +63,8 @@ class ConfigServiceDiscoverySpec "load from config" in { val result = discovery.lookup("service1", 100.millis).futureValue result.serviceName shouldEqual "service1" - result.addresses shouldEqual immutable.Seq( - ResolvedTarget(host = "cat", port = Some(1233), address = None), 
- ResolvedTarget(host = "dog", port = None, address = None) - ) + result.addresses shouldEqual immutable.Seq(ResolvedTarget(host = "cat", port = Some(1233), address = None), + ResolvedTarget(host = "dog", port = None, address = None)) } "return no resolved targets if not in config" in { diff --git a/akka-discovery/src/test/scala/akka/discovery/config/ConfigServicesParserSpec.scala b/akka-discovery/src/test/scala/akka/discovery/config/ConfigServicesParserSpec.scala index 2f00c3730f..677666079a 100644 --- a/akka-discovery/src/test/scala/akka/discovery/config/ConfigServicesParserSpec.scala +++ b/akka-discovery/src/test/scala/akka/discovery/config/ConfigServicesParserSpec.scala @@ -40,12 +40,11 @@ class ConfigServicesParserSpec extends WordSpec with Matchers { val result = ConfigServicesParser.parse(config) - result("service1") shouldEqual Resolved( - "service1", - immutable.Seq( - ResolvedTarget(host = "cat", port = Some(1233), address = None), - ResolvedTarget(host = "dog", port = None, address = None) - )) + result("service1") shouldEqual Resolved("service1", + immutable.Seq(ResolvedTarget(host = "cat", + port = Some(1233), + address = None), + ResolvedTarget(host = "dog", port = None, address = None))) result("service2") shouldEqual Resolved("service2", immutable.Seq()) } } diff --git a/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala b/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala index b2bb28f75a..e93b8f7477 100644 --- a/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala +++ b/akka-discovery/src/test/scala/akka/discovery/dns/DnsDiscoverySpec.scala @@ -18,8 +18,7 @@ import akka.discovery.ServiceDiscovery object DnsDiscoverySpec { - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" //#configure-dns akka { discovery { @@ -35,15 +34,13 @@ object DnsDiscoverySpec { lazy val dockerDnsServerPort = SocketUtil.temporaryLocalPort() - val 
configWithAsyncDnsResolverAsDefault = ConfigFactory.parseString( - """ + val configWithAsyncDnsResolverAsDefault = ConfigFactory.parseString(""" akka.io.dns.resolver = "async-dns" """).withFallback(config) } -class DnsDiscoverySpec extends AkkaSpec(DnsDiscoverySpec.config) - with DockerBindDnsService { +class DnsDiscoverySpec extends AkkaSpec(DnsDiscoverySpec.config) with DockerBindDnsService { import DnsDiscoverySpec._ @@ -59,11 +56,9 @@ class DnsDiscoverySpec extends AkkaSpec(DnsDiscoverySpec.config) .lookup(Lookup("foo.test.").withPortName("service").withProtocol("tcp"), resolveTimeout = 10.seconds) .futureValue - val expected = Set( - ResolvedTarget("a-single.foo.test", Some(5060), Some(InetAddress.getByName("192.168.1.20"))), - ResolvedTarget("a-double.foo.test", Some(65535), Some(InetAddress.getByName("192.168.1.21"))), - ResolvedTarget("a-double.foo.test", Some(65535), Some(InetAddress.getByName("192.168.1.22"))) - ) + val expected = Set(ResolvedTarget("a-single.foo.test", Some(5060), Some(InetAddress.getByName("192.168.1.20"))), + ResolvedTarget("a-double.foo.test", Some(65535), Some(InetAddress.getByName("192.168.1.21"))), + ResolvedTarget("a-double.foo.test", Some(65535), Some(InetAddress.getByName("192.168.1.22")))) val result1 = lookup() result1.addresses.toSet shouldEqual expected diff --git a/akka-discovery/src/test/scala/akka/discovery/dns/DnsServiceDiscoverySpec.scala b/akka-discovery/src/test/scala/akka/discovery/dns/DnsServiceDiscoverySpec.scala index 4ad0e5ad6e..7e756530a6 100644 --- a/akka-discovery/src/test/scala/akka/discovery/dns/DnsServiceDiscoverySpec.scala +++ b/akka-discovery/src/test/scala/akka/discovery/dns/DnsServiceDiscoverySpec.scala @@ -18,12 +18,24 @@ import scala.concurrent.duration._ class DnsServiceDiscoverySpec extends WordSpec with Matchers { "srvRecordsToResolved" must { "fill in ips from A records" in { - val resolved = DnsProtocol.Resolved("cats.com", im.Seq(new SRVRecord("cats.com", Ttl.fromPositive(1.second), 2, 3, 4, 
"kittens.com")), - im.Seq( - new ARecord("kittens.com", Ttl.fromPositive(1.second), InetAddress.getByName("127.0.0.2")), - new ARecord("kittens.com", Ttl.fromPositive(1.second), InetAddress.getByName("127.0.0.3")), - new ARecord("donkeys.com", Ttl.fromPositive(1.second), InetAddress.getByName("127.0.0.4")) - )) + val resolved = DnsProtocol.Resolved("cats.com", + im.Seq( + new SRVRecord("cats.com", + Ttl.fromPositive(1.second), + 2, + 3, + 4, + "kittens.com")), + im.Seq( + new ARecord("kittens.com", + Ttl.fromPositive(1.second), + InetAddress.getByName("127.0.0.2")), + new ARecord("kittens.com", + Ttl.fromPositive(1.second), + InetAddress.getByName("127.0.0.3")), + new ARecord("donkeys.com", + Ttl.fromPositive(1.second), + InetAddress.getByName("127.0.0.4")))) val result: ServiceDiscovery.Resolved = DnsServiceDiscovery.srvRecordsToResolved("cats.com", resolved) @@ -31,14 +43,23 @@ class DnsServiceDiscoverySpec extends WordSpec with Matchers { result.serviceName shouldEqual "cats.com" result.addresses.toSet shouldEqual Set( ResolvedTarget("kittens.com", Some(4), Some(InetAddress.getByName("127.0.0.2"))), - ResolvedTarget("kittens.com", Some(4), Some(InetAddress.getByName("127.0.0.3"))) - ) + ResolvedTarget("kittens.com", Some(4), Some(InetAddress.getByName("127.0.0.3")))) } // Naughty DNS server "use SRV target and port if no additional records" in { - val resolved = DnsProtocol.Resolved("cats.com", im.Seq(new SRVRecord("cats.com", Ttl.fromPositive(1.second), 2, 3, 8080, "kittens.com")), - im.Seq(new ARecord("donkeys.com", Ttl.fromPositive(1.second), InetAddress.getByName("127.0.0.4")))) + val resolved = DnsProtocol.Resolved("cats.com", + im.Seq( + new SRVRecord("cats.com", + Ttl.fromPositive(1.second), + 2, + 3, + 8080, + "kittens.com")), + im.Seq( + new ARecord("donkeys.com", + Ttl.fromPositive(1.second), + InetAddress.getByName("127.0.0.4")))) val result = DnsServiceDiscovery.srvRecordsToResolved("cats.com", resolved) @@ -47,21 +68,31 @@ class 
DnsServiceDiscoverySpec extends WordSpec with Matchers { } "fill in ips from AAAA records" in { - val resolved = DnsProtocol.Resolved("cats.com", im.Seq(new SRVRecord("cats1.com", Ttl.fromPositive(1.second), 2, 3, 4, "kittens.com")), - im.Seq( - new AAAARecord("kittens.com", Ttl.fromPositive(2.seconds), InetAddress.getByName("::1").asInstanceOf[Inet6Address]), - new AAAARecord("kittens.com", Ttl.fromPositive(2.seconds), InetAddress.getByName("::2").asInstanceOf[Inet6Address]), - new AAAARecord("donkeys.com", Ttl.fromPositive(2.seconds), InetAddress.getByName("::3").asInstanceOf[Inet6Address]) - )) + val resolved = DnsProtocol.Resolved("cats.com", + im.Seq( + new SRVRecord("cats1.com", + Ttl.fromPositive(1.second), + 2, + 3, + 4, + "kittens.com")), + im.Seq( + new AAAARecord("kittens.com", + Ttl.fromPositive(2.seconds), + InetAddress.getByName("::1").asInstanceOf[Inet6Address]), + new AAAARecord("kittens.com", + Ttl.fromPositive(2.seconds), + InetAddress.getByName("::2").asInstanceOf[Inet6Address]), + new AAAARecord("donkeys.com", + Ttl.fromPositive(2.seconds), + InetAddress.getByName("::3").asInstanceOf[Inet6Address]))) val result: ServiceDiscovery.Resolved = DnsServiceDiscovery.srvRecordsToResolved("cats.com", resolved) result.serviceName shouldEqual "cats.com" - result.addresses.toSet shouldEqual Set( - ResolvedTarget("kittens.com", Some(4), Some(InetAddress.getByName("::1"))), - ResolvedTarget("kittens.com", Some(4), Some(InetAddress.getByName("::2"))) - ) + result.addresses.toSet shouldEqual Set(ResolvedTarget("kittens.com", Some(4), Some(InetAddress.getByName("::1"))), + ResolvedTarget("kittens.com", Some(4), Some(InetAddress.getByName("::2")))) } } } diff --git a/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala b/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala index 9e5d18d7ea..81c7b4f924 100644 --- a/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala +++ 
b/akka-discovery/src/test/scala/doc/akka/discovery/CompileOnlySpec.scala @@ -25,7 +25,8 @@ object CompileOnlySpec { //#basic //#full - val lookup: Future[ServiceDiscovery.Resolved] = serviceDiscovery.lookup(Lookup("akka.io").withPortName("remoting").withProtocol("tcp"), 1.second) + val lookup: Future[ServiceDiscovery.Resolved] = + serviceDiscovery.lookup(Lookup("akka.io").withPortName("remoting").withProtocol("tcp"), 1.second) //#full // compiler diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala index 49f140c492..8dce001fb7 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DeltaPropagationSelector.scala @@ -108,27 +108,27 @@ import akka.util.ccompat._ val cacheKey = (key, fromSeqNr, toSeqNr) val deltaGroup = cache.get(cacheKey) match { case None => - val group = deltaEntriesAfterJ.valuesIterator.reduceLeft { - (d1, d2) => - val merged = d2 match { - case NoDeltaPlaceholder => NoDeltaPlaceholder - case _ => - // this is fine also if d1 is a NoDeltaPlaceholder - d1.merge(d2.asInstanceOf[d1.T]) - } - merged match { - case s: ReplicatedDeltaSize if s.deltaSize >= maxDeltaSize => - // discard too large deltas - NoDeltaPlaceholder - case _ => merged - } + val group = deltaEntriesAfterJ.valuesIterator.reduceLeft { (d1, d2) => + val merged = d2 match { + case NoDeltaPlaceholder => NoDeltaPlaceholder + case _ => + // this is fine also if d1 is a NoDeltaPlaceholder + d1.merge(d2.asInstanceOf[d1.T]) + } + merged match { + case s: ReplicatedDeltaSize if s.deltaSize >= maxDeltaSize => + // discard too large deltas + NoDeltaPlaceholder + case _ => merged + } } cache = cache.updated(cacheKey, group) group case Some(group) => group } deltas = deltas.updated(key, (deltaGroup, fromSeqNr, toSeqNr)) - deltaSentToNode = 
deltaSentToNode.updated(key, deltaSentToNodeForKey.updated(node, deltaEntriesAfterJ.lastKey)) + deltaSentToNode = + deltaSentToNode.updated(key, deltaSentToNodeForKey.updated(node, deltaEntriesAfterJ.lastKey)) } } diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala index a8e699e8bd..b177b2e47a 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DistributedData.scala @@ -37,7 +37,8 @@ class DistributedData(system: ExtendedActorSystem) extends Extension { */ val replicator: ActorRef = if (isTerminated) { - system.log.warning("Replicator points to dead letters: Make sure the cluster node is not terminated and has the proper role!") + system.log.warning( + "Replicator points to dead letters: Make sure the cluster node is not terminated and has the proper role!") system.deadLetters } else { system.systemActorOf(Replicator.props(settings), ReplicatorSettings.name(system, None)) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala index 0e1174d61b..f5f76ee97a 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/DurableStore.scala @@ -81,8 +81,8 @@ object DurableStore { * the wrapped `ReplicatedData` including its serializerId and * manifest. 
*/ - final class DurableDataEnvelope private[akka] ( - private[akka] val dataEnvelope: DataEnvelope) extends ReplicatorMessage { + final class DurableDataEnvelope private[akka] (private[akka] val dataEnvelope: DataEnvelope) + extends ReplicatorMessage { def this(data: ReplicatedData) = this(DataEnvelope(data)) @@ -103,11 +103,10 @@ object LmdbDurableStore { private case object WriteBehind extends DeadLetterSuppression - private final case class Lmdb( - env: Env[ByteBuffer], - db: Dbi[ByteBuffer], - keyBuffer: ByteBuffer, - valueBuffer: ByteBuffer) + private final case class Lmdb(env: Env[ByteBuffer], + db: Dbi[ByteBuffer], + keyBuffer: ByteBuffer, + valueBuffer: ByteBuffer) } final class LmdbDurableStore(config: Config) extends Actor with ActorLogging { @@ -142,10 +141,7 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging { val env = { val mapSize = config.getBytes("lmdb.map-size") dir.mkdirs() - Env.create() - .setMapSize(mapSize) - .setMaxDbs(1) - .open(dir, EnvFlags.MDB_NOLOCK) + Env.create().setMapSize(mapSize).setMaxDbs(1).open(dir, EnvFlags.MDB_NOLOCK) } val db = env.openDbi("ddata", DbiFlags.MDB_CREATE) @@ -154,8 +150,9 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging { val valueBuffer = ByteBuffer.allocateDirect(100 * 1024) // will grow when needed if (log.isDebugEnabled) - log.debug("Init of LMDB in directory [{}] took [{} ms]", dir.getCanonicalPath, - TimeUnit.NANOSECONDS.toMillis(System.nanoTime - t0)) + log.debug("Init of LMDB in directory [{}] took [{} ms]", + dir.getCanonicalPath, + TimeUnit.NANOSECONDS.toMillis(System.nanoTime - t0)) val l = Lmdb(env, db, keyBuffer, valueBuffer) _lmdb = OptionVal.Some(l) l @@ -218,8 +215,7 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging { sender() ! loadData sender() ! 
LoadAllCompleted if (log.isDebugEnabled) - log.debug("load all of [{}] entries took [{} ms]", n, - TimeUnit.NANOSECONDS.toMillis(System.nanoTime - t0)) + log.debug("load all of [{}] entries took [{} ms]", n, TimeUnit.NANOSECONDS.toMillis(System.nanoTime - t0)) context.become(active) } finally { Try(iter.close()) @@ -297,8 +293,9 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging { } tx.commit() if (log.isDebugEnabled) - log.debug("store and commit of [{}] entries took [{} ms]", pending.size, - TimeUnit.NANOSECONDS.toMillis(System.nanoTime - t0)) + log.debug("store and commit of [{}] entries took [{} ms]", + pending.size, + TimeUnit.NANOSECONDS.toMillis(System.nanoTime - t0)) } catch { case NonFatal(e) => import scala.collection.JavaConverters._ @@ -311,4 +308,3 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging { } } - diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala index 461ba7e79a..440793b5e8 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Flag.scala @@ -5,6 +5,7 @@ package akka.cluster.ddata object Flag { + /** * `Flag` that is initialized to `false`. */ diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala index dbc55dd5ae..87bd39850d 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GCounter.scala @@ -12,6 +12,7 @@ import akka.annotation.InternalApi object GCounter { val empty: GCounter = new GCounter def apply(): GCounter = empty + /** * Java API */ @@ -40,11 +41,13 @@ object GCounter { * This class is immutable, i.e. "modifying" methods return a new instance. 
*/ @SerialVersionUID(1L) -final class GCounter private[akka] ( - private[akka] val state: Map[UniqueAddress, BigInt] = Map.empty, - override val delta: Option[GCounter] = None) - extends DeltaReplicatedData with ReplicatedDelta - with ReplicatedDataSerialization with RemovedNodePruning with FastMerge { +final class GCounter private[akka] (private[akka] val state: Map[UniqueAddress, BigInt] = Map.empty, + override val delta: Option[GCounter] = None) + extends DeltaReplicatedData + with ReplicatedDelta + with ReplicatedDataSerialization + with RemovedNodePruning + with FastMerge { import GCounter.Zero @@ -54,7 +57,9 @@ final class GCounter private[akka] ( /** * Scala API: Current total value of the counter. */ - def value: BigInt = state.values.foldLeft(Zero) { (acc, v) => acc + v } + def value: BigInt = state.values.foldLeft(Zero) { (acc, v) => + acc + v + } /** * Java API: Current total value of the counter. diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala index 63e06d760d..b1c551c047 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/GSet.scala @@ -31,8 +31,10 @@ object GSet { */ @SerialVersionUID(1L) final case class GSet[A] private (elements: Set[A])(override val delta: Option[GSet[A]]) - extends DeltaReplicatedData with ReplicatedDelta - with ReplicatedDataSerialization with FastMerge { + extends DeltaReplicatedData + with ReplicatedDelta + with ReplicatedDataSerialization + with FastMerge { type T = GSet[A] type D = GSet[A] @@ -72,7 +74,7 @@ final case class GSet[A] private (elements: Set[A])(override val delta: Option[G else if (this.isAncestorOf(that)) that.clearAncestor() else { clearAncestor() - new GSet[A](elements union that.elements)(None) + new GSet[A](elements.union(that.elements))(None) } override def mergeDelta(thatDelta: GSet[A]): GSet[A] = merge(thatDelta) 
diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala index 3b66339da4..67bb8cefec 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Key.scala @@ -5,6 +5,7 @@ package akka.cluster.ddata object Key { + /** * Extract the [[Key#id]]. */ @@ -35,4 +36,3 @@ abstract class Key[+T <: ReplicatedData](val id: Key.KeyId) extends Serializable override def toString(): String = id } - diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala index d20f0f50c7..9d0bf67b5c 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWMap.scala @@ -10,6 +10,7 @@ import akka.cluster.UniqueAddress import akka.cluster.ddata.ORMap.ZeroTag object LWWMap { + /** * INTERNAL API */ @@ -21,6 +22,7 @@ object LWWMap { private val _empty: LWWMap[Any, Any] = new LWWMap(new ORMap(ORSet.empty, Map.empty, zeroTag = LWWMapTag)) def empty[A, B]: LWWMap[A, B] = _empty.asInstanceOf[LWWMap[A, B]] def apply(): LWWMap[Any, Any] = _empty + /** * Java API */ @@ -55,10 +57,11 @@ object LWWMap { * This class is immutable, i.e. "modifying" methods return a new instance. 
*/ @SerialVersionUID(1L) -final class LWWMap[A, B] private[akka] ( - private[akka] val underlying: ORMap[A, LWWRegister[B]]) - extends DeltaReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { - import LWWRegister.{ Clock, defaultClock } +final class LWWMap[A, B] private[akka] (private[akka] val underlying: ORMap[A, LWWRegister[B]]) + extends DeltaReplicatedData + with ReplicatedDataSerialization + with RemovedNodePruning { + import LWWRegister.{ defaultClock, Clock } type T = LWWMap[A, B] type D = ORMap.DeltaOp diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala index b98e28096b..9f951096ae 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/LWWRegister.scala @@ -12,6 +12,7 @@ import akka.util.HashCode object LWWRegister { trait Clock[A] { + /** * @param currentTimestamp the current `timestamp` value of the `LWWRegister` * @param value the register value to set and associate with the returned timestamp @@ -128,12 +129,10 @@ object LWWRegister { * This class is immutable, i.e. "modifying" methods return a new instance. 
*/ @SerialVersionUID(1L) -final class LWWRegister[A] private[akka] ( - private[akka] val node: UniqueAddress, - val value: A, - val timestamp: Long) - extends ReplicatedData with ReplicatedDataSerialization { - import LWWRegister.{ Clock, defaultClock } +final class LWWRegister[A] private[akka] (private[akka] val node: UniqueAddress, val value: A, val timestamp: Long) + extends ReplicatedData + with ReplicatedDataSerialization { + import LWWRegister.{ defaultClock, Clock } type T = LWWRegister[A] diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala index 8fba388c14..99f1a81254 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMap.scala @@ -16,6 +16,7 @@ object ORMap { private val _empty: ORMap[Any, ReplicatedData] = new ORMap(ORSet.empty, Map.empty, VanillaORMapTag) def empty[A, B <: ReplicatedData]: ORMap[A, B] = _empty.asInstanceOf[ORMap[A, B]] def apply(): ORMap[Any, ReplicatedData] = _empty + /** * Java API */ @@ -52,7 +53,9 @@ object ORMap { /** * INTERNAL API */ - @InternalApi private[akka] sealed abstract class AtomicDeltaOp[A, B <: ReplicatedData] extends DeltaOp with ReplicatedDeltaSize { + @InternalApi private[akka] sealed abstract class AtomicDeltaOp[A, B <: ReplicatedData] + extends DeltaOp + with ReplicatedDeltaSize { def underlying: ORSet.DeltaOp def zeroTag: ZeroTag override def zero: DeltaReplicatedData = zeroTag.zero @@ -65,7 +68,10 @@ object ORMap { // PutDeltaOp contains ORSet delta and full value /** INTERNAL API */ - @InternalApi private[akka] final case class PutDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp, value: (A, B), zeroTag: ZeroTag) extends AtomicDeltaOp[A, B] { + @InternalApi private[akka] final case class PutDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp, + value: (A, B), + zeroTag: ZeroTag) + extends AtomicDeltaOp[A, B] 
{ override def merge(that: DeltaOp): DeltaOp = that match { case put: PutDeltaOp[A, B] if this.value._1 == put.value._1 => new PutDeltaOp[A, B](this.underlying.merge(put.underlying), put.value, zeroTag) @@ -87,21 +93,21 @@ object ORMap { // UpdateDeltaOp contains ORSet delta and either delta of value (in case where underlying type supports deltas) or full value /** INTERNAL API */ - @InternalApi private[akka] final case class UpdateDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp, values: Map[A, B], zeroTag: ZeroTag) extends AtomicDeltaOp[A, B] { + @InternalApi private[akka] final case class UpdateDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp, + values: Map[A, B], + zeroTag: ZeroTag) + extends AtomicDeltaOp[A, B] { override def merge(that: DeltaOp): DeltaOp = that match { case update: UpdateDeltaOp[A, B] => - new UpdateDeltaOp[A, B]( - this.underlying.merge(update.underlying), - update.values.foldLeft(this.values) { - (map, pair) => - val (key, value) = pair - if (this.values.contains(key)) { - val elem1 = this.values(key) - val elem2 = value.asInstanceOf[elem1.T] - map + (key -> elem1.merge(elem2).asInstanceOf[B]) - } else map + pair - }, - zeroTag) + new UpdateDeltaOp[A, B](this.underlying.merge(update.underlying), update.values.foldLeft(this.values) { + (map, pair) => + val (key, value) = pair + if (this.values.contains(key)) { + val elem1 = this.values(key) + val elem2 = value.asInstanceOf[elem1.T] + map + (key -> elem1.merge(elem2).asInstanceOf[B]) + } else map + pair + }, zeroTag) case put: PutDeltaOp[A, B] if this.values.size == 1 && this.values.contains(put.value._1) => new PutDeltaOp[A, B](this.underlying.merge(put.underlying), put.value, zeroTag) case other: AtomicDeltaOp[A, B] => DeltaGroup(Vector(this, other)) @@ -111,16 +117,22 @@ object ORMap { // RemoveDeltaOp does not contain any value at all - the propagated 'value' map would be empty /** INTERNAL API */ - @InternalApi private[akka] final case class RemoveDeltaOp[A, B <: 
ReplicatedData](underlying: ORSet.DeltaOp, zeroTag: ZeroTag) extends AtomicDeltaOp[A, B] + @InternalApi private[akka] final case class RemoveDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp, + zeroTag: ZeroTag) + extends AtomicDeltaOp[A, B] // RemoveKeyDeltaOp contains a single value - to provide the recipient with the removed key for value map /** INTERNAL API */ - @InternalApi private[akka] final case class RemoveKeyDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp, removedKey: A, zeroTag: ZeroTag) extends AtomicDeltaOp[A, B] + @InternalApi private[akka] final case class RemoveKeyDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp, + removedKey: A, + zeroTag: ZeroTag) + extends AtomicDeltaOp[A, B] // DeltaGroup is effectively a causally ordered list of individual deltas /** INTERNAL API */ @InternalApi private[akka] final case class DeltaGroup[A, B <: ReplicatedData](ops: immutable.IndexedSeq[DeltaOp]) - extends DeltaOp with ReplicatedDeltaSize { + extends DeltaOp + with ReplicatedDeltaSize { override def merge(that: DeltaOp): DeltaOp = that match { case that: AtomicDeltaOp[A, B] => ops.last match { @@ -141,7 +153,8 @@ object ORMap { case DeltaGroup(thatOps) => DeltaGroup(ops ++ thatOps) } - override def zero: DeltaReplicatedData = ops.headOption.fold(ORMap.empty[A, B].asInstanceOf[DeltaReplicatedData])(_.zero) + override def zero: DeltaReplicatedData = + ops.headOption.fold(ORMap.empty[A, B].asInstanceOf[DeltaReplicatedData])(_.zero) override def deltaSize: Int = ops.size } @@ -156,14 +169,15 @@ object ORMap { * This class is immutable, i.e. "modifying" methods return a new instance. 
*/ @SerialVersionUID(1L) -final class ORMap[A, B <: ReplicatedData] private[akka] ( - private[akka] val keys: ORSet[A], - private[akka] val values: Map[A, B], - private[akka] val zeroTag: ZeroTag, - override val delta: Option[ORMap.DeltaOp] = None) - extends DeltaReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { +final class ORMap[A, B <: ReplicatedData] private[akka] (private[akka] val keys: ORSet[A], + private[akka] val values: Map[A, B], + private[akka] val zeroTag: ZeroTag, + override val delta: Option[ORMap.DeltaOp] = None) + extends DeltaReplicatedData + with ReplicatedDataSerialization + with RemovedNodePruning { - import ORMap.{ PutDeltaOp, UpdateDeltaOp, RemoveDeltaOp, RemoveKeyDeltaOp } + import ORMap.{ PutDeltaOp, RemoveDeltaOp, RemoveKeyDeltaOp, UpdateDeltaOp } type T = ORMap[A, B] type D = ORMap.DeltaOp @@ -235,8 +249,8 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( if (value.isInstanceOf[ORSet[_]] && values.contains(key)) throw new IllegalArgumentException( "`ORMap.put` must not be used to replace an existing `ORSet` " + - "value, because important history can be lost when replacing the `ORSet` and " + - "undesired effects of merging will occur. Use `ORMultiMap` or `ORMap.updated` instead.") + "value, because important history can be lost when replacing the `ORSet` and " + + "undesired effects of merging will occur. 
Use `ORMultiMap` or `ORMap.updated` instead.") else { val newKeys = keys.resetDelta.add(node, key) val putDeltaOp = PutDeltaOp(newKeys.delta.get, key -> value, zeroTag) @@ -285,7 +299,8 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( /** * INTERNAL API */ - @InternalApi private[akka] def updated(node: UniqueAddress, key: A, initial: B, valueDeltas: Boolean = false)(modify: B => B): ORMap[A, B] = { + @InternalApi private[akka] def updated(node: UniqueAddress, key: A, initial: B, valueDeltas: Boolean = false)( + modify: B => B): ORMap[A, B] = { val (oldValue, hasOldValue) = values.get(key) match { case Some(old) => (old, true) case _ => (initial, false) @@ -411,7 +426,9 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( } var mergedKeys: ORSet[A] = this.keys - var (mergedValues, tombstonedVals): (Map[A, B], Map[A, B]) = this.values.partition { case (k, _) => this.keys.contains(k) } + var (mergedValues, tombstonedVals): (Map[A, B], Map[A, B]) = this.values.partition { + case (k, _) => this.keys.contains(k) + } val processDelta: PartialFunction[ORMap.DeltaOp, Unit] = { case putOp: PutDeltaOp[A, B] => @@ -466,7 +483,7 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( } } - (processDelta orElse processNestedDelta)(thatDelta) + processDelta.orElse(processNestedDelta)(thatDelta) if (withValueDeltas) new ORMap[A, B](mergedKeys, tombstonedVals ++ mergedValues, zeroTag = zeroTag) @@ -497,10 +514,10 @@ final class ORMap[A, B <: ReplicatedData] private[akka] ( } override def modifiedByNodes: Set[UniqueAddress] = { - keys.modifiedByNodes union values.foldLeft(Set.empty[UniqueAddress]) { - case (acc, (_, data: RemovedNodePruning)) => acc union data.modifiedByNodes + keys.modifiedByNodes.union(values.foldLeft(Set.empty[UniqueAddress]) { + case (acc, (_, data: RemovedNodePruning)) => acc.union(data.modifiedByNodes) case (acc, _) => acc - } + }) } override def needPruningFrom(removedNode: UniqueAddress): Boolean = { @@ -553,4 +570,6 @@ object 
ORMapKey { } @SerialVersionUID(1L) -final case class ORMapKey[A, B <: ReplicatedData](_id: String) extends Key[ORMap[A, B]](_id) with ReplicatedDataSerialization +final case class ORMapKey[A, B <: ReplicatedData](_id: String) + extends Key[ORMap[A, B]](_id) + with ReplicatedDataSerialization diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala index 9c2b6fc1c1..2258d8bc72 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORMultiMap.scala @@ -9,6 +9,7 @@ import akka.cluster.ddata.ORMap._ import akka.cluster.{ Cluster, UniqueAddress } object ORMultiMap { + /** * INTERNAL API */ @@ -26,7 +27,9 @@ object ORMultiMap { } val _empty: ORMultiMap[Any, Any] = new ORMultiMap(new ORMap(ORSet.empty, Map.empty, zeroTag = ORMultiMapTag), false) - val _emptyWithValueDeltas: ORMultiMap[Any, Any] = new ORMultiMap(new ORMap(ORSet.empty, Map.empty, zeroTag = ORMultiMapWithValueDeltasTag), true) + val _emptyWithValueDeltas: ORMultiMap[Any, Any] = + new ORMultiMap(new ORMap(ORSet.empty, Map.empty, zeroTag = ORMultiMapWithValueDeltasTag), true) + /** * Provides an empty multimap. */ @@ -62,10 +65,11 @@ object ORMultiMap { * Note that on concurrent adds and removals for the same key (on the same set), removals can be lost. 
*/ @SerialVersionUID(1L) -final class ORMultiMap[A, B] private[akka] ( - private[akka] val underlying: ORMap[A, ORSet[B]], - private[akka] val withValueDeltas: Boolean) - extends DeltaReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { +final class ORMultiMap[A, B] private[akka] (private[akka] val underlying: ORMap[A, ORSet[B]], + private[akka] val withValueDeltas: Boolean) + extends DeltaReplicatedData + with ReplicatedDataSerialization + with RemovedNodePruning { override type T = ORMultiMap[A, B] override type D = ORMap.DeltaOp @@ -75,8 +79,11 @@ final class ORMultiMap[A, B] private[akka] ( if (withValueDeltas) { val newUnderlying = underlying.mergeRetainingDeletedValues(that.underlying) // Garbage collect the tombstones we no longer need, i.e. those that have Set() as a value. - val newValues = newUnderlying.values.filterNot { case (key, value) => !newUnderlying.keys.contains(key) && value.isEmpty } - new ORMultiMap[A, B](new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta), withValueDeltas) + val newValues = newUnderlying.values.filterNot { + case (key, value) => !newUnderlying.keys.contains(key) && value.isEmpty + } + new ORMultiMap[A, B](new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta), + withValueDeltas) } else new ORMultiMap(underlying.merge(that.underlying), withValueDeltas) } else throw new IllegalArgumentException("Trying to merge two ORMultiMaps of different map sub-type") @@ -84,10 +91,10 @@ final class ORMultiMap[A, B] private[akka] ( /** * Scala API: All entries of a multimap where keys are strings and values are sets. 
*/ - def entries: Map[A, Set[B]] = if (withValueDeltas) - underlying.entries.collect { case (k, v) if underlying.keys.elements.contains(k) => k -> v.elements } - else - underlying.entries.map { case (k, v) => k -> v.elements } + def entries: Map[A, Set[B]] = + if (withValueDeltas) + underlying.entries.collect { case (k, v) if underlying.keys.elements.contains(k) => k -> v.elements } else + underlying.entries.map { case (k, v) => k -> v.elements } /** * Java API: All entries of a multimap where keys are strings and values are sets. @@ -96,8 +103,9 @@ final class ORMultiMap[A, B] private[akka] ( import scala.collection.JavaConverters._ val result = new java.util.HashMap[A, java.util.Set[B]] if (withValueDeltas) - underlying.entries.foreach { case (k, v) => if (underlying.keys.elements.contains(k)) result.put(k, v.elements.asJava) } - else + underlying.entries.foreach { + case (k, v) => if (underlying.keys.elements.contains(k)) result.put(k, v.elements.asJava) + } else underlying.entries.foreach { case (k, v) => result.put(k, v.elements.asJava) } result } @@ -171,7 +179,9 @@ final class ORMultiMap[A, B] private[akka] ( */ @InternalApi private[akka] def put(node: UniqueAddress, key: A, value: Set[B]): ORMultiMap[A, B] = { val newUnderlying = underlying.updated(node, key, ORSet.empty[B], valueDeltas = withValueDeltas) { existing => - value.foldLeft(existing.clear(node)) { (s, element) => s.add(node, element) } + value.foldLeft(existing.clear(node)) { (s, element) => + s.add(node, element) + } } new ORMultiMap(newUnderlying, withValueDeltas) } @@ -203,7 +213,9 @@ final class ORMultiMap[A, B] private[akka] ( */ @InternalApi private[akka] def remove(node: UniqueAddress, key: A): ORMultiMap[A, B] = { if (withValueDeltas) { - val u = underlying.updated(node, key, ORSet.empty[B], valueDeltas = true) { existing => existing.clear(node) } + val u = underlying.updated(node, key, ORSet.empty[B], valueDeltas = true) { existing => + existing.clear(node) + } new 
ORMultiMap(u.removeKey(node, key), withValueDeltas) } else { new ORMultiMap(underlying.remove(node, key), withValueDeltas) @@ -232,7 +244,8 @@ final class ORMultiMap[A, B] private[akka] ( * INTERNAL API */ @InternalApi private[akka] def addBinding(node: UniqueAddress, key: A, element: B): ORMultiMap[A, B] = { - val newUnderlying = underlying.updated(node, key, ORSet.empty[B], valueDeltas = withValueDeltas)(_.add(node, element)) + val newUnderlying = + underlying.updated(node, key, ORSet.empty[B], valueDeltas = withValueDeltas)(_.add(node, element)) new ORMultiMap(newUnderlying, withValueDeltas) } @@ -292,7 +305,10 @@ final class ORMultiMap[A, B] private[akka] ( /** * INTERNAL API */ - @InternalApi private[akka] def replaceBinding(node: UniqueAddress, key: A, oldElement: B, newElement: B): ORMultiMap[A, B] = + @InternalApi private[akka] def replaceBinding(node: UniqueAddress, + key: A, + oldElement: B, + newElement: B): ORMultiMap[A, B] = if (newElement != oldElement) addBinding(node, key, newElement).removeBinding(node, key, oldElement) else @@ -307,8 +323,11 @@ final class ORMultiMap[A, B] private[akka] ( if (withValueDeltas) { val newUnderlying = underlying.mergeDeltaRetainingDeletedValues(thatDelta) // Garbage collect the tombstones we no longer need, i.e. those that have Set() as a value. 
- val newValues = newUnderlying.values.filterNot { case (key, value) => !newUnderlying.keys.contains(key) && value.isEmpty } - new ORMultiMap[A, B](new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta), withValueDeltas) + val newValues = newUnderlying.values.filterNot { + case (key, value) => !newUnderlying.keys.contains(key) && value.isEmpty + } + new ORMultiMap[A, B](new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta), + withValueDeltas) } else new ORMultiMap(underlying.mergeDelta(thatDelta), withValueDeltas) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala index ea37217016..31da85ad7d 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ORSet.scala @@ -16,6 +16,7 @@ object ORSet { private val _empty: ORSet[Any] = new ORSet(Map.empty, VersionVector.empty) def empty[A]: ORSet[A] = _empty.asInstanceOf[ORSet[A]] def apply(): ORSet[Any] = _empty + /** * Java API */ @@ -58,9 +59,8 @@ object ORSet { override def merge(that: DeltaOp): DeltaOp = that match { case AddDeltaOp(u) => // Note that we only merge deltas originating from the same node - AddDeltaOp(new ORSet( - concatElementsMap(u.elementsMap.asInstanceOf[Map[A, Dot]]), - underlying.vvector.merge(u.vvector))) + AddDeltaOp( + new ORSet(concatElementsMap(u.elementsMap.asInstanceOf[Map[A, Dot]]), underlying.vvector.merge(u.vvector))) case _: AtomicDeltaOp[A] => DeltaGroup(Vector(this, that)) case DeltaGroup(ops) => DeltaGroup(this +: ops) } @@ -97,7 +97,8 @@ object ORSet { * INTERNAL API */ @InternalApi private[akka] final case class DeltaGroup[A](ops: immutable.IndexedSeq[DeltaOp]) - extends DeltaOp with ReplicatedDeltaSize { + extends DeltaOp + with ReplicatedDeltaSize { override def merge(that: DeltaOp): DeltaOp = that match { case thatAdd: AddDeltaOp[A] => // 
merge AddDeltaOp into last AddDeltaOp in the group, if possible @@ -125,7 +126,8 @@ object ORSet { */ @InternalApi private[akka] def subtractDots(dot: Dot, vvector: VersionVector): Dot = { - @tailrec def dropDots(remaining: List[(UniqueAddress, Long)], acc: List[(UniqueAddress, Long)]): List[(UniqueAddress, Long)] = + @tailrec def dropDots(remaining: List[(UniqueAddress, Long)], + acc: List[(UniqueAddress, Long)]): List[(UniqueAddress, Long)] = remaining match { case Nil => acc case (d @ (node, v1)) :: rest => @@ -158,7 +160,9 @@ object ORSet { * INTERNAL API * @see [[ORSet#merge]] */ - @InternalApi private[akka] def mergeCommonKeys[A](commonKeys: Set[A], lhs: ORSet[A], rhs: ORSet[A]): Map[A, ORSet.Dot] = + @InternalApi private[akka] def mergeCommonKeys[A](commonKeys: Set[A], + lhs: ORSet[A], + rhs: ORSet[A]): Map[A, ORSet.Dot] = mergeCommonKeys(commonKeys.iterator, lhs, rhs) private def mergeCommonKeys[A](commonKeys: Iterator[A], lhs: ORSet[A], rhs: ORSet[A]): Map[A, ORSet.Dot] = { @@ -227,12 +231,15 @@ object ORSet { * INTERNAL API * @see [[ORSet#merge]] */ - @InternalApi private[akka] def mergeDisjointKeys[A]( - keys: Set[A], elementsMap: Map[A, ORSet.Dot], vvector: VersionVector, - accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] = + @InternalApi private[akka] def mergeDisjointKeys[A](keys: Set[A], + elementsMap: Map[A, ORSet.Dot], + vvector: VersionVector, + accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] = mergeDisjointKeys(keys.iterator, elementsMap, vvector, accumulator) - private def mergeDisjointKeys[A](keys: Iterator[A], elementsMap: Map[A, ORSet.Dot], vvector: VersionVector, + private def mergeDisjointKeys[A](keys: Iterator[A], + elementsMap: Map[A, ORSet.Dot], + vvector: VersionVector, accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] = { keys.foldLeft(accumulator) { case (acc, k) => @@ -278,12 +285,13 @@ object ORSet { * This class is immutable, i.e. "modifying" methods return a new instance. 
*/ @SerialVersionUID(1L) -final class ORSet[A] private[akka] ( - private[akka] val elementsMap: Map[A, ORSet.Dot], - private[akka] val vvector: VersionVector, - override val delta: Option[ORSet.DeltaOp] = None) - extends DeltaReplicatedData - with ReplicatedDataSerialization with RemovedNodePruning with FastMerge { +final class ORSet[A] private[akka] (private[akka] val elementsMap: Map[A, ORSet.Dot], + private[akka] val vvector: VersionVector, + override val delta: Option[ORSet.DeltaOp] = None) + extends DeltaReplicatedData + with ReplicatedDataSerialization + with RemovedNodePruning + with FastMerge { type T = ORSet[A] type D = ORSet.DeltaOp @@ -426,8 +434,7 @@ final class ORSet[A] private[akka] ( val entries00 = ORSet.mergeCommonKeys(commonKeys, this, that) val entries0 = if (addDeltaOp) - entries00 ++ this.elementsMap.filter { case (elem, _) => !that.elementsMap.contains(elem) } - else { + entries00 ++ this.elementsMap.filter { case (elem, _) => !that.elementsMap.contains(elem) } else { val thisUniqueKeys = this.elementsMap.keysIterator.filterNot(that.elementsMap.contains) ORSet.mergeDisjointKeys(thisUniqueKeys, this.elementsMap, that.vvector, entries00) } @@ -523,7 +530,8 @@ final class ORSet[A] private[akka] ( new ORSet(updated, vvector.pruningCleanup(removedNode)) } - private def copy(elementsMap: Map[A, ORSet.Dot] = this.elementsMap, vvector: VersionVector = this.vvector, + private def copy(elementsMap: Map[A, ORSet.Dot] = this.elementsMap, + vvector: VersionVector = this.vvector, delta: Option[ORSet.DeltaOp] = this.delta): ORSet[A] = new ORSet(elementsMap, vvector, delta) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala index 09c4ea98d5..3d933d8fb1 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounter.scala @@ -14,6 +14,7 @@ import 
akka.annotation.InternalApi object PNCounter { val empty: PNCounter = new PNCounter(GCounter.empty, GCounter.empty) def apply(): PNCounter = empty + /** * Java API */ @@ -40,10 +41,11 @@ object PNCounter { * This class is immutable, i.e. "modifying" methods return a new instance. */ @SerialVersionUID(1L) -final class PNCounter private[akka] ( - private[akka] val increments: GCounter, private[akka] val decrements: GCounter) - extends DeltaReplicatedData with ReplicatedDelta - with ReplicatedDataSerialization with RemovedNodePruning { +final class PNCounter private[akka] (private[akka] val increments: GCounter, private[akka] val decrements: GCounter) + extends DeltaReplicatedData + with ReplicatedDelta + with ReplicatedDataSerialization + with RemovedNodePruning { type T = PNCounter type D = PNCounter @@ -157,10 +159,13 @@ final class PNCounter private[akka] ( /** Internal API */ @InternalApi private[akka] def increment(key: UniqueAddress, n: BigInt): PNCounter = change(key, n) + /** Internal API */ @InternalApi private[akka] def increment(key: UniqueAddress): PNCounter = increment(key, 1) + /** Internal API */ @InternalApi private[akka] def decrement(key: UniqueAddress, n: BigInt): PNCounter = change(key, -n) + /** Internal API */ @InternalApi private[akka] def decrement(key: UniqueAddress): PNCounter = decrement(key, 1) @@ -171,9 +176,7 @@ final class PNCounter private[akka] ( else this override def merge(that: PNCounter): PNCounter = - copy( - increments = that.increments.merge(this.increments), - decrements = that.decrements.merge(this.decrements)) + copy(increments = that.increments.merge(this.increments), decrements = that.decrements.merge(this.decrements)) override def delta: Option[PNCounter] = { val incrementsDelta = increments.delta match { @@ -196,20 +199,17 @@ final class PNCounter private[akka] ( else new PNCounter(increments.resetDelta, decrements.resetDelta) override def modifiedByNodes: Set[UniqueAddress] = - increments.modifiedByNodes union 
decrements.modifiedByNodes + increments.modifiedByNodes.union(decrements.modifiedByNodes) override def needPruningFrom(removedNode: UniqueAddress): Boolean = increments.needPruningFrom(removedNode) || decrements.needPruningFrom(removedNode) override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): PNCounter = - copy( - increments = increments.prune(removedNode, collapseInto), - decrements = decrements.prune(removedNode, collapseInto)) + copy(increments = increments.prune(removedNode, collapseInto), + decrements = decrements.prune(removedNode, collapseInto)) override def pruningCleanup(removedNode: UniqueAddress): PNCounter = - copy( - increments = increments.pruningCleanup(removedNode), - decrements = decrements.pruningCleanup(removedNode)) + copy(increments = increments.pruningCleanup(removedNode), decrements = decrements.pruningCleanup(removedNode)) private def copy(increments: GCounter = this.increments, decrements: GCounter = this.decrements): PNCounter = new PNCounter(increments, decrements) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala index 6da2766fd4..baa8442152 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PNCounterMap.scala @@ -12,6 +12,7 @@ import akka.cluster.UniqueAddress import akka.cluster.ddata.ORMap._ object PNCounterMap { + /** * INTERNAL API */ @@ -22,6 +23,7 @@ object PNCounterMap { def empty[A]: PNCounterMap[A] = new PNCounterMap(new ORMap(ORSet.empty, Map.empty, zeroTag = PNCounterMapTag)) def apply[A](): PNCounterMap[A] = empty + /** * Java API */ @@ -39,9 +41,10 @@ object PNCounterMap { * This class is immutable, i.e. "modifying" methods return a new instance. 
*/ @SerialVersionUID(1L) -final class PNCounterMap[A] private[akka] ( - private[akka] val underlying: ORMap[A, PNCounter]) - extends DeltaReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { +final class PNCounterMap[A] private[akka] (private[akka] val underlying: ORMap[A, PNCounter]) + extends DeltaReplicatedData + with ReplicatedDataSerialization + with RemovedNodePruning { type T = PNCounterMap[A] type D = ORMap.DeltaOp diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala index 632f3cffbf..e641dbeec0 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/PruningState.scala @@ -37,7 +37,7 @@ import akka.annotation.InternalApi case (_, _: PruningPerformed) => that case (PruningInitialized(thisOwner, thisSeen), PruningInitialized(thatOwner, thatSeen)) => if (thisOwner == thatOwner) - PruningInitialized(thisOwner, thisSeen union thatSeen) + PruningInitialized(thisOwner, thisSeen.union(thatSeen)) else if (Member.addressOrdering.compare(thisOwner.address, thatOwner.address) > 0) that else @@ -46,4 +46,3 @@ import akka.annotation.InternalApi def addSeen(node: Address): PruningState = this } - diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala index 917f732b7f..6853eb7366 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/ReplicatedData.scala @@ -30,6 +30,7 @@ import java.util.Optional * it has support for delta-CRDT replication. */ trait ReplicatedData { + /** * The type of the concrete implementation, e.g. `GSet[A]`. * To be specified by subclass. 
@@ -97,6 +98,7 @@ trait DeltaReplicatedData extends ReplicatedData { * The delta must implement this type. */ trait ReplicatedDelta extends ReplicatedData { + /** * The empty full state. This is used when a delta is received * and no existing full state exists on the receiving side. Then @@ -159,7 +161,8 @@ abstract class AbstractReplicatedData[A <: AbstractReplicatedData[A]] extends Re * E.g. `class TwoPhaseSet extends AbstractDeltaReplicatedData<TwoPhaseSet, TwoPhaseSet>` */ abstract class AbstractDeltaReplicatedData[A <: AbstractDeltaReplicatedData[A, B], B <: ReplicatedDelta] - extends AbstractReplicatedData[A] with DeltaReplicatedData { + extends AbstractReplicatedData[A] + with DeltaReplicatedData { override type D = ReplicatedDelta @@ -239,4 +242,3 @@ trait RemovedNodePruning extends ReplicatedData { * [[akka.cluster.ddata.protobuf.ReplicatedDataSerializer]]. */ trait ReplicatedDataSerialization extends Serializable - diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala index 484db818e0..bbf68ffe83 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/Replicator.scala @@ -138,62 +138,120 @@ object ReplicatorSettings { * `*` at the end of a key. All entries can be made durable by including "*" * in the `Set`. 
*/ -final class ReplicatorSettings( - val roles: Set[String], - val gossipInterval: FiniteDuration, - val notifySubscribersInterval: FiniteDuration, - val maxDeltaElements: Int, - val dispatcher: String, - val pruningInterval: FiniteDuration, - val maxPruningDissemination: FiniteDuration, - val durableStoreProps: Either[(String, Config), Props], - val durableKeys: Set[KeyId], - val pruningMarkerTimeToLive: FiniteDuration, - val durablePruningMarkerTimeToLive: FiniteDuration, - val deltaCrdtEnabled: Boolean, - val maxDeltaSize: Int) { +final class ReplicatorSettings(val roles: Set[String], + val gossipInterval: FiniteDuration, + val notifySubscribersInterval: FiniteDuration, + val maxDeltaElements: Int, + val dispatcher: String, + val pruningInterval: FiniteDuration, + val maxPruningDissemination: FiniteDuration, + val durableStoreProps: Either[(String, Config), Props], + val durableKeys: Set[KeyId], + val pruningMarkerTimeToLive: FiniteDuration, + val durablePruningMarkerTimeToLive: FiniteDuration, + val deltaCrdtEnabled: Boolean, + val maxDeltaSize: Int) { // for backwards compatibility - def this( - role: Option[String], - gossipInterval: FiniteDuration, - notifySubscribersInterval: FiniteDuration, - maxDeltaElements: Int, - dispatcher: String, - pruningInterval: FiniteDuration, - maxPruningDissemination: FiniteDuration, - durableStoreProps: Either[(String, Config), Props], - durableKeys: Set[KeyId], - pruningMarkerTimeToLive: FiniteDuration, - durablePruningMarkerTimeToLive: FiniteDuration, - deltaCrdtEnabled: Boolean, - maxDeltaSize: Int) = - this(role.toSet, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher, pruningInterval, - maxPruningDissemination, durableStoreProps, durableKeys, pruningMarkerTimeToLive, durablePruningMarkerTimeToLive, - deltaCrdtEnabled, maxDeltaSize) + def this(role: Option[String], + gossipInterval: FiniteDuration, + notifySubscribersInterval: FiniteDuration, + maxDeltaElements: Int, + dispatcher: String, + 
pruningInterval: FiniteDuration, + maxPruningDissemination: FiniteDuration, + durableStoreProps: Either[(String, Config), Props], + durableKeys: Set[KeyId], + pruningMarkerTimeToLive: FiniteDuration, + durablePruningMarkerTimeToLive: FiniteDuration, + deltaCrdtEnabled: Boolean, + maxDeltaSize: Int) = + this(role.toSet, + gossipInterval, + notifySubscribersInterval, + maxDeltaElements, + dispatcher, + pruningInterval, + maxPruningDissemination, + durableStoreProps, + durableKeys, + pruningMarkerTimeToLive, + durablePruningMarkerTimeToLive, + deltaCrdtEnabled, + maxDeltaSize) // For backwards compatibility - def this(role: Option[String], gossipInterval: FiniteDuration, notifySubscribersInterval: FiniteDuration, - maxDeltaElements: Int, dispatcher: String, pruningInterval: FiniteDuration, maxPruningDissemination: FiniteDuration) = - this(roles = role.toSet, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher, pruningInterval, - maxPruningDissemination, Right(Props.empty), Set.empty, 6.hours, 10.days, true, 200) + def this(role: Option[String], + gossipInterval: FiniteDuration, + notifySubscribersInterval: FiniteDuration, + maxDeltaElements: Int, + dispatcher: String, + pruningInterval: FiniteDuration, + maxPruningDissemination: FiniteDuration) = + this(roles = role.toSet, + gossipInterval, + notifySubscribersInterval, + maxDeltaElements, + dispatcher, + pruningInterval, + maxPruningDissemination, + Right(Props.empty), + Set.empty, + 6.hours, + 10.days, + true, + 200) // For backwards compatibility - def this(role: Option[String], gossipInterval: FiniteDuration, notifySubscribersInterval: FiniteDuration, - maxDeltaElements: Int, dispatcher: String, pruningInterval: FiniteDuration, maxPruningDissemination: FiniteDuration, - durableStoreProps: Either[(String, Config), Props], durableKeys: Set[String]) = - this(role, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher, pruningInterval, - maxPruningDissemination, durableStoreProps, 
durableKeys, 6.hours, 10.days, true, 200) + def this(role: Option[String], + gossipInterval: FiniteDuration, + notifySubscribersInterval: FiniteDuration, + maxDeltaElements: Int, + dispatcher: String, + pruningInterval: FiniteDuration, + maxPruningDissemination: FiniteDuration, + durableStoreProps: Either[(String, Config), Props], + durableKeys: Set[String]) = + this(role, + gossipInterval, + notifySubscribersInterval, + maxDeltaElements, + dispatcher, + pruningInterval, + maxPruningDissemination, + durableStoreProps, + durableKeys, + 6.hours, + 10.days, + true, + 200) // For backwards compatibility - def this(role: Option[String], gossipInterval: FiniteDuration, notifySubscribersInterval: FiniteDuration, - maxDeltaElements: Int, dispatcher: String, pruningInterval: FiniteDuration, maxPruningDissemination: FiniteDuration, - durableStoreProps: Either[(String, Config), Props], durableKeys: Set[String], - pruningMarkerTimeToLive: FiniteDuration, durablePruningMarkerTimeToLive: FiniteDuration, + def this(role: Option[String], + gossipInterval: FiniteDuration, + notifySubscribersInterval: FiniteDuration, + maxDeltaElements: Int, + dispatcher: String, + pruningInterval: FiniteDuration, + maxPruningDissemination: FiniteDuration, + durableStoreProps: Either[(String, Config), Props], + durableKeys: Set[String], + pruningMarkerTimeToLive: FiniteDuration, + durablePruningMarkerTimeToLive: FiniteDuration, deltaCrdtEnabled: Boolean) = - this(role, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher, pruningInterval, - maxPruningDissemination, durableStoreProps, durableKeys, pruningMarkerTimeToLive, durablePruningMarkerTimeToLive, - deltaCrdtEnabled, 200) + this(role, + gossipInterval, + notifySubscribersInterval, + maxDeltaElements, + dispatcher, + pruningInterval, + maxPruningDissemination, + durableStoreProps, + durableKeys, + pruningMarkerTimeToLive, + durablePruningMarkerTimeToLive, + deltaCrdtEnabled, + 200) def withRole(role: String): 
ReplicatorSettings = copy(roles = ReplicatorSettings.roleOption(role).toSet) @@ -230,12 +288,10 @@ final class ReplicatorSettings( def withPruning(pruningInterval: FiniteDuration, maxPruningDissemination: FiniteDuration): ReplicatorSettings = copy(pruningInterval = pruningInterval, maxPruningDissemination = maxPruningDissemination) - def withPruningMarkerTimeToLive( - pruningMarkerTimeToLive: FiniteDuration, - durablePruningMarkerTimeToLive: FiniteDuration): ReplicatorSettings = - copy( - pruningMarkerTimeToLive = pruningMarkerTimeToLive, - durablePruningMarkerTimeToLive = durablePruningMarkerTimeToLive) + def withPruningMarkerTimeToLive(pruningMarkerTimeToLive: FiniteDuration, + durablePruningMarkerTimeToLive: FiniteDuration): ReplicatorSettings = + copy(pruningMarkerTimeToLive = pruningMarkerTimeToLive, + durablePruningMarkerTimeToLive = durablePruningMarkerTimeToLive) def withDurableStoreProps(durableStoreProps: Props): ReplicatorSettings = copy(durableStoreProps = Right(durableStoreProps)) @@ -260,23 +316,32 @@ final class ReplicatorSettings( def withMaxDeltaSize(maxDeltaSize: Int): ReplicatorSettings = copy(maxDeltaSize = maxDeltaSize) - private def copy( - roles: Set[String] = roles, - gossipInterval: FiniteDuration = gossipInterval, - notifySubscribersInterval: FiniteDuration = notifySubscribersInterval, - maxDeltaElements: Int = maxDeltaElements, - dispatcher: String = dispatcher, - pruningInterval: FiniteDuration = pruningInterval, - maxPruningDissemination: FiniteDuration = maxPruningDissemination, - durableStoreProps: Either[(String, Config), Props] = durableStoreProps, - durableKeys: Set[KeyId] = durableKeys, - pruningMarkerTimeToLive: FiniteDuration = pruningMarkerTimeToLive, - durablePruningMarkerTimeToLive: FiniteDuration = durablePruningMarkerTimeToLive, - deltaCrdtEnabled: Boolean = deltaCrdtEnabled, - maxDeltaSize: Int = maxDeltaSize): ReplicatorSettings = - new ReplicatorSettings(roles, gossipInterval, notifySubscribersInterval, maxDeltaElements, 
dispatcher, - pruningInterval, maxPruningDissemination, durableStoreProps, durableKeys, - pruningMarkerTimeToLive, durablePruningMarkerTimeToLive, deltaCrdtEnabled, maxDeltaSize) + private def copy(roles: Set[String] = roles, + gossipInterval: FiniteDuration = gossipInterval, + notifySubscribersInterval: FiniteDuration = notifySubscribersInterval, + maxDeltaElements: Int = maxDeltaElements, + dispatcher: String = dispatcher, + pruningInterval: FiniteDuration = pruningInterval, + maxPruningDissemination: FiniteDuration = maxPruningDissemination, + durableStoreProps: Either[(String, Config), Props] = durableStoreProps, + durableKeys: Set[KeyId] = durableKeys, + pruningMarkerTimeToLive: FiniteDuration = pruningMarkerTimeToLive, + durablePruningMarkerTimeToLive: FiniteDuration = durablePruningMarkerTimeToLive, + deltaCrdtEnabled: Boolean = deltaCrdtEnabled, + maxDeltaSize: Int = maxDeltaSize): ReplicatorSettings = + new ReplicatorSettings(roles, + gossipInterval, + notifySubscribersInterval, + maxDeltaElements, + dispatcher, + pruningInterval, + maxPruningDissemination, + durableStoreProps, + durableKeys, + pruningMarkerTimeToLive, + durablePruningMarkerTimeToLive, + deltaCrdtEnabled, + maxDeltaSize) } object Replicator { @@ -285,9 +350,8 @@ object Replicator { * Factory method for the [[akka.actor.Props]] of the [[Replicator]] actor. 
*/ def props(settings: ReplicatorSettings): Props = { - require( - settings.durableKeys.isEmpty || (settings.durableStoreProps != Right(Props.empty)), - "durableStoreProps must be defined when durableKeys are defined") + require(settings.durableKeys.isEmpty || (settings.durableStoreProps != Right(Props.empty)), + "durableStoreProps must be defined when durableKeys are defined") Props(new Replicator(settings)).withDeploy(Deploy.local).withDispatcher(settings.dispatcher) } @@ -337,7 +401,8 @@ object Replicator { */ def this(n: Int, timeout: java.time.Duration) = this(n, timeout.asScala) } - final case class WriteMajority(timeout: FiniteDuration, minCap: Int = DefaultMajorityMinCap) extends WriteConsistency { + final case class WriteMajority(timeout: FiniteDuration, minCap: Int = DefaultMajorityMinCap) + extends WriteConsistency { def this(timeout: FiniteDuration) = this(timeout, DefaultMajorityMinCap) /** @@ -372,6 +437,7 @@ object Replicator { * INTERNAL API */ @InternalApi private[akka] final case class GetKeyIdsResult(keyIds: Set[KeyId]) { + /** * Java API */ @@ -394,7 +460,9 @@ object Replicator { * or maintain local correlation data structures. */ final case class Get[A <: ReplicatedData](key: Key[A], consistency: ReadConsistency, request: Option[Any] = None) - extends Command[A] with ReplicatorMessage { + extends Command[A] + with ReplicatorMessage { + /** * Java API: `Get` value from local `Replicator`, i.e. `ReadLocal` consistency. */ @@ -414,11 +482,13 @@ object Replicator { /** Java API */ def getRequest: Optional[Any] = Optional.ofNullable(request.orNull) } + /** * Reply from `Get`. The data value is retrieved with [[#get]] using the typed key. */ final case class GetSuccess[A <: ReplicatedData](key: Key[A], request: Option[Any])(data: A) - extends GetResponse[A] with ReplicatorMessage { + extends GetResponse[A] + with ReplicatorMessage { /** * The data value, with correct type. 
@@ -435,13 +505,16 @@ object Replicator { def dataValue: A = data } final case class NotFound[A <: ReplicatedData](key: Key[A], request: Option[Any]) - extends GetResponse[A] with ReplicatorMessage + extends GetResponse[A] + with ReplicatorMessage + /** * The [[Get]] request could not be fulfill according to the given * [[ReadConsistency consistency level]] and [[ReadConsistency#timeout timeout]]. */ final case class GetFailure[A <: ReplicatedData](key: Key[A], request: Option[Any]) - extends GetResponse[A] with ReplicatorMessage + extends GetResponse[A] + with ReplicatorMessage /** * Register a subscriber that will be notified with a [[Changed]] message @@ -458,18 +531,21 @@ object Replicator { * message. */ final case class Subscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef) extends ReplicatorMessage + /** * Unregister a subscriber. * * @see [[Replicator.Subscribe]] */ final case class Unsubscribe[A <: ReplicatedData](key: Key[A], subscriber: ActorRef) extends ReplicatorMessage + /** * The data value is retrieved with [[#get]] using the typed key. * * @see [[Replicator.Subscribe]] */ final case class Changed[A <: ReplicatedData](key: Key[A])(data: A) extends ReplicatorMessage { + /** * The data value, with correct type. * Scala pattern matching cannot infer the type from the `key` parameter. @@ -502,9 +578,10 @@ object Replicator { * way to pass contextual information (e.g. original sender) without having to use `ask` * or local correlation data structures. 
*/ - def apply[A <: ReplicatedData]( - key: Key[A], initial: A, writeConsistency: WriteConsistency, - request: Option[Any] = None)(modify: A => A): Update[A] = + def apply[A <: ReplicatedData](key: Key[A], + initial: A, + writeConsistency: WriteConsistency, + request: Option[Any] = None)(modify: A => A): Update[A] = Update(key, writeConsistency, request)(modifyWithInitial(initial, modify)) private def modifyWithInitial[A <: ReplicatedData](initial: A, modify: A => A): Option[A] => A = { @@ -512,6 +589,7 @@ object Replicator { case None => modify(initial) } } + /** * Send this message to the local `Replicator` to update a data value for the * given `key`. The `Replicator` will reply with one of the [[UpdateResponse]] messages. @@ -528,9 +606,10 @@ object Replicator { * function that only uses the data parameter and stable fields from enclosing scope. It must * for example not access `sender()` reference of an enclosing actor. */ - final case class Update[A <: ReplicatedData](key: Key[A], writeConsistency: WriteConsistency, - request: Option[Any])(val modify: Option[A] => A) - extends Command[A] with NoSerializationVerificationNeeded { + final case class Update[A <: ReplicatedData](key: Key[A], writeConsistency: WriteConsistency, request: Option[Any])( + val modify: Option[A] => A) + extends Command[A] + with NoSerializationVerificationNeeded { /** * Java API: Modify value of local `Replicator` and replicate with given `writeConsistency`. @@ -539,8 +618,7 @@ object Replicator { * If there is no current data value for the `key` the `initial` value will be * passed to the `modify` function. 
*/ - def this( - key: Key[A], initial: A, writeConsistency: WriteConsistency, modify: JFunction[A, A]) = + def this(key: Key[A], initial: A, writeConsistency: WriteConsistency, modify: JFunction[A, A]) = this(key, writeConsistency, None)(Update.modifyWithInitial(initial, data => modify.apply(data))) /** @@ -554,9 +632,13 @@ object Replicator { * way to pass contextual information (e.g. original sender) without having to use `ask` * or local correlation data structures. */ - def this( - key: Key[A], initial: A, writeConsistency: WriteConsistency, request: Optional[Any], modify: JFunction[A, A]) = - this(key, writeConsistency, Option(request.orElse(null)))(Update.modifyWithInitial(initial, data => modify.apply(data))) + def this(key: Key[A], + initial: A, + writeConsistency: WriteConsistency, + request: Optional[Any], + modify: JFunction[A, A]) = + this(key, writeConsistency, Option(request.orElse(null)))( + Update.modifyWithInitial(initial, data => modify.apply(data))) } @@ -568,7 +650,8 @@ object Replicator { def getRequest: Optional[Any] = Optional.ofNullable(request.orNull) } final case class UpdateSuccess[A <: ReplicatedData](key: Key[A], request: Option[Any]) - extends UpdateResponse[A] with DeadLetterSuppression + extends UpdateResponse[A] + with DeadLetterSuppression sealed abstract class UpdateFailure[A <: ReplicatedData] extends UpdateResponse[A] /** @@ -581,14 +664,19 @@ object Replicator { * crashes before it has been able to communicate with other replicas. */ final case class UpdateTimeout[A <: ReplicatedData](key: Key[A], request: Option[Any]) extends UpdateFailure[A] + /** * If the `modify` function of the [[Update]] throws an exception the reply message * will be this `ModifyFailure` message. The original exception is included as `cause`. 
*/ - final case class ModifyFailure[A <: ReplicatedData](key: Key[A], errorMessage: String, cause: Throwable, request: Option[Any]) - extends UpdateFailure[A] { + final case class ModifyFailure[A <: ReplicatedData](key: Key[A], + errorMessage: String, + cause: Throwable, + request: Option[Any]) + extends UpdateFailure[A] { override def toString: String = s"ModifyFailure [$key]: $errorMessage" } + /** * The local store or direct replication of the [[Update]] could not be fulfill according to * the given [[WriteConsistency consistency level]] due to durable store errors. This is @@ -600,7 +688,8 @@ object Replicator { * crashes before it has been able to communicate with other replicas. */ final case class StoreFailure[A <: ReplicatedData](key: Key[A], request: Option[Any]) - extends UpdateFailure[A] with DeleteResponse[A] { + extends UpdateFailure[A] + with DeleteResponse[A] { /** Java API */ override def getRequest: Optional[Any] = Optional.ofNullable(request.orNull) @@ -615,7 +704,8 @@ object Replicator { * or maintain local correlation data structures. 
*/ final case class Delete[A <: ReplicatedData](key: Key[A], consistency: WriteConsistency, request: Option[Any] = None) - extends Command[A] with NoSerializationVerificationNeeded { + extends Command[A] + with NoSerializationVerificationNeeded { def this(key: Key[A], consistency: WriteConsistency) = this(key, consistency, None) @@ -631,9 +721,12 @@ object Replicator { def getRequest: Optional[Any] = Optional.ofNullable(request.orNull) } final case class DeleteSuccess[A <: ReplicatedData](key: Key[A], request: Option[Any]) extends DeleteResponse[A] - final case class ReplicationDeleteFailure[A <: ReplicatedData](key: Key[A], request: Option[Any]) extends DeleteResponse[A] + final case class ReplicationDeleteFailure[A <: ReplicatedData](key: Key[A], request: Option[Any]) + extends DeleteResponse[A] final case class DataDeleted[A <: ReplicatedData](key: Key[A], request: Option[Any]) - extends RuntimeException with NoStackTrace with DeleteResponse[A] { + extends RuntimeException + with NoStackTrace + with DeleteResponse[A] { override def toString: String = s"DataDeleted [$key]" } @@ -699,11 +792,10 @@ object Replicator { /** * The `DataEnvelope` wraps a data entry and carries state of the pruning process for the entry. 
*/ - final case class DataEnvelope( - data: ReplicatedData, - pruning: Map[UniqueAddress, PruningState] = Map.empty, - deltaVersions: VersionVector = VersionVector.empty) - extends ReplicatorMessage { + final case class DataEnvelope(data: ReplicatedData, + pruning: Map[UniqueAddress, PruningState] = Map.empty, + deltaVersions: VersionVector = VersionVector.empty) + extends ReplicatorMessage { import PruningState._ @@ -725,9 +817,8 @@ object Replicator { } def initRemovedNodePruning(removed: UniqueAddress, owner: UniqueAddress): DataEnvelope = { - copy( - pruning = pruning.updated(removed, PruningInitialized(owner, Set.empty)), - deltaVersions = cleanedDeltaVersions(removed)) + copy(pruning = pruning.updated(removed, PruningInitialized(owner, Set.empty)), + deltaVersions = cleanedDeltaVersions(removed)) } def prune(from: UniqueAddress, pruningPerformed: PruningPerformed): DataEnvelope = { @@ -737,8 +828,9 @@ object Replicator { pruning(from) match { case PruningInitialized(owner, _) => val prunedData = dataWithRemovedNodePruning.prune(from, owner) - copy(data = prunedData, pruning = pruning.updated(from, pruningPerformed), - deltaVersions = cleanedDeltaVersions(from)) + copy(data = prunedData, + pruning = pruning.updated(from, pruningPerformed), + deltaVersions = cleanedDeltaVersions(from)) case _ => this } @@ -757,7 +849,7 @@ object Replicator { case None => acc.updated(key, thisValue) case Some(thatValue) => - acc.updated(key, thisValue merge thatValue) + acc.updated(key, thisValue.merge(thatValue)) } } val filteredMergedPruning = { @@ -773,15 +865,18 @@ object Replicator { // cleanup and merge deltaVersions val removedNodes = filteredMergedPruning.keys - val cleanedDV = removedNodes.foldLeft(deltaVersions) { (acc, node) => acc.pruningCleanup(node) } - val cleanedOtherDV = removedNodes.foldLeft(other.deltaVersions) { (acc, node) => acc.pruningCleanup(node) } + val cleanedDV = removedNodes.foldLeft(deltaVersions) { (acc, node) => + acc.pruningCleanup(node) + } + 
val cleanedOtherDV = removedNodes.foldLeft(other.deltaVersions) { (acc, node) => + acc.pruningCleanup(node) + } val mergedDeltaVersions = cleanedDV.merge(cleanedOtherDV) // cleanup both sides before merging, `merge(otherData: ReplicatedData)` will cleanup other.data - copy( - data = cleaned(data, filteredMergedPruning), - deltaVersions = mergedDeltaVersions, - pruning = filteredMergedPruning).merge(other.data) + copy(data = cleaned(data, filteredMergedPruning), + deltaVersions = mergedDeltaVersions, + pruning = filteredMergedPruning).merge(other.data) } def merge(otherData: ReplicatedData): DataEnvelope = { @@ -789,10 +884,11 @@ object Replicator { else { val mergedData = cleaned(otherData, pruning) match { - case d: ReplicatedDelta => data match { - case drd: DeltaReplicatedData => drd.mergeDelta(d.asInstanceOf[drd.D]) - case _ => throw new IllegalArgumentException("Expected DeltaReplicatedData") - } + case d: ReplicatedDelta => + data match { + case drd: DeltaReplicatedData => drd.mergeDelta(d.asInstanceOf[drd.D]) + case _ => throw new IllegalArgumentException("Expected DeltaReplicatedData") + } case c => data.merge(c.asInstanceOf[data.T]) } if (data.getClass != mergedData.getClass) @@ -830,15 +926,19 @@ object Replicator { final case class Status(digests: Map[KeyId, Digest], chunk: Int, totChunks: Int) extends ReplicatorMessage { override def toString: String = - (digests.map { - case (key, bytes) => key + " -> " + bytes.map(byte => f"$byte%02x").mkString("") - }).mkString("Status(", ", ", ")") + (digests + .map { + case (key, bytes) => key + " -> " + bytes.map(byte => f"$byte%02x").mkString("") + }) + .mkString("Status(", ", ", ")") } final case class Gossip(updatedData: Map[KeyId, DataEnvelope], sendBack: Boolean) extends ReplicatorMessage final case class Delta(dataEnvelope: DataEnvelope, fromSeqNr: Long, toSeqNr: Long) - final case class DeltaPropagation(fromNode: UniqueAddress, reply: Boolean, deltas: Map[KeyId, Delta]) extends ReplicatorMessage + final 
case class DeltaPropagation(fromNode: UniqueAddress, reply: Boolean, deltas: Map[KeyId, Delta]) + extends ReplicatorMessage object DeltaPropagation { + /** * When a DeltaReplicatedData returns `None` from `delta` it must still be * treated as a delta that increase the version counter in `DeltaPropagationSelector`. @@ -1066,14 +1166,14 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog val selfUniqueAddress = cluster.selfUniqueAddress require(!cluster.isTerminated, "Cluster node must not be terminated") - require( - roles.subsetOf(cluster.selfRoles), - s"This cluster member [${selfAddress}] doesn't have all the roles [${roles.mkString(", ")}]") + require(roles.subsetOf(cluster.selfRoles), + s"This cluster member [${selfAddress}] doesn't have all the roles [${roles.mkString(", ")}]") //Start periodic gossip to random nodes in cluster import context.dispatcher val gossipTask = context.system.scheduler.schedule(gossipInterval, gossipInterval, self, GossipTick) - val notifyTask = context.system.scheduler.schedule(notifySubscribersInterval, notifySubscribersInterval, self, FlushChanges) + val notifyTask = + context.system.scheduler.schedule(notifySubscribersInterval, notifySubscribersInterval, self, FlushChanges) val pruningTask = if (pruningInterval >= Duration.Zero) Some(context.system.scheduler.schedule(pruningInterval, pruningInterval, self, RemovedNodePruningTick)) @@ -1110,13 +1210,15 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog override def createDeltaPropagation(deltas: Map[KeyId, (ReplicatedData, Long, Long)]): DeltaPropagation = { // Important to include the pruning state in the deltas. For example if the delta is based // on an entry that has been pruned but that has not yet been performed on the target node. 
- DeltaPropagation(selfUniqueAddress, reply = false, deltas.iterator.collect { - case (key, (d, fromSeqNr, toSeqNr)) if d != NoDeltaPlaceholder => - getData(key) match { - case Some(envelope) => key -> Delta(envelope.copy(data = d), fromSeqNr, toSeqNr) - case None => key -> Delta(DataEnvelope(d), fromSeqNr, toSeqNr) - } - }.toMap) + DeltaPropagation(selfUniqueAddress, + reply = false, + deltas.iterator.collect { + case (key, (d, fromSeqNr, toSeqNr)) if d != NoDeltaPlaceholder => + getData(key) match { + case Some(envelope) => key -> Delta(envelope.copy(data = d), fromSeqNr, toSeqNr) + case None => key -> Delta(DataEnvelope(d), fromSeqNr, toSeqNr) + } + }.toMap) } } val deltaPropagationTask: Option[Cancellable] = @@ -1125,8 +1227,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog // Normally the delta is propagated to all nodes within the gossip tick, so that // full state gossip is not needed. val deltaPropagationInterval = (gossipInterval / deltaPropagationSelector.gossipIntervalDivisor).max(200.millis) - Some(context.system.scheduler.schedule(deltaPropagationInterval, deltaPropagationInterval, - self, DeltaPropagationTick)) + Some( + context.system.scheduler + .schedule(deltaPropagationInterval, deltaPropagationInterval, self, DeltaPropagationTick)) } else None // cluster nodes, doesn't contain selfAddress @@ -1181,8 +1284,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog if (hasDurableKeys) durableStore ! 
LoadAll // not using LeaderChanged/RoleLeaderChanged because here we need one node independent of data center - cluster.subscribe(self, initialStateMode = InitialStateAsEvents, - classOf[MemberEvent], classOf[ReachabilityEvent]) + cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[ReachabilityEvent]) } override def postStop(): Unit = { @@ -1198,13 +1300,14 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog override val supervisorStrategy = { def fromDurableStore: Boolean = sender() == durableStore && sender() != context.system.deadLetters - OneForOneStrategy()( - ({ - case e @ (_: DurableStore.LoadFailed | _: ActorInitializationException) if fromDurableStore => - log.error(e, "Stopping distributed-data Replicator due to load or startup failure in durable store, caused by: {}", if (e.getCause eq null) "" else e.getCause.getMessage) - context.stop(self) - SupervisorStrategy.Stop - }: SupervisorStrategy.Decider).orElse(SupervisorStrategy.defaultDecider)) + OneForOneStrategy()(({ + case e @ (_: DurableStore.LoadFailed | _: ActorInitializationException) if fromDurableStore => + log.error(e, + "Stopping distributed-data Replicator due to load or startup failure in durable store, caused by: {}", + if (e.getCause eq null) "" else e.getCause.getMessage) + context.stop(self) + SupervisorStrategy.Stop + }: SupervisorStrategy.Decider).orElse(SupervisorStrategy.defaultDecider)) } def receive = @@ -1241,9 +1344,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } } case LoadAllCompleted => - log.debug( - "Loading {} entries from durable store took {} ms, stashed {}", - count, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime), stash.size) + log.debug("Loading {} entries from durable store took {} ms, stashed {}", + count, + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime), + stash.size) context.become(normalReceive) unstashAll() self ! 
FlushChanges @@ -1306,8 +1410,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } replyTo ! reply } else - context.actorOf(ReadAggregator.props(key, consistency, req, nodes, unreachable, localValue, replyTo) - .withDispatcher(context.props.dispatcher)) + context.actorOf( + ReadAggregator + .props(key, consistency, req, nodes, unreachable, localValue, replyTo) + .withDispatcher(context.props.dispatcher)) } def isLocalGet(readConsistency: ReadConsistency): Boolean = @@ -1323,8 +1429,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog def isLocalSender(): Boolean = !replyTo.path.address.hasGlobalScope - def receiveUpdate(key: KeyR, modify: Option[ReplicatedData] => ReplicatedData, - writeConsistency: WriteConsistency, req: Option[Any]): Unit = { + def receiveUpdate(key: KeyR, + modify: Option[ReplicatedData] => ReplicatedData, + writeConsistency: WriteConsistency, + req: Option[Any]): Unit = { val localValue = getData(key.id) def deltaOrPlaceholder(d: DeltaReplicatedData): Option[ReplicatedDelta] = { @@ -1344,11 +1452,12 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog case d => (envelope.merge(d.asInstanceOf[existing.T]), None) } - case None => modify(None) match { - case d: DeltaReplicatedData if deltaCrdtEnabled => - (DataEnvelope(d.resetDelta), deltaOrPlaceholder(d)) - case d => (DataEnvelope(d), None) - } + case None => + modify(None) match { + case d: DeltaReplicatedData if deltaCrdtEnabled => + (DataEnvelope(d.resetDelta), deltaOrPlaceholder(d)) + case d => (DataEnvelope(d), None) + } } } match { case Success((envelope, delta)) => @@ -1367,8 +1476,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog val durable = isDurable(key.id) if (isLocalUpdate(writeConsistency)) { if (durable) - durableStore ! 
Store(key.id, new DurableDataEnvelope(newEnvelope), - Some(StoreReply(UpdateSuccess(key, req), StoreFailure(key, req), replyTo))) + durableStore ! Store(key.id, + new DurableDataEnvelope(newEnvelope), + Some(StoreReply(UpdateSuccess(key, req), StoreFailure(key, req), replyTo))) else replyTo ! UpdateSuccess(key, req) } else { @@ -1381,12 +1491,14 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog case None => (newEnvelope, None) } val writeAggregator = - context.actorOf(WriteAggregator.props(key, writeEnvelope, writeDelta, writeConsistency, - req, nodes, unreachable, replyTo, durable) - .withDispatcher(context.props.dispatcher)) + context.actorOf( + WriteAggregator + .props(key, writeEnvelope, writeDelta, writeConsistency, req, nodes, unreachable, replyTo, durable) + .withDispatcher(context.props.dispatcher)) if (durable) { - durableStore ! Store(key.id, new DurableDataEnvelope(newEnvelope), - Some(StoreReply(UpdateSuccess(key, req), StoreFailure(key, req), writeAggregator))) + durableStore ! 
Store(key.id, + new DurableDataEnvelope(newEnvelope), + Some(StoreReply(UpdateSuccess(key, req), StoreFailure(key, req), writeAggregator))) } } case Failure(e: DataDeleted[_]) => @@ -1436,8 +1548,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog Some(setData(key, merged)) } catch { case e: IllegalArgumentException => - log.warning( - "Couldn't merge [{}], due to: {}", key, e.getMessage) + log.warning("Couldn't merge [{}], due to: {}", key, e.getMessage) None } case None => @@ -1462,9 +1573,11 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } def receiveGetKeyIds(): Unit = { - val keys: Set[KeyId] = dataEntries.iterator.collect { - case (key, (DataEnvelope(data, _, _), _)) if data != DeletedData => key - }.to(immutable.Set) + val keys: Set[KeyId] = dataEntries.iterator + .collect { + case (key, (DataEnvelope(data, _, _), _)) if data != DeletedData => key + } + .to(immutable.Set) replyTo ! GetKeyIdsResult(keys) } @@ -1478,17 +1591,21 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog val durable = isDurable(key.id) if (isLocalUpdate(consistency)) { if (durable) - durableStore ! Store(key.id, new DurableDataEnvelope(DeletedEnvelope), - Some(StoreReply(DeleteSuccess(key, req), StoreFailure(key, req), replyTo))) + durableStore ! Store(key.id, + new DurableDataEnvelope(DeletedEnvelope), + Some(StoreReply(DeleteSuccess(key, req), StoreFailure(key, req), replyTo))) else replyTo ! DeleteSuccess(key, req) } else { val writeAggregator = - context.actorOf(WriteAggregator.props(key, DeletedEnvelope, None, consistency, req, nodes, unreachable, replyTo, durable) - .withDispatcher(context.props.dispatcher)) + context.actorOf( + WriteAggregator + .props(key, DeletedEnvelope, None, consistency, req, nodes, unreachable, replyTo, durable) + .withDispatcher(context.props.dispatcher)) if (durable) { - durableStore ! 
Store(key.id, new DurableDataEnvelope(DeletedEnvelope), - Some(StoreReply(DeleteSuccess(key, req), StoreFailure(key, req), writeAggregator))) + durableStore ! Store(key.id, + new DurableDataEnvelope(DeletedEnvelope), + Some(StoreReply(DeleteSuccess(key, req), StoreFailure(key, req), writeAggregator))) } } } @@ -1549,10 +1666,11 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } def isNodeRemoved(node: UniqueAddress, keys: Iterable[KeyId]): Boolean = { - removedNodes.contains(node) || (keys.exists(key => dataEntries.get(key) match { - case Some((DataEnvelope(_, pruning, _), _)) => pruning.contains(node) - case None => false - })) + removedNodes.contains(node) || (keys.exists(key => + dataEntries.get(key) match { + case Some((DataEnvelope(_, pruning, _), _)) => pruning.contains(node) + case None => false + })) } def receiveFlushChanges(): Unit = { @@ -1601,32 +1719,44 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog try { val isDebugEnabled = log.isDebugEnabled if (isDebugEnabled) - log.debug("Received DeltaPropagation from [{}], containing [{}]", fromNode.address, - deltas.collect { case (key, Delta(_, fromSeqNr, toSeqNr)) => s"$key $fromSeqNr-$toSeqNr" }.mkString(", ")) + log.debug("Received DeltaPropagation from [{}], containing [{}]", + fromNode.address, + deltas + .collect { case (key, Delta(_, fromSeqNr, toSeqNr)) => s"$key $fromSeqNr-$toSeqNr" } + .mkString(", ")) if (isNodeRemoved(fromNode, deltas.keys)) { // Late message from a removed node. // Drop it to avoid merging deltas that have been pruned on one side. 
- if (isDebugEnabled) log.debug( - "Skipping DeltaPropagation from [{}] because that node has been removed", fromNode.address) + if (isDebugEnabled) + log.debug("Skipping DeltaPropagation from [{}] because that node has been removed", fromNode.address) } else { deltas.foreach { case (key, Delta(envelope @ DataEnvelope(_: RequiresCausalDeliveryOfDeltas, _, _), fromSeqNr, toSeqNr)) => val currentSeqNr = getDeltaSeqNr(key, fromNode) if (currentSeqNr >= toSeqNr) { - if (isDebugEnabled) log.debug( - "Skipping DeltaPropagation from [{}] for [{}] because toSeqNr [{}] already handled [{}]", - fromNode.address, key, toSeqNr, currentSeqNr) + if (isDebugEnabled) + log.debug("Skipping DeltaPropagation from [{}] for [{}] because toSeqNr [{}] already handled [{}]", + fromNode.address, + key, + toSeqNr, + currentSeqNr) if (reply) replyTo ! WriteAck } else if (fromSeqNr > (currentSeqNr + 1)) { - if (isDebugEnabled) log.debug( - "Skipping DeltaPropagation from [{}] for [{}] because missing deltas between [{}-{}]", - fromNode.address, key, currentSeqNr + 1, fromSeqNr - 1) + if (isDebugEnabled) + log.debug("Skipping DeltaPropagation from [{}] for [{}] because missing deltas between [{}-{}]", + fromNode.address, + key, + currentSeqNr + 1, + fromSeqNr - 1) if (reply) replyTo ! 
DeltaNack } else { - if (isDebugEnabled) log.debug( - "Applying DeltaPropagation from [{}] for [{}] with sequence numbers [{}], current was [{}]", - fromNode.address, key, s"$fromSeqNr-$toSeqNr", currentSeqNr) + if (isDebugEnabled) + log.debug("Applying DeltaPropagation from [{}] for [{}] with sequence numbers [{}], current was [{}]", + fromNode.address, + key, + s"$fromSeqNr-$toSeqNr", + currentSeqNr) val newEnvelope = envelope.copy(deltaVersions = VersionVector(fromNode, toSeqNr)) writeAndStore(key, newEnvelope, reply) } @@ -1648,7 +1778,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog def receiveGossipTick(): Unit = { if (fullStateGossipEnabled) - selectRandomNode(nodes.union(weaklyUpNodes).toVector) foreach gossipTo + selectRandomNode(nodes.union(weaklyUpNodes).toVector).foreach(gossipTo) } def gossipTo(address: Address): Unit = { @@ -1675,15 +1805,18 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } def selectRandomNode(addresses: immutable.IndexedSeq[Address]): Option[Address] = - if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current nextInt addresses.size)) + if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current.nextInt(addresses.size))) def replica(address: Address): ActorSelection = context.actorSelection(self.path.toStringWithAddress(address)) def receiveStatus(otherDigests: Map[KeyId, Digest], chunk: Int, totChunks: Int): Unit = { if (log.isDebugEnabled) - log.debug("Received gossip status from [{}], chunk [{}] of [{}] containing [{}]", replyTo.path.address, - (chunk + 1), totChunks, otherDigests.keys.mkString(", ")) + log.debug("Received gossip status from [{}], chunk [{}] of [{}] containing [{}]", + replyTo.path.address, + (chunk + 1), + totChunks, + otherDigests.keys.mkString(", ")) def isOtherDifferent(key: KeyId, otherDigest: Digest): Boolean = { val d = getDigest(key) @@ -1696,7 +1829,7 @@ final class Replicator(settings: 
ReplicatorSettings) extends Actor with ActorLog val myKeys = if (totChunks == 1) dataEntries.keySet else dataEntries.keysIterator.filter(key => math.abs(key.hashCode % totChunks) == chunk).toSet - val otherMissingKeys = myKeys diff otherKeys + val otherMissingKeys = myKeys.diff(otherKeys) val keys = (otherDifferentKeys ++ otherMissingKeys).take(maxDeltaElements) if (keys.nonEmpty) { if (log.isDebugEnabled) @@ -1704,10 +1837,12 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog val g = Gossip(keys.iterator.map(k => k -> getData(k).get).toMap, sendBack = otherDifferentKeys.nonEmpty) replyTo ! g } - val myMissingKeys = otherKeys diff myKeys + val myMissingKeys = otherKeys.diff(myKeys) if (myMissingKeys.nonEmpty) { if (log.isDebugEnabled) - log.debug("Sending gossip status to [{}], requesting missing [{}]", replyTo.path.address, myMissingKeys.mkString(", ")) + log.debug("Sending gossip status to [{}], requesting missing [{}]", + replyTo.path.address, + myMissingKeys.mkString(", ")) val status = Status(myMissingKeys.iterator.map(k => k -> NotFoundDigest).toMap, chunk, totChunks) replyTo ! 
status } @@ -1749,8 +1884,8 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } def hasSubscriber(subscriber: ActorRef): Boolean = - (subscribers.exists { case (k, s) => s.contains(subscriber) }) || - (newSubscribers.exists { case (k, s) => s.contains(subscriber) }) + (subscribers.exists { case (k, s) => s.contains(subscriber) }) || + (newSubscribers.exists { case (k, s) => s.contains(subscriber) }) def receiveTerminated(ref: ActorRef): Unit = { if (ref == durableStore) { @@ -1758,9 +1893,13 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog context.stop(self) } else { val keys1 = subscribers.collect { case (k, s) if s.contains(ref) => k } - keys1.foreach { key => subscribers.removeBinding(key, ref) } + keys1.foreach { key => + subscribers.removeBinding(key, ref) + } val keys2 = newSubscribers.collect { case (k, s) if s.contains(ref) => k } - keys2.foreach { key => newSubscribers.removeBinding(key, ref) } + keys2.foreach { key => + newSubscribers.removeBinding(key, ref) + } (keys1 ++ keys2).foreach { key => if (!subscribers.contains(key) && !newSubscribers.contains(key)) @@ -1784,7 +1923,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog def receiveMemberRemoved(m: Member): Unit = { if (m.address == selfAddress) - context stop self + context.stop(self) else if (matchingRole(m)) { // filter, it's possible that the ordering is changed since it based on MemberStatus leader = leader.filterNot(_.uniqueAddress == m.uniqueAddress) @@ -1830,11 +1969,11 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog } def collectRemovedNodes(): Unit = { - val knownNodes = nodes union weaklyUpNodes union removedNodes.keySet.map(_.address) + val knownNodes = nodes.union(weaklyUpNodes).union(removedNodes.keySet.map(_.address)) val newRemovedNodes = dataEntries.foldLeft(Set.empty[UniqueAddress]) { case (acc, (_, (envelope @ DataEnvelope(data: 
RemovedNodePruning, _, _), _))) => - acc union data.modifiedByNodes.filterNot(n => n == selfUniqueAddress || knownNodes(n.address)) + acc.union(data.modifiedByNodes.filterNot(n => n == selfUniqueAddress || knownNodes(n.address))) case (acc, _) => acc } @@ -1847,9 +1986,11 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog def initRemovedNodePruning(): Unit = { // initiate pruning for removed nodes - val removedSet: Set[UniqueAddress] = removedNodes.iterator.collect { - case (r, t) if ((allReachableClockTime - t) > maxPruningDisseminationNanos) => r - }.to(immutable.Set) + val removedSet: Set[UniqueAddress] = removedNodes.iterator + .collect { + case (r, t) if ((allReachableClockTime - t) > maxPruningDisseminationNanos) => r + } + .to(immutable.Set) if (removedSet.nonEmpty) { for ((key, (envelope, _)) <- dataEntries; removed <- removedSet) { @@ -1864,9 +2005,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog envelope.data match { case dataWithRemovedNodePruning: RemovedNodePruning => envelope.pruning.get(removed) match { - case None => init() + case None => init() case Some(PruningInitialized(owner, _)) if owner != selfUniqueAddress => init() - case _ => // already in progress + case _ => // already in progress } case _ => } @@ -1877,14 +2018,15 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog def performRemovedNodePruning(): Unit = { // perform pruning when all seen Init - val allNodes = nodes union weaklyUpNodes + val allNodes = nodes.union(weaklyUpNodes) val pruningPerformed = PruningPerformed(System.currentTimeMillis() + pruningMarkerTimeToLive.toMillis) val durablePruningPerformed = PruningPerformed(System.currentTimeMillis() + durablePruningMarkerTimeToLive.toMillis) dataEntries.foreach { case (key, (envelope @ DataEnvelope(data: RemovedNodePruning, pruning, _), _)) => pruning.foreach { - case (removed, PruningInitialized(owner, seen)) if owner == 
selfUniqueAddress - && (allNodes.isEmpty || allNodes.forall(seen)) => + case (removed, PruningInitialized(owner, seen)) + if owner == selfUniqueAddress + && (allNodes.isEmpty || allNodes.forall(seen)) => val newEnvelope = envelope.prune(removed, if (isDurable(key)) durablePruningPerformed else pruningPerformed) log.debug("Perform pruning of [{}] from [{}] to [{}]", key, removed, selfUniqueAddress) setData(key, newEnvelope) @@ -1949,7 +2091,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog def timeout: FiniteDuration def nodes: Set[Address] def unreachable: Set[Address] - def reachableNodes: Set[Address] = nodes diff unreachable + def reachableNodes: Set[Address] = nodes.diff(unreachable) import context.dispatcher var sendToSecondarySchedule = context.system.scheduler.scheduleOnce(timeout / 5, self, SendToSecondary) @@ -1989,16 +2131,15 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog * INTERNAL API */ @InternalApi private[akka] object WriteAggregator { - def props( - key: KeyR, - envelope: Replicator.Internal.DataEnvelope, - delta: Option[Replicator.Internal.Delta], - consistency: Replicator.WriteConsistency, - req: Option[Any], - nodes: Set[Address], - unreachable: Set[Address], - replyTo: ActorRef, - durable: Boolean): Props = + def props(key: KeyR, + envelope: Replicator.Internal.DataEnvelope, + delta: Option[Replicator.Internal.Delta], + consistency: Replicator.WriteConsistency, + req: Option[Any], + nodes: Set[Address], + unreachable: Set[Address], + replyTo: ActorRef, + durable: Boolean): Props = Props(new WriteAggregator(key, envelope, delta, consistency, req, nodes, unreachable, replyTo, durable)) .withDeploy(Deploy.local) } @@ -2006,16 +2147,16 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog /** * INTERNAL API */ -@InternalApi private[akka] class WriteAggregator( - key: KeyR, - envelope: Replicator.Internal.DataEnvelope, - delta: 
Option[Replicator.Internal.Delta], - consistency: Replicator.WriteConsistency, - req: Option[Any], - override val nodes: Set[Address], - override val unreachable: Set[Address], - replyTo: ActorRef, - durable: Boolean) extends ReadWriteAggregator { +@InternalApi private[akka] class WriteAggregator(key: KeyR, + envelope: Replicator.Internal.DataEnvelope, + delta: Option[Replicator.Internal.Delta], + consistency: Replicator.WriteConsistency, + req: Option[Any], + override val nodes: Set[Address], + override val unreachable: Set[Address], + replyTo: ActorRef, + durable: Boolean) + extends ReadWriteAggregator { import Replicator._ import Replicator.Internal._ @@ -2086,7 +2227,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog case SendToSecondary => deltaMsg match { - case None => + case None => case Some(d) => // Deltas must be applied in order and we can't keep track of ordering of // simultaneous updates so there is a chance that the delta could not be applied. 
@@ -2102,8 +2243,8 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog def isDone: Boolean = gotLocalStoreReply && - (remaining.size <= doneWhenRemainingSize || (remaining diff gotWriteNackFrom).isEmpty || - notEnoughNodes) + (remaining.size <= doneWhenRemainingSize || remaining.diff(gotWriteNackFrom).isEmpty || + notEnoughNodes) def notEnoughNodes: Boolean = doneWhenRemainingSize < 0 || nodes.size < doneWhenRemainingSize @@ -2129,30 +2270,28 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog * INTERNAL API */ @InternalApi private[akka] object ReadAggregator { - def props( - key: KeyR, - consistency: Replicator.ReadConsistency, - req: Option[Any], - nodes: Set[Address], - unreachable: Set[Address], - localValue: Option[Replicator.Internal.DataEnvelope], - replyTo: ActorRef): Props = - Props(new ReadAggregator(key, consistency, req, nodes, unreachable, localValue, replyTo)) - .withDeploy(Deploy.local) + def props(key: KeyR, + consistency: Replicator.ReadConsistency, + req: Option[Any], + nodes: Set[Address], + unreachable: Set[Address], + localValue: Option[Replicator.Internal.DataEnvelope], + replyTo: ActorRef): Props = + Props(new ReadAggregator(key, consistency, req, nodes, unreachable, localValue, replyTo)).withDeploy(Deploy.local) } /** * INTERNAL API */ -@InternalApi private[akka] class ReadAggregator( - key: KeyR, - consistency: Replicator.ReadConsistency, - req: Option[Any], - override val nodes: Set[Address], - override val unreachable: Set[Address], - localValue: Option[Replicator.Internal.DataEnvelope], - replyTo: ActorRef) extends ReadWriteAggregator { +@InternalApi private[akka] class ReadAggregator(key: KeyR, + consistency: Replicator.ReadConsistency, + req: Option[Any], + override val nodes: Set[Address], + override val unreachable: Set[Address], + localValue: Option[Replicator.Internal.DataEnvelope], + replyTo: ActorRef) + extends ReadWriteAggregator { import Replicator._ import 
Replicator.Internal._ @@ -2231,4 +2370,3 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog case ReceiveTimeout => } } - diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala index b3a6532e2f..5a7fe67b79 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/VersionVector.scala @@ -44,6 +44,7 @@ object VersionVector { case object Before extends Ordering case object Same extends Ordering case object Concurrent extends Ordering + /** * Marker to ensure that we do a full order comparison instead of bailing out early. */ @@ -96,8 +97,7 @@ object VersionVector { * This class is immutable, i.e. "modifying" methods return a new instance. */ @SerialVersionUID(1L) -sealed abstract class VersionVector - extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { +sealed abstract class VersionVector extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning { type T = VersionVector @@ -182,18 +182,23 @@ sealed abstract class VersionVector private final def compareOnlyTo(that: VersionVector, order: Ordering): Ordering = { def nextOrElse[A](iter: Iterator[A], default: A): A = if (iter.hasNext) iter.next() else default - def compare(i1: Iterator[(UniqueAddress, Long)], i2: Iterator[(UniqueAddress, Long)], requestedOrder: Ordering): Ordering = { + def compare(i1: Iterator[(UniqueAddress, Long)], + i2: Iterator[(UniqueAddress, Long)], + requestedOrder: Ordering): Ordering = { @tailrec def compareNext(nt1: (UniqueAddress, Long), nt2: (UniqueAddress, Long), currentOrder: Ordering): Ordering = if ((requestedOrder ne FullOrder) && (currentOrder ne Same) && (currentOrder ne requestedOrder)) currentOrder else if ((nt1 eq cmpEndMarker) && (nt2 eq cmpEndMarker)) currentOrder // i1 is empty but i2 is not, so 
i1 can only be Before - else if (nt1 eq cmpEndMarker) { if (currentOrder eq After) Concurrent else Before } + else if (nt1 eq cmpEndMarker) { + if (currentOrder eq After) Concurrent else Before + } // i2 is empty but i1 is not, so i1 can only be After - else if (nt2 eq cmpEndMarker) { if (currentOrder eq Before) Concurrent else After } - else { + else if (nt2 eq cmpEndMarker) { + if (currentOrder eq Before) Concurrent else After + } else { // compare the nodes - val nc = nt1._1 compareTo nt2._1 + val nc = nt1._1.compareTo(nt2._1) if (nc == 0) { // both nodes exist compare the timestamps // same timestamp so just continue with the next nodes @@ -347,22 +352,23 @@ final case class ManyVersionVector(versions: TreeMap[UniqueAddress, Long]) exten override def merge(that: VersionVector): VersionVector = { if (that.isEmpty) this else if (this.isEmpty) that - else that match { - case ManyVersionVector(vs2) => - var mergedVersions = vs2 - for ((node, time) <- versions) { - val mergedVersionsCurrentTime = mergedVersions.getOrElse(node, Timestamp.Zero) - if (time > mergedVersionsCurrentTime) - mergedVersions = mergedVersions.updated(node, time) - } - VersionVector(mergedVersions) - case OneVersionVector(n2, v2) => - val v1 = versions.getOrElse(n2, Timestamp.Zero) - val mergedVersions = - if (v1 >= v2) versions - else versions.updated(n2, v2) - VersionVector(mergedVersions) - } + else + that match { + case ManyVersionVector(vs2) => + var mergedVersions = vs2 + for ((node, time) <- versions) { + val mergedVersionsCurrentTime = mergedVersions.getOrElse(node, Timestamp.Zero) + if (time > mergedVersionsCurrentTime) + mergedVersions = mergedVersions.updated(node, time) + } + VersionVector(mergedVersions) + case OneVersionVector(n2, v2) => + val v1 = versions.getOrElse(n2, Timestamp.Zero) + val mergedVersions = + if (v1 >= v2) versions + else versions.updated(n2, v2) + VersionVector(mergedVersions) + } } override def modifiedByNodes: Set[UniqueAddress] = diff --git 
a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala index e4e396d3c1..5c3ee1ebed 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializer.scala @@ -35,6 +35,7 @@ private object ReplicatedDataSerializer { * Generic superclass to allow to compare Entry types used in protobuf. */ abstract class KeyComparator[A <: GeneratedMessage] extends Comparator[A] { + /** * Get the key from the entry. The key may be a String, Integer, Long, or Any * @param entry The protobuf entry used with Map types @@ -57,19 +58,36 @@ private object ReplicatedDataSerializer { } implicit object ORMapEntryComparator extends KeyComparator[rd.ORMap.Entry] { - override def getKey(e: rd.ORMap.Entry): Any = if (e.hasStringKey) e.getStringKey else if (e.hasIntKey) e.getIntKey else if (e.hasLongKey) e.getLongKey else e.getOtherKey + override def getKey(e: rd.ORMap.Entry): Any = + if (e.hasStringKey) e.getStringKey + else if (e.hasIntKey) e.getIntKey + else if (e.hasLongKey) e.getLongKey + else e.getOtherKey } implicit object LWWMapEntryComparator extends KeyComparator[rd.LWWMap.Entry] { - override def getKey(e: rd.LWWMap.Entry): Any = if (e.hasStringKey) e.getStringKey else if (e.hasIntKey) e.getIntKey else if (e.hasLongKey) e.getLongKey else e.getOtherKey + override def getKey(e: rd.LWWMap.Entry): Any = + if (e.hasStringKey) e.getStringKey + else if (e.hasIntKey) e.getIntKey + else if (e.hasLongKey) e.getLongKey + else e.getOtherKey } implicit object PNCounterMapEntryComparator extends KeyComparator[rd.PNCounterMap.Entry] { - override def getKey(e: rd.PNCounterMap.Entry): Any = if (e.hasStringKey) e.getStringKey else if (e.hasIntKey) e.getIntKey else if (e.hasLongKey) e.getLongKey else e.getOtherKey + override def 
getKey(e: rd.PNCounterMap.Entry): Any = + if (e.hasStringKey) e.getStringKey + else if (e.hasIntKey) e.getIntKey + else if (e.hasLongKey) e.getLongKey + else e.getOtherKey } implicit object ORMultiMapEntryComparator extends KeyComparator[rd.ORMultiMap.Entry] { - override def getKey(e: rd.ORMultiMap.Entry): Any = if (e.hasStringKey) e.getStringKey else if (e.hasIntKey) e.getIntKey else if (e.hasLongKey) e.getLongKey else e.getOtherKey + override def getKey(e: rd.ORMultiMap.Entry): Any = + if (e.hasStringKey) e.getStringKey + else if (e.hasIntKey) e.getIntKey + else if (e.hasLongKey) e.getLongKey + else e.getOtherKey } - sealed trait ProtoMapEntryWriter[Entry <: GeneratedMessage, EntryBuilder <: GeneratedMessage.Builder[EntryBuilder], Value <: GeneratedMessage] { + sealed trait ProtoMapEntryWriter[ + Entry <: GeneratedMessage, EntryBuilder <: GeneratedMessage.Builder[EntryBuilder], Value <: GeneratedMessage] { def setStringKey(builder: EntryBuilder, key: String, value: Value): Entry def setLongKey(builder: EntryBuilder, key: Long, value: Value): Entry def setIntKey(builder: EntryBuilder, key: Int, value: Value): Entry @@ -88,11 +106,18 @@ private object ReplicatedDataSerializer { def getValue(entry: Entry): A } - implicit object ORMapEntry extends ProtoMapEntryWriter[rd.ORMap.Entry, rd.ORMap.Entry.Builder, dm.OtherMessage] with ProtoMapEntryReader[rd.ORMap.Entry, dm.OtherMessage] { - override def setStringKey(builder: rd.ORMap.Entry.Builder, key: String, value: dm.OtherMessage): rd.ORMap.Entry = builder.setStringKey(key).setValue(value).build() - override def setLongKey(builder: rd.ORMap.Entry.Builder, key: Long, value: dm.OtherMessage): rd.ORMap.Entry = builder.setLongKey(key).setValue(value).build() - override def setIntKey(builder: rd.ORMap.Entry.Builder, key: Int, value: dm.OtherMessage): rd.ORMap.Entry = builder.setIntKey(key).setValue(value).build() - override def setOtherKey(builder: rd.ORMap.Entry.Builder, key: dm.OtherMessage, value: dm.OtherMessage): 
rd.ORMap.Entry = builder.setOtherKey(key).setValue(value).build() + implicit object ORMapEntry + extends ProtoMapEntryWriter[rd.ORMap.Entry, rd.ORMap.Entry.Builder, dm.OtherMessage] + with ProtoMapEntryReader[rd.ORMap.Entry, dm.OtherMessage] { + override def setStringKey(builder: rd.ORMap.Entry.Builder, key: String, value: dm.OtherMessage): rd.ORMap.Entry = + builder.setStringKey(key).setValue(value).build() + override def setLongKey(builder: rd.ORMap.Entry.Builder, key: Long, value: dm.OtherMessage): rd.ORMap.Entry = + builder.setLongKey(key).setValue(value).build() + override def setIntKey(builder: rd.ORMap.Entry.Builder, key: Int, value: dm.OtherMessage): rd.ORMap.Entry = + builder.setIntKey(key).setValue(value).build() + override def setOtherKey(builder: rd.ORMap.Entry.Builder, + key: dm.OtherMessage, + value: dm.OtherMessage): rd.ORMap.Entry = builder.setOtherKey(key).setValue(value).build() override def hasStringKey(entry: rd.ORMap.Entry): Boolean = entry.hasStringKey override def getStringKey(entry: rd.ORMap.Entry): String = entry.getStringKey override def hasIntKey(entry: rd.ORMap.Entry): Boolean = entry.hasIntKey @@ -104,11 +129,18 @@ private object ReplicatedDataSerializer { override def getValue(entry: rd.ORMap.Entry): dm.OtherMessage = entry.getValue } - implicit object LWWMapEntry extends ProtoMapEntryWriter[rd.LWWMap.Entry, rd.LWWMap.Entry.Builder, rd.LWWRegister] with ProtoMapEntryReader[rd.LWWMap.Entry, rd.LWWRegister] { - override def setStringKey(builder: rd.LWWMap.Entry.Builder, key: String, value: rd.LWWRegister): rd.LWWMap.Entry = builder.setStringKey(key).setValue(value).build() - override def setLongKey(builder: rd.LWWMap.Entry.Builder, key: Long, value: rd.LWWRegister): rd.LWWMap.Entry = builder.setLongKey(key).setValue(value).build() - override def setIntKey(builder: rd.LWWMap.Entry.Builder, key: Int, value: rd.LWWRegister): rd.LWWMap.Entry = builder.setIntKey(key).setValue(value).build() - override def setOtherKey(builder: 
rd.LWWMap.Entry.Builder, key: OtherMessage, value: rd.LWWRegister): rd.LWWMap.Entry = builder.setOtherKey(key).setValue(value).build() + implicit object LWWMapEntry + extends ProtoMapEntryWriter[rd.LWWMap.Entry, rd.LWWMap.Entry.Builder, rd.LWWRegister] + with ProtoMapEntryReader[rd.LWWMap.Entry, rd.LWWRegister] { + override def setStringKey(builder: rd.LWWMap.Entry.Builder, key: String, value: rd.LWWRegister): rd.LWWMap.Entry = + builder.setStringKey(key).setValue(value).build() + override def setLongKey(builder: rd.LWWMap.Entry.Builder, key: Long, value: rd.LWWRegister): rd.LWWMap.Entry = + builder.setLongKey(key).setValue(value).build() + override def setIntKey(builder: rd.LWWMap.Entry.Builder, key: Int, value: rd.LWWRegister): rd.LWWMap.Entry = + builder.setIntKey(key).setValue(value).build() + override def setOtherKey(builder: rd.LWWMap.Entry.Builder, + key: OtherMessage, + value: rd.LWWRegister): rd.LWWMap.Entry = builder.setOtherKey(key).setValue(value).build() override def hasStringKey(entry: rd.LWWMap.Entry): Boolean = entry.hasStringKey override def getStringKey(entry: rd.LWWMap.Entry): String = entry.getStringKey override def hasIntKey(entry: rd.LWWMap.Entry): Boolean = entry.hasIntKey @@ -120,11 +152,24 @@ private object ReplicatedDataSerializer { override def getValue(entry: rd.LWWMap.Entry): rd.LWWRegister = entry.getValue } - implicit object PNCounterMapEntry extends ProtoMapEntryWriter[rd.PNCounterMap.Entry, rd.PNCounterMap.Entry.Builder, rd.PNCounter] with ProtoMapEntryReader[rd.PNCounterMap.Entry, rd.PNCounter] { - override def setStringKey(builder: rd.PNCounterMap.Entry.Builder, key: String, value: rd.PNCounter): rd.PNCounterMap.Entry = builder.setStringKey(key).setValue(value).build() - override def setLongKey(builder: rd.PNCounterMap.Entry.Builder, key: Long, value: rd.PNCounter): rd.PNCounterMap.Entry = builder.setLongKey(key).setValue(value).build() - override def setIntKey(builder: rd.PNCounterMap.Entry.Builder, key: Int, value: 
rd.PNCounter): rd.PNCounterMap.Entry = builder.setIntKey(key).setValue(value).build() - override def setOtherKey(builder: rd.PNCounterMap.Entry.Builder, key: OtherMessage, value: rd.PNCounter): rd.PNCounterMap.Entry = builder.setOtherKey(key).setValue(value).build() + implicit object PNCounterMapEntry + extends ProtoMapEntryWriter[rd.PNCounterMap.Entry, rd.PNCounterMap.Entry.Builder, rd.PNCounter] + with ProtoMapEntryReader[rd.PNCounterMap.Entry, rd.PNCounter] { + override def setStringKey(builder: rd.PNCounterMap.Entry.Builder, + key: String, + value: rd.PNCounter): rd.PNCounterMap.Entry = + builder.setStringKey(key).setValue(value).build() + override def setLongKey(builder: rd.PNCounterMap.Entry.Builder, + key: Long, + value: rd.PNCounter): rd.PNCounterMap.Entry = + builder.setLongKey(key).setValue(value).build() + override def setIntKey(builder: rd.PNCounterMap.Entry.Builder, + key: Int, + value: rd.PNCounter): rd.PNCounterMap.Entry = builder.setIntKey(key).setValue(value).build() + override def setOtherKey(builder: rd.PNCounterMap.Entry.Builder, + key: OtherMessage, + value: rd.PNCounter): rd.PNCounterMap.Entry = + builder.setOtherKey(key).setValue(value).build() override def hasStringKey(entry: rd.PNCounterMap.Entry): Boolean = entry.hasStringKey override def getStringKey(entry: rd.PNCounterMap.Entry): String = entry.getStringKey override def hasIntKey(entry: rd.PNCounterMap.Entry): Boolean = entry.hasIntKey @@ -136,11 +181,18 @@ private object ReplicatedDataSerializer { override def getValue(entry: rd.PNCounterMap.Entry): rd.PNCounter = entry.getValue } - implicit object ORMultiMapEntry extends ProtoMapEntryWriter[rd.ORMultiMap.Entry, rd.ORMultiMap.Entry.Builder, rd.ORSet] with ProtoMapEntryReader[rd.ORMultiMap.Entry, rd.ORSet] { - override def setStringKey(builder: rd.ORMultiMap.Entry.Builder, key: String, value: rd.ORSet): rd.ORMultiMap.Entry = builder.setStringKey(key).setValue(value).build() - override def setLongKey(builder: rd.ORMultiMap.Entry.Builder, 
key: Long, value: rd.ORSet): rd.ORMultiMap.Entry = builder.setLongKey(key).setValue(value).build() - override def setIntKey(builder: rd.ORMultiMap.Entry.Builder, key: Int, value: rd.ORSet): rd.ORMultiMap.Entry = builder.setIntKey(key).setValue(value).build() - override def setOtherKey(builder: rd.ORMultiMap.Entry.Builder, key: dm.OtherMessage, value: rd.ORSet): rd.ORMultiMap.Entry = builder.setOtherKey(key).setValue(value).build() + implicit object ORMultiMapEntry + extends ProtoMapEntryWriter[rd.ORMultiMap.Entry, rd.ORMultiMap.Entry.Builder, rd.ORSet] + with ProtoMapEntryReader[rd.ORMultiMap.Entry, rd.ORSet] { + override def setStringKey(builder: rd.ORMultiMap.Entry.Builder, key: String, value: rd.ORSet): rd.ORMultiMap.Entry = + builder.setStringKey(key).setValue(value).build() + override def setLongKey(builder: rd.ORMultiMap.Entry.Builder, key: Long, value: rd.ORSet): rd.ORMultiMap.Entry = + builder.setLongKey(key).setValue(value).build() + override def setIntKey(builder: rd.ORMultiMap.Entry.Builder, key: Int, value: rd.ORSet): rd.ORMultiMap.Entry = + builder.setIntKey(key).setValue(value).build() + override def setOtherKey(builder: rd.ORMultiMap.Entry.Builder, + key: dm.OtherMessage, + value: rd.ORSet): rd.ORMultiMap.Entry = builder.setOtherKey(key).setValue(value).build() override def hasStringKey(entry: rd.ORMultiMap.Entry): Boolean = entry.hasStringKey override def getStringKey(entry: rd.ORMultiMap.Entry): String = entry.getStringKey override def hasIntKey(entry: rd.ORMultiMap.Entry): Boolean = entry.hasIntKey @@ -152,11 +204,25 @@ private object ReplicatedDataSerializer { override def getValue(entry: rd.ORMultiMap.Entry): rd.ORSet = entry.getValue } - implicit object ORMapDeltaGroupEntry extends ProtoMapEntryWriter[rd.ORMapDeltaGroup.MapEntry, rd.ORMapDeltaGroup.MapEntry.Builder, dm.OtherMessage] with ProtoMapEntryReader[rd.ORMapDeltaGroup.MapEntry, dm.OtherMessage] { - override def setStringKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder, key: String, 
value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry = builder.setStringKey(key).setValue(value).build() - override def setLongKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder, key: Long, value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry = builder.setLongKey(key).setValue(value).build() - override def setIntKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder, key: Int, value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry = builder.setIntKey(key).setValue(value).build() - override def setOtherKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder, key: dm.OtherMessage, value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry = builder.setOtherKey(key).setValue(value).build() + implicit object ORMapDeltaGroupEntry + extends ProtoMapEntryWriter[rd.ORMapDeltaGroup.MapEntry, rd.ORMapDeltaGroup.MapEntry.Builder, dm.OtherMessage] + with ProtoMapEntryReader[rd.ORMapDeltaGroup.MapEntry, dm.OtherMessage] { + override def setStringKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder, + key: String, + value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry = + builder.setStringKey(key).setValue(value).build() + override def setLongKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder, + key: Long, + value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry = + builder.setLongKey(key).setValue(value).build() + override def setIntKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder, + key: Int, + value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry = + builder.setIntKey(key).setValue(value).build() + override def setOtherKey(builder: rd.ORMapDeltaGroup.MapEntry.Builder, + key: dm.OtherMessage, + value: dm.OtherMessage): rd.ORMapDeltaGroup.MapEntry = + builder.setOtherKey(key).setValue(value).build() override def hasStringKey(entry: rd.ORMapDeltaGroup.MapEntry): Boolean = entry.hasStringKey override def getStringKey(entry: rd.ORMapDeltaGroup.MapEntry): String = entry.getStringKey override def hasIntKey(entry: rd.ORMapDeltaGroup.MapEntry): Boolean = entry.hasIntKey @@ -174,7 +240,9 @@ private object 
ReplicatedDataSerializer { * Protobuf serializer of ReplicatedData. */ class ReplicatedDataSerializer(val system: ExtendedActorSystem) - extends SerializerWithStringManifest with SerializationSupport with BaseSerializer { + extends SerializerWithStringManifest + with SerializationSupport + with BaseSerializer { import ReplicatedDataSerializer._ @@ -232,7 +300,6 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) ORMultiMapManifest -> multimapFromBinary, DeletedDataManifest -> (_ => DeletedData), VersionVectorManifest -> versionVectorFromBinary, - GSetKeyManifest -> (bytes => GSetKey(keyIdFromBinary(bytes))), ORSetKeyManifest -> (bytes => ORSetKey(keyIdFromBinary(bytes))), FlagKeyManifest -> (bytes => FlagKey(keyIdFromBinary(bytes))), @@ -264,20 +331,20 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) case DeletedData => DeletedDataManifest case _: VersionVector => VersionVectorManifest - case _: ORSetKey[_] => ORSetKeyManifest - case _: GSetKey[_] => GSetKeyManifest - case _: GCounterKey => GCounterKeyManifest - case _: PNCounterKey => PNCounterKeyManifest - case _: FlagKey => FlagKeyManifest - case _: LWWRegisterKey[_] => LWWRegisterKeyManifest - case _: ORMapKey[_, _] => ORMapKeyManifest - case _: LWWMapKey[_, _] => LWWMapKeyManifest - case _: PNCounterMapKey[_] => PNCounterMapKeyManifest - case _: ORMultiMapKey[_, _] => ORMultiMapKeyManifest + case _: ORSetKey[_] => ORSetKeyManifest + case _: GSetKey[_] => GSetKeyManifest + case _: GCounterKey => GCounterKeyManifest + case _: PNCounterKey => PNCounterKeyManifest + case _: FlagKey => FlagKeyManifest + case _: LWWRegisterKey[_] => LWWRegisterKeyManifest + case _: ORMapKey[_, _] => ORMapKeyManifest + case _: LWWMapKey[_, _] => LWWMapKeyManifest + case _: PNCounterMapKey[_] => PNCounterMapKeyManifest + case _: ORMultiMapKey[_, _] => ORMultiMapKeyManifest - case _: ORSet.DeltaGroup[_] => ORSetDeltaGroupManifest - case _: ORMap.DeltaGroup[_, _] => ORMapDeltaGroupManifest - case _: 
ORSet.FullStateDeltaOp[_] => ORSetFullManifest + case _: ORSet.DeltaGroup[_] => ORSetDeltaGroupManifest + case _: ORMap.DeltaGroup[_, _] => ORMapDeltaGroupManifest + case _: ORSet.FullStateDeltaOp[_] => ORSetFullManifest case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]") @@ -313,8 +380,9 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = fromBinaryMap.get(manifest) match { case Some(f) => f(bytes) - case None => throw new NotSerializableException( - s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") + case None => + throw new NotSerializableException( + s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") } def gsetToProto(gset: GSet[_]): rd.GSet = { @@ -361,10 +429,10 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) def gsetFromProto(gset: rd.GSet): GSet[Any] = { val elements: Iterator[Any] = { gset.getStringElementsList.iterator.asScala ++ - gset.getIntElementsList.iterator.asScala ++ - gset.getLongElementsList.iterator.asScala ++ - gset.getOtherElementsList.iterator.asScala.map(otherMessageFromProto) ++ - gset.getActorRefElementsList.iterator.asScala.map(resolveActorRef) + gset.getIntElementsList.iterator.asScala ++ + gset.getLongElementsList.iterator.asScala ++ + gset.getOtherElementsList.iterator.asScala.map(otherMessageFromProto) ++ + gset.getActorRefElementsList.iterator.asScala.map(resolveActorRef) } GSet(elements.toSet) } @@ -451,9 +519,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) private def orsetDeltaGroupToProto(deltaGroup: ORSet.DeltaGroup[_]): rd.ORSetDeltaGroup = { def createEntry(opType: rd.ORSetDeltaOp, u: ORSet[_]) = { - rd.ORSetDeltaGroup.Entry.newBuilder() - .setOperation(opType) - .setUnderlying(orsetToProto(u)) + 
rd.ORSetDeltaGroup.Entry.newBuilder().setOperation(opType).setUnderlying(orsetToProto(u)) } val b = rd.ORSetDeltaGroup.newBuilder() @@ -473,26 +539,28 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) private def orsetDeltaGroupFromBinary(bytes: Array[Byte]): ORSet.DeltaGroup[Any] = { val deltaGroup = rd.ORSetDeltaGroup.parseFrom(bytes) val ops: Vector[ORSet.DeltaOp] = - deltaGroup.getEntriesList.asScala.iterator.map { entry => - if (entry.getOperation == rd.ORSetDeltaOp.Add) - ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)) - else if (entry.getOperation == rd.ORSetDeltaOp.Remove) - ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)) - else if (entry.getOperation == rd.ORSetDeltaOp.Full) - ORSet.FullStateDeltaOp(orsetFromProto(entry.getUnderlying)) - else - throw new NotSerializableException(s"Unknow ORSet delta operation ${entry.getOperation}") - }.to(immutable.Vector) + deltaGroup.getEntriesList.asScala.iterator + .map { entry => + if (entry.getOperation == rd.ORSetDeltaOp.Add) + ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)) + else if (entry.getOperation == rd.ORSetDeltaOp.Remove) + ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)) + else if (entry.getOperation == rd.ORSetDeltaOp.Full) + ORSet.FullStateDeltaOp(orsetFromProto(entry.getUnderlying)) + else + throw new NotSerializableException(s"Unknow ORSet delta operation ${entry.getOperation}") + } + .to(immutable.Vector) ORSet.DeltaGroup(ops) } def orsetFromProto(orset: rd.ORSet): ORSet[Any] = { val elements: Iterator[Any] = { orset.getStringElementsList.iterator.asScala ++ - orset.getIntElementsList.iterator.asScala ++ - orset.getLongElementsList.iterator.asScala ++ - orset.getOtherElementsList.iterator.asScala.map(otherMessageFromProto) ++ - orset.getActorRefElementsList.iterator.asScala.map(resolveActorRef) + orset.getIntElementsList.iterator.asScala ++ + orset.getLongElementsList.iterator.asScala ++ + 
orset.getOtherElementsList.iterator.asScala.map(otherMessageFromProto) ++ + orset.getActorRefElementsList.iterator.asScala.map(resolveActorRef) } val dots = orset.getDotsList.asScala.map(versionVectorFromProto).iterator @@ -511,26 +579,30 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) if (flag.getEnabled) Flag.Enabled else Flag.Disabled def lwwRegisterToProto(lwwRegister: LWWRegister[_]): rd.LWWRegister = - rd.LWWRegister.newBuilder(). - setTimestamp(lwwRegister.timestamp). - setNode(uniqueAddressToProto(lwwRegister.node)). - setState(otherMessageToProto(lwwRegister.value)). - build() + rd.LWWRegister + .newBuilder() + .setTimestamp(lwwRegister.timestamp) + .setNode(uniqueAddressToProto(lwwRegister.node)) + .setState(otherMessageToProto(lwwRegister.value)) + .build() def lwwRegisterFromBinary(bytes: Array[Byte]): LWWRegister[Any] = lwwRegisterFromProto(rd.LWWRegister.parseFrom(bytes)) def lwwRegisterFromProto(lwwRegister: rd.LWWRegister): LWWRegister[Any] = - new LWWRegister( - uniqueAddressFromProto(lwwRegister.getNode), - otherMessageFromProto(lwwRegister.getState), - lwwRegister.getTimestamp) + new LWWRegister(uniqueAddressFromProto(lwwRegister.getNode), + otherMessageFromProto(lwwRegister.getState), + lwwRegister.getTimestamp) def gcounterToProto(gcounter: GCounter): rd.GCounter = { val b = rd.GCounter.newBuilder() gcounter.state.toVector.sortBy { case (address, _) => address }.foreach { - case (address, value) => b.addEntries(rd.GCounter.Entry.newBuilder(). 
- setNode(uniqueAddressToProto(address)).setValue(ByteString.copyFrom(value.toByteArray))) + case (address, value) => + b.addEntries( + rd.GCounter.Entry + .newBuilder() + .setNode(uniqueAddressToProto(address)) + .setValue(ByteString.copyFrom(value.toByteArray))) } b.build() } @@ -539,82 +611,101 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) gcounterFromProto(rd.GCounter.parseFrom(bytes)) def gcounterFromProto(gcounter: rd.GCounter): GCounter = { - new GCounter(state = gcounter.getEntriesList.asScala.iterator.map(entry => - uniqueAddressFromProto(entry.getNode) -> BigInt(entry.getValue.toByteArray)).toMap) + new GCounter( + state = gcounter.getEntriesList.asScala.iterator + .map(entry => uniqueAddressFromProto(entry.getNode) -> BigInt(entry.getValue.toByteArray)) + .toMap) } def pncounterToProto(pncounter: PNCounter): rd.PNCounter = - rd.PNCounter.newBuilder(). - setIncrements(gcounterToProto(pncounter.increments)). - setDecrements(gcounterToProto(pncounter.decrements)). - build() + rd.PNCounter + .newBuilder() + .setIncrements(gcounterToProto(pncounter.increments)) + .setDecrements(gcounterToProto(pncounter.decrements)) + .build() def pncounterFromBinary(bytes: Array[Byte]): PNCounter = pncounterFromProto(rd.PNCounter.parseFrom(bytes)) def pncounterFromProto(pncounter: rd.PNCounter): PNCounter = { - new PNCounter( - increments = gcounterFromProto(pncounter.getIncrements), - decrements = gcounterFromProto(pncounter.getDecrements)) + new PNCounter(increments = gcounterFromProto(pncounter.getIncrements), + decrements = gcounterFromProto(pncounter.getDecrements)) } /* * Convert a Map[A, B] to an Iterable[Entry] where Entry is the protobuf map entry. 
*/ - private def getEntries[IKey, IValue, EntryBuilder <: GeneratedMessage.Builder[EntryBuilder], PEntry <: GeneratedMessage, PValue <: GeneratedMessage](input: Map[IKey, IValue], createBuilder: () => EntryBuilder, valueConverter: IValue => PValue)(implicit comparator: Comparator[PEntry], eh: ProtoMapEntryWriter[PEntry, EntryBuilder, PValue]): java.lang.Iterable[PEntry] = { + private def getEntries[IKey, + IValue, + EntryBuilder <: GeneratedMessage.Builder[EntryBuilder], + PEntry <: GeneratedMessage, + PValue <: GeneratedMessage](input: Map[IKey, IValue], + createBuilder: () => EntryBuilder, + valueConverter: IValue => PValue)( + implicit comparator: Comparator[PEntry], + eh: ProtoMapEntryWriter[PEntry, EntryBuilder, PValue]): java.lang.Iterable[PEntry] = { // The resulting Iterable needs to be ordered deterministically in order to create same signature upon serializing same data val protoEntries = new TreeSet[PEntry](comparator) input.foreach { case (key: String, value) => protoEntries.add(eh.setStringKey(createBuilder(), key, valueConverter(value))) case (key: Int, value) => protoEntries.add(eh.setIntKey(createBuilder(), key, valueConverter(value))) case (key: Long, value) => protoEntries.add(eh.setLongKey(createBuilder(), key, valueConverter(value))) - case (key, value) => protoEntries.add(eh.setOtherKey(createBuilder(), otherMessageToProto(key), valueConverter(value))) + case (key, value) => + protoEntries.add(eh.setOtherKey(createBuilder(), otherMessageToProto(key), valueConverter(value))) } protoEntries } def ormapToProto(ormap: ORMap[_, _]): rd.ORMap = { val ormapBuilder = rd.ORMap.newBuilder() - val entries: jl.Iterable[rd.ORMap.Entry] = getEntries(ormap.values, rd.ORMap.Entry.newBuilder _, otherMessageToProto) + val entries: jl.Iterable[rd.ORMap.Entry] = + getEntries(ormap.values, rd.ORMap.Entry.newBuilder _, otherMessageToProto) ormapBuilder.setKeys(orsetToProto(ormap.keys)).addAllEntries(entries).build() } def ormapFromBinary(bytes: Array[Byte]): 
ORMap[Any, ReplicatedData] = ormapFromProto(rd.ORMap.parseFrom(decompress(bytes))) - def mapTypeFromProto[PEntry <: GeneratedMessage, A <: GeneratedMessage, B <: ReplicatedData](input: util.List[PEntry], valueCreator: A => B)(implicit eh: ProtoMapEntryReader[PEntry, A]): Map[Any, B] = { + def mapTypeFromProto[PEntry <: GeneratedMessage, A <: GeneratedMessage, B <: ReplicatedData]( + input: util.List[PEntry], + valueCreator: A => B)(implicit eh: ProtoMapEntryReader[PEntry, A]): Map[Any, B] = { input.asScala.map { entry => if (eh.hasStringKey(entry)) eh.getStringKey(entry) -> valueCreator(eh.getValue(entry)) else if (eh.hasIntKey(entry)) eh.getIntKey(entry) -> valueCreator(eh.getValue(entry)) else if (eh.hasLongKey(entry)) eh.getLongKey(entry) -> valueCreator(eh.getValue(entry)) else if (eh.hasOtherKey(entry)) otherMessageFromProto(eh.getOtherKey(entry)) -> valueCreator(eh.getValue(entry)) - else throw new IllegalArgumentException(s"Can't deserialize ${entry.getClass} because it does not have any key in the serialized message.") + else + throw new IllegalArgumentException( + s"Can't deserialize ${entry.getClass} because it does not have any key in the serialized message.") }.toMap } def ormapFromProto(ormap: rd.ORMap): ORMap[Any, ReplicatedData] = { - val entries = mapTypeFromProto(ormap.getEntriesList, (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedData]) - new ORMap( - keys = orsetFromProto(ormap.getKeys), - entries, - ORMap.VanillaORMapTag) + val entries = mapTypeFromProto(ormap.getEntriesList, + (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedData]) + new ORMap(keys = orsetFromProto(ormap.getKeys), entries, ORMap.VanillaORMapTag) } - def singleMapEntryFromProto[PEntry <: GeneratedMessage, A <: GeneratedMessage, B <: ReplicatedData](input: util.List[PEntry], valueCreator: A => B)(implicit eh: ProtoMapEntryReader[PEntry, A]): Map[Any, B] = { + def singleMapEntryFromProto[PEntry <: GeneratedMessage, A <: 
GeneratedMessage, B <: ReplicatedData]( + input: util.List[PEntry], + valueCreator: A => B)(implicit eh: ProtoMapEntryReader[PEntry, A]): Map[Any, B] = { val map = mapTypeFromProto(input, valueCreator) if (map.size > 1) - throw new IllegalArgumentException(s"Can't deserialize the key/value pair in the ORMap delta - too many pairs on the wire") + throw new IllegalArgumentException( + s"Can't deserialize the key/value pair in the ORMap delta - too many pairs on the wire") else map } - def singleKeyEntryFromProto[PEntry <: GeneratedMessage, A <: GeneratedMessage](entryOption: Option[PEntry])(implicit eh: ProtoMapEntryReader[PEntry, A]): Any = + def singleKeyEntryFromProto[PEntry <: GeneratedMessage, A <: GeneratedMessage](entryOption: Option[PEntry])( + implicit eh: ProtoMapEntryReader[PEntry, A]): Any = entryOption match { - case Some(entry) => if (eh.hasStringKey(entry)) eh.getStringKey(entry) - else if (eh.hasIntKey(entry)) eh.getIntKey(entry) - else if (eh.hasLongKey(entry)) eh.getLongKey(entry) - else if (eh.hasOtherKey(entry)) otherMessageFromProto(eh.getOtherKey(entry)) - else throw new IllegalArgumentException(s"Can't deserialize the key in the ORMap delta") + case Some(entry) => + if (eh.hasStringKey(entry)) eh.getStringKey(entry) + else if (eh.hasIntKey(entry)) eh.getIntKey(entry) + else if (eh.hasLongKey(entry)) eh.getLongKey(entry) + else if (eh.hasOtherKey(entry)) otherMessageFromProto(eh.getOtherKey(entry)) + else throw new IllegalArgumentException(s"Can't deserialize the key in the ORMap delta") case _ => throw new IllegalArgumentException(s"Can't deserialize the key in the ORMap delta") } @@ -656,12 +747,12 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) // this can be made client-extendable in the same way as Http codes in Spray are private def zeroTagFromCode(code: Int) = code match { - case ORMap.VanillaORMapTag.value => ORMap.VanillaORMapTag - case PNCounterMap.PNCounterMapTag.value => PNCounterMap.PNCounterMapTag - case 
ORMultiMap.ORMultiMapTag.value => ORMultiMap.ORMultiMapTag + case ORMap.VanillaORMapTag.value => ORMap.VanillaORMapTag + case PNCounterMap.PNCounterMapTag.value => PNCounterMap.PNCounterMapTag + case ORMultiMap.ORMultiMapTag.value => ORMultiMap.ORMultiMapTag case ORMultiMap.ORMultiMapWithValueDeltasTag.value => ORMultiMap.ORMultiMapWithValueDeltasTag - case LWWMap.LWWMapTag.value => LWWMap.LWWMapTag - case _ => throw new IllegalArgumentException("Invalid ZeroTag code") + case LWWMap.LWWMapTag.value => LWWMap.LWWMapTag + case _ => throw new IllegalArgumentException("Invalid ZeroTag code") } private def ormapDeltaGroupFromBinary(bytes: Array[Byte]): ORMap.DeltaGroup[Any, ReplicatedData] = { @@ -671,21 +762,33 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) private def ormapDeltaGroupOpsFromBinary(bytes: Array[Byte]): scala.collection.immutable.IndexedSeq[ORMap.DeltaOp] = { val deltaGroup = rd.ORMapDeltaGroup.parseFrom(bytes) val ops: Vector[ORMap.DeltaOp] = - deltaGroup.getEntriesList.asScala.iterator.map { entry => - if (entry.getOperation == rd.ORMapDeltaOp.ORMapPut) { - val map = singleMapEntryFromProto(entry.getEntryDataList, (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedData]) - ORMap.PutDeltaOp(ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)), map.head, zeroTagFromCode(entry.getZeroTag)) - } else if (entry.getOperation == rd.ORMapDeltaOp.ORMapRemove) { - ORMap.RemoveDeltaOp(ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)), zeroTagFromCode(entry.getZeroTag)) - } else if (entry.getOperation == rd.ORMapDeltaOp.ORMapRemoveKey) { - val elem = singleKeyEntryFromProto(entry.getEntryDataList.asScala.headOption) - ORMap.RemoveKeyDeltaOp(ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)), elem, zeroTagFromCode(entry.getZeroTag)) - } else if (entry.getOperation == rd.ORMapDeltaOp.ORMapUpdate) { - val map = mapTypeFromProto(entry.getEntryDataList, (v: dm.OtherMessage) => 
otherMessageFromProto(v).asInstanceOf[ReplicatedDelta]) - ORMap.UpdateDeltaOp(ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)), map, zeroTagFromCode(entry.getZeroTag)) - } else - throw new NotSerializableException(s"Unknown ORMap delta operation ${entry.getOperation}") - }.to(immutable.Vector) + deltaGroup.getEntriesList.asScala.iterator + .map { entry => + if (entry.getOperation == rd.ORMapDeltaOp.ORMapPut) { + val map = + singleMapEntryFromProto(entry.getEntryDataList, + (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedData]) + ORMap.PutDeltaOp(ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)), + map.head, + zeroTagFromCode(entry.getZeroTag)) + } else if (entry.getOperation == rd.ORMapDeltaOp.ORMapRemove) { + ORMap.RemoveDeltaOp(ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)), + zeroTagFromCode(entry.getZeroTag)) + } else if (entry.getOperation == rd.ORMapDeltaOp.ORMapRemoveKey) { + val elem = singleKeyEntryFromProto(entry.getEntryDataList.asScala.headOption) + ORMap.RemoveKeyDeltaOp(ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)), + elem, + zeroTagFromCode(entry.getZeroTag)) + } else if (entry.getOperation == rd.ORMapDeltaOp.ORMapUpdate) { + val map = mapTypeFromProto(entry.getEntryDataList, + (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedDelta]) + ORMap.UpdateDeltaOp(ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)), + map, + zeroTagFromCode(entry.getZeroTag)) + } else + throw new NotSerializableException(s"Unknown ORMap delta operation ${entry.getOperation}") + } + .to(immutable.Vector) ops } @@ -714,15 +817,25 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) if (m.size > 1 && opType != rd.ORMapDeltaOp.ORMapUpdate) throw new IllegalArgumentException("Invalid size of ORMap delta map") else { - val builder = rd.ORMapDeltaGroup.Entry.newBuilder() - .setOperation(opType) - .setUnderlying(orsetToProto(u)) - .setZeroTag(zt) + val builder = + 
rd.ORMapDeltaGroup.Entry.newBuilder().setOperation(opType).setUnderlying(orsetToProto(u)).setZeroTag(zt) m.foreach { - case (key: String, value) => builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setStringKey(key).setValue(otherMessageToProto(value)).build()) - case (key: Int, value) => builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setIntKey(key).setValue(otherMessageToProto(value)).build()) - case (key: Long, value) => builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setLongKey(key).setValue(otherMessageToProto(value)).build()) - case (key, value) => builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setOtherKey(otherMessageToProto(key)).setValue(otherMessageToProto(value)).build()) + case (key: String, value) => + builder.addEntryData( + rd.ORMapDeltaGroup.MapEntry.newBuilder().setStringKey(key).setValue(otherMessageToProto(value)).build()) + case (key: Int, value) => + builder.addEntryData( + rd.ORMapDeltaGroup.MapEntry.newBuilder().setIntKey(key).setValue(otherMessageToProto(value)).build()) + case (key: Long, value) => + builder.addEntryData( + rd.ORMapDeltaGroup.MapEntry.newBuilder().setLongKey(key).setValue(otherMessageToProto(value)).build()) + case (key, value) => + builder.addEntryData( + rd.ORMapDeltaGroup.MapEntry + .newBuilder() + .setOtherKey(otherMessageToProto(key)) + .setValue(otherMessageToProto(value)) + .build()) } builder } @@ -736,10 +849,8 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) case key: Long => entryDataBuilder.setLongKey(key) case key => entryDataBuilder.setOtherKey(otherMessageToProto(key)) } - val builder = rd.ORMapDeltaGroup.Entry.newBuilder() - .setOperation(opType) - .setUnderlying(orsetToProto(u)) - .setZeroTag(zt) + val builder = + rd.ORMapDeltaGroup.Entry.newBuilder().setOperation(opType).setUnderlying(orsetToProto(u)).setZeroTag(zt) builder.addEntryData(entryDataBuilder.build()) builder } @@ -747,13 +858,23 @@ class ReplicatedDataSerializer(val 
system: ExtendedActorSystem) val b = rd.ORMapDeltaGroup.newBuilder() deltaGroupOps.foreach { case ORMap.PutDeltaOp(op, pair, zt) => - b.addEntries(createEntry(rd.ORMapDeltaOp.ORMapPut, op.asInstanceOf[ORSet.AddDeltaOp[_]].underlying, Map(pair), zt.value)) + b.addEntries( + createEntry(rd.ORMapDeltaOp.ORMapPut, op.asInstanceOf[ORSet.AddDeltaOp[_]].underlying, Map(pair), zt.value)) case ORMap.RemoveDeltaOp(op, zt) => - b.addEntries(createEntry(rd.ORMapDeltaOp.ORMapRemove, op.asInstanceOf[ORSet.RemoveDeltaOp[_]].underlying, Map.empty, zt.value)) + b.addEntries( + createEntry(rd.ORMapDeltaOp.ORMapRemove, + op.asInstanceOf[ORSet.RemoveDeltaOp[_]].underlying, + Map.empty, + zt.value)) case ORMap.RemoveKeyDeltaOp(op, k, zt) => - b.addEntries(createEntryWithKey(rd.ORMapDeltaOp.ORMapRemoveKey, op.asInstanceOf[ORSet.RemoveDeltaOp[_]].underlying, k, zt.value)) + b.addEntries( + createEntryWithKey(rd.ORMapDeltaOp.ORMapRemoveKey, + op.asInstanceOf[ORSet.RemoveDeltaOp[_]].underlying, + k, + zt.value)) case ORMap.UpdateDeltaOp(op, m, zt) => - b.addEntries(createEntry(rd.ORMapDeltaOp.ORMapUpdate, op.asInstanceOf[ORSet.AddDeltaOp[_]].underlying, m, zt.value)) + b.addEntries( + createEntry(rd.ORMapDeltaOp.ORMapUpdate, op.asInstanceOf[ORSet.AddDeltaOp[_]].underlying, m, zt.value)) case ORMap.DeltaGroup(u) => throw new IllegalArgumentException("ORMap.DeltaGroup should not be nested") } @@ -762,7 +883,8 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) def lwwmapToProto(lwwmap: LWWMap[_, _]): rd.LWWMap = { val lwwmapBuilder = rd.LWWMap.newBuilder() - val entries: jl.Iterable[rd.LWWMap.Entry] = getEntries(lwwmap.underlying.entries, rd.LWWMap.Entry.newBuilder _, lwwRegisterToProto) + val entries: jl.Iterable[rd.LWWMap.Entry] = + getEntries(lwwmap.underlying.entries, rd.LWWMap.Entry.newBuilder _, lwwRegisterToProto) lwwmapBuilder.setKeys(orsetToProto(lwwmap.underlying.keys)).addAllEntries(entries).build() } @@ -771,14 +893,13 @@ class ReplicatedDataSerializer(val system: 
ExtendedActorSystem) def lwwmapFromProto(lwwmap: rd.LWWMap): LWWMap[Any, Any] = { val entries = mapTypeFromProto(lwwmap.getEntriesList, lwwRegisterFromProto) - new LWWMap(new ORMap( - keys = orsetFromProto(lwwmap.getKeys), - entries, LWWMap.LWWMapTag)) + new LWWMap(new ORMap(keys = orsetFromProto(lwwmap.getKeys), entries, LWWMap.LWWMapTag)) } def pncountermapToProto(pncountermap: PNCounterMap[_]): rd.PNCounterMap = { val pncountermapBuilder = rd.PNCounterMap.newBuilder() - val entries: jl.Iterable[rd.PNCounterMap.Entry] = getEntries(pncountermap.underlying.entries, rd.PNCounterMap.Entry.newBuilder _, pncounterToProto) + val entries: jl.Iterable[rd.PNCounterMap.Entry] = + getEntries(pncountermap.underlying.entries, rd.PNCounterMap.Entry.newBuilder _, pncounterToProto) pncountermapBuilder.setKeys(orsetToProto(pncountermap.underlying.keys)).addAllEntries(entries).build() } @@ -787,14 +908,13 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem) def pncountermapFromProto(pncountermap: rd.PNCounterMap): PNCounterMap[_] = { val entries = mapTypeFromProto(pncountermap.getEntriesList, pncounterFromProto) - new PNCounterMap(new ORMap( - keys = orsetFromProto(pncountermap.getKeys), - entries, PNCounterMap.PNCounterMapTag)) + new PNCounterMap(new ORMap(keys = orsetFromProto(pncountermap.getKeys), entries, PNCounterMap.PNCounterMapTag)) } def multimapToProto(multimap: ORMultiMap[_, _]): rd.ORMultiMap = { val ormultimapBuilder = rd.ORMultiMap.newBuilder() - val entries: jl.Iterable[rd.ORMultiMap.Entry] = getEntries(multimap.underlying.entries, rd.ORMultiMap.Entry.newBuilder _, orsetToProto) + val entries: jl.Iterable[rd.ORMultiMap.Entry] = + getEntries(multimap.underlying.entries, rd.ORMultiMap.Entry.newBuilder _, orsetToProto) ormultimapBuilder.setKeys(orsetToProto(multimap.underlying.keys)).addAllEntries(entries) if (multimap.withValueDeltas) ormultimapBuilder.setWithValueDeltas(true) @@ -806,18 +926,17 @@ class ReplicatedDataSerializer(val system: 
ExtendedActorSystem) def multimapFromProto(multimap: rd.ORMultiMap): ORMultiMap[Any, Any] = { val entries = mapTypeFromProto(multimap.getEntriesList, orsetFromProto) - val withValueDeltas = if (multimap.hasWithValueDeltas) - multimap.getWithValueDeltas - else false - new ORMultiMap( - new ORMap( - keys = orsetFromProto(multimap.getKeys), - entries, - if (withValueDeltas) - ORMultiMap.ORMultiMapWithValueDeltasTag - else - ORMultiMap.ORMultiMapTag), - withValueDeltas) + val withValueDeltas = + if (multimap.hasWithValueDeltas) + multimap.getWithValueDeltas + else false + new ORMultiMap(new ORMap(keys = orsetFromProto(multimap.getKeys), + entries, + if (withValueDeltas) + ORMultiMap.ORMultiMapWithValueDeltasTag + else + ORMultiMap.ORMultiMapTag), + withValueDeltas) } def keyIdToBinary(id: String): Array[Byte] = diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala index 970418951d..808797c3e4 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializer.scala @@ -147,11 +147,14 @@ import akka.util.ccompat._ * Protobuf serializer of ReplicatorMessage messages. 
*/ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) - extends SerializerWithStringManifest with SerializationSupport with BaseSerializer { + extends SerializerWithStringManifest + with SerializationSupport + with BaseSerializer { import ReplicatorMessageSerializer.SmallCache - private val cacheTimeToLive = system.settings.config.getDuration( - "akka.cluster.distributed-data.serializer-cache-time-to-live", TimeUnit.MILLISECONDS).millis + private val cacheTimeToLive = system.settings.config + .getDuration("akka.cluster.distributed-data.serializer-cache-time-to-live", TimeUnit.MILLISECONDS) + .millis private val readCache = new SmallCache[Read, Array[Byte]](4, cacheTimeToLive, m => readToProto(m).toByteArray) private val writeCache = new SmallCache[Write, Array[Byte]](4, cacheTimeToLive, m => writeToProto(m).toByteArray) system.scheduler.schedule(cacheTimeToLive, cacheTimeToLive / 2) { @@ -181,25 +184,27 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) val DeltaPropagationManifest = "Q" val DeltaNackManifest = "R" - private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef]( - GetManifest -> getFromBinary, - GetSuccessManifest -> getSuccessFromBinary, - NotFoundManifest -> notFoundFromBinary, - GetFailureManifest -> getFailureFromBinary, - SubscribeManifest -> subscribeFromBinary, - UnsubscribeManifest -> unsubscribeFromBinary, - ChangedManifest -> changedFromBinary, - DataEnvelopeManifest -> dataEnvelopeFromBinary, - WriteManifest -> writeFromBinary, - WriteAckManifest -> (_ => WriteAck), - ReadManifest -> readFromBinary, - ReadResultManifest -> readResultFromBinary, - StatusManifest -> statusFromBinary, - GossipManifest -> gossipFromBinary, - DeltaPropagationManifest -> deltaPropagationFromBinary, - WriteNackManifest -> (_ => WriteNack), - DeltaNackManifest -> (_ => DeltaNack), - DurableDataEnvelopeManifest -> durableDataEnvelopeFromBinary) + private val fromBinaryMap = 
collection.immutable.HashMap[String, Array[Byte] => AnyRef](GetManifest -> getFromBinary, + GetSuccessManifest -> getSuccessFromBinary, + NotFoundManifest -> notFoundFromBinary, + GetFailureManifest -> getFailureFromBinary, + SubscribeManifest -> subscribeFromBinary, + UnsubscribeManifest -> unsubscribeFromBinary, + ChangedManifest -> changedFromBinary, + DataEnvelopeManifest -> dataEnvelopeFromBinary, + WriteManifest -> writeFromBinary, + WriteAckManifest -> (_ => + WriteAck), + ReadManifest -> readFromBinary, + ReadResultManifest -> readResultFromBinary, + StatusManifest -> statusFromBinary, + GossipManifest -> gossipFromBinary, + DeltaPropagationManifest -> deltaPropagationFromBinary, + WriteNackManifest -> (_ => + WriteNack), + DeltaNackManifest -> (_ => + DeltaNack), + DurableDataEnvelopeManifest -> durableDataEnvelopeFromBinary) override def manifest(obj: AnyRef): String = obj match { case _: DataEnvelope => DataEnvelopeManifest @@ -250,8 +255,9 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = fromBinaryMap.get(manifest) match { case Some(f) => f(bytes) - case None => throw new NotSerializableException( - s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") + case None => + throw new NotSerializableException( + s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") } private def statusToProto(status: Status): dm.Status = { @@ -259,48 +265,41 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) b.setChunk(status.chunk).setTotChunks(status.totChunks) val entries = status.digests.foreach { case (key, digest) => - b.addEntries(dm.Status.Entry.newBuilder(). - setKey(key). 
- setDigest(ByteString.copyFrom(digest.toArray))) + b.addEntries(dm.Status.Entry.newBuilder().setKey(key).setDigest(ByteString.copyFrom(digest.toArray))) } b.build() } private def statusFromBinary(bytes: Array[Byte]): Status = { val status = dm.Status.parseFrom(bytes) - Status( - status.getEntriesList.asScala.iterator.map(e => - e.getKey -> AkkaByteString(e.getDigest.toByteArray())).toMap, - status.getChunk, status.getTotChunks) + Status(status.getEntriesList.asScala.iterator.map(e => e.getKey -> AkkaByteString(e.getDigest.toByteArray())).toMap, + status.getChunk, + status.getTotChunks) } private def gossipToProto(gossip: Gossip): dm.Gossip = { val b = dm.Gossip.newBuilder().setSendBack(gossip.sendBack) val entries = gossip.updatedData.foreach { case (key, data) => - b.addEntries(dm.Gossip.Entry.newBuilder(). - setKey(key). - setEnvelope(dataEnvelopeToProto(data))) + b.addEntries(dm.Gossip.Entry.newBuilder().setKey(key).setEnvelope(dataEnvelopeToProto(data))) } b.build() } private def gossipFromBinary(bytes: Array[Byte]): Gossip = { val gossip = dm.Gossip.parseFrom(decompress(bytes)) - Gossip( - gossip.getEntriesList.asScala.iterator.map(e => - e.getKey -> dataEnvelopeFromProto(e.getEnvelope)).toMap, - sendBack = gossip.getSendBack) + Gossip(gossip.getEntriesList.asScala.iterator.map(e => e.getKey -> dataEnvelopeFromProto(e.getEnvelope)).toMap, + sendBack = gossip.getSendBack) } private def deltaPropagationToProto(deltaPropagation: DeltaPropagation): dm.DeltaPropagation = { - val b = dm.DeltaPropagation.newBuilder() - .setFromNode(uniqueAddressToProto(deltaPropagation.fromNode)) + val b = dm.DeltaPropagation.newBuilder().setFromNode(uniqueAddressToProto(deltaPropagation.fromNode)) if (deltaPropagation.reply) b.setReply(deltaPropagation.reply) val entries = deltaPropagation.deltas.foreach { case (key, Delta(data, fromSeqNr, toSeqNr)) => - val b2 = dm.DeltaPropagation.Entry.newBuilder() + val b2 = dm.DeltaPropagation.Entry + .newBuilder() .setKey(key) 
.setEnvelope(dataEnvelopeToProto(data)) .setFromSeqNr(fromSeqNr) @@ -314,14 +313,13 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) private def deltaPropagationFromBinary(bytes: Array[Byte]): DeltaPropagation = { val deltaPropagation = dm.DeltaPropagation.parseFrom(bytes) val reply = deltaPropagation.hasReply && deltaPropagation.getReply - DeltaPropagation( - uniqueAddressFromProto(deltaPropagation.getFromNode), - reply, - deltaPropagation.getEntriesList.asScala.iterator.map { e => - val fromSeqNr = e.getFromSeqNr - val toSeqNr = if (e.hasToSeqNr) e.getToSeqNr else fromSeqNr - e.getKey -> Delta(dataEnvelopeFromProto(e.getEnvelope), fromSeqNr, toSeqNr) - }.toMap) + DeltaPropagation(uniqueAddressFromProto(deltaPropagation.getFromNode), + reply, + deltaPropagation.getEntriesList.asScala.iterator.map { e => + val fromSeqNr = e.getFromSeqNr + val toSeqNr = if (e.hasToSeqNr) e.getToSeqNr else fromSeqNr + e.getKey -> Delta(dataEnvelopeFromProto(e.getEnvelope), fromSeqNr, toSeqNr) + }.toMap) } private def getToProto(get: Get[_]): dm.Get = { @@ -332,10 +330,11 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) case _: ReadAll => -1 } - val b = dm.Get.newBuilder(). - setKey(otherMessageToProto(get.key)). - setConsistency(consistencyValue). - setTimeout(get.consistency.timeout.toMillis.toInt) + val b = dm.Get + .newBuilder() + .setKey(otherMessageToProto(get.key)) + .setConsistency(consistencyValue) + .setTimeout(get.consistency.timeout.toMillis.toInt) get.request.foreach(o => b.setRequest(otherMessageToProto(o))) b.build() @@ -356,9 +355,10 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) } private def getSuccessToProto(getSuccess: GetSuccess[_]): dm.GetSuccess = { - val b = dm.GetSuccess.newBuilder(). - setKey(otherMessageToProto(getSuccess.key)). 
- setData(otherMessageToProto(getSuccess.dataValue)) + val b = dm.GetSuccess + .newBuilder() + .setKey(otherMessageToProto(getSuccess.key)) + .setData(otherMessageToProto(getSuccess.dataValue)) getSuccess.request.foreach(o => b.setRequest(otherMessageToProto(o))) b.build() @@ -399,10 +399,11 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) } private def subscribeToProto(subscribe: Subscribe[_]): dm.Subscribe = - dm.Subscribe.newBuilder(). - setKey(otherMessageToProto(subscribe.key)). - setRef(Serialization.serializedActorPath(subscribe.subscriber)). - build() + dm.Subscribe + .newBuilder() + .setKey(otherMessageToProto(subscribe.key)) + .setRef(Serialization.serializedActorPath(subscribe.subscriber)) + .build() private def subscribeFromBinary(bytes: Array[Byte]): Subscribe[_] = { val subscribe = dm.Subscribe.parseFrom(bytes) @@ -411,10 +412,11 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) } private def unsubscribeToProto(unsubscribe: Unsubscribe[_]): dm.Unsubscribe = - dm.Unsubscribe.newBuilder(). - setKey(otherMessageToProto(unsubscribe.key)). - setRef(Serialization.serializedActorPath(unsubscribe.subscriber)). - build() + dm.Unsubscribe + .newBuilder() + .setKey(otherMessageToProto(unsubscribe.key)) + .setRef(Serialization.serializedActorPath(unsubscribe.subscriber)) + .build() private def unsubscribeFromBinary(bytes: Array[Byte]): Unsubscribe[_] = { val unsubscribe = dm.Unsubscribe.parseFrom(bytes) @@ -423,10 +425,11 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) } private def changedToProto(changed: Changed[_]): dm.Changed = - dm.Changed.newBuilder(). - setKey(otherMessageToProto(changed.key)). - setData(otherMessageToProto(changed.dataValue)). 
- build() + dm.Changed + .newBuilder() + .setKey(otherMessageToProto(changed.key)) + .setData(otherMessageToProto(changed.dataValue)) + .build() private def changedFromBinary(bytes: Array[Byte]): Changed[_] = { val changed = dm.Changed.parseFrom(bytes) @@ -438,11 +441,12 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) private def pruningToProto(entries: Map[UniqueAddress, PruningState]): Iterable[dm.DataEnvelope.PruningEntry] = { entries.map { case (removedAddress, state) => - val b = dm.DataEnvelope.PruningEntry.newBuilder(). - setRemovedAddress(uniqueAddressToProto(removedAddress)) + val b = dm.DataEnvelope.PruningEntry.newBuilder().setRemovedAddress(uniqueAddressToProto(removedAddress)) state match { case PruningState.PruningInitialized(owner, seen) => - seen.toVector.sorted(Member.addressOrdering).map(addressToProto).foreach { a => b.addSeen(a) } + seen.toVector.sorted(Member.addressOrdering).map(addressToProto).foreach { a => + b.addSeen(a) + } b.setOwnerAddress(uniqueAddressToProto(owner)) b.setPerformed(false) case PruningState.PruningPerformed(obsoleteTime) => @@ -456,8 +460,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) } private def dataEnvelopeToProto(dataEnvelope: DataEnvelope): dm.DataEnvelope = { - val dataEnvelopeBuilder = dm.DataEnvelope.newBuilder(). 
- setData(otherMessageToProto(dataEnvelope.data)) + val dataEnvelopeBuilder = dm.DataEnvelope.newBuilder().setData(otherMessageToProto(dataEnvelope.data)) dataEnvelopeBuilder.addAllPruning(pruningToProto(dataEnvelope.pruning).asJava) @@ -479,7 +482,8 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) DataEnvelope(data, pruning, deltaVersions) } - private def pruningFromProto(pruningEntries: java.util.List[dm.DataEnvelope.PruningEntry]): Map[UniqueAddress, PruningState] = { + private def pruningFromProto( + pruningEntries: java.util.List[dm.DataEnvelope.PruningEntry]): Map[UniqueAddress, PruningState] = { if (pruningEntries.isEmpty) Map.empty else @@ -499,10 +503,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) } private def writeToProto(write: Write): dm.Write = - dm.Write.newBuilder(). - setKey(write.key). - setEnvelope(dataEnvelopeToProto(write.envelope)). - build() + dm.Write.newBuilder().setKey(write.key).setEnvelope(dataEnvelopeToProto(write.envelope)).build() private def writeFromBinary(bytes: Array[Byte]): Write = { val write = dm.Write.parseFrom(bytes) @@ -539,8 +540,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem) case _ => false } - val builder = dm.DurableDataEnvelope.newBuilder() - .setData(otherMessageToProto(durableDataEnvelope.data)) + val builder = dm.DurableDataEnvelope.newBuilder().setData(otherMessageToProto(durableDataEnvelope.data)) builder.addAllPruning(pruningToProto(pruning).asJava) diff --git a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala index 0004fe2b69..4f7175ec4a 100644 --- a/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala +++ b/akka-distributed-data/src/main/scala/akka/cluster/ddata/protobuf/SerializationSupport.scala @@ -90,26 +90,26 @@ trait SerializationSupport { Address(addressProtocol, 
system.name, address.getHostname, address.getPort) def uniqueAddressToProto(uniqueAddress: UniqueAddress): dm.UniqueAddress.Builder = - dm.UniqueAddress.newBuilder().setAddress(addressToProto(uniqueAddress.address)) + dm.UniqueAddress + .newBuilder() + .setAddress(addressToProto(uniqueAddress.address)) .setUid(uniqueAddress.longUid.toInt) .setUid2((uniqueAddress.longUid >> 32).toInt) def uniqueAddressFromProto(uniqueAddress: dm.UniqueAddress): UniqueAddress = - UniqueAddress( - addressFromProto(uniqueAddress.getAddress), - if (uniqueAddress.hasUid2) { - // new remote node join the two parts of the long uid back - (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0xFFFFFFFFL) - } else { - // old remote node - uniqueAddress.getUid.toLong - }) + UniqueAddress(addressFromProto(uniqueAddress.getAddress), if (uniqueAddress.hasUid2) { + // new remote node join the two parts of the long uid back + (uniqueAddress.getUid2.toLong << 32) | (uniqueAddress.getUid & 0XFFFFFFFFL) + } else { + // old remote node + uniqueAddress.getUid.toLong + }) def versionVectorToProto(versionVector: VersionVector): dm.VersionVector = { val b = dm.VersionVector.newBuilder() versionVector.versionsIterator.foreach { - case (node, value) => b.addEntries(dm.VersionVector.Entry.newBuilder(). 
- setNode(uniqueAddressToProto(node)).setVersion(value)) + case (node, value) => + b.addEntries(dm.VersionVector.Entry.newBuilder().setNode(uniqueAddressToProto(node)).setVersion(value)) } b.build() } @@ -124,8 +124,9 @@ trait SerializationSupport { else if (entries.size == 1) VersionVector(uniqueAddressFromProto(entries.get(0).getNode), entries.get(0).getVersion) else { - val versions: TreeMap[UniqueAddress, Long] = scala.collection.immutable.TreeMap.from(versionVector.getEntriesList.asScala.iterator.map(entry => - uniqueAddressFromProto(entry.getNode) -> entry.getVersion)) + val versions: TreeMap[UniqueAddress, Long] = + scala.collection.immutable.TreeMap.from(versionVector.getEntriesList.asScala.iterator.map(entry => + uniqueAddressFromProto(entry.getNode) -> entry.getVersion)) VersionVector(versions) } } @@ -137,8 +138,9 @@ trait SerializationSupport { def buildOther(): dm.OtherMessage = { val m = msg.asInstanceOf[AnyRef] val msgSerializer = serialization.findSerializerFor(m) - val builder = dm.OtherMessage.newBuilder(). 
- setEnclosedMessage(ByteString.copyFrom(msgSerializer.toBinary(m))) + val builder = dm.OtherMessage + .newBuilder() + .setEnclosedMessage(ByteString.copyFrom(msgSerializer.toBinary(m))) .setSerializerId(msgSerializer.identifier) val ms = Serializers.manifestFor(msgSerializer, m) @@ -164,10 +166,7 @@ trait SerializationSupport { def otherMessageFromProto(other: dm.OtherMessage): AnyRef = { val manifest = if (other.hasMessageManifest) other.getMessageManifest.toStringUtf8 else "" - serialization.deserialize( - other.getEnclosedMessage.toByteArray, - other.getSerializerId, - manifest).get + serialization.deserialize(other.getEnclosedMessage.toByteArray, other.getSerializerId, manifest).get } } diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala index fcebc6b256..ba1478d196 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurableDataSpec.scala @@ -54,11 +54,11 @@ object DurableDataSpec { if (failStore) reply match { case Some(StoreReply(_, failureMsg, replyTo)) => replyTo ! failureMsg case None => - } - else reply match { - case Some(StoreReply(successMsg, _, replyTo)) => replyTo ! successMsg - case None => - } + } else + reply match { + case Some(StoreReply(successMsg, _, replyTo)) => replyTo ! 
successMsg + case None => + } } } @@ -72,7 +72,9 @@ class DurableDataWriteBehindSpecMultiJvmNode1 extends DurableDataSpec(DurableDat class DurableDataWriteBehindSpecMultiJvmNode2 extends DurableDataSpec(DurableDataSpecConfig(writeBehind = true)) abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig) - extends MultiNodeSpec(multiNodeConfig) with STMultiNodeSpec with ImplicitSender { + extends MultiNodeSpec(multiNodeConfig) + with STMultiNodeSpec + with ImplicitSender { import DurableDataSpec._ import Replicator._ import multiNodeConfig._ @@ -95,12 +97,13 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig) enterBarrier("after-" + testStepCounter) } - def newReplicator(sys: ActorSystem = system) = sys.actorOf(Replicator.props( - ReplicatorSettings(system).withGossipInterval(1.second)), "replicator-" + testStepCounter) + def newReplicator(sys: ActorSystem = system) = + sys.actorOf(Replicator.props(ReplicatorSettings(system).withGossipInterval(1.second)), + "replicator-" + testStepCounter) def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) } enterBarrier(from.name + "-joined") } @@ -272,10 +275,9 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig) Await.ready(sys1.terminate(), 10.seconds) } - val sys2 = ActorSystem( - "AdditionalSys", - // use the same port - ConfigFactory.parseString(s""" + val sys2 = ActorSystem("AdditionalSys", + // use the same port + ConfigFactory.parseString(s""" akka.remote.artery.canonical.port = ${address.port.get} akka.remote.netty.tcp.port = ${address.port.get} """).withFallback(system.settings.config)) @@ -310,8 +312,7 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig) "stop Replicator if Load fails" in { runOn(first) { val r = system.actorOf( - Replicator.props( - ReplicatorSettings(system).withDurableStoreProps(testDurableStoreProps(failLoad = true))), + 
Replicator.props(ReplicatorSettings(system).withDurableStoreProps(testDurableStoreProps(failLoad = true))), "replicator-" + testStepCounter) watch(r) expectTerminated(r) @@ -322,8 +323,7 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig) "reply with StoreFailure if store fails" in { runOn(first) { val r = system.actorOf( - Replicator.props( - ReplicatorSettings(system).withDurableStoreProps(testDurableStoreProps(failStore = true))), + Replicator.props(ReplicatorSettings(system).withDurableStoreProps(testDurableStoreProps(failStore = true))), "replicator-" + testStepCounter) r ! Update(KeyA, GCounter(), WriteLocal, request = Some("a"))(_ :+ 1) expectMsg(StoreFailure(KeyA, Some("a"))) @@ -332,4 +332,3 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig) } } - diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala index 6780786124..ddc67c7a8f 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/DurablePruningSpec.scala @@ -50,8 +50,10 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN def startReplicator(sys: ActorSystem): ActorRef = sys.actorOf(Replicator.props( - ReplicatorSettings(sys).withGossipInterval(1.second) - .withPruning(pruningInterval = 1.second, maxPruningDissemination)), "replicator") + ReplicatorSettings(sys) + .withGossipInterval(1.second) + .withPruning(pruningInterval = 1.second, maxPruningDissemination)), + "replicator") val replicator = startReplicator(system) val timeout = 5.seconds.dilated @@ -59,7 +61,7 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) } 
enterBarrier(from.name + "-joined") } @@ -150,7 +152,8 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN runOn(first) { val address = cluster2.selfAddress - val sys3 = ActorSystem(system.name, ConfigFactory.parseString(s""" + val sys3 = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.artery.canonical.port = ${address.port.get} akka.remote.netty.tcp.port = ${address.port.get} """).withFallback(system.settings.config)) @@ -190,4 +193,3 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN } } - diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala index cfdcd11b72..4061cba9cd 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/JepsenInspiredInsertSpec.scala @@ -43,7 +43,10 @@ class JepsenInspiredInsertSpecMultiJvmNode4 extends JepsenInspiredInsertSpec class JepsenInspiredInsertSpecMultiJvmNode5 extends JepsenInspiredInsertSpec class JepsenInspiredInsertSpecMultiJvmNode6 extends JepsenInspiredInsertSpec -class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) with STMultiNodeSpec with ImplicitSender { +class JepsenInspiredInsertSpec + extends MultiNodeSpec(JepsenInspiredInsertSpec) + with STMultiNodeSpec + with ImplicitSender { import JepsenInspiredInsertSpec._ import Replicator._ @@ -82,7 +85,7 @@ class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) w def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) } enterBarrier(from.name + "-joined") } @@ -102,7 +105,9 @@ class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) w } runOn(controller) { - nodes.foreach { n => 
enterBarrier(n.name + "-joined") } + nodes.foreach { n => + enterBarrier(n.name + "-joined") + } } enterBarrier("after-setup") @@ -119,7 +124,7 @@ class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) w writeProbe.receiveOne(3.seconds) } val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success } - val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } + val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } successWriteAcks.map(_.request.get).toSet should be(myData.toSet) successWriteAcks.size should be(myData.size) failureWriteAcks should be(Nil) @@ -152,7 +157,7 @@ class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) w writeProbe.receiveOne(timeout + 1.second) } val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success } - val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } + val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } successWriteAcks.map(_.request.get).toSet should be(myData.toSet) successWriteAcks.size should be(myData.size) failureWriteAcks should be(Nil) @@ -196,7 +201,7 @@ class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) w writeProbe.receiveOne(3.seconds) } val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success } - val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } + val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } successWriteAcks.map(_.request.get).toSet should be(myData.toSet) successWriteAcks.size should be(myData.size) failureWriteAcks should be(Nil) @@ -241,7 +246,7 @@ class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) w writeProbe.receiveOne(timeout + 1.second) } val successWriteAcks = writeAcks.collect { case success: UpdateSuccess[_] => success } - val failureWriteAcks = 
writeAcks.collect { case fail: UpdateFailure[_] => fail } + val failureWriteAcks = writeAcks.collect { case fail: UpdateFailure[_] => fail } runOn(n1, n4, n5) { successWriteAcks.map(_.request.get).toSet should be(myData.toSet) successWriteAcks.size should be(myData.size) @@ -282,4 +287,3 @@ class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) w } } - diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala index 395c46bc4a..b5a175bd57 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/PerformanceSpec.scala @@ -78,14 +78,17 @@ class PerformanceSpec extends MultiNodeSpec(PerformanceSpec) with STMultiNodeSpe def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) } enterBarrier(from.name + "-joined") } - def repeat(description: String, keys: Iterable[ORSetKey[Int]], n: Int, - expectedAfterReplication: Option[Set[Int]] = None, oneByOne: Boolean = false)( - block: (ORSetKey[Int], Int, ActorRef) => Unit, afterEachKey: ORSetKey[Int] => Unit = _ => ()): Unit = { + def repeat(description: String, + keys: Iterable[ORSetKey[Int]], + n: Int, + expectedAfterReplication: Option[Set[Int]] = None, + oneByOne: Boolean = false)(block: (ORSetKey[Int], Int, ActorRef) => Unit, + afterEachKey: ORSetKey[Int] => Unit = _ => ()): Unit = { keys.foreach { key => val startTime = System.nanoTime() @@ -124,7 +127,9 @@ class PerformanceSpec extends MultiNodeSpec(PerformanceSpec) with STMultiNodeSpe } def awaitReplicated(keys: Iterable[ORSetKey[Int]], expectedData: Set[Int]): Unit = - keys.foreach { key => awaitReplicated(key, expectedData) } + keys.foreach { key => + awaitReplicated(key, expectedData) + } def awaitReplicated(key: ORSetKey[Int], expectedData: 
Set[Int]): Unit = { within(20.seconds) { @@ -266,4 +271,3 @@ class PerformanceSpec extends MultiNodeSpec(PerformanceSpec) with STMultiNodeSpe } } - diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala index 2a40abcb40..0c4ff9e8b3 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorChaosSpec.scala @@ -45,8 +45,9 @@ class ReplicatorChaosSpec extends MultiNodeSpec(ReplicatorChaosSpec) with STMult val cluster = Cluster(system) implicit val selfUniqueAddress = DistributedData(system).selfUniqueAddress - val replicator = system.actorOf(Replicator.props( - ReplicatorSettings(system).withRole("backend").withGossipInterval(1.second)), "replicator") + val replicator = system.actorOf( + Replicator.props(ReplicatorSettings(system).withRole("backend").withGossipInterval(1.second)), + "replicator") val timeout = 3.seconds.dilated val KeyA = GCounterKey("A") @@ -59,7 +60,7 @@ class ReplicatorChaosSpec extends MultiNodeSpec(ReplicatorChaosSpec) with STMult def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) } enterBarrier(from.name + "-joined") } @@ -69,12 +70,13 @@ class ReplicatorChaosSpec extends MultiNodeSpec(ReplicatorChaosSpec) with STMult awaitAssert { replicator ! 
Get(key, ReadLocal) val value = expectMsgPF() { - case g @ GetSuccess(`key`, _) => g.dataValue match { - case c: GCounter => c.value - case c: PNCounter => c.value - case c: GSet[_] => c.elements - case c: ORSet[_] => c.elements - } + case g @ GetSuccess(`key`, _) => + g.dataValue match { + case c: GCounter => c.value + case c: PNCounter => c.value + case c: GSet[_] => c.elements + case c: ORSet[_] => c.elements + } } value should be(expected) } @@ -107,7 +109,7 @@ class ReplicatorChaosSpec extends MultiNodeSpec(ReplicatorChaosSpec) with STMult runOn(first) { for (_ <- 0 until 5) { replicator ! Update(KeyA, GCounter(), WriteLocal)(_ :+ 1) - replicator ! Update(KeyB, PNCounter(), WriteLocal)(_ decrement 1) + replicator ! Update(KeyB, PNCounter(), WriteLocal)(_.decrement(1)) replicator ! Update(KeyC, GCounter(), WriteAll(timeout))(_ :+ 1) } receiveN(15).map(_.getClass).toSet should be(Set(classOf[UpdateSuccess[_]])) @@ -117,9 +119,8 @@ class ReplicatorChaosSpec extends MultiNodeSpec(ReplicatorChaosSpec) with STMult replicator ! Update(KeyA, GCounter(), WriteLocal)(_ :+ 20) replicator ! Update(KeyB, PNCounter(), WriteTo(2, timeout))(_ :+ 20) replicator ! Update(KeyC, GCounter(), WriteAll(timeout))(_ :+ 20) - receiveN(3).toSet should be(Set( - UpdateSuccess(KeyA, None), - UpdateSuccess(KeyB, None), UpdateSuccess(KeyC, None))) + receiveN(3).toSet should be( + Set(UpdateSuccess(KeyA, None), UpdateSuccess(KeyB, None), UpdateSuccess(KeyC, None))) replicator ! Update(KeyE, GSet(), WriteLocal)(_ + "e1" + "e2") expectMsg(UpdateSuccess(KeyE, None)) @@ -180,7 +181,7 @@ class ReplicatorChaosSpec extends MultiNodeSpec(ReplicatorChaosSpec) with STMult replicator ! Update(KeyE, GSet(), WriteTo(2, timeout))(_ + "e4") expectMsg(UpdateSuccess(KeyE, None)) - replicator ! Update(KeyF, ORSet(), WriteTo(2, timeout))(_ remove "e2") + replicator ! 
Update(KeyF, ORSet(), WriteTo(2, timeout))(_.remove("e2")) expectMsg(UpdateSuccess(KeyF, None)) } runOn(fourth) { @@ -234,4 +235,3 @@ class ReplicatorChaosSpec extends MultiNodeSpec(ReplicatorChaosSpec) with STMult } } - diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorDeltaSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorDeltaSpec.scala index 6de66619e8..28fab6eb32 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorDeltaSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorDeltaSpec.scala @@ -31,7 +31,9 @@ object ReplicatorDeltaSpec extends MultiNodeConfig { testTransport(on = true) case class Highest(n: Int, delta: Option[Highest] = None) - extends DeltaReplicatedData with RequiresCausalDeliveryOfDeltas with ReplicatedDelta { + extends DeltaReplicatedData + with RequiresCausalDeliveryOfDeltas + with ReplicatedDelta { type T = Highest type D = Highest @@ -143,8 +145,9 @@ class ReplicatorDeltaSpec extends MultiNodeSpec(ReplicatorDeltaSpec) with STMult val cluster = Cluster(system) implicit val selfUniqueAddress = DistributedData(system).selfUniqueAddress - val fullStateReplicator = system.actorOf(Replicator.props( - ReplicatorSettings(system).withGossipInterval(1.second).withDeltaCrdtEnabled(false)), "fullStateReplicator") + val fullStateReplicator = system.actorOf( + Replicator.props(ReplicatorSettings(system).withGossipInterval(1.second).withDeltaCrdtEnabled(false)), + "fullStateReplicator") val deltaReplicator = { val r = system.actorOf(Replicator.props(ReplicatorSettings(system)), "deltaReplicator") r ! 
Replicator.Internal.TestFullStateGossip(enabled = false) @@ -161,7 +164,7 @@ class ReplicatorDeltaSpec extends MultiNodeSpec(ReplicatorDeltaSpec) with STMult def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) } enterBarrier(from.name + "-joined") } @@ -370,8 +373,8 @@ class ReplicatorDeltaSpec extends MultiNodeSpec(ReplicatorDeltaSpec) with STMult fullStateReplicator ! Update(key, PNCounter.empty, consistency)(_ :+ n) deltaReplicator ! Update(key, PNCounter.empty, consistency)(_ :+ n) case Decr(key, n, consistency) => - fullStateReplicator ! Update(key, PNCounter.empty, consistency)(_ decrement n) - deltaReplicator ! Update(key, PNCounter.empty, consistency)(_ decrement n) + fullStateReplicator ! Update(key, PNCounter.empty, consistency)(_.decrement(n)) + deltaReplicator ! Update(key, PNCounter.empty, consistency)(_.decrement(n)) case Add(key, elem, consistency) => // to have an deterministic result when mixing add/remove we can only perform // the ORSet operations from one node @@ -381,8 +384,8 @@ class ReplicatorDeltaSpec extends MultiNodeSpec(ReplicatorDeltaSpec) with STMult } case Remove(key, elem, consistency) => runOn(first) { - fullStateReplicator ! Update(key, ORSet.empty[String], consistency)(_ remove elem) - deltaReplicator ! Update(key, ORSet.empty[String], consistency)(_ remove elem) + fullStateReplicator ! Update(key, ORSet.empty[String], consistency)(_.remove(elem)) + deltaReplicator ! 
Update(key, ORSet.empty[String], consistency)(_.remove(elem)) } } } @@ -425,4 +428,3 @@ class ReplicatorDeltaSpec extends MultiNodeSpec(ReplicatorDeltaSpec) with STMult } } - diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorMapDeltaSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorMapDeltaSpec.scala index b6cc32d45c..c5144b0ec8 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorMapDeltaSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorMapDeltaSpec.scala @@ -40,13 +40,19 @@ object ReplicatorMapDeltaSpec extends MultiNodeConfig { final case class Incr(ki: (PNCounterMapKey[String], String), n: Int, consistency: WriteConsistency) extends Op final case class Decr(ki: (PNCounterMapKey[String], String), n: Int, consistency: WriteConsistency) extends Op // AddVD and RemoveVD for variant of ORMultiMap with Value Deltas, NoVD - for the vanilla ORMultiMap - final case class AddVD(ki: (ORMultiMapKey[String, String], String), elem: String, consistency: WriteConsistency) extends Op - final case class RemoveVD(ki: (ORMultiMapKey[String, String], String), elem: String, consistency: WriteConsistency) extends Op - final case class AddNoVD(ki: (ORMultiMapKey[String, String], String), elem: String, consistency: WriteConsistency) extends Op - final case class RemoveNoVD(ki: (ORMultiMapKey[String, String], String), elem: String, consistency: WriteConsistency) extends Op + final case class AddVD(ki: (ORMultiMapKey[String, String], String), elem: String, consistency: WriteConsistency) + extends Op + final case class RemoveVD(ki: (ORMultiMapKey[String, String], String), elem: String, consistency: WriteConsistency) + extends Op + final case class AddNoVD(ki: (ORMultiMapKey[String, String], String), elem: String, consistency: WriteConsistency) + extends Op + final case class RemoveNoVD(ki: (ORMultiMapKey[String, String], String), elem: String, 
consistency: WriteConsistency) + extends Op // AddOM and RemoveOM for Vanilla ORMap holding ORSet inside - final case class AddOM(ki: (ORMapKey[String, ORSet[String]], String), elem: String, consistency: WriteConsistency) extends Op - final case class RemoveOM(ki: (ORMapKey[String, ORSet[String]], String), elem: String, consistency: WriteConsistency) extends Op + final case class AddOM(ki: (ORMapKey[String, ORSet[String]], String), elem: String, consistency: WriteConsistency) + extends Op + final case class RemoveOM(ki: (ORMapKey[String, ORSet[String]], String), elem: String, consistency: WriteConsistency) + extends Op val timeout = 5.seconds val writeTwo = WriteTo(2, timeout) @@ -164,10 +170,12 @@ object ReplicatorMapDeltaSpec extends MultiNodeConfig { }.toVector } - def addElementToORMap(om: ORMap[String, ORSet[String]], key: String, element: String)(implicit node: SelfUniqueAddress) = + def addElementToORMap(om: ORMap[String, ORSet[String]], key: String, element: String)( + implicit node: SelfUniqueAddress) = om.updated(node, key, ORSet.empty[String])(_ :+ element) - def removeElementFromORMap(om: ORMap[String, ORSet[String]], key: String, element: String)(implicit node: SelfUniqueAddress) = + def removeElementFromORMap(om: ORMap[String, ORSet[String]], key: String, element: String)( + implicit node: SelfUniqueAddress) = om.updated(node, key, ORSet.empty[String])(_.remove(element)) } @@ -184,16 +192,17 @@ class ReplicatorMapDeltaSpec extends MultiNodeSpec(ReplicatorMapDeltaSpec) with val cluster = Cluster(system) implicit val selfUniqueAddress = DistributedData(system).selfUniqueAddress - val fullStateReplicator = system.actorOf(Replicator.props( - ReplicatorSettings(system).withGossipInterval(1.second).withDeltaCrdtEnabled(false)), "fullStateReplicator") + val fullStateReplicator = system.actorOf( + Replicator.props(ReplicatorSettings(system).withGossipInterval(1.second).withDeltaCrdtEnabled(false)), + "fullStateReplicator") val deltaReplicator = { val r = 
system.actorOf(Replicator.props(ReplicatorSettings(system)), "deltaReplicator") r ! Replicator.Internal.TestFullStateGossip(enabled = false) r } // both deltas and full state - val ordinaryReplicator = system.actorOf(Replicator.props( - ReplicatorSettings(system).withGossipInterval(1.second)), "ordinaryReplicator") + val ordinaryReplicator = + system.actorOf(Replicator.props(ReplicatorSettings(system).withGossipInterval(1.second)), "ordinaryReplicator") var afterCounter = 0 def enterBarrierAfterTestStep(): Unit = { @@ -203,7 +212,7 @@ class ReplicatorMapDeltaSpec extends MultiNodeSpec(ReplicatorMapDeltaSpec) with def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) } enterBarrier(from.name + "-joined") } @@ -246,16 +255,20 @@ class ReplicatorMapDeltaSpec extends MultiNodeSpec(ReplicatorMapDeltaSpec) with deltaReplicator ! Update(key._1, PNCounterMap.empty[String], WriteLocal)(_.incrementBy(key._2, 1)) } List(KeyD, KeyE, KeyF).foreach { key => - fullStateReplicator ! Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)(_ :+ (key._2 -> Set("a"))) - deltaReplicator ! Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)(_ :+ (key._2 -> Set("a"))) + fullStateReplicator ! Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)( + _ :+ (key._2 -> Set("a"))) + deltaReplicator ! Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)( + _ :+ (key._2 -> Set("a"))) } List(KeyG, KeyH, KeyI).foreach { key => fullStateReplicator ! Update(key._1, ORMultiMap.empty[String, String], WriteLocal)(_ :+ (key._2 -> Set("a"))) deltaReplicator ! Update(key._1, ORMultiMap.empty[String, String], WriteLocal)(_ :+ (key._2 -> Set("a"))) } List(KeyJ, KeyK, KeyL).foreach { key => - fullStateReplicator ! Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)(_ :+ (key._2 -> (ORSet.empty :+ "a"))) - deltaReplicator ! 
Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)(_ :+ (key._2 -> (ORSet.empty :+ "a"))) + fullStateReplicator ! Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)( + _ :+ (key._2 -> (ORSet.empty :+ "a"))) + deltaReplicator ! Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)( + _ :+ (key._2 -> (ORSet.empty :+ "a"))) } } enterBarrier("updated-1") @@ -335,46 +348,58 @@ class ReplicatorMapDeltaSpec extends MultiNodeSpec(ReplicatorMapDeltaSpec) with op match { case Delay(d) => Thread.sleep(d) case Incr(key, n, _) => - fullStateReplicator ! Update(key._1, PNCounterMap.empty[String], WriteLocal)(_ incrementBy (key._2, n)) - deltaReplicator ! Update(key._1, PNCounterMap.empty[String], WriteLocal)(_ incrementBy (key._2, n)) + fullStateReplicator ! Update(key._1, PNCounterMap.empty[String], WriteLocal)(_.incrementBy(key._2, n)) + deltaReplicator ! Update(key._1, PNCounterMap.empty[String], WriteLocal)(_.incrementBy(key._2, n)) case Decr(key, n, _) => - fullStateReplicator ! Update(key._1, PNCounterMap.empty[String], WriteLocal)(_ decrementBy (key._2, n)) - deltaReplicator ! Update(key._1, PNCounterMap.empty[String], WriteLocal)(_ decrementBy (key._2, n)) + fullStateReplicator ! Update(key._1, PNCounterMap.empty[String], WriteLocal)(_.decrementBy(key._2, n)) + deltaReplicator ! Update(key._1, PNCounterMap.empty[String], WriteLocal)(_.decrementBy(key._2, n)) case AddVD(key, elem, _) => // to have an deterministic result when mixing add/remove we can only perform // the ORSet operations from one node runOn((if (key == KeyF) List(first) else List(first, second, third)): _*) { - fullStateReplicator ! Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)(_ addBindingBy (key._2, elem)) - deltaReplicator ! Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)(_ addBindingBy (key._2, elem)) + fullStateReplicator ! 
Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)( + _.addBindingBy(key._2, elem)) + deltaReplicator ! Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)( + _.addBindingBy(key._2, elem)) } case RemoveVD(key, elem, _) => runOn(first) { - fullStateReplicator ! Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)(_ removeBindingBy (key._2, elem)) - deltaReplicator ! Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)(_ removeBindingBy (key._2, elem)) + fullStateReplicator ! Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)( + _.removeBindingBy(key._2, elem)) + deltaReplicator ! Update(key._1, ORMultiMap.emptyWithValueDeltas[String, String], WriteLocal)( + _.removeBindingBy(key._2, elem)) } case AddNoVD(key, elem, _) => // to have an deterministic result when mixing add/remove we can only perform // the ORSet operations from one node runOn((if (key == KeyI) List(first) else List(first, second, third)): _*) { - fullStateReplicator ! Update(key._1, ORMultiMap.empty[String, String], WriteLocal)(_ addBindingBy (key._2, elem)) - deltaReplicator ! Update(key._1, ORMultiMap.empty[String, String], WriteLocal)(_ addBindingBy (key._2, elem)) + fullStateReplicator ! Update(key._1, ORMultiMap.empty[String, String], WriteLocal)( + _.addBindingBy(key._2, elem)) + deltaReplicator ! Update(key._1, ORMultiMap.empty[String, String], WriteLocal)( + _.addBindingBy(key._2, elem)) } case RemoveNoVD(key, elem, _) => runOn(first) { - fullStateReplicator ! Update(key._1, ORMultiMap.empty[String, String], WriteLocal)(_ removeBindingBy (key._2, elem)) - deltaReplicator ! Update(key._1, ORMultiMap.empty[String, String], WriteLocal)(_ removeBindingBy (key._2, elem)) + fullStateReplicator ! Update(key._1, ORMultiMap.empty[String, String], WriteLocal)( + _.removeBindingBy(key._2, elem)) + deltaReplicator ! 
Update(key._1, ORMultiMap.empty[String, String], WriteLocal)( + _.removeBindingBy(key._2, elem)) } case AddOM(key, elem, _) => // to have an deterministic result when mixing add/remove we can only perform // the ORSet operations from one node runOn((if (key == KeyL) List(first) else List(first, second, third)): _*) { - fullStateReplicator ! Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)(om => addElementToORMap(om, key._2, elem)) - deltaReplicator ! Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)(om => addElementToORMap(om, key._2, elem)) + fullStateReplicator ! Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)(om => + addElementToORMap(om, key._2, elem)) + deltaReplicator ! Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)(om => + addElementToORMap(om, key._2, elem)) } case RemoveOM(key, elem, _) => runOn(first) { - fullStateReplicator ! Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)(om => removeElementFromORMap(om, key._2, elem)) - deltaReplicator ! Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)(om => removeElementFromORMap(om, key._2, elem)) + fullStateReplicator ! Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)(om => + removeElementFromORMap(om, key._2, elem)) + deltaReplicator ! 
Update(key._1, ORMap.empty[String, ORSet[String]], WriteLocal)(om => + removeElementFromORMap(om, key._2, elem)) } } } @@ -443,4 +468,3 @@ class ReplicatorMapDeltaSpec extends MultiNodeSpec(ReplicatorMapDeltaSpec) with } } - diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorORSetDeltaSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorORSetDeltaSpec.scala index 7723556c74..fd2bae18c2 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorORSetDeltaSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorORSetDeltaSpec.scala @@ -36,7 +36,10 @@ class ReplicatorORSetDeltaSpecMultiJvmNode1 extends ReplicatorORSetDeltaSpec class ReplicatorORSetDeltaSpecMultiJvmNode2 extends ReplicatorORSetDeltaSpec class ReplicatorORSetDeltaSpecMultiJvmNode3 extends ReplicatorORSetDeltaSpec -class ReplicatorORSetDeltaSpec extends MultiNodeSpec(ReplicatorORSetDeltaSpec) with STMultiNodeSpec with ImplicitSender { +class ReplicatorORSetDeltaSpec + extends MultiNodeSpec(ReplicatorORSetDeltaSpec) + with STMultiNodeSpec + with ImplicitSender { import Replicator._ import ReplicatorORSetDeltaSpec._ @@ -44,8 +47,8 @@ class ReplicatorORSetDeltaSpec extends MultiNodeSpec(ReplicatorORSetDeltaSpec) w val cluster = Cluster(system) implicit val selfUniqueAddress = DistributedData(system).selfUniqueAddress - val replicator = system.actorOf(Replicator.props( - ReplicatorSettings(system).withGossipInterval(1.second)), "replicator") + val replicator = + system.actorOf(Replicator.props(ReplicatorSettings(system).withGossipInterval(1.second)), "replicator") val timeout = 3.seconds.dilated val KeyA = ORSetKey[String]("A") @@ -54,7 +57,7 @@ class ReplicatorORSetDeltaSpec extends MultiNodeSpec(ReplicatorORSetDeltaSpec) w def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) } enterBarrier(from.name + "-joined") } 
@@ -64,9 +67,10 @@ class ReplicatorORSetDeltaSpec extends MultiNodeSpec(ReplicatorORSetDeltaSpec) w awaitAssert { replicator ! Get(key, ReadLocal) val value = expectMsgPF() { - case g @ GetSuccess(`key`, _) => g.dataValue match { - case c: ORSet[_] => c.elements - } + case g @ GetSuccess(`key`, _) => + g.dataValue match { + case c: ORSet[_] => c.elements + } } value should be(expected) } @@ -166,4 +170,3 @@ class ReplicatorORSetDeltaSpec extends MultiNodeSpec(ReplicatorORSetDeltaSpec) w } } - diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala index acb6ced7cb..da2e8f1658 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorPruningSpec.scala @@ -42,8 +42,10 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST implicit val selfUniqueAddress = DistributedData(system).selfUniqueAddress val maxPruningDissemination = 3.seconds val replicator = system.actorOf(Replicator.props( - ReplicatorSettings(system).withGossipInterval(1.second) - .withPruning(pruningInterval = 1.second, maxPruningDissemination)), "replicator") + ReplicatorSettings(system) + .withGossipInterval(1.second) + .withPruning(pruningInterval = 1.second, maxPruningDissemination)), + "replicator") val timeout = 3.seconds.dilated val KeyA = GCounterKey("A") @@ -54,7 +56,7 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) } enterBarrier(from.name + "-joined") } @@ -77,10 +79,13 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST val memberProbe = TestProbe() cluster.subscribe(memberProbe.ref, initialStateMode = 
InitialStateAsEvents, classOf[MemberUp]) val thirdUniqueAddress = { - val member = memberProbe.fishForMessage(3.seconds) { - case MemberUp(m) if m.address == node(third).address => true - case _ => false - }.asInstanceOf[MemberUp].member + val member = memberProbe + .fishForMessage(3.seconds) { + case MemberUp(m) if m.address == node(third).address => true + case _ => false + } + .asInstanceOf[MemberUp] + .member member.uniqueAddress } @@ -90,13 +95,17 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST replicator ! Update(KeyB, ORSet(), WriteAll(timeout))(_ :+ "a" :+ "b" :+ "c") expectMsg(UpdateSuccess(KeyB, None)) - replicator ! Update(KeyC, PNCounterMap.empty[String], WriteAll(timeout)) { _.incrementBy("x", 1).incrementBy("y", 1) } + replicator ! Update(KeyC, PNCounterMap.empty[String], WriteAll(timeout)) { + _.incrementBy("x", 1).incrementBy("y", 1) + } expectMsg(UpdateSuccess(KeyC, None)) replicator ! Update(KeyD, ORMultiMap.empty[String, String], WriteAll(timeout)) { _ :+ ("a" -> Set("A")) } expectMsg(UpdateSuccess(KeyD, None)) - replicator ! Update(KeyE, ORMap.empty[String, GSet[String]], WriteAll(timeout)) { _ :+ ("a" -> GSet.empty[String].add("A")) } + replicator ! Update(KeyE, ORMap.empty[String, GSet[String]], WriteAll(timeout)) { + _ :+ ("a" -> GSet.empty[String].add("A")) + } expectMsg(UpdateSuccess(KeyE, None)) enterBarrier("updates-done") @@ -126,7 +135,7 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST enterBarrier("get-old") runOn(third) { - replicator ! Update(KeyE, ORMap.empty[String, GSet[String]], WriteLocal) { _ remove "a" } + replicator ! 
Update(KeyE, ORMap.empty[String, GSet[String]], WriteLocal) { _.remove("a") } expectMsg(UpdateSuccess(KeyE, None)) } @@ -246,4 +255,3 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST } } - diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala index d069f8c7bc..0675fe6e14 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/ReplicatorSpec.scala @@ -42,8 +42,9 @@ class ReplicatorSpec extends MultiNodeSpec(ReplicatorSpec) with STMultiNodeSpec val cluster = Cluster(system) implicit val selfUniqueAddress = DistributedData(system).selfUniqueAddress - val replicator = system.actorOf(Replicator.props( - ReplicatorSettings(system).withGossipInterval(1.second).withMaxDeltaElements(10)), "replicator") + val replicator = system.actorOf( + Replicator.props(ReplicatorSettings(system).withGossipInterval(1.second).withMaxDeltaElements(10)), + "replicator") val timeout = 3.seconds.dilated val writeTwo = WriteTo(2, timeout) val writeMajority = WriteMajority(timeout) @@ -75,7 +76,7 @@ class ReplicatorSpec extends MultiNodeSpec(ReplicatorSpec) with STMultiNodeSpec def join(from: RoleName, to: RoleName): Unit = { runOn(from) { - cluster join node(to).address + cluster.join(node(to).address) } enterBarrier(from.name + "-joined") } @@ -575,4 +576,3 @@ class ReplicatorSpec extends MultiNodeSpec(ReplicatorSpec) with STMultiNodeSpec } } - diff --git a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala index bc41c046ad..9703b8c1b1 100644 --- a/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala +++ b/akka-distributed-data/src/multi-jvm/scala/akka/cluster/ddata/STMultiNodeSpec.scala 
@@ -12,8 +12,7 @@ import org.scalatest.Matchers /** * Hooks up MultiNodeSpec with ScalaTest */ -trait STMultiNodeSpec extends MultiNodeSpecCallbacks - with WordSpecLike with Matchers with BeforeAndAfterAll { +trait STMultiNodeSpec extends MultiNodeSpecCallbacks with WordSpecLike with Matchers with BeforeAndAfterAll { override def beforeAll() = multiNodeSpecBeforeAll() diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala index 9940857a43..e5744e2348 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/DeltaPropagationSelectorSpec.scala @@ -16,9 +16,8 @@ import org.scalatest.Matchers import org.scalatest.WordSpec object DeltaPropagationSelectorSpec { - class TestSelector( - val selfUniqueAddress: UniqueAddress, - override val allNodes: Vector[Address]) extends DeltaPropagationSelector { + class TestSelector(val selfUniqueAddress: UniqueAddress, override val allNodes: Vector[Address]) + extends DeltaPropagationSelector { override val gossipIntervalDivisor = 5 override def createDeltaPropagation(deltas: Map[KeyId, (ReplicatedData, Long, Long)]): DeltaPropagation = DeltaPropagation(selfUniqueAddress, false, deltas.map { @@ -53,9 +52,10 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck selector.cleanupDeltaEntries() selector.hasDeltaEntries("A") should ===(true) selector.hasDeltaEntries("B") should ===(true) - val expected = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(deltaA), 1L, 1L), - "B" -> Delta(DataEnvelope(deltaB), 1L, 1L))) + val expected = + DeltaPropagation(selfUniqueAddress, + false, + Map("A" -> Delta(DataEnvelope(deltaA), 1L, 1L), "B" -> Delta(DataEnvelope(deltaB), 1L, 1L))) selector.collectPropagations() should ===(Map(nodes(0) -> 
expected)) selector.collectPropagations() should ===(Map.empty[Address, DeltaPropagation]) selector.cleanupDeltaEntries() @@ -67,9 +67,10 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck val selector = new TestSelector(selfUniqueAddress, nodes.take(3)) selector.update("A", deltaA) selector.update("B", deltaB) - val expected = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(deltaA), 1L, 1L), - "B" -> Delta(DataEnvelope(deltaB), 1L, 1L))) + val expected = + DeltaPropagation(selfUniqueAddress, + false, + Map("A" -> Delta(DataEnvelope(deltaA), 1L, 1L), "B" -> Delta(DataEnvelope(deltaB), 1L, 1L))) selector.collectPropagations() should ===(Map(nodes(0) -> expected, nodes(1) -> expected)) selector.cleanupDeltaEntries() selector.hasDeltaEntries("A") should ===(true) @@ -85,18 +86,19 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck val selector = new TestSelector(selfUniqueAddress, nodes.take(3)) selector.update("A", deltaA) selector.update("B", deltaB) - val expected1 = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(deltaA), 1L, 1L), - "B" -> Delta(DataEnvelope(deltaB), 1L, 1L))) + val expected1 = + DeltaPropagation(selfUniqueAddress, + false, + Map("A" -> Delta(DataEnvelope(deltaA), 1L, 1L), "B" -> Delta(DataEnvelope(deltaB), 1L, 1L))) selector.collectPropagations() should ===(Map(nodes(0) -> expected1, nodes(1) -> expected1)) // new update before previous was propagated to all nodes selector.update("C", deltaC) - val expected2 = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(deltaA), 1L, 1L), - "B" -> Delta(DataEnvelope(deltaB), 1L, 1L), - "C" -> Delta(DataEnvelope(deltaC), 1L, 1L))) - val expected3 = DeltaPropagation(selfUniqueAddress, false, Map( - "C" -> Delta(DataEnvelope(deltaC), 1L, 1L))) + val expected2 = DeltaPropagation(selfUniqueAddress, + false, + Map("A" -> Delta(DataEnvelope(deltaA), 1L, 1L), + "B" -> 
Delta(DataEnvelope(deltaB), 1L, 1L), + "C" -> Delta(DataEnvelope(deltaC), 1L, 1L))) + val expected3 = DeltaPropagation(selfUniqueAddress, false, Map("C" -> Delta(DataEnvelope(deltaC), 1L, 1L))) selector.collectPropagations() should ===(Map(nodes(2) -> expected2, nodes(0) -> expected3)) selector.cleanupDeltaEntries() selector.hasDeltaEntries("A") should ===(false) @@ -117,13 +119,12 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck selector.currentVersion("A") should ===(1L) selector.update("A", delta2) selector.currentVersion("A") should ===(2L) - val expected1 = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(delta1.merge(delta2)), 1L, 2L))) + val expected1 = + DeltaPropagation(selfUniqueAddress, false, Map("A" -> Delta(DataEnvelope(delta1.merge(delta2)), 1L, 2L))) selector.collectPropagations() should ===(Map(nodes(0) -> expected1)) selector.update("A", delta3) selector.currentVersion("A") should ===(3L) - val expected2 = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(delta3), 3L, 3L))) + val expected2 = DeltaPropagation(selfUniqueAddress, false, Map("A" -> Delta(DataEnvelope(delta3), 3L, 3L))) selector.collectPropagations() should ===(Map(nodes(0) -> expected2)) selector.collectPropagations() should ===(Map.empty[Address, DeltaPropagation]) } @@ -136,26 +137,25 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck override def nodesSliceSize(allNodesSize: Int): Int = 1 } selector.update("A", delta1) - val expected1 = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(delta1), 1L, 1L))) + val expected1 = DeltaPropagation(selfUniqueAddress, false, Map("A" -> Delta(DataEnvelope(delta1), 1L, 1L))) selector.collectPropagations() should ===(Map(nodes(0) -> expected1)) selector.update("A", delta2) - val expected2 = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(delta1.merge(delta2)), 1L, 2L))) + 
val expected2 = + DeltaPropagation(selfUniqueAddress, false, Map("A" -> Delta(DataEnvelope(delta1.merge(delta2)), 1L, 2L))) selector.collectPropagations() should ===(Map(nodes(1) -> expected2)) selector.update("A", delta3) - val expected3 = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(delta1.merge(delta2).merge(delta3)), 1L, 3L))) + val expected3 = DeltaPropagation(selfUniqueAddress, + false, + Map("A" -> Delta(DataEnvelope(delta1.merge(delta2).merge(delta3)), 1L, 3L))) selector.collectPropagations() should ===(Map(nodes(2) -> expected3)) - val expected4 = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(delta2.merge(delta3)), 2L, 3L))) + val expected4 = + DeltaPropagation(selfUniqueAddress, false, Map("A" -> Delta(DataEnvelope(delta2.merge(delta3)), 2L, 3L))) selector.collectPropagations() should ===(Map(nodes(0) -> expected4)) - val expected5 = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(delta3), 3L, 3L))) + val expected5 = DeltaPropagation(selfUniqueAddress, false, Map("A" -> Delta(DataEnvelope(delta3), 3L, 3L))) selector.collectPropagations() should ===(Map(nodes(1) -> expected5)) selector.collectPropagations() should ===(Map.empty[Address, DeltaPropagation]) @@ -171,8 +171,8 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck selector.update("A", d.delta.get) data = d } - val expected = DeltaPropagation(selfUniqueAddress, false, Map( - "A" -> Delta(DataEnvelope(NoDeltaPlaceholder), 1L, 1000L))) + val expected = + DeltaPropagation(selfUniqueAddress, false, Map("A" -> Delta(DataEnvelope(NoDeltaPlaceholder), 1L, 1000L))) selector.collectPropagations() should ===(Map(nodes(0) -> expected)) } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/FlagSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/FlagSpec.scala index c9b9d23952..d02088034b 100644 --- 
a/akka-distributed-data/src/test/scala/akka/cluster/ddata/FlagSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/FlagSpec.scala @@ -24,9 +24,9 @@ class FlagSpec extends WordSpec with Matchers { "merge by picking true" in { val f1 = Flag() val f2 = f1.switchOn - val m1 = f1 merge f2 + val m1 = f1.merge(f2) m1.enabled should be(true) - val m2 = f2 merge f1 + val m2 = f2.merge(f1) m2.enabled should be(true) } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala index ade71647cc..34b4a4d814 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GCounterSpec.scala @@ -20,12 +20,12 @@ class GCounterSpec extends WordSpec with Matchers { "be able to increment each node's record by one" in { val c1 = GCounter() - val c2 = c1 increment node1 - val c3 = c2 increment node1 + val c2 = c1.increment(node1) + val c3 = c2.increment(node1) - val c4 = c3 increment node2 - val c5 = c4 increment node2 - val c6 = c5.resetDelta increment node2 + val c4 = c3.increment(node2) + val c5 = c4.increment(node2) + val c6 = c5.resetDelta.increment(node2) c6.state(node1) should be(2) c6.state(node2) should be(3) @@ -41,12 +41,12 @@ class GCounterSpec extends WordSpec with Matchers { "be able to increment each node's record by arbitrary delta" in { val c1 = GCounter() - val c2 = c1 increment (node1, 3) - val c3 = c2 increment (node1, 4) + val c2 = c1.increment(node1, 3) + val c3 = c2.increment(node1, 4) - val c4 = c3 increment (node2, 2) - val c5 = c4 increment (node2, 7) - val c6 = c5 increment node2 + val c4 = c3.increment(node2, 2) + val c5 = c4.increment(node2, 7) + val c6 = c5.increment(node2) c6.state(node1) should be(7) c6.state(node2) should be(10) @@ -55,12 +55,12 @@ class GCounterSpec extends WordSpec with Matchers { "be able to summarize the history to the 
correct aggregated value" in { val c1 = GCounter() - val c2 = c1 increment (node1, 3) - val c3 = c2 increment (node1, 4) + val c2 = c1.increment(node1, 3) + val c3 = c2.increment(node1, 4) - val c4 = c3 increment (node2, 2) - val c5 = c4 increment (node2, 7) - val c6 = c5 increment node2 + val c4 = c3.increment(node2, 2) + val c5 = c4.increment(node2, 7) + val c6 = c5.increment(node2) c6.state(node1) should be(7) c6.state(node2) should be(10) @@ -71,11 +71,11 @@ class GCounterSpec extends WordSpec with Matchers { "be able to have its history correctly merged with another GCounter 1" in { // counter 1 val c11 = GCounter() - val c12 = c11 increment (node1, 3) - val c13 = c12 increment (node1, 4) - val c14 = c13 increment (node2, 2) - val c15 = c14 increment (node2, 7) - val c16 = c15 increment node2 + val c12 = c11.increment(node1, 3) + val c13 = c12.increment(node1, 4) + val c14 = c13.increment(node2, 2) + val c15 = c14.increment(node2, 7) + val c16 = c15.increment(node2) c16.state(node1) should be(7) c16.state(node2) should be(10) @@ -83,24 +83,24 @@ class GCounterSpec extends WordSpec with Matchers { // counter 2 val c21 = GCounter() - val c22 = c21 increment (node1, 2) - val c23 = c22 increment (node1, 2) - val c24 = c23 increment (node2, 3) - val c25 = c24 increment (node2, 2) - val c26 = c25 increment node2 + val c22 = c21.increment(node1, 2) + val c23 = c22.increment(node1, 2) + val c24 = c23.increment(node2, 3) + val c25 = c24.increment(node2, 2) + val c26 = c25.increment(node2) c26.state(node1) should be(4) c26.state(node2) should be(6) c26.value should be(10) // merge both ways - val merged1 = c16 merge c26 + val merged1 = c16.merge(c26) merged1.state(node1) should be(7) merged1.state(node2) should be(10) merged1.value should be(17) merged1.delta should ===(None) - val merged2 = c26 merge c16 + val merged2 = c26.merge(c16) merged2.state(node1) should be(7) merged2.state(node2) should be(10) merged2.value should be(17) @@ -110,11 +110,11 @@ class 
GCounterSpec extends WordSpec with Matchers { "be able to have its history correctly merged with another GCounter 2" in { // counter 1 val c11 = GCounter() - val c12 = c11 increment (node1, 2) - val c13 = c12 increment (node1, 2) - val c14 = c13 increment (node2, 2) - val c15 = c14 increment (node2, 7) - val c16 = c15 increment node2 + val c12 = c11.increment(node1, 2) + val c13 = c12.increment(node1, 2) + val c14 = c13.increment(node2, 2) + val c15 = c14.increment(node2, 7) + val c16 = c15.increment(node2) c16.state(node1) should be(4) c16.state(node2) should be(10) @@ -122,23 +122,23 @@ class GCounterSpec extends WordSpec with Matchers { // counter 1 val c21 = GCounter() - val c22 = c21 increment (node1, 3) - val c23 = c22 increment (node1, 4) - val c24 = c23 increment (node2, 3) - val c25 = c24 increment (node2, 2) - val c26 = c25 increment node2 + val c22 = c21.increment(node1, 3) + val c23 = c22.increment(node1, 4) + val c24 = c23.increment(node2, 3) + val c25 = c24.increment(node2, 2) + val c26 = c25.increment(node2) c26.state(node1) should be(7) c26.state(node2) should be(6) c26.value should be(13) // merge both ways - val merged1 = c16 merge c26 + val merged1 = c16.merge(c26) merged1.state(node1) should be(7) merged1.state(node2) should be(10) merged1.value should be(17) - val merged2 = c26 merge c16 + val merged2 = c26.merge(c16) merged2.state(node1) should be(7) merged2.state(node2) should be(10) merged2.value should be(17) @@ -146,8 +146,8 @@ class GCounterSpec extends WordSpec with Matchers { "have support for pruning" in { val c1 = GCounter() - val c2 = c1 increment node1 - val c3 = c2 increment node2 + val c2 = c1.increment(node1) + val c3 = c2.increment(node2) c2.modifiedByNodes should ===(Set(node1)) c2.needPruningFrom(node1) should be(true) c2.needPruningFrom(node2) should be(false) @@ -162,7 +162,7 @@ class GCounterSpec extends WordSpec with Matchers { c4.needPruningFrom(node1) should be(false) c4.value should be(2) - val c5 = (c4 increment 
node1).pruningCleanup(node1) + val c5 = c4.increment(node1).pruningCleanup(node1) c5.modifiedByNodes should ===(Set(node2)) c5.needPruningFrom(node1) should be(false) c4.value should be(2) diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala index 3315701cc9..a5eaeda440 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/GSetSpec.scala @@ -50,10 +50,10 @@ class GSetSpec extends WordSpec with Matchers { c23.elements should ===(Set(user3, user4)) // merge both ways - val merged1 = c13 merge c23 + val merged1 = c13.merge(c23) merged1.elements should ===(Set(user1, user2, user3, user4)) - val merged2 = c23 merge c13 + val merged2 = c23.merge(c13) merged2.elements should ===(Set(user1, user2, user3, user4)) } @@ -68,10 +68,10 @@ class GSetSpec extends WordSpec with Matchers { c13.delta.get.elements should ===(Set(user1, user2)) // deltas build state - (c12 mergeDelta c13.delta.get) should ===(c13) + (c12.mergeDelta(c13.delta.get)) should ===(c13) // own deltas are idempotent - (c13 mergeDelta c13.delta.get) should ===(c13) + (c13.mergeDelta(c13.delta.get)) should ===(c13) // set 2 val c21 = GSet.empty[String] @@ -84,13 +84,18 @@ class GSetSpec extends WordSpec with Matchers { c23.elements should ===(Set(user3, user4)) - val c33 = c13 merge c23 + val c33 = c13.merge(c23) // merge both ways - val merged1 = GSet.empty[String] mergeDelta c12.delta.get mergeDelta c13.delta.get mergeDelta c22.delta.get mergeDelta c23.delta.get + val merged1 = GSet + .empty[String] + .mergeDelta(c12.delta.get) + .mergeDelta(c13.delta.get) + .mergeDelta(c22.delta.get) + .mergeDelta(c23.delta.get) merged1.elements should ===(Set(user1, user2, user3, user4)) - val merged2 = GSet.empty[String] mergeDelta c23.delta.get mergeDelta c13.delta.get mergeDelta c22.delta.get + val merged2 = 
GSet.empty[String].mergeDelta(c23.delta.get).mergeDelta(c13.delta.get).mergeDelta(c22.delta.get) merged2.elements should ===(Set(user1, user2, user3, user4)) merged1 should ===(c33) @@ -121,13 +126,13 @@ class GSetSpec extends WordSpec with Matchers { c23.elements should contain(user4) // merge both ways - val merged1 = c13 merge c23 + val merged1 = c13.merge(c23) merged1.elements should contain(user1) merged1.elements should contain(user2) merged1.elements should contain(user3) merged1.elements should contain(user4) - val merged2 = c23 merge c13 + val merged2 = c23.merge(c13) merged2.elements should contain(user1) merged2.elements should contain(user2) merged2.elements should contain(user3) diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala index 388a68dbe8..6d6b659151 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWMapSpec.scala @@ -29,22 +29,22 @@ class LWWMapSpec extends WordSpec with Matchers { // merge both ways val expected = Map("a" -> 1, "b" -> 2, "c" -> 3) - (m1 merge m2).entries should be(expected) - (m2 merge m1).entries should be(expected) + m1.merge(m2).entries should be(expected) + m2.merge(m1).entries should be(expected) } "be able to remove entry" in { val m1 = LWWMap.empty[String, Int].put(node1, "a", 1, defaultClock[Int]).put(node1, "b", 2, defaultClock[Int]) val m2 = LWWMap.empty[String, Int].put(node2, "c", 3, defaultClock[Int]) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.remove(node1, "b") - (merged1 merge m3).entries should be(Map("a" -> 1, "c" -> 3)) + merged1.merge(m3).entries should be(Map("a" -> 1, "c" -> 3)) // but if there is a conflicting update the entry is not removed val m4 = merged1.put(node2, "b", 22, defaultClock[Int]) - (m3 merge m4).entries should be(Map("a" -> 1, "b" -> 22, "c" -> 3)) + 
m3.merge(m4).entries should be(Map("a" -> 1, "b" -> 22, "c" -> 3)) } "be able to work with deltas" in { @@ -52,20 +52,20 @@ class LWWMapSpec extends WordSpec with Matchers { val m2 = LWWMap.empty[String, Int].put(node2, "c", 3, defaultClock[Int]) val expected = Map("a" -> 1, "b" -> 2, "c" -> 3) - (m1 merge m2).entries should be(expected) - (m2 merge m1).entries should be(expected) + m1.merge(m2).entries should be(expected) + m2.merge(m1).entries should be(expected) LWWMap.empty.mergeDelta(m1.delta.get).mergeDelta(m2.delta.get).entries should be(expected) LWWMap.empty.mergeDelta(m2.delta.get).mergeDelta(m1.delta.get).entries should be(expected) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.resetDelta.remove(node1, "b") - (merged1 mergeDelta m3.delta.get).entries should be(Map("a" -> 1, "c" -> 3)) + merged1.mergeDelta(m3.delta.get).entries should be(Map("a" -> 1, "c" -> 3)) // but if there is a conflicting update the entry is not removed val m4 = merged1.resetDelta.put(node2, "b", 22, defaultClock[Int]) - (m3 mergeDelta m4.delta.get).entries should be(Map("a" -> 1, "b" -> 22, "c" -> 3)) + m3.mergeDelta(m4.delta.get).entries should be(Map("a" -> 1, "b" -> 22, "c" -> 3)) } "have unapply extractor" in { diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala index d29bbdc32e..9be4c5b8af 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LWWRegisterSpec.scala @@ -35,10 +35,10 @@ class LWWRegisterSpec extends WordSpec with Matchers { r1.timestamp should be(100) val r2 = r1.withValue(node2, "B", clock) r2.timestamp should be(101) - val m1 = r1 merge r2 + val m1 = r1.merge(r2) m1.value should be("B") m1.timestamp should be(101) - val m2 = r2 merge r1 + val m2 = r2.merge(r1) m2.value should be("B") m2.timestamp should be(101) } @@ 
-49,9 +49,9 @@ class LWWRegisterSpec extends WordSpec with Matchers { } val r1 = LWWRegister(node1, "A", clock) val r2 = LWWRegister(node2, "B", clock) - val m1 = r1 merge r2 + val m1 = r1.merge(r2) m1.value should be("A") - val m2 = r2 merge r1 + val m2 = r2.merge(r1) m2.value should be("A") } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala index e1380c0d28..87740abc0c 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LocalConcurrencySpec.scala @@ -37,14 +37,18 @@ object LocalConcurrencySpec { } } -class LocalConcurrencySpec(_system: ActorSystem) extends TestKit(_system) - with WordSpecLike with Matchers with BeforeAndAfterAll with ImplicitSender { +class LocalConcurrencySpec(_system: ActorSystem) + extends TestKit(_system) + with WordSpecLike + with Matchers + with BeforeAndAfterAll + with ImplicitSender { import LocalConcurrencySpec._ def this() { - this(ActorSystem( - "LocalConcurrencySpec", - ConfigFactory.parseString(""" + this( + ActorSystem("LocalConcurrencySpec", + ConfigFactory.parseString(""" akka.actor.provider = "cluster" akka.remote.netty.tcp.port=0 akka.remote.artery.canonical.port = 0 diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala index 04382fbcba..fb75a7a27b 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/LotsOfDataBot.scala @@ -30,9 +30,10 @@ object LotsOfDataBot { def startup(ports: Seq[String]): Unit = { ports.foreach { port => // Override the configuration of the port - val config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port). 
- withFallback(ConfigFactory.load( - ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString("akka.remote.netty.tcp.port=" + port) + .withFallback( + ConfigFactory.load(ConfigFactory.parseString(""" passive = off max-entries = 100000 akka.actor.provider = "cluster" @@ -107,7 +108,7 @@ class LotsOfDataBot extends Actor with ActorLogging { replicator ! Update(key, ORSet(), WriteLocal)(_ :+ s) } else { // remove - replicator ! Update(key, ORSet(), WriteLocal)(_ remove s) + replicator ! Update(key, ORSet(), WriteLocal)(_.remove(s)) } } @@ -136,4 +137,3 @@ class LotsOfDataBot extends Actor with ActorLogging { override def postStop(): Unit = tickTask.cancel() } - diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala index 7e5d56b870..0f9807418d 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMapSpec.scala @@ -82,25 +82,32 @@ class ORMapSpec extends WordSpec with Matchers { val m2 = ORMap().put(node2, "c", GSet() + "C") // merge both ways - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) merged1.entries.keySet should contain("a") merged1.entries.keySet should contain("b") merged1.entries.keySet should contain("c") - val merged2 = m2 merge m1 + val merged2 = m2.merge(m1) merged2.entries.keySet should contain("a") merged2.entries.keySet should contain("b") merged2.entries.keySet should contain("c") } "be able to have its entries correctly merged with another ORMap with overlapping entries" in { - val m1 = ORMap().put(node1, "a", GSet() + "A1").put(node1, "b", GSet() + "B1"). - remove(node1, "a").put(node1, "d", GSet() + "D1") - val m2 = ORMap().put(node2, "c", GSet() + "C2").put(node2, "a", GSet() + "A2"). 
- put(node2, "b", GSet() + "B2").remove(node2, "b").put(node2, "d", GSet() + "D2") + val m1 = ORMap() + .put(node1, "a", GSet() + "A1") + .put(node1, "b", GSet() + "B1") + .remove(node1, "a") + .put(node1, "d", GSet() + "D1") + val m2 = ORMap() + .put(node2, "c", GSet() + "C2") + .put(node2, "a", GSet() + "A2") + .put(node2, "b", GSet() + "B2") + .remove(node2, "b") + .put(node2, "d", GSet() + "D2") // merge both ways - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) merged1.entries.keySet should contain("a") val GSet(a1) = merged1.entries("a") a1 should be(Set("A2")) @@ -112,7 +119,7 @@ class ORMapSpec extends WordSpec with Matchers { val GSet(d1) = merged1.entries("d") d1 should be(Set("D1", "D2")) - val merged2 = m2 merge m1 + val merged2 = m2.merge(m1) merged2.entries.keySet should contain("a") val GSet(a2) = merged1.entries("a") a2 should be(Set("A2")) @@ -129,12 +136,12 @@ class ORMapSpec extends WordSpec with Matchers { val m1 = ORMap.empty.put(node1, "a", GSet.empty + "A").put(node1, "b", GSet.empty + "B") val m2 = ORMap.empty.put(node2, "c", GSet.empty + "C") - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.remove(node1, "b").put(node1, "b", GSet.empty + "B2") // same thing if only put is used // val m3 = merged1.put(node1, "b", GSet.empty + "B2") - val merged2 = merged1 merge m3 + val merged2 = merged1.merge(m3) merged2.entries("a").elements should be(Set("A")) // note that B is included, because GSet("B") is merged with GSet("B2") @@ -170,19 +177,19 @@ class ORMapSpec extends WordSpec with Matchers { val m1 = ORMap.empty.put(node1, "a", GSet.empty + "A").put(node1, "b", GSet.empty + "B") val m2 = ORMap.empty.put(node2, "c", GSet.empty + "C") - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.resetDelta.remove(node1, "b") val m4 = merged1.resetDelta.updated(node1, "b", GSet.empty[String])(_.add("B2")) - val merged2 = m3 merge m4 + val merged2 = m3.merge(m4) merged2.entries("a").elements should 
be(Set("A")) // note that B is included, because GSet("B") is merged with GSet("B2") merged2.entries("b").elements should be(Set("B", "B2")) merged2.entries("c").elements should be(Set("C")) - val merged3 = m3 mergeDelta m4.delta.get + val merged3 = m3.mergeDelta(m4.delta.get) merged3.entries("a").elements should be(Set("A")) // note that B is included, because GSet("B") is merged with GSet("B2") @@ -194,19 +201,19 @@ class ORMapSpec extends WordSpec with Matchers { val m1 = ORMap.empty.put(node1, "a", ORSet.empty.add(node1, "A")).put(node1, "b", ORSet.empty.add(node1, "B")) val m2 = ORMap.empty.put(node2, "c", ORSet.empty.add(node2, "C")) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.resetDelta.remove(node1, "b") val m4 = merged1.resetDelta.remove(node1, "b").updated(node1, "b", ORSet.empty[String])(_.add(node1, "B2")) - val merged2 = m3 merge m4 + val merged2 = m3.merge(m4) merged2.entries("a").elements should be(Set("A")) // note that B is not included, because it was removed in both timelines merged2.entries("b").elements should be(Set("B2")) merged2.entries("c").elements should be(Set("C")) - val merged3 = m3 mergeDelta m4.delta.get + val merged3 = m3.mergeDelta(m4.delta.get) merged3.entries("a").elements should be(Set("A")) // note that B is not included, because it was removed in both timelines @@ -218,19 +225,19 @@ class ORMapSpec extends WordSpec with Matchers { val m1 = ORMap.empty.put(node1, "a", ORSet.empty.add(node1, "A")).put(node1, "b", ORSet.empty.add(node1, "B")) val m2 = ORMap.empty.put(node2, "c", ORSet.empty.add(node2, "C")) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.resetDelta.remove(node1, "b") val m4 = merged1.resetDelta.remove(node2, "b").updated(node2, "b", ORSet.empty[String])(_.add(node2, "B2")) - val merged2 = m3 merge m4 + val merged2 = m3.merge(m4) merged2.entries("a").elements should be(Set("A")) // note that B is not included, because it was removed in both timelines 
merged2.entries("b").elements should be(Set("B2")) merged2.entries("c").elements should be(Set("C")) - val merged3 = m3 mergeDelta m4.delta.get + val merged3 = m3.mergeDelta(m4.delta.get) merged3.entries("a").elements should be(Set("A")) // note that B is not included, because it was removed in both timelines @@ -242,19 +249,19 @@ class ORMapSpec extends WordSpec with Matchers { val m1 = ORMap.empty.put(node1, "a", ORSet.empty.add(node1, "A")).put(node1, "b", ORSet.empty.add(node1, "B")) val m2 = ORMap.empty.put(node2, "c", ORSet.empty.add(node2, "C")) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.resetDelta.remove(node1, "b") val m4 = merged1.resetDelta.updated(node1, "b", ORSet.empty[String])(_.add(node1, "B2")) - val merged2 = m3 merge m4 + val merged2 = m3.merge(m4) merged2.entries("a").elements should be(Set("A")) // note that B is included, because ORSet("B") is merged with ORSet("B2") merged2.entries("b").elements should be(Set("B", "B2")) merged2.entries("c").elements should be(Set("C")) - val merged3 = m3 mergeDelta m4.delta.get + val merged3 = m3.mergeDelta(m4.delta.get) merged3.entries("a").elements should be(Set("A")) // note that B is included, because ORSet("B") is merged with ORSet("B2") @@ -266,19 +273,19 @@ class ORMapSpec extends WordSpec with Matchers { val m1 = ORMap.empty.put(node1, "a", GSet.empty + "A").put(node1, "b", GSet.empty + "B") val m2 = ORMap.empty.put(node2, "c", GSet.empty + "C") - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.resetDelta.remove(node1, "b") val m4 = merged1.resetDelta.put(node2, "b", GSet.empty + "B2") - val merged2 = m3 merge m4 + val merged2 = m3.merge(m4) merged2.entries("a").elements should be(Set("A")) // note that B is not included, because it was removed in both timelines merged2.entries("b").elements should be(Set("B2")) merged2.entries("c").elements should be(Set("C")) - val merged3 = m3 mergeDelta m4.delta.get + val merged3 = m3.mergeDelta(m4.delta.get) 
merged3.entries("a").elements should be(Set("A")) // note that B is not included, because it was removed in both timelines @@ -290,19 +297,21 @@ class ORMapSpec extends WordSpec with Matchers { val m1 = ORMap.empty.put(node1, "a", ORSet.empty.add(node1, "A")).put(node1, "b", ORSet.empty.add(node1, "B")) val m2 = ORMap.empty.put(node2, "b", ORSet.empty.add(node2, "B3")) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.resetDelta.remove(node1, "b") - val m4 = merged1.resetDelta.remove(node2, "b").updated(node2, "b", ORSet.empty[String])(_.add(node2, "B1")) + val m4 = merged1.resetDelta + .remove(node2, "b") + .updated(node2, "b", ORSet.empty[String])(_.add(node2, "B1")) .updated(node2, "b", ORSet.empty[String])(_.add(node2, "B2")) - val merged2 = m3 merge m4 + val merged2 = m3.merge(m4) merged2.entries("a").elements should be(Set("A")) // note that B is not included, because it was removed in both timelines merged2.entries("b").elements should be(Set("B1", "B2")) - val merged3 = m3 mergeDelta m4.delta.get + val merged3 = m3.mergeDelta(m4.delta.get) merged3.entries("a").elements should be(Set("A")) // note that B is not included, because it was removed in both timelines @@ -310,14 +319,17 @@ class ORMapSpec extends WordSpec with Matchers { } "not have anomalies for remove+updated scenario and deltas 7" in { - val m1 = ORMap.empty.put(node1, "a", ORSet.empty.add(node1, "A")) - .put(node1, "b", ORSet.empty.add(node1, "B1")).remove(node1, "b") + val m1 = ORMap.empty + .put(node1, "a", ORSet.empty.add(node1, "A")) + .put(node1, "b", ORSet.empty.add(node1, "B1")) + .remove(node1, "b") val m2 = ORMap.empty.put(node1, "a", ORSet.empty.add(node1, "A")).put(node1, "b", ORSet.empty.add(node1, "B2")) val m2d = m2.resetDelta.remove(node1, "b") - val m2u = m2.resetDelta.updated(node1, "b", ORSet.empty[String])(_.add(node1, "B3")) + val m2u = m2.resetDelta + .updated(node1, "b", ORSet.empty[String])(_.add(node1, "B3")) .updated(node2, "b", 
ORSet.empty[String])(_.add(node2, "B4")) - val merged1 = (m1 merge m2d) mergeDelta m2u.delta.get + val merged1 = (m1.merge(m2d)).mergeDelta(m2u.delta.get) merged1.entries("a").elements should be(Set("A")) // note that B1 is lost as it was added and removed earlier in timeline than B2 @@ -325,22 +337,24 @@ class ORMapSpec extends WordSpec with Matchers { } "not have anomalies for remove+updated scenario and deltas 8" in { - val m1 = ORMap.empty.put(node1, "a", GSet.empty + "A") - .put(node1, "b", GSet.empty + "B").put(node2, "b", GSet.empty + "B") + val m1 = ORMap.empty + .put(node1, "a", GSet.empty + "A") + .put(node1, "b", GSet.empty + "B") + .put(node2, "b", GSet.empty + "B") val m2 = ORMap.empty.put(node2, "c", GSet.empty + "C") - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.resetDelta.remove(node1, "b").remove(node2, "b") val m4 = merged1.resetDelta.put(node2, "b", GSet.empty + "B2").put(node2, "b", GSet.empty + "B3") - val merged2 = m3 merge m4 + val merged2 = m3.merge(m4) merged2.entries("a").elements should be(Set("A")) merged2.entries("b").elements should be(Set("B3")) merged2.entries("c").elements should be(Set("C")) - val merged3 = (merged1 mergeDelta m3.delta.get) mergeDelta m4.delta.get + val merged3 = (merged1.mergeDelta(m3.delta.get)).mergeDelta(m4.delta.get) merged3.entries("a").elements should be(Set("A")) merged3.entries("b").elements should be(Set("B3")) @@ -348,23 +362,26 @@ class ORMapSpec extends WordSpec with Matchers { } "not have anomalies for remove+updated scenario and deltas 9" in { - val m1 = ORMap.empty.put(node1, "a", GSet.empty + "A") - .put(node1, "b", GSet.empty + "B").put(node2, "b", GSet.empty + "B") + val m1 = ORMap.empty + .put(node1, "a", GSet.empty + "A") + .put(node1, "b", GSet.empty + "B") + .put(node2, "b", GSet.empty + "B") val m2 = ORMap.empty.put(node2, "c", GSet.empty + "C") - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.resetDelta.remove(node1, "b").remove(node2, 
"b") - val m4 = merged1.resetDelta.updated(node2, "b", GSet.empty[String])(_.add("B2")) + val m4 = merged1.resetDelta + .updated(node2, "b", GSet.empty[String])(_.add("B2")) .updated(node2, "b", GSet.empty[String])(_.add("B3")) - val merged2 = m3 merge m4 + val merged2 = m3.merge(m4) merged2.entries("a").elements should be(Set("A")) merged2.entries("b").elements should be(Set("B2", "B3")) merged2.entries("c").elements should be(Set("C")) - val merged3 = (merged1 mergeDelta m3.delta.get) mergeDelta m4.delta.get + val merged3 = (merged1.mergeDelta(m3.delta.get)).mergeDelta(m4.delta.get) merged3.entries("a").elements should be(Set("A")) merged3.entries("b").elements should be(Set("B2", "B3")) @@ -372,18 +389,17 @@ class ORMapSpec extends WordSpec with Matchers { } "not have anomalies for remove+updated scenario and deltas 10" in { - val m1 = ORMap.empty.put(node2, "a", GSet.empty + "A") - .put(node2, "b", GSet.empty + "B") + val m1 = ORMap.empty.put(node2, "a", GSet.empty + "A").put(node2, "b", GSet.empty + "B") val m3 = m1.resetDelta.remove(node2, "b") val m4 = m3.resetDelta.put(node2, "b", GSet.empty + "B2").updated(node2, "b", GSet.empty[String])(_.add("B3")) - val merged2 = m3 merge m4 + val merged2 = m3.merge(m4) merged2.entries("a").elements should be(Set("A")) merged2.entries("b").elements should be(Set("B2", "B3")) - val merged3 = m3 mergeDelta m4.delta.get + val merged3 = m3.mergeDelta(m4.delta.get) merged3.entries("a").elements should be(Set("A")) merged3.entries("b").elements should be(Set("B2", "B3")) @@ -394,11 +410,11 @@ class ORMapSpec extends WordSpec with Matchers { val m2 = ORMap.empty.put(node2, "a", GSet.empty[String]).remove(node2, "a") - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) merged1.entries("a").elements should be(Set("A")) - val merged2 = m1 mergeDelta m2.delta.get + val merged2 = m1.mergeDelta(m2.delta.get) merged2.entries("a").elements should be(Set("A")) } @@ -411,9 +427,9 @@ class ORMapSpec extends WordSpec with Matchers { 
val m2 = ORMap.empty.put(node2, "c", ORSet.empty.add(node2, "C")) // m1 - node1 gets the update from m2 - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) // m2 - node2 gets the update from m1 - val merged2 = m2 merge m1 + val merged2 = m2.merge(m1) // RACE CONDITION ahead! val m3 = merged1.resetDelta.remove(node1, "b") @@ -422,9 +438,9 @@ class ORMapSpec extends WordSpec with Matchers { // and the update is propagated before the update from node1 is merged val m4 = merged2.resetDelta.updated(node2, "b", ORSet.empty[String])(_.add(node2, "B2")) // and later merged on node1 - val merged3 = m3 merge m4 + val merged3 = m3.merge(m4) // and the other way round... - val merged4 = m4 merge m3 + val merged4 = m4.merge(m3) // result - the element "B" is kept on both sides... merged3.entries("a").elements should be(Set("A")) @@ -437,9 +453,9 @@ class ORMapSpec extends WordSpec with Matchers { // but if the timing was slightly different, so that the update from node1 // would get merged just before update on node2: - val merged5 = (m2 merge m3).resetDelta.updated(node2, "b", ORSet.empty[String])(_.add(node2, "B2")) + val merged5 = m2.merge(m3).resetDelta.updated(node2, "b", ORSet.empty[String])(_.add(node2, "B2")) // the update propagated ... and merged on node1: - val merged6 = m3 merge merged5 + val merged6 = m3.merge(merged5) // then the outcome is different... because the vvector of value("b") was lost... 
merged5.entries("a").elements should be(Set("A")) @@ -457,12 +473,12 @@ class ORMapSpec extends WordSpec with Matchers { val m1 = ORMap.empty.put(node1, "a", GSet.empty + "A").put(node2, "b", GSet.empty + "B") val m2 = m1.resetDelta.put(node2, "b", GSet.empty + "B2").updated(node2, "b", GSet.empty[String])(_.add("B3")) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) merged1.entries("a").elements should be(Set("A")) merged1.entries("b").elements should be(Set("B", "B2", "B3")) - val merged2 = m1 mergeDelta m2.delta.get + val merged2 = m1.mergeDelta(m2.delta.get) merged2.entries("a").elements should be(Set("A")) merged2.entries("b").elements should be(Set("B", "B2", "B3")) @@ -470,55 +486,63 @@ class ORMapSpec extends WordSpec with Matchers { val m3 = ORMap.empty.put(node1, "a", GSet.empty + "A").put(node2, "b", GSet.empty + "B") val m4 = m3.resetDelta.put(node2, "b", GSet.empty + "B2").put(node2, "b", GSet.empty + "B3") - val merged3 = m3 merge m4 + val merged3 = m3.merge(m4) merged3.entries("a").elements should be(Set("A")) merged3.entries("b").elements should be(Set("B", "B3")) - val merged4 = m3 mergeDelta m4.delta.get + val merged4 = m3.mergeDelta(m4.delta.get) merged4.entries("a").elements should be(Set("A")) merged4.entries("b").elements should be(Set("B", "B3")) val m5 = ORMap.empty.put(node1, "a", GSet.empty + "A").put(node2, "b", GSet.empty + "B") - val m6 = m5.resetDelta.put(node2, "b", GSet.empty + "B2").updated(node2, "b", GSet.empty[String])(_.add("B3")) + val m6 = m5.resetDelta + .put(node2, "b", GSet.empty + "B2") + .updated(node2, "b", GSet.empty[String])(_.add("B3")) .updated(node2, "b", GSet.empty[String])(_.add("B4")) - val merged5 = m5 merge m6 + val merged5 = m5.merge(m6) merged5.entries("a").elements should be(Set("A")) merged5.entries("b").elements should be(Set("B", "B2", "B3", "B4")) - val merged6 = m5 mergeDelta m6.delta.get + val merged6 = m5.mergeDelta(m6.delta.get) merged6.entries("a").elements should be(Set("A")) 
merged6.entries("b").elements should be(Set("B", "B2", "B3", "B4")) val m7 = ORMap.empty.put(node1, "a", GSet.empty + "A").put(node2, "b", GSet.empty + "B") - val m8 = m7.resetDelta.put(node2, "b", GSet.empty + "B2").put(node2, "d", GSet.empty + "D").put(node2, "b", GSet.empty + "B3") + val m8 = m7.resetDelta + .put(node2, "b", GSet.empty + "B2") + .put(node2, "d", GSet.empty + "D") + .put(node2, "b", GSet.empty + "B3") - val merged7 = m7 merge m8 + val merged7 = m7.merge(m8) merged7.entries("a").elements should be(Set("A")) merged7.entries("b").elements should be(Set("B", "B3")) merged7.entries("d").elements should be(Set("D")) - val merged8 = m7 mergeDelta m8.delta.get + val merged8 = m7.mergeDelta(m8.delta.get) merged8.entries("a").elements should be(Set("A")) merged8.entries("b").elements should be(Set("B", "B3")) merged8.entries("d").elements should be(Set("D")) val m9 = ORMap.empty.put(node1, "a", GSet.empty + "A").put(node2, "b", GSet.empty + "B") - val m10 = m9.resetDelta.put(node2, "b", GSet.empty + "B2").put(node2, "d", GSet.empty + "D") - .remove(node2, "d").put(node2, "b", GSet.empty + "B3") + val m10 = m9.resetDelta + .put(node2, "b", GSet.empty + "B2") + .put(node2, "d", GSet.empty + "D") + .remove(node2, "d") + .put(node2, "b", GSet.empty + "B3") - val merged9 = m9 merge m10 + val merged9 = m9.merge(m10) merged9.entries("a").elements should be(Set("A")) merged9.entries("b").elements should be(Set("B", "B3")) - val merged10 = m9 mergeDelta m10.delta.get + val merged10 = m9.mergeDelta(m10.delta.get) merged10.entries("a").elements should be(Set("A")) merged10.entries("b").elements should be(Set("B", "B3")) @@ -543,7 +567,9 @@ class ORMapSpec extends WordSpec with Matchers { "work with aggregated deltas and updated for GSet elements type" in { val m1 = ORMap.empty.put(node1, "a", GSet.empty + "A") - val m2 = m1.resetDelta.updated(node1, "a", GSet.empty[String])(_.add("B")).updated(node1, "a", GSet.empty[String])(_.add("C")) + val m2 = m1.resetDelta + 
.updated(node1, "a", GSet.empty[String])(_.add("B")) + .updated(node1, "a", GSet.empty[String])(_.add("C")) val m3 = ORMap().mergeDelta(m1.delta.get).mergeDelta(m2.delta.get) val GSet(d3) = m3.entries("a") d3 should be(Set("A", "B", "C")) @@ -584,70 +610,76 @@ class ORMapSpec extends WordSpec with Matchers { } "be able to update entry" in { - val m1 = ORMap.empty[String, ORSet[String]].put(node1, "a", ORSet.empty.add(node1, "A")) + val m1 = ORMap + .empty[String, ORSet[String]] + .put(node1, "a", ORSet.empty.add(node1, "A")) .put(node1, "b", ORSet.empty.add(node1, "B01").add(node1, "B02").add(node1, "B03")) val m2 = ORMap.empty[String, ORSet[String]].put(node2, "c", ORSet.empty.add(node2, "C")) - val merged1: ORMap[String, ORSet[String]] = m1 merge m2 + val merged1: ORMap[String, ORSet[String]] = m1.merge(m2) val m3 = merged1.updated(node1, "b", ORSet.empty[String])(_.clear(node1).add(node1, "B2")) - val merged2 = merged1 merge m3 + val merged2 = merged1.merge(m3) merged2.entries("a").elements should be(Set("A")) merged2.entries("b").elements should be(Set("B2")) merged2.entries("c").elements should be(Set("C")) val m4 = merged1.updated(node2, "b", ORSet.empty[String])(_.add(node2, "B3")) - val merged3 = m3 merge m4 + val merged3 = m3.merge(m4) merged3.entries("a").elements should be(Set("A")) merged3.entries("b").elements should be(Set("B2", "B3")) merged3.entries("c").elements should be(Set("C")) } "be able to update ORSet entry with remove+put" in { - val m1 = ORMap.empty[String, ORSet[String]].put(node1, "a", ORSet.empty.add(node1, "A01")) + val m1 = ORMap + .empty[String, ORSet[String]] + .put(node1, "a", ORSet.empty.add(node1, "A01")) .updated(node1, "a", ORSet.empty[String])(_.add(node1, "A02")) .updated(node1, "a", ORSet.empty[String])(_.add(node1, "A03")) .put(node1, "b", ORSet.empty.add(node1, "B01").add(node1, "B02").add(node1, "B03")) val m2 = ORMap.empty[String, ORSet[String]].put(node2, "c", ORSet.empty.add(node2, "C")) - val merged1 = m1 merge m2 + 
val merged1 = m1.merge(m2) // note that remove + put work because the new VersionVector version is incremented // from a global counter val m3 = merged1.remove(node1, "b").put(node1, "b", ORSet.empty.add(node1, "B2")) - val merged2 = merged1 merge m3 + val merged2 = merged1.merge(m3) merged2.entries("a").elements should be(Set("A01", "A02", "A03")) merged2.entries("b").elements should be(Set("B2")) merged2.entries("c").elements should be(Set("C")) val m4 = merged1.updated(node2, "b", ORSet.empty[String])(_.add(node2, "B3")) - val merged3 = m3 merge m4 + val merged3 = m3.merge(m4) merged3.entries("a").elements should be(Set("A01", "A02", "A03")) merged3.entries("b").elements should be(Set("B2", "B3")) merged3.entries("c").elements should be(Set("C")) } "be able to update ORSet entry with remove -> merge -> put" in { - val m1 = ORMap.empty.put(node1, "a", ORSet.empty.add(node1, "A")) + val m1 = ORMap.empty + .put(node1, "a", ORSet.empty.add(node1, "A")) .put(node1, "b", ORSet.empty.add(node1, "B01").add(node1, "B02").add(node1, "B03")) val m2 = ORMap.empty.put(node2, "c", ORSet.empty.add(node2, "C")) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.remove(node1, "b") - val merged2 = merged1 merge m3 + val merged2 = merged1.merge(m3) merged2.entries("a").elements should be(Set("A")) merged2.contains("b") should be(false) merged2.entries("c").elements should be(Set("C")) val m4 = merged2.put(node1, "b", ORSet.empty.add(node1, "B2")) - val m5 = merged2.updated(node2, "c", ORSet.empty[String])(_.add(node2, "C2")) + val m5 = merged2 + .updated(node2, "c", ORSet.empty[String])(_.add(node2, "C2")) .put(node2, "b", ORSet.empty.add(node2, "B3")) - val merged3 = m5 merge m4 + val merged3 = m5.merge(m4) merged3.entries("a").elements should be(Set("A")) merged3.entries("b").elements should be(Set("B2", "B3")) merged3.entries("c").elements should be(Set("C", "C2")) diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala 
b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala index 66e326d20a..3d0068588d 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORMultiMapSpec.scala @@ -46,12 +46,8 @@ class ORMultiMapSpec extends WordSpec with Matchers { // more to document that the concurrent removal from the set may be lost // than asserting anything - merged1.entries should be(Map( - "a" -> Set("A", "B") - )) - merged2.entries should be(Map( - "a" -> Set("A", "B") - )) + merged1.entries should be(Map("a" -> Set("A", "B"))) + merged2.entries should be(Map("a" -> Set("A", "B"))) } "be able to have its entries correctly merged with another ORMultiMap with other entries" in { @@ -60,15 +56,12 @@ class ORMultiMapSpec extends WordSpec with Matchers { // merge both ways - val expectedMerge = Map( - "a" -> Set("A"), - "b" -> Set("B"), - "c" -> Set("C")) + val expectedMerge = Map("a" -> Set("A"), "b" -> Set("B"), "c" -> Set("C")) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) merged1.entries should be(expectedMerge) - val merged2 = m2 merge m1 + val merged2 = m2.merge(m1) merged2.entries should be(expectedMerge) } @@ -87,63 +80,58 @@ class ORMultiMapSpec extends WordSpec with Matchers { // merge both ways - val expectedMerged = Map( - "a" -> Set("A2"), - "b" -> Set("B1"), - "c" -> Set("C2"), - "d" -> Set("D1", "D2")) + val expectedMerged = Map("a" -> Set("A2"), "b" -> Set("B1"), "c" -> Set("C2"), "d" -> Set("D1", "D2")) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) merged1.entries should be(expectedMerged) - val merged2 = m2 merge m1 + val merged2 = m2.merge(m1) merged2.entries should be(expectedMerged) - val merged3 = m1 mergeDelta m2.delta.get + val merged3 = m1.mergeDelta(m2.delta.get) merged3.entries should be(expectedMerged) - val merged4 = m2 mergeDelta m1.delta.get + val merged4 = m2.mergeDelta(m1.delta.get) merged4.entries should 
be(expectedMerged) } } "be able to have its entries correctly merged with another ORMultiMap with overlapping entries 2" in { - val m1 = ORMultiMap() - .addBinding(node1, "b", "B1") - val m2 = ORMultiMap() - .addBinding(node2, "b", "B2") - .remove(node2, "b") + val m1 = ORMultiMap().addBinding(node1, "b", "B1") + val m2 = ORMultiMap().addBinding(node2, "b", "B2").remove(node2, "b") // merge both ways - val expectedMerged = Map( - "b" -> Set("B1")) + val expectedMerged = Map("b" -> Set("B1")) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) merged1.entries should be(expectedMerged) - val merged2 = m2 merge m1 + val merged2 = m2.merge(m1) merged2.entries should be(expectedMerged) - val merged3 = m1 mergeDelta m2.delta.get + val merged3 = m1.mergeDelta(m2.delta.get) merged3.entries should be(expectedMerged) - val merged4 = m2 mergeDelta m1.delta.get + val merged4 = m2.mergeDelta(m1.delta.get) merged4.entries should be(expectedMerged) } "not have anomalies for remove+updated scenario and deltas" in { val m2a = ORMultiMap.empty[String, String].addBinding(node1, "q", "Q").removeBinding(node1, "q", "Q") - val m1 = ORMultiMap.empty[String, String].addBinding(node1, "z", "Z").addBinding(node2, "x", "X") + val m1 = ORMultiMap + .empty[String, String] + .addBinding(node1, "z", "Z") + .addBinding(node2, "x", "X") .removeBinding(node1, "z", "Z") val m2 = m2a.resetDelta.removeBinding(node2, "a", "A") - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) merged1.contains("a") should be(false) - val merged2 = m1 mergeDelta m2.delta.get + val merged2 = m1.mergeDelta(m2.delta.get) merged2.contains("a") should be(false) } @@ -156,9 +144,7 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m2 = m.put(node1, "a", a - "A1") - val expectedMerged = Map( - "a" -> Set("A2"), - "b" -> Set("B1")) + val expectedMerged = Map("a" -> Set("A2"), "b" -> Set("B1")) m2.entries should be(expectedMerged) } @@ -179,36 +165,36 @@ class ORMultiMapSpec extends WordSpec with Matchers { 
val m1 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B")) val m2 = ORMultiMap.emptyWithValueDeltas[String, String].put(node2, "c", Set("C")) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.resetDelta.remove(node1, "b") val m4 = m3.resetDelta.addBinding(node1, "b", "B2") - val merged2 = m3 merge m4 + val merged2 = m3.merge(m4) merged2.entries("a") should be(Set("A")) merged2.entries("b") should be(Set("B2")) merged2.entries("c") should be(Set("C")) - val merged3 = m3 mergeDelta m4.delta.get + val merged3 = m3.mergeDelta(m4.delta.get) merged3.entries("a") should be(Set("A")) merged3.entries("b") should be(Set("B2")) merged3.entries("c") should be(Set("C")) - val merged4 = merged1 merge m3 merge m4 + val merged4 = merged1.merge(m3).merge(m4) merged4.entries("a") should be(Set("A")) merged4.entries("b") should be(Set("B2")) merged4.entries("c") should be(Set("C")) - val merged5 = merged1 mergeDelta m3.delta.get mergeDelta m4.delta.get + val merged5 = merged1.mergeDelta(m3.delta.get).mergeDelta(m4.delta.get) merged5.entries("a") should be(Set("A")) merged5.entries("b") should be(Set("B2")) merged5.entries("c") should be(Set("C")) - val merged6 = merged1 mergeDelta m3.delta.get.merge(m4.delta.get) + val merged6 = merged1.mergeDelta(m3.delta.get.merge(m4.delta.get)) merged6.entries("a") should be(Set("A")) merged6.entries("b") should be(Set("B2")) @@ -221,9 +207,9 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m2 = ORMultiMap.emptyWithValueDeltas[String, String].put(node2, "c", Set("C")) // m1 - node1 gets the update from m2 - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) // m2 - node2 gets the update from m1 - val merged2 = m2 merge m1 + val merged2 = m2.merge(m1) // no race condition val m3 = merged1.resetDelta.remove(node1, "b") @@ -232,9 +218,9 @@ class ORMultiMapSpec extends WordSpec with Matchers { // and the update is propagated before the update from node1 is merged 
val m4 = merged2.resetDelta.addBinding(node2, "b", "B2") // and later merged on node1 - val merged3 = m3 merge m4 + val merged3 = m3.merge(m4) // and the other way round... - val merged4 = m4 merge m3 + val merged4 = m4.merge(m3) // result - the element "B" is kept on both sides... merged3.entries("a") should be(Set("A")) @@ -247,9 +233,9 @@ class ORMultiMapSpec extends WordSpec with Matchers { // but if the timing was slightly different, so that the update from node1 // would get merged just before update on node2: - val merged5 = (m2 merge m3).resetDelta.addBinding(node2, "b", "B2") + val merged5 = m2.merge(m3).resetDelta.addBinding(node2, "b", "B2") // the update propagated ... and merged on node1: - val merged6 = m3 merge merged5 + val merged6 = m3.merge(merged5) // then the outcome would be the same... merged5.entries("a") should be(Set("A")) @@ -265,12 +251,12 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m1 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B")) val m2 = m1.resetDelta.put(node2, "b", Set("B2")).addBinding(node2, "b", "B3") - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) merged1.entries("a") should be(Set("A")) merged1.entries("b") should be(Set("B2", "B3")) - val merged2 = m1 mergeDelta m2.delta.get + val merged2 = m1.mergeDelta(m2.delta.get) merged2.entries("a") should be(Set("A")) merged2.entries("b") should be(Set("B2", "B3")) @@ -278,12 +264,12 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m3 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B")) val m4 = m3.resetDelta.put(node2, "b", Set("B2")).put(node2, "b", Set("B3")) - val merged3 = m3 merge m4 + val merged3 = m3.merge(m4) merged3.entries("a") should be(Set("A")) merged3.entries("b") should be(Set("B3")) - val merged4 = m3 mergeDelta m4.delta.get + val merged4 = m3.mergeDelta(m4.delta.get) merged4.entries("a") should be(Set("A")) merged4.entries("b") 
should be(Set("B3")) @@ -291,12 +277,12 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m5 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B")) val m6 = m5.resetDelta.put(node2, "b", Set("B2")).addBinding(node2, "b", "B3").addBinding(node2, "b", "B4") - val merged5 = m5 merge m6 + val merged5 = m5.merge(m6) merged5.entries("a") should be(Set("A")) merged5.entries("b") should be(Set("B2", "B3", "B4")) - val merged6 = m5 mergeDelta m6.delta.get + val merged6 = m5.mergeDelta(m6.delta.get) merged6.entries("a") should be(Set("A")) merged6.entries("b") should be(Set("B2", "B3", "B4")) @@ -304,13 +290,13 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m7 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B")) val m8 = m7.resetDelta.put(node2, "d", Set("D")).addBinding(node2, "b", "B3").put(node2, "b", Set("B4")) - val merged7 = m7 merge m8 + val merged7 = m7.merge(m8) merged7.entries("a") should be(Set("A")) merged7.entries("b") should be(Set("B4")) merged7.entries("d") should be(Set("D")) - val merged8 = m7 mergeDelta m8.delta.get + val merged8 = m7.mergeDelta(m8.delta.get) merged8.entries("a") should be(Set("A")) merged8.entries("b") should be(Set("B4")) @@ -319,55 +305,64 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m9 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B")) val m10 = m9.resetDelta.addBinding(node2, "b", "B3").addBinding(node2, "b", "B4") - val merged9 = m9 merge m10 + val merged9 = m9.merge(m10) merged9.entries("a") should be(Set("A")) merged9.entries("b") should be(Set("B", "B3", "B4")) - val merged10 = m9 mergeDelta m10.delta.get + val merged10 = m9.mergeDelta(m10.delta.get) merged10.entries("a") should be(Set("A")) merged10.entries("b") should be(Set("B", "B3", "B4")) - val m11 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A")).put(node1, "b", 
Set("B", "B1")) + val m11 = ORMultiMap + .emptyWithValueDeltas[String, String] + .put(node1, "a", Set("A")) + .put(node1, "b", Set("B", "B1")) .remove(node1, "b") val m12 = m11.resetDelta.addBinding(node2, "b", "B2").addBinding(node2, "b", "B3") - val merged11 = m11 merge m12 + val merged11 = m11.merge(m12) merged11.entries("a") should be(Set("A")) merged11.entries("b") should be(Set("B2", "B3")) - val merged12 = m11 mergeDelta m12.delta.get + val merged12 = m11.mergeDelta(m12.delta.get) merged12.entries("a") should be(Set("A")) merged12.entries("b") should be(Set("B2", "B3")) - val m13 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B", "B1")) + val m13 = ORMultiMap + .emptyWithValueDeltas[String, String] + .put(node1, "a", Set("A")) + .put(node1, "b", Set("B", "B1")) .remove(node1, "b") val m14 = m13.resetDelta.addBinding(node2, "b", "B2").put(node2, "b", Set("B3")) - val merged13 = m13 merge m14 + val merged13 = m13.merge(m14) merged13.entries("a") should be(Set("A")) merged13.entries("b") should be(Set("B3")) - val merged14 = m13 mergeDelta m14.delta.get + val merged14 = m13.mergeDelta(m14.delta.get) merged14.entries("a") should be(Set("A")) merged14.entries("b") should be(Set("B3")) - val m15 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B", "B1")) + val m15 = ORMultiMap + .emptyWithValueDeltas[String, String] + .put(node1, "a", Set("A")) + .put(node1, "b", Set("B", "B1")) .put(node1, "c", Set("C")) val m16 = m15.resetDelta.addBinding(node2, "b", "B2").addBinding(node2, "c", "C1") - val merged15 = m15 merge m16 + val merged15 = m15.merge(m16) merged15.entries("a") should be(Set("A")) merged15.entries("b") should be(Set("B", "B1", "B2")) merged15.entries("c") should be(Set("C", "C1")) - val merged16 = m15 mergeDelta m16.delta.get + val merged16 = m15.mergeDelta(m16.delta.get) merged16.entries("a") should be(Set("A")) merged16.entries("b") should be(Set("B", "B1", 
"B2")) @@ -378,12 +373,12 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m18 = m17.resetDelta.addBinding(node2, "b", "B2") val m19 = ORMultiMap.emptyWithValueDeltas[String, String].resetDelta.put(node2, "b", Set("B3")) - val merged17 = m17 merge m18 merge m19 + val merged17 = m17.merge(m18).merge(m19) merged17.entries("a") should be(Set("A")) merged17.entries("b") should be(Set("B", "B1", "B3")) - val merged18 = m17 mergeDelta m18.delta.get.merge(m19.delta.get) + val merged18 = m17.mergeDelta(m18.delta.get.merge(m19.delta.get)) merged18.entries("a") should be(Set("A")) merged18.entries("b") should be(Set("B", "B1", "B3")) @@ -393,12 +388,12 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m1 = ORMultiMap.empty[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B")) val m2 = m1.resetDelta.put(node2, "b", Set("B2")).addBinding(node2, "b", "B3") - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) merged1.entries("a") should be(Set("A")) merged1.entries("b") should be(Set("B2", "B3")) - val merged2 = m1 mergeDelta m2.delta.get + val merged2 = m1.mergeDelta(m2.delta.get) merged2.entries("a") should be(Set("A")) merged2.entries("b") should be(Set("B2", "B3")) @@ -406,12 +401,12 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m3 = ORMultiMap.empty[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B")) val m4 = m3.resetDelta.put(node2, "b", Set("B2")).put(node2, "b", Set("B3")) - val merged3 = m3 merge m4 + val merged3 = m3.merge(m4) merged3.entries("a") should be(Set("A")) merged3.entries("b") should be(Set("B3")) - val merged4 = m3 mergeDelta m4.delta.get + val merged4 = m3.mergeDelta(m4.delta.get) merged4.entries("a") should be(Set("A")) merged4.entries("b") should be(Set("B3")) @@ -419,12 +414,12 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m5 = ORMultiMap.empty[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B")) val m6 = m5.resetDelta.put(node2, "b", 
Set("B2")).addBinding(node2, "b", "B3").addBinding(node2, "b", "B4") - val merged5 = m5 merge m6 + val merged5 = m5.merge(m6) merged5.entries("a") should be(Set("A")) merged5.entries("b") should be(Set("B2", "B3", "B4")) - val merged6 = m5 mergeDelta m6.delta.get + val merged6 = m5.mergeDelta(m6.delta.get) merged6.entries("a") should be(Set("A")) merged6.entries("b") should be(Set("B2", "B3", "B4")) @@ -432,13 +427,13 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m7 = ORMultiMap.empty[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B")) val m8 = m7.resetDelta.put(node2, "d", Set("D")).addBinding(node2, "b", "B3").put(node2, "b", Set("B4")) - val merged7 = m7 merge m8 + val merged7 = m7.merge(m8) merged7.entries("a") should be(Set("A")) merged7.entries("b") should be(Set("B4")) merged7.entries("d") should be(Set("D")) - val merged8 = m7 mergeDelta m8.delta.get + val merged8 = m7.mergeDelta(m8.delta.get) merged8.entries("a") should be(Set("A")) merged8.entries("b") should be(Set("B4")) @@ -447,26 +442,26 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m9 = ORMultiMap.empty[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B")) val m10 = m9.resetDelta.addBinding(node2, "b", "B3").addBinding(node2, "b", "B4") - val merged9 = m9 merge m10 + val merged9 = m9.merge(m10) merged9.entries("a") should be(Set("A")) merged9.entries("b") should be(Set("B", "B3", "B4")) - val merged10 = m9 mergeDelta m10.delta.get + val merged10 = m9.mergeDelta(m10.delta.get) merged10.entries("a") should be(Set("A")) merged10.entries("b") should be(Set("B", "B3", "B4")) - val m11 = ORMultiMap.empty[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B", "B1")) - .remove(node1, "b") + val m11 = + ORMultiMap.empty[String, String].put(node1, "a", Set("A")).put(node1, "b", Set("B", "B1")).remove(node1, "b") val m12 = ORMultiMap.empty[String, String].addBinding(node2, "b", "B2").addBinding(node2, "b", "B3") - val merged11 = m11 merge 
m12 + val merged11 = m11.merge(m12) merged11.entries("a") should be(Set("A")) merged11.entries("b") should be(Set("B2", "B3")) - val merged12 = m11 mergeDelta m12.delta.get + val merged12 = m11.mergeDelta(m12.delta.get) merged12.entries("a") should be(Set("A")) merged12.entries("b") should be(Set("B2", "B3")) @@ -488,13 +483,15 @@ class ORMultiMapSpec extends WordSpec with Matchers { val m3 = m1.mergeDelta(m2.delta.get) val m4 = m1.merge(m2) - m3.underlying.values.contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the mergeDelta - m4.underlying.values.contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the merge + m3.underlying.values + .contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the mergeDelta + m4.underlying.values + .contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the merge val m5 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A1")) - (m3 mergeDelta m5.delta.get).entries("a") should ===(Set("A1")) - (m4 mergeDelta m5.delta.get).entries("a") should ===(Set("A1")) - (m4 merge m5).entries("a") should ===(Set("A1")) + m3.mergeDelta(m5.delta.get).entries("a") should ===(Set("A1")) + m4.mergeDelta(m5.delta.get).entries("a") should ===(Set("A1")) + m4.merge(m5).entries("a") should ===(Set("A1")) // addBinding - add a binding for a certain value - no tombstone is created // this operation works through "updated" call of the underlying ORMap, that is not exposed @@ -510,13 +507,15 @@ class ORMultiMapSpec extends WordSpec with Matchers { val um3 = um1.mergeDelta(um2.delta.get) val um4 = um1.merge(um2) - um3.underlying.values.contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the mergeDelta - um4.underlying.values.contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the merge + um3.underlying.values + 
.contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the mergeDelta + um4.underlying.values + .contains("a") should be(false) // tombstone for 'a' has been optimized away at the end of the merge val um5 = ORMultiMap.emptyWithValueDeltas[String, String].addBinding(node1, "a", "A1") - (um3 mergeDelta um5.delta.get).entries("a") should ===(Set("A1")) - (um4 mergeDelta um5.delta.get).entries("a") should ===(Set("A1")) - (um4 merge um5).entries("a") should ===(Set("A1")) + um3.mergeDelta(um5.delta.get).entries("a") should ===(Set("A1")) + um4.mergeDelta(um5.delta.get).entries("a") should ===(Set("A1")) + um4.merge(um5).entries("a") should ===(Set("A1")) // replaceBinding - that would first addBinding for new binding and then removeBinding for old binding // so no tombstone would be created @@ -528,15 +527,22 @@ class ORMultiMapSpec extends WordSpec with Matchers { // without previous delta containing 'clear' or 'put' operation setting the tombstone at Set() // the example shown below cannot happen in practice - val tm1 = new ORMultiMap(ORMultiMap.emptyWithValueDeltas[String, String].addBinding(node1, "a", "A").underlying.removeKey(node1, "a"), true) + val tm1 = new ORMultiMap( + ORMultiMap.emptyWithValueDeltas[String, String].addBinding(node1, "a", "A").underlying.removeKey(node1, "a"), + true) tm1.underlying.values("a").elements should ===(Set("A")) // tombstone tm1.addBinding(node1, "a", "A1").entries("a") should be(Set("A", "A1")) - val tm2 = ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A")).resetDelta.addBinding(node1, "a", "A1") + val tm2 = + ORMultiMap.emptyWithValueDeltas[String, String].put(node1, "a", Set("A")).resetDelta.addBinding(node1, "a", "A1") tm1.mergeDelta(tm2.delta.get).entries("a") should be(Set("A", "A1")) tm1.merge(tm2).entries("a") should be(Set("A", "A1")) - val tm3 = new ORMultiMap(ORMultiMap.emptyWithValueDeltas[String, String].addBinding(node1, "a", "A").underlying.remove(node1, 
"a"), true) + val tm3 = new ORMultiMap( + ORMultiMap.emptyWithValueDeltas[String, String].addBinding(node1, "a", "A").underlying.remove(node1, "a"), + true) tm3.underlying.contains("a") should ===(false) // no tombstone, because remove not removeKey - tm3.mergeDelta(tm2.delta.get).entries should ===(Map.empty[String, String]) // no tombstone - update delta could not be applied + tm3 + .mergeDelta(tm2.delta.get) + .entries should ===(Map.empty[String, String]) // no tombstone - update delta could not be applied tm3.merge(tm2).entries should ===(Map.empty[String, String]) // The only valid value for tombstone created by means of either API call or application of delta propagation is Set() diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala index 72b110ab7c..24a7c82ad4 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ORSetSpec.scala @@ -115,13 +115,13 @@ class ORSetSpec extends WordSpec with Matchers { c2.elements should contain(user4) // merge both ways - val merged1 = c1 merge c2 + val merged1 = c1.merge(c2) merged1.elements should contain(user1) merged1.elements should contain(user2) merged1.elements should not contain (user3) merged1.elements should contain(user4) - val merged2 = c2 merge c1 + val merged2 = c2.merge(c1) merged2.elements should contain(user1) merged2.elements should contain(user2) merged2.elements should not contain (user3) @@ -145,13 +145,13 @@ class ORSetSpec extends WordSpec with Matchers { c2.elements should contain(user4) // merge both ways - val merged1 = c1 merge c2 + val merged1 = c1.merge(c2) merged1.elements should contain(user1) merged1.elements should contain(user2) merged1.elements should not contain (user3) merged1.elements should contain(user4) - val merged2 = c2 merge c1 + val merged2 = c2.merge(c1) merged2.elements should 
contain(user1) merged2.elements should contain(user2) merged2.elements should not contain (user3) @@ -172,12 +172,12 @@ class ORSetSpec extends WordSpec with Matchers { c2.elements should not contain (user3) // merge both ways - val merged1 = c1 merge c2 + val merged1 = c1.merge(c2) merged1.elements should contain(user1) merged1.elements should not contain (user2) merged1.elements should not contain (user3) - val merged2 = c2 merge c1 + val merged2 = c2.merge(c1) merged2.elements should contain(user1) merged2.elements should not contain (user2) merged2.elements should not contain (user3) @@ -185,13 +185,13 @@ class ORSetSpec extends WordSpec with Matchers { val c3 = c1.add(node1, user4).remove(node1, user3).add(node1, user2) // merge both ways - val merged3 = c2 merge c3 + val merged3 = c2.merge(c3) merged3.elements should contain(user1) merged3.elements should contain(user2) merged3.elements should not contain (user3) merged3.elements should contain(user4) - val merged4 = c3 merge c2 + val merged4 = c3.merge(c2) merged4.elements should contain(user1) merged4.elements should contain(user2) merged4.elements should not contain (user3) @@ -203,23 +203,23 @@ class ORSetSpec extends WordSpec with Matchers { val c2 = c1.remove(node2, user2) // merge both ways - val merged1 = c1 merge c2 + val merged1 = c1.merge(c2) merged1.elements should contain(user1) merged1.elements should not contain (user2) - val merged2 = c2 merge c1 + val merged2 = c2.merge(c1) merged2.elements should contain(user1) merged2.elements should not contain (user2) val c3 = c1.add(node1, user3) // merge both ways - val merged3 = c3 merge c2 + val merged3 = c3.merge(c2) merged3.elements should contain(user1) merged3.elements should not contain (user2) merged3.elements should contain(user3) - val merged4 = c2 merge c3 + val merged4 = c2.merge(c3) merged4.elements should contain(user1) merged4.elements should not contain (user2) merged4.elements should contain(user3) @@ -289,21 +289,21 @@ class ORSetSpec 
extends WordSpec with Matchers { val d2 = s2.delta.get val s3 = s2.resetDelta.add(node1, "b") val d3 = s3.delta.get - val d4 = d2 merge d3 + val d4 = d2.merge(d3) asAddDeltaOp(d4).underlying.elements should ===(Set("a", "b")) s1.mergeDelta(d4) should ===(s3) s2.mergeDelta(d4) should ===(s3) val s5 = s3.resetDelta.remove(node1, "b") val d5 = s5.delta.get - val d6 = (d4 merge d5).asInstanceOf[ORSet.DeltaGroup[String]] + val d6 = d4.merge(d5).asInstanceOf[ORSet.DeltaGroup[String]] d6.ops.last.getClass should ===(classOf[ORSet.RemoveDeltaOp[String]]) d6.ops.size should ===(2) s3.mergeDelta(d6) should ===(s5) val s7 = s5.resetDelta.add(node1, "c") val s8 = s7.resetDelta.add(node1, "d") - val d9 = (d6 merge s7.delta.get merge s8.delta.get).asInstanceOf[ORSet.DeltaGroup[String]] + val d9 = d6.merge(s7.delta.get).merge(s8.delta.get).asInstanceOf[ORSet.DeltaGroup[String]] // the add "c" and add "d" are merged into one AddDeltaOp asAddDeltaOp(d9.ops.last).underlying.elements should ===(Set("c", "d")) d9.ops.size should ===(3) @@ -363,15 +363,15 @@ class ORSetSpec extends WordSpec with Matchers { val s4 = s3.resetDelta.add(node1, "a") val s5 = s4.resetDelta.remove(node1, "b") - val deltaGroup1 = s3.delta.get merge s4.delta.get merge s5.delta.get + val deltaGroup1 = s3.delta.get.merge(s4.delta.get).merge(s5.delta.get) - val s7 = s2 mergeDelta deltaGroup1 + val s7 = s2.mergeDelta(deltaGroup1) s7.elements should ===(Set("a")) // The above scenario was constructed from failing ReplicatorDeltaSpec, // some more checks... 
val s8 = s2.resetDelta.add(node2, "z") // concurrent update from node2 - val s9 = s8 mergeDelta deltaGroup1 + val s9 = s8.mergeDelta(deltaGroup1) s9.elements should ===(Set("a", "z")) } @@ -384,7 +384,7 @@ class ORSetSpec extends WordSpec with Matchers { s5.elements should ===(Set("b")) - val delta1 = s2.delta.get merge s3.delta.get + val delta1 = s2.delta.get.merge(s3.delta.get) val delta2 = s4.delta.get val t1 = ORSet.empty[String] @@ -406,7 +406,7 @@ class ORSetSpec extends WordSpec with Matchers { s5.elements should ===(Set("b")) - val delta1 = s2.delta.get merge s3.delta.get + val delta1 = s2.delta.get.merge(s3.delta.get) val t1 = ORSet.empty[String] @@ -440,14 +440,14 @@ class ORSetSpec extends WordSpec with Matchers { val node3_1 = ORSet.empty[String].mergeDelta(delta1_1).mergeDelta(delta2_1).mergeDelta(delta1_2) // and node3_1 receives full update from node2 via gossip - val merged1 = node3_1 merge node2_2 + val merged1 = node3_1.merge(node2_2) merged1.contains("a") should be(false) // and node3_1 receives delta update from node2 (it just needs to get the second delta, // as it already got the first delta just a second ago) - val merged2 = node3_1 mergeDelta delta2_2 + val merged2 = node3_1.mergeDelta(delta2_2) val ORSet(mg2) = merged2 mg2 should be(Set("x")) // !!! 
@@ -468,15 +468,15 @@ class ORSetSpec extends WordSpec with Matchers { val s21 = s0.resetDelta.add(node2, "d") // node3 receives delta for "d" and "c", but the delta for "b" is lost - val s31 = s0 mergeDelta s21.delta.get mergeDelta s12.delta.get + val s31 = s0.mergeDelta(s21.delta.get).mergeDelta(s12.delta.get) s31.elements should ===(Set("a", "c", "d")) // node4 receives all deltas - val s41 = s0 mergeDelta s11.delta.get mergeDelta s12.delta.get mergeDelta s21.delta.get + val s41 = s0.mergeDelta(s11.delta.get).mergeDelta(s12.delta.get).mergeDelta(s21.delta.get) s41.elements should ===(Set("a", "b", "c", "d")) // node3 and node4 sync with full state gossip - val s32 = s31 merge s41 + val s32 = s31.merge(s41) // one would expect elements "a", "b", "c", "d", but "b" is removed // because we applied s12.delta without applying s11.delta s32.elements should ===(Set("a", "c", "d")) @@ -487,7 +487,8 @@ class ORSetSpec extends WordSpec with Matchers { "ORSet unit test" must { "verify subtractDots" in { val dot = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 2L, nodeD -> 14L, nodeG -> 22L)) - val vvector = VersionVector(TreeMap(nodeA -> 4L, nodeB -> 1L, nodeC -> 1L, nodeD -> 14L, nodeE -> 5L, nodeF -> 2L)) + val vvector = + VersionVector(TreeMap(nodeA -> 4L, nodeB -> 1L, nodeC -> 1L, nodeD -> 14L, nodeE -> 5L, nodeF -> 2L)) val expected = VersionVector(TreeMap(nodeB -> 2L, nodeG -> 22L)) ORSet.subtractDots(dot, vvector) should be(expected) } @@ -497,34 +498,25 @@ class ORSetSpec extends WordSpec with Matchers { val thisDot1 = VersionVector(TreeMap(nodeA -> 3L, nodeD -> 7L)) val thisDot2 = VersionVector(TreeMap(nodeB -> 5L, nodeC -> 2L)) val thisVvector = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 5L, nodeC -> 2L, nodeD -> 7L)) - val thisSet = new ORSet( - elementsMap = Map("K1" -> thisDot1, "K2" -> thisDot2), - vvector = thisVvector) + val thisSet = new ORSet(elementsMap = Map("K1" -> thisDot1, "K2" -> thisDot2), vvector = thisVvector) val thatDot1 = VersionVector(nodeA, 
3L) val thatDot2 = VersionVector(nodeB, 6L) val thatVvector = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 6L, nodeC -> 1L, nodeD -> 8L)) - val thatSet = new ORSet( - elementsMap = Map("K1" -> thatDot1, "K2" -> thatDot2), - vvector = thatVvector) + val thatSet = new ORSet(elementsMap = Map("K1" -> thatDot1, "K2" -> thatDot2), vvector = thatVvector) - val expectedDots = Map( - "K1" -> VersionVector(nodeA, 3L), - "K2" -> VersionVector(TreeMap(nodeB -> 6L, nodeC -> 2L))) + val expectedDots = Map("K1" -> VersionVector(nodeA, 3L), "K2" -> VersionVector(TreeMap(nodeB -> 6L, nodeC -> 2L))) ORSet.mergeCommonKeys(commonKeys, thisSet, thatSet) should be(expectedDots) } "verify mergeDisjointKeys" in { val keys: Set[Any] = Set("K3", "K4", "K5") - val elements: Map[Any, VersionVector] = Map( - "K3" -> VersionVector(nodeA, 4L), - "K4" -> VersionVector(TreeMap(nodeA -> 3L, nodeD -> 8L)), - "K5" -> VersionVector(nodeA, 2L)) + val elements: Map[Any, VersionVector] = Map("K3" -> VersionVector(nodeA, 4L), + "K4" -> VersionVector(TreeMap(nodeA -> 3L, nodeD -> 8L)), + "K5" -> VersionVector(nodeA, 2L)) val vvector = VersionVector(TreeMap(nodeA -> 3L, nodeD -> 7L)) val acc: Map[Any, VersionVector] = Map("K1" -> VersionVector(nodeA, 3L)) - val expectedDots = acc ++ Map( - "K3" -> VersionVector(nodeA, 4L), - "K4" -> VersionVector(nodeD, 8L)) // "a" -> 3 removed, optimized to include only those unseen + val expectedDots = acc ++ Map("K3" -> VersionVector(nodeA, 4L), "K4" -> VersionVector(nodeD, 8L)) // "a" -> 3 removed, optimized to include only those unseen ORSet.mergeDisjointKeys(keys, elements, vvector, acc) should be(expectedDots) } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala index 74a9735232..cac9e3c750 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala +++ 
b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterMapSpec.scala @@ -19,7 +19,8 @@ class PNCounterMapSpec extends WordSpec with Matchers { "be able to increment and decrement entries with implicit SelfUniqueAddress" in { implicit val node = SelfUniqueAddress(node1) - PNCounterMap().incrementBy("a", 2).incrementBy("b", 1).incrementBy("b", 2).decrementBy("a", 1).entries should be(Map("a" -> 1, "b" -> 3)) + PNCounterMap().incrementBy("a", 2).incrementBy("b", 1).incrementBy("b", 2).decrementBy("a", 1).entries should be( + Map("a" -> 1, "b" -> 3)) } "be able to increment and decrement entries" in { @@ -33,22 +34,22 @@ class PNCounterMapSpec extends WordSpec with Matchers { // merge both ways val expected = Map("a" -> 1, "b" -> 3, "c" -> 7) - (m1 merge m2).entries should be(expected) - (m2 merge m1).entries should be(expected) + m1.merge(m2).entries should be(expected) + m2.merge(m1).entries should be(expected) } "be able to remove entry" in { val m1 = PNCounterMap().increment(node1, "a", 1).increment(node1, "b", 3).increment(node1, "c", 2) val m2 = PNCounterMap().increment(node2, "c", 5) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.remove(node1, "b") - (merged1 merge m3).entries should be(Map("a" -> 1, "c" -> 7)) + merged1.merge(m3).entries should be(Map("a" -> 1, "c" -> 7)) // but if there is a conflicting update the entry is not removed val m4 = merged1.increment(node2, "b", 10) - (m3 merge m4).entries should be(Map("a" -> 1, "b" -> 13, "c" -> 7)) + m3.merge(m4).entries should be(Map("a" -> 1, "b" -> 13, "c" -> 7)) } "be able to work with deltas" in { @@ -56,17 +57,17 @@ class PNCounterMapSpec extends WordSpec with Matchers { val m2 = PNCounterMap().increment(node2, "c", 5) val expected = Map("a" -> 1, "b" -> 3, "c" -> 7) - (PNCounterMap() mergeDelta m1.delta.get mergeDelta m2.delta.get).entries should be(expected) - (PNCounterMap() mergeDelta m2.delta.get mergeDelta m1.delta.get).entries should be(expected) + 
PNCounterMap().mergeDelta(m1.delta.get).mergeDelta(m2.delta.get).entries should be(expected) + PNCounterMap().mergeDelta(m2.delta.get).mergeDelta(m1.delta.get).entries should be(expected) - val merged1 = m1 merge m2 + val merged1 = m1.merge(m2) val m3 = merged1.resetDelta.remove(node1, "b") - (merged1 mergeDelta m3.delta.get).entries should be(Map("a" -> 1, "c" -> 7)) + merged1.mergeDelta(m3.delta.get).entries should be(Map("a" -> 1, "c" -> 7)) // but if there is a conflicting update the entry is not removed val m4 = merged1.resetDelta.increment(node2, "b", 10) - (m3 mergeDelta m4.delta.get).entries should be(Map("a" -> 1, "b" -> 13, "c" -> 7)) + m3.mergeDelta(m4.delta.get).entries should be(Map("a" -> 1, "b" -> 13, "c" -> 7)) } "have unapply extractor" in { diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala index 315f172b01..3826eec682 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/PNCounterSpec.scala @@ -19,12 +19,12 @@ class PNCounterSpec extends WordSpec with Matchers { "be able to increment each node's record by one" in { val c1 = PNCounter() - val c2 = c1 increment node1 - val c3 = c2 increment node1 + val c2 = c1.increment(node1) + val c3 = c2.increment(node1) - val c4 = c3 increment node2 - val c5 = c4 increment node2 - val c6 = c5.resetDelta increment node2 + val c4 = c3.increment(node2) + val c5 = c4.increment(node2) + val c6 = c5.resetDelta.increment(node2) c6.increments.state(node1) should be(2) c6.increments.state(node2) should be(3) @@ -41,12 +41,12 @@ class PNCounterSpec extends WordSpec with Matchers { "be able to decrement each node's record by one" in { val c1 = PNCounter() - val c2 = c1 decrement node1 - val c3 = c2 decrement node1 + val c2 = c1.decrement(node1) + val c3 = c2.decrement(node1) - val c4 = c3 decrement node2 - val c5 
= c4 decrement node2 - val c6 = c5.resetDelta decrement node2 + val c4 = c3.decrement(node2) + val c5 = c4.decrement(node2) + val c6 = c5.resetDelta.decrement(node2) c6.decrements.state(node1) should be(2) c6.decrements.state(node2) should be(3) @@ -61,12 +61,12 @@ class PNCounterSpec extends WordSpec with Matchers { "be able to increment each node's record by arbitrary delta" in { val c1 = PNCounter() - val c2 = c1 increment (node1, 3) - val c3 = c2 increment (node1, 4) + val c2 = c1.increment(node1, 3) + val c3 = c2.increment(node1, 4) - val c4 = c3 increment (node2, 2) - val c5 = c4 increment (node2, 7) - val c6 = c5 increment node2 + val c4 = c3.increment(node2, 2) + val c5 = c4.increment(node2, 7) + val c6 = c5.increment(node2) c6.increments.state(node1) should be(7) c6.increments.state(node2) should be(10) @@ -75,12 +75,12 @@ class PNCounterSpec extends WordSpec with Matchers { "be able to increment each node's record by arbitrary BigInt delta" in { val c1 = PNCounter() - val c2 = c1 increment (node1, BigInt(3)) - val c3 = c2 increment (node1, BigInt(4)) + val c2 = c1.increment(node1, BigInt(3)) + val c3 = c2.increment(node1, BigInt(4)) - val c4 = c3 increment (node2, BigInt(2)) - val c5 = c4 increment (node2, BigInt(7)) - val c6 = c5 increment node2 + val c4 = c3.increment(node2, BigInt(2)) + val c5 = c4.increment(node2, BigInt(7)) + val c6 = c5.increment(node2) c6.increments.state(node1) should be(7) c6.increments.state(node2) should be(10) @@ -89,12 +89,12 @@ class PNCounterSpec extends WordSpec with Matchers { "be able to decrement each node's record by arbitrary delta" in { val c1 = PNCounter() - val c2 = c1 decrement (node1, 3) - val c3 = c2 decrement (node1, 4) + val c2 = c1.decrement(node1, 3) + val c3 = c2.decrement(node1, 4) - val c4 = c3 decrement (node2, 2) - val c5 = c4 decrement (node2, 7) - val c6 = c5 decrement node2 + val c4 = c3.decrement(node2, 2) + val c5 = c4.decrement(node2, 7) + val c6 = c5.decrement(node2) c6.decrements.state(node1) 
should be(7) c6.decrements.state(node2) should be(10) @@ -103,12 +103,12 @@ class PNCounterSpec extends WordSpec with Matchers { "be able to increment and decrement each node's record by arbitrary delta" in { val c1 = PNCounter() - val c2 = c1 increment (node1, 3) - val c3 = c2 decrement (node1, 2) + val c2 = c1.increment(node1, 3) + val c3 = c2.decrement(node1, 2) - val c4 = c3 increment (node2, 5) - val c5 = c4 decrement (node2, 2) - val c6 = c5 increment node2 + val c4 = c3.increment(node2, 5) + val c5 = c4.decrement(node2, 2) + val c6 = c5.increment(node2) c6.increments.value should be(9) c6.decrements.value should be(4) @@ -117,12 +117,12 @@ class PNCounterSpec extends WordSpec with Matchers { "be able to summarize the history to the correct aggregated value of increments and decrements" in { val c1 = PNCounter() - val c2 = c1 increment (node1, 3) - val c3 = c2 decrement (node1, 2) + val c2 = c1.increment(node1, 3) + val c3 = c2.decrement(node1, 2) - val c4 = c3 increment (node2, 5) - val c5 = c4 decrement (node2, 2) - val c6 = c5 increment node2 + val c4 = c3.increment(node2, 5) + val c5 = c4.decrement(node2, 2) + val c6 = c5.increment(node2) c6.increments.value should be(9) c6.decrements.value should be(4) @@ -133,11 +133,11 @@ class PNCounterSpec extends WordSpec with Matchers { "be able to have its history correctly merged with another GCounter" in { // counter 1 val c11 = PNCounter() - val c12 = c11 increment (node1, 3) - val c13 = c12 decrement (node1, 2) - val c14 = c13 increment (node2, 5) - val c15 = c14 decrement (node2, 2) - val c16 = c15 increment node2 + val c12 = c11.increment(node1, 3) + val c13 = c12.decrement(node1, 2) + val c14 = c13.increment(node2, 5) + val c15 = c14.decrement(node2, 2) + val c16 = c15.increment(node2) c16.increments.value should be(9) c16.decrements.value should be(4) @@ -145,23 +145,23 @@ class PNCounterSpec extends WordSpec with Matchers { // counter 1 val c21 = PNCounter() - val c22 = c21 increment (node1, 2) - val c23 
= c22 decrement (node1, 3) - val c24 = c23 increment (node2, 3) - val c25 = c24 decrement (node2, 2) - val c26 = c25 increment node2 + val c22 = c21.increment(node1, 2) + val c23 = c22.decrement(node1, 3) + val c24 = c23.increment(node2, 3) + val c25 = c24.decrement(node2, 2) + val c26 = c25.increment(node2) c26.increments.value should be(6) c26.decrements.value should be(5) c26.value should be(1) // merge both ways - val merged1 = c16 merge c26 + val merged1 = c16.merge(c26) merged1.increments.value should be(9) merged1.decrements.value should be(5) merged1.value should be(4) - val merged2 = c26 merge c16 + val merged2 = c26.merge(c16) merged2.increments.value should be(9) merged2.decrements.value should be(5) merged2.value should be(4) @@ -169,8 +169,8 @@ class PNCounterSpec extends WordSpec with Matchers { "have support for pruning" in { val c1 = PNCounter() - val c2 = c1 increment node1 - val c3 = c2 decrement node2 + val c2 = c1.increment(node1) + val c3 = c2.decrement(node2) c2.modifiedByNodes should ===(Set(node1)) c2.needPruningFrom(node1) should be(true) c2.needPruningFrom(node2) should be(false) @@ -183,7 +183,7 @@ class PNCounterSpec extends WordSpec with Matchers { c4.needPruningFrom(node2) should be(true) c4.needPruningFrom(node1) should be(false) - val c5 = (c4 increment node1).pruningCleanup(node1) + val c5 = c4.increment(node1).pruningCleanup(node1) c5.modifiedByNodes should ===(Set(node2)) c5.needPruningFrom(node1) should be(false) } diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ReplicatorSettingsSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ReplicatorSettingsSpec.scala index 5bf4a2636f..d41e84b146 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/ReplicatorSettingsSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/ReplicatorSettingsSpec.scala @@ -17,8 +17,7 @@ object ReplicatorSettingsSpec { akka.remote.artery.canonical.hostname = 127.0.0.1""") } -class 
ReplicatorSettingsSpec extends AkkaSpec(ReplicatorSettingsSpec.config) - with WordSpecLike with BeforeAndAfterAll { +class ReplicatorSettingsSpec extends AkkaSpec(ReplicatorSettingsSpec.config) with WordSpecLike with BeforeAndAfterAll { "DistributedData" must { "have the default replicator name" in { diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/VersionVectorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/VersionVectorSpec.scala index 12f33be349..23084e2f42 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/VersionVectorSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/VersionVectorSpec.scala @@ -12,8 +12,11 @@ import org.scalatest.BeforeAndAfterAll import org.scalatest.Matchers import org.scalatest.WordSpecLike -class VersionVectorSpec extends TestKit(ActorSystem("VersionVectorSpec")) - with WordSpecLike with Matchers with BeforeAndAfterAll { +class VersionVectorSpec + extends TestKit(ActorSystem("VersionVectorSpec")) + with WordSpecLike + with Matchers + with BeforeAndAfterAll { val node1 = UniqueAddress(Address("akka.tcp", "Sys", "localhost", 2551), 1) val node2 = UniqueAddress(node1.address.copy(port = Some(2552)), 2) @@ -170,13 +173,13 @@ class VersionVectorSpec extends TestKit(ActorSystem("VersionVectorSpec")) val vv2_2 = vv1_2 + node2 val vv3_2 = vv2_2 + node2 - val merged1 = vv3_2 merge vv5_1 + val merged1 = vv3_2.merge(vv5_1) merged1.size should be(3) merged1.contains(node1) should be(true) merged1.contains(node2) should be(true) merged1.contains(node3) should be(true) - val merged2 = vv5_1 merge vv3_2 + val merged2 = vv5_1.merge(vv3_2) merged2.size should be(3) merged2.contains(node1) should be(true) merged2.contains(node2) should be(true) @@ -203,14 +206,14 @@ class VersionVectorSpec extends TestKit(ActorSystem("VersionVectorSpec")) val vv2_2 = vv1_2 + node4 val vv3_2 = vv2_2 + node4 - val merged1 = vv3_2 merge vv5_1 + val merged1 = vv3_2.merge(vv5_1) merged1.size should 
be(4) merged1.contains(node1) should be(true) merged1.contains(node2) should be(true) merged1.contains(node3) should be(true) merged1.contains(node4) should be(true) - val merged2 = vv5_1 merge vv3_2 + val merged2 = vv5_1.merge(vv3_2) merged2.size should be(4) merged2.contains(node1) should be(true) merged2.contains(node2) should be(true) diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala index f2c8e70e1f..4196c63f42 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/WriteAggregatorSpec.scala @@ -24,17 +24,35 @@ object WriteAggregatorSpec { val KeyA = GSetKey[String]("A") val KeyB = ORSetKey[String]("B") - def writeAggregatorProps(data: GSet[String], consistency: Replicator.WriteConsistency, - probes: Map[Address, ActorRef], nodes: Set[Address], unreachable: Set[Address], replyTo: ActorRef, durable: Boolean): Props = + def writeAggregatorProps(data: GSet[String], + consistency: Replicator.WriteConsistency, + probes: Map[Address, ActorRef], + nodes: Set[Address], + unreachable: Set[Address], + replyTo: ActorRef, + durable: Boolean): Props = Props(new TestWriteAggregator(KeyA, data, None, consistency, probes, nodes, unreachable, replyTo, durable)) - def writeAggregatorPropsWithDelta(data: ORSet[String], delta: Delta, consistency: Replicator.WriteConsistency, - probes: Map[Address, ActorRef], nodes: Set[Address], unreachable: Set[Address], replyTo: ActorRef, durable: Boolean): Props = + def writeAggregatorPropsWithDelta(data: ORSet[String], + delta: Delta, + consistency: Replicator.WriteConsistency, + probes: Map[Address, ActorRef], + nodes: Set[Address], + unreachable: Set[Address], + replyTo: ActorRef, + durable: Boolean): Props = Props(new TestWriteAggregator(KeyB, data, Some(delta), consistency, probes, nodes, unreachable, replyTo, 
durable)) - class TestWriteAggregator(key: Key.KeyR, data: ReplicatedData, delta: Option[Delta], consistency: Replicator.WriteConsistency, - probes: Map[Address, ActorRef], nodes: Set[Address], unreachable: Set[Address], replyTo: ActorRef, durable: Boolean) - extends WriteAggregator(key, DataEnvelope(data), delta, consistency, None, nodes, unreachable, replyTo, durable) { + class TestWriteAggregator(key: Key.KeyR, + data: ReplicatedData, + delta: Option[Delta], + consistency: Replicator.WriteConsistency, + probes: Map[Address, ActorRef], + nodes: Set[Address], + unreachable: Set[Address], + replyTo: ActorRef, + durable: Boolean) + extends WriteAggregator(key, DataEnvelope(data), delta, consistency, None, nodes, unreachable, replyTo, durable) { override def replica(address: Address): ActorSelection = context.actorSelection(probes(address).path) @@ -78,8 +96,7 @@ class WriteAggregatorSpec extends AkkaSpec(s""" dir = target/WriteAggregatorSpec-${System.currentTimeMillis}-ddata map-size = 10 MiB } - """) - with ImplicitSender { + """) with ImplicitSender { import WriteAggregatorSpec._ val protocol = @@ -113,8 +130,9 @@ class WriteAggregatorSpec extends AkkaSpec(s""" "WriteAggregator" must { "send to at least N/2+1 replicas when WriteMajority" in { val probe = TestProbe() - val aggr = system.actorOf(WriteAggregatorSpec.writeAggregatorProps( - data, writeMajority, probes(probe.ref), nodes, Set.empty, testActor, durable = false)) + val aggr = system.actorOf( + WriteAggregatorSpec + .writeAggregatorProps(data, writeMajority, probes(probe.ref), nodes, Set.empty, testActor, durable = false)) probe.expectMsgType[Write] probe.lastSender ! 
WriteAck @@ -128,8 +146,8 @@ class WriteAggregatorSpec extends AkkaSpec(s""" "send to more when no immediate reply" in { val testProbes = probes() val testProbeRefs = testProbes.map { case (a, tm) => a -> tm.writeAckAdapter } - val aggr = system.actorOf(WriteAggregatorSpec.writeAggregatorProps( - data, writeMajority, testProbeRefs, nodes, Set(nodeC, nodeD), testActor, durable = false)) + val aggr = system.actorOf(WriteAggregatorSpec + .writeAggregatorProps(data, writeMajority, testProbeRefs, nodes, Set(nodeC, nodeD), testActor, durable = false)) testProbes(nodeA).expectMsgType[Write] // no reply @@ -139,9 +157,7 @@ class WriteAggregatorSpec extends AkkaSpec(s""" val t = timeout / 5 - 50.milliseconds.dilated import system.dispatcher Future.sequence { - Seq( - Future { testProbes(nodeC).expectNoMsg(t) }, - Future { testProbes(nodeD).expectNoMsg(t) }) + Seq(Future { testProbes(nodeC).expectNoMsg(t) }, Future { testProbes(nodeD).expectNoMsg(t) }) }.futureValue testProbes(nodeC).expectMsgType[Write] testProbes(nodeC).lastSender ! 
WriteAck @@ -155,8 +171,9 @@ class WriteAggregatorSpec extends AkkaSpec(s""" "timeout when less than required acks" in { val probe = TestProbe() - val aggr = system.actorOf(WriteAggregatorSpec.writeAggregatorProps( - data, writeMajority, probes(probe.ref), nodes, Set.empty, testActor, durable = false)) + val aggr = system.actorOf( + WriteAggregatorSpec + .writeAggregatorProps(data, writeMajority, probes(probe.ref), nodes, Set.empty, testActor, durable = false)) probe.expectMsgType[Write] // no reply @@ -197,8 +214,15 @@ class WriteAggregatorSpec extends AkkaSpec(s""" "send deltas first" in { val probe = TestProbe() - val aggr = system.actorOf(WriteAggregatorSpec.writeAggregatorPropsWithDelta( - fullState2, delta, writeMajority, probes(probe.ref), nodes, Set.empty, testActor, durable = false)) + val aggr = system.actorOf( + WriteAggregatorSpec.writeAggregatorPropsWithDelta(fullState2, + delta, + writeMajority, + probes(probe.ref), + nodes, + Set.empty, + testActor, + durable = false)) probe.expectMsgType[DeltaPropagation] probe.lastSender ! 
WriteAck @@ -212,8 +236,15 @@ class WriteAggregatorSpec extends AkkaSpec(s""" "retry with full state when no immediate reply or nack" in { val testProbes = probes() val testProbeRefs = testProbes.map { case (a, tm) => a -> tm.writeAckAdapter } - val aggr = system.actorOf(WriteAggregatorSpec.writeAggregatorPropsWithDelta( - fullState2, delta, writeAll, testProbeRefs, nodes, Set.empty, testActor, durable = false)) + val aggr = system.actorOf( + WriteAggregatorSpec.writeAggregatorPropsWithDelta(fullState2, + delta, + writeAll, + testProbeRefs, + nodes, + Set.empty, + testActor, + durable = false)) testProbes(nodeA).expectMsgType[DeltaPropagation] // no reply @@ -239,8 +270,15 @@ class WriteAggregatorSpec extends AkkaSpec(s""" "timeout when less than required acks" in { val probe = TestProbe() - val aggr = system.actorOf(WriteAggregatorSpec.writeAggregatorPropsWithDelta( - fullState2, delta, writeAll, probes(probe.ref), nodes, Set.empty, testActor, durable = false)) + val aggr = system.actorOf( + WriteAggregatorSpec.writeAggregatorPropsWithDelta(fullState2, + delta, + writeAll, + probes(probe.ref), + nodes, + Set.empty, + testActor, + durable = false)) probe.expectMsgType[DeltaPropagation] // no reply @@ -268,8 +306,9 @@ class WriteAggregatorSpec extends AkkaSpec(s""" "Durable WriteAggregator" must { "not reply before local confirmation" in { val probe = TestProbe() - val aggr = system.actorOf(WriteAggregatorSpec.writeAggregatorProps( - data, writeThree, probes(probe.ref), nodes, Set.empty, testActor, durable = true)) + val aggr = system.actorOf( + WriteAggregatorSpec + .writeAggregatorProps(data, writeThree, probes(probe.ref), nodes, Set.empty, testActor, durable = true)) probe.expectMsgType[Write] probe.lastSender ! 
WriteAck @@ -287,8 +326,9 @@ class WriteAggregatorSpec extends AkkaSpec(s""" "tolerate WriteNack if enough WriteAck" in { val probe = TestProbe() - val aggr = system.actorOf(WriteAggregatorSpec.writeAggregatorProps( - data, writeThree, probes(probe.ref), nodes, Set.empty, testActor, durable = true)) + val aggr = system.actorOf( + WriteAggregatorSpec + .writeAggregatorProps(data, writeThree, probes(probe.ref), nodes, Set.empty, testActor, durable = true)) aggr ! UpdateSuccess(WriteAggregatorSpec.KeyA, None) // the local write probe.expectMsgType[Write] @@ -305,8 +345,9 @@ class WriteAggregatorSpec extends AkkaSpec(s""" "reply with StoreFailure when too many nacks" in { val probe = TestProbe() - val aggr = system.actorOf(WriteAggregatorSpec.writeAggregatorProps( - data, writeMajority, probes(probe.ref), nodes, Set.empty, testActor, durable = true)) + val aggr = system.actorOf( + WriteAggregatorSpec + .writeAggregatorProps(data, writeMajority, probes(probe.ref), nodes, Set.empty, testActor, durable = true)) probe.expectMsgType[Write] probe.lastSender ! 
WriteNack @@ -325,8 +366,9 @@ class WriteAggregatorSpec extends AkkaSpec(s""" "timeout when less than required acks" in { val probe = TestProbe() - val aggr = system.actorOf(WriteAggregatorSpec.writeAggregatorProps( - data, writeMajority, probes(probe.ref), nodes, Set.empty, testActor, durable = true)) + val aggr = system.actorOf( + WriteAggregatorSpec + .writeAggregatorProps(data, writeMajority, probes(probe.ref), nodes, Set.empty, testActor, durable = true)) probe.expectMsgType[Write] // no reply diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala index 5e66016758..acb8773e8c 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatedDataSerializerSpec.scala @@ -26,9 +26,10 @@ import akka.actor.RootActorPath import akka.cluster.Cluster import akka.testkit.TestActors -class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( - "ReplicatedDataSerializerSpec", - ConfigFactory.parseString(""" +class ReplicatedDataSerializerSpec + extends TestKit( + ActorSystem("ReplicatedDataSerializerSpec", + ConfigFactory.parseString(""" akka.loglevel = DEBUG akka.actor.provider=cluster akka.remote.netty.tcp.port=0 @@ -37,7 +38,10 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( serialize-messages = off allow-java-serialization = off } - """))) with WordSpecLike with Matchers with BeforeAndAfterAll { + """))) + with WordSpecLike + with Matchers + with BeforeAndAfterAll { val serializer = new ReplicatedDataSerializer(system.asInstanceOf[ExtendedActorSystem]) @@ -120,7 +124,7 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( // ORSet with ActorRef checkSerialization(ORSet().add(address1, ref1)) checkSerialization(ORSet().add(address1, 
ref1).add(address1, ref2)) - checkSerialization(ORSet().add(address1, ref1).add(address1, "a").add(address2, ref2) add (address2, "b")) + checkSerialization(ORSet().add(address1, ref1).add(address1, "a").add(address2, ref2).add(address2, "b")) val s5 = ORSet().add(address1, "a").add(address2, ref1) val s6 = ORSet().add(address2, ref1).add(address1, "a") @@ -133,8 +137,9 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( val echo1 = system.actorOf(TestActors.echoActorProps, "echo1") system2.actorOf(TestActors.echoActorProps, "echo2") - system.actorSelection(RootActorPath(Cluster(system2).selfAddress) / "user" / "echo2").tell( - Identify("2"), testActor) + system + .actorSelection(RootActorPath(Cluster(system2).selfAddress) / "user" / "echo2") + .tell(Identify("2"), testActor) val echo2 = expectMsgType[ActorIdentity].ref.get val msg = ORSet.empty[ActorRef].add(Cluster(system), echo1).add(Cluster(system), echo2) @@ -187,8 +192,9 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( "serialize LWWRegister" in { checkSerialization(LWWRegister(address1, "value1", LWWRegister.defaultClock[String])) - checkSerialization(LWWRegister(address1, "value2", LWWRegister.defaultClock[String]) - .withValue(address2, "value3", LWWRegister.defaultClock[String])) + checkSerialization( + LWWRegister(address1, "value2", LWWRegister.defaultClock[String]) + .withValue(address2, "value3", LWWRegister.defaultClock[String])) } "serialize GCounter" in { @@ -196,12 +202,10 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( checkSerialization(GCounter().increment(address1, 3)) checkSerialization(GCounter().increment(address1, 2).increment(address2, 5)) - checkSameContent( - GCounter().increment(address1, 2).increment(address2, 5), - GCounter().increment(address2, 5).increment(address1, 1).increment(address1, 1)) - checkSameContent( - GCounter().increment(address1, 2).increment(address3, 5), - GCounter().increment(address3, 5).increment(address1, 2)) 
+ checkSameContent(GCounter().increment(address1, 2).increment(address2, 5), + GCounter().increment(address2, 5).increment(address1, 1).increment(address1, 1)) + checkSameContent(GCounter().increment(address1, 2).increment(address3, 5), + GCounter().increment(address3, 5).increment(address1, 2)) } "serialize PNCounter" in { @@ -211,15 +215,12 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( checkSerialization(PNCounter().increment(address1, 2).increment(address2, 5)) checkSerialization(PNCounter().increment(address1, 2).increment(address2, 5).decrement(address1, 1)) - checkSameContent( - PNCounter().increment(address1, 2).increment(address2, 5), - PNCounter().increment(address2, 5).increment(address1, 1).increment(address1, 1)) - checkSameContent( - PNCounter().increment(address1, 2).increment(address3, 5), - PNCounter().increment(address3, 5).increment(address1, 2)) - checkSameContent( - PNCounter().increment(address1, 2).decrement(address1, 1).increment(address3, 5), - PNCounter().increment(address3, 5).increment(address1, 2).decrement(address1, 1)) + checkSameContent(PNCounter().increment(address1, 2).increment(address2, 5), + PNCounter().increment(address2, 5).increment(address1, 1).increment(address1, 1)) + checkSameContent(PNCounter().increment(address1, 2).increment(address3, 5), + PNCounter().increment(address3, 5).increment(address1, 2)) + checkSameContent(PNCounter().increment(address1, 2).decrement(address1, 1).increment(address3, 5), + PNCounter().increment(address3, 5).increment(address1, 2).decrement(address1, 1)) } "serialize ORMap" in { @@ -236,20 +237,29 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( checkSerialization(ORMap().put(address1, "a", GSet() + "A").remove(address2, "a").delta.get) checkSerialization(ORMap().put(address1, 1, GSet() + "A").delta.get) checkSerialization(ORMap().put(address1, 1L, GSet() + "A").delta.get) - checkSerialization(ORMap.empty[String, ORSet[String]] - .put(address1, "a", 
ORSet.empty[String].add(address1, "A")) - .put(address2, "b", ORSet.empty[String].add(address2, "B")) - .updated(address1, "a", ORSet.empty[String])(_.add(address1, "C")).delta.get) - checkSerialization(ORMap.empty[String, ORSet[String]] - .resetDelta - .updated(address1, "a", ORSet.empty[String])(_.add(address1, "C")).delta.get) + checkSerialization( + ORMap + .empty[String, ORSet[String]] + .put(address1, "a", ORSet.empty[String].add(address1, "A")) + .put(address2, "b", ORSet.empty[String].add(address2, "B")) + .updated(address1, "a", ORSet.empty[String])(_.add(address1, "C")) + .delta + .get) + checkSerialization( + ORMap + .empty[String, ORSet[String]] + .resetDelta + .updated(address1, "a", ORSet.empty[String])(_.add(address1, "C")) + .delta + .get) // use Flag for this test as object key because it is serializable checkSerialization(ORMap().put(address1, Flag(), GSet() + "A").delta.get) } "be compatible with old ORMap serialization" in { // Below blob was created with previous version of the serializer - val oldBlobAsBase64 = "H4sIAAAAAAAAAOOax8jlyaXMJc8lzMWXX5KRWqSXkV9copdflC7wXEWUiYGBQRaIGQQkuJS45LiEuHiL83NTUdQwwtWIC6kQpUqVKAulGBOlGJOE+LkYE4W4uJi5GB0FuJUYnUACSRABJ7AAAOLO3C3DAAAA" + val oldBlobAsBase64 = + "H4sIAAAAAAAAAOOax8jlyaXMJc8lzMWXX5KRWqSXkV9copdflC7wXEWUiYGBQRaIGQQkuJS45LiEuHiL83NTUdQwwtWIC6kQpUqVKAulGBOlGJOE+LkYE4W4uJi5GB0FuJUYnUACSRABJ7AAAOLO3C3DAAAA" checkCompatibility(oldBlobAsBase64, ORMap()) } @@ -259,13 +269,16 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( checkSerialization(LWWMap().put(address1, 1, "value1", LWWRegister.defaultClock[Any])) checkSerialization(LWWMap().put(address1, 1L, "value1", LWWRegister.defaultClock[Any])) checkSerialization(LWWMap().put(address1, Flag(), "value1", LWWRegister.defaultClock[Any])) - checkSerialization(LWWMap().put(address1, "a", "value1", LWWRegister.defaultClock[Any]) - .put(address2, "b", 17, LWWRegister.defaultClock[Any])) + checkSerialization( + LWWMap() + .put(address1, "a", 
"value1", LWWRegister.defaultClock[Any]) + .put(address2, "b", 17, LWWRegister.defaultClock[Any])) } "be compatible with old LWWMap serialization" in { // Below blob was created with previous version of the serializer - val oldBlobAsBase64 = "H4sIAAAAAAAAAOPy51LhUuKS4xLi4i3Oz03Vy8gvLtHLL0oXeK4iysjAwCALxAwC0kJEqZJiTBSy4AISxhwzrl2fuyRMiIAWKS4utrLEnNJUQwERAD96/peLAAAA" + val oldBlobAsBase64 = + "H4sIAAAAAAAAAOPy51LhUuKS4xLi4i3Oz03Vy8gvLtHLL0oXeK4iysjAwCALxAwC0kJEqZJiTBSy4AISxhwzrl2fuyRMiIAWKS4utrLEnNJUQwERAD96/peLAAAA" checkCompatibility(oldBlobAsBase64, LWWMap()) } @@ -275,13 +288,14 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( checkSerialization(PNCounterMap().increment(address1, 1, 3)) checkSerialization(PNCounterMap().increment(address1, 1L, 3)) checkSerialization(PNCounterMap().increment(address1, Flag(), 3)) - checkSerialization(PNCounterMap().increment(address1, "a", 3).decrement(address2, "a", 2). - increment(address2, "b", 5)) + checkSerialization( + PNCounterMap().increment(address1, "a", 3).decrement(address2, "a", 2).increment(address2, "b", 5)) } "be compatible with old PNCounterMap serialization" in { // Below blob was created with previous version of the serializer - val oldBlobAsBase64 = "H4sIAAAAAAAAAOPy51LhUuKS4xLi4i3Oz03Vy8gvLtHLL0oXeK4iysjAwCALxAwC8kJEqZJiTBTS4wISmlyqXMqE1AsxMgsxAADYQs/9gQAAAA==" + val oldBlobAsBase64 = + "H4sIAAAAAAAAAOPy51LhUuKS4xLi4i3Oz03Vy8gvLtHLL0oXeK4iysjAwCALxAwC8kJEqZJiTBTS4wISmlyqXMqE1AsxMgsxAADYQs/9gQAAAA==" checkCompatibility(oldBlobAsBase64, PNCounterMap()) } @@ -291,15 +305,18 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( checkSerialization(ORMultiMap().addBinding(address1, 1, "A")) checkSerialization(ORMultiMap().addBinding(address1, 1L, "A")) checkSerialization(ORMultiMap().addBinding(address1, Flag(), "A")) - checkSerialization(ORMultiMap.empty[String, String] - .addBinding(address1, "a", "A1") - .put(address2, "b", Set("B1", "B2", "B3")) - .addBinding(address2, "a", "A2")) 
+ checkSerialization( + ORMultiMap + .empty[String, String] + .addBinding(address1, "a", "A1") + .put(address2, "b", Set("B1", "B2", "B3")) + .addBinding(address2, "a", "A2")) val m1 = ORMultiMap.empty[String, String].addBinding(address1, "a", "A1").addBinding(address2, "a", "A2") val m2 = ORMultiMap.empty[String, String].put(address2, "b", Set("B1", "B2", "B3")) checkSameContent(m1.merge(m2), m2.merge(m1)) - checkSerialization(ORMultiMap.empty[String, String].addBinding(address1, "a", "A1").addBinding(address1, "a", "A2").delta.get) + checkSerialization( + ORMultiMap.empty[String, String].addBinding(address1, "a", "A1").addBinding(address1, "a", "A2").delta.get) val m3 = ORMultiMap.empty[String, String].addBinding(address1, "a", "A1") val d3 = m3.resetDelta.addBinding(address1, "a", "A2").addBinding(address1, "a", "A3").delta.get checkSerialization(d3) @@ -307,7 +324,8 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( "be compatible with old ORMultiMap serialization" in { // Below blob was created with previous version of the serializer - val oldBlobAsBase64 = "H4sIAAAAAAAAAOPy51LhUuKS4xLi4i3Oz03Vy8gvLtHLL0oXeK4iysjAwCALxAwCakJEqZJiTBQK4QISxJmqSpSpqlKMjgDlsHjDpwAAAA==" + val oldBlobAsBase64 = + "H4sIAAAAAAAAAOPy51LhUuKS4xLi4i3Oz03Vy8gvLtHLL0oXeK4iysjAwCALxAwCakJEqZJiTBQK4QISxJmqSpSpqlKMjgDlsHjDpwAAAA==" checkCompatibility(oldBlobAsBase64, ORMultiMap()) } @@ -317,13 +335,17 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( checkSerialization(ORMultiMap._emptyWithValueDeltas.addBinding(address1, 1, "A")) checkSerialization(ORMultiMap._emptyWithValueDeltas.addBinding(address1, 1L, "A")) checkSerialization(ORMultiMap._emptyWithValueDeltas.addBinding(address1, Flag(), "A")) - checkSerialization(ORMultiMap.emptyWithValueDeltas[String, String].addBinding(address1, "a", "A").remove(address1, "a").delta.get) - checkSerialization(ORMultiMap.emptyWithValueDeltas[String, String] - .addBinding(address1, "a", "A1") - .put(address2, "b", 
Set("B1", "B2", "B3")) - .addBinding(address2, "a", "A2")) + checkSerialization( + ORMultiMap.emptyWithValueDeltas[String, String].addBinding(address1, "a", "A").remove(address1, "a").delta.get) + checkSerialization( + ORMultiMap + .emptyWithValueDeltas[String, String] + .addBinding(address1, "a", "A1") + .put(address2, "b", Set("B1", "B2", "B3")) + .addBinding(address2, "a", "A2")) - val m1 = ORMultiMap.emptyWithValueDeltas[String, String].addBinding(address1, "a", "A1").addBinding(address2, "a", "A2") + val m1 = + ORMultiMap.emptyWithValueDeltas[String, String].addBinding(address1, "a", "A1").addBinding(address2, "a", "A2") val m2 = ORMultiMap.emptyWithValueDeltas[String, String].put(address2, "b", Set("B1", "B2", "B3")) checkSameContent(m1.merge(m2), m2.merge(m1)) } @@ -344,4 +366,3 @@ class ReplicatedDataSerializerSpec extends TestKit(ActorSystem( } } - diff --git a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala index 7c58f7bba6..ffc12b7889 100644 --- a/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala +++ b/akka-distributed-data/src/test/scala/akka/cluster/ddata/protobuf/ReplicatorMessageSerializerSpec.scala @@ -29,9 +29,10 @@ import akka.cluster.ddata.VersionVector import akka.cluster.ddata.ORSet import akka.cluster.ddata.ORMultiMap -class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem( - "ReplicatorMessageSerializerSpec", - ConfigFactory.parseString(""" +class ReplicatorMessageSerializerSpec + extends TestKit( + ActorSystem("ReplicatorMessageSerializerSpec", + ConfigFactory.parseString(""" akka.actor.provider=cluster akka.remote.netty.tcp.port=0 akka.remote.artery.canonical.port = 0 @@ -39,7 +40,10 @@ class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem( serialize-messages = off allow-java-serialization = off } - """))) 
with WordSpecLike with Matchers with BeforeAndAfterAll { + """))) + with WordSpecLike + with Matchers + with BeforeAndAfterAll { val serializer = new ReplicatorMessageSerializer(system.asInstanceOf[ExtendedActorSystem]) @@ -82,9 +86,10 @@ class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem( checkSerialization(Unsubscribe(keyA, ref1)) checkSerialization(Changed(keyA)(data1)) checkSerialization(DataEnvelope(data1)) - checkSerialization(DataEnvelope(data1, pruning = Map( - address1 -> PruningPerformed(System.currentTimeMillis()), - address3 -> PruningInitialized(address2, Set(address1.address))))) + checkSerialization( + DataEnvelope(data1, + pruning = Map(address1 -> PruningPerformed(System.currentTimeMillis()), + address3 -> PruningInitialized(address2, Set(address1.address))))) checkSerialization(Write("A", DataEnvelope(data1))) checkSerialization(WriteAck) checkSerialization(WriteNack) @@ -92,25 +97,24 @@ class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem( checkSerialization(Read("A")) checkSerialization(ReadResult(Some(DataEnvelope(data1)))) checkSerialization(ReadResult(None)) - checkSerialization(Status(Map( - "A" -> ByteString.fromString("a"), - "B" -> ByteString.fromString("b")), chunk = 3, totChunks = 10)) - checkSerialization(Gossip(Map( - "A" -> DataEnvelope(data1), - "B" -> DataEnvelope(GSet() + "b" + "c")), sendBack = true)) - checkSerialization(DeltaPropagation(address1, reply = true, Map( - "A" -> Delta(DataEnvelope(delta1), 1L, 1L), - "B" -> Delta(DataEnvelope(delta2), 3L, 5L), - "C" -> Delta(DataEnvelope(delta3), 1L, 1L), - "DC" -> Delta(DataEnvelope(delta4), 1L, 1L)))) + checkSerialization( + Status(Map("A" -> ByteString.fromString("a"), "B" -> ByteString.fromString("b")), chunk = 3, totChunks = 10)) + checkSerialization( + Gossip(Map("A" -> DataEnvelope(data1), "B" -> DataEnvelope(GSet() + "b" + "c")), sendBack = true)) + checkSerialization( + DeltaPropagation(address1, + reply = true, + Map("A" -> 
Delta(DataEnvelope(delta1), 1L, 1L), + "B" -> Delta(DataEnvelope(delta2), 3L, 5L), + "C" -> Delta(DataEnvelope(delta3), 1L, 1L), + "DC" -> Delta(DataEnvelope(delta4), 1L, 1L)))) checkSerialization(new DurableDataEnvelope(data1)) - val pruning = Map( - address1 -> PruningPerformed(System.currentTimeMillis()), - address3 -> PruningInitialized(address2, Set(address1.address))) + val pruning = Map(address1 -> PruningPerformed(System.currentTimeMillis()), + address3 -> PruningInitialized(address2, Set(address1.address))) val deserializedDurableDataEnvelope = - checkSerialization(new DurableDataEnvelope(DataEnvelope(data1, pruning, - deltaVersions = VersionVector(address1, 13L)))) + checkSerialization( + new DurableDataEnvelope(DataEnvelope(data1, pruning, deltaVersions = VersionVector(address1, 13L)))) // equals of DurableDataEnvelope is only checking the data, PruningPerformed // should be serialized val expectedPruning = pruning.filter { @@ -224,7 +228,7 @@ class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem( val a = Read("a") val v1 = cache.getOrAdd(a) v1.toString should be("v1") - cache.getOrAdd(a) should be theSameInstanceAs v1 + (cache.getOrAdd(a) should be).theSameInstanceAs(v1) } "evict cache after time-to-live" in { diff --git a/akka-docs/src/test/scala/docs/CompileOnlySpec.scala b/akka-docs/src/test/scala/docs/CompileOnlySpec.scala index afc9ae5403..af3144d48c 100644 --- a/akka-docs/src/test/scala/docs/CompileOnlySpec.scala +++ b/akka-docs/src/test/scala/docs/CompileOnlySpec.scala @@ -5,6 +5,7 @@ package docs trait CompileOnlySpec { + /** * Given a block of code... does NOT execute it. * Useful when writing code samples in tests, which should only be compiled. 
diff --git a/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala b/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala index c0e86efa3d..75c5486ebe 100644 --- a/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/ActorDocSpec.scala @@ -17,7 +17,7 @@ import akka.event.Logging //#imports1 import scala.concurrent.Future -import akka.actor.{ ActorRef, ActorSystem, PoisonPill, Terminated, ActorLogging } +import akka.actor.{ ActorLogging, ActorRef, ActorSystem, PoisonPill, Terminated } import org.scalatest.{ BeforeAndAfterAll, WordSpec } import akka.testkit._ import akka.util._ @@ -71,6 +71,7 @@ object ValueClassActor { class DemoActorWrapper extends Actor { //#props-factory object DemoActor { + /** * Create Props for an actor of this type. * @@ -164,10 +165,10 @@ class StoppingActorsWrapper { def receive = { case "interrupt-child" => - context stop child + context.stop(child) case "done" => - context stop self + context.stop(self) } } @@ -188,13 +189,13 @@ class Manager extends Actor { case "job" => worker ! "crunch" case Shutdown => worker ! PoisonPill - context become shuttingDown + context.become(shuttingDown) } def shuttingDown: Receive = { case "job" => sender() ! 
"service unavailable, shutting down" case Terminated(`worker`) => - context stop self + context.stop(self) } } //#gracefulStop-actor @@ -265,8 +266,7 @@ class Consumer extends Actor with ActorLogging with ConsumerBehavior { def receive = consumerBehavior } -class ProducerConsumer extends Actor with ActorLogging - with ProducerBehavior with ConsumerBehavior { +class ProducerConsumer extends Actor with ActorLogging with ProducerBehavior with ConsumerBehavior { def receive = producerBehavior.orElse[Any, Unit](consumerBehavior) } @@ -278,7 +278,7 @@ final case class Give(thing: Any) //#receive-orElse //#fiddle_code -import akka.actor.{ ActorSystem, Actor, ActorRef, Props, PoisonPill } +import akka.actor.{ Actor, ActorRef, ActorSystem, PoisonPill, Props } import language.postfixOps import scala.concurrent.duration._ @@ -382,9 +382,9 @@ class ActorDocSpec extends AkkaSpec(""" //#fiddle_code val testProbe = new TestProbe(system) - testProbe watch pinger + testProbe.watch(pinger) testProbe.expectTerminated(pinger) - testProbe watch ponger + testProbe.watch(ponger) testProbe.expectTerminated(ponger) system.terminate() } @@ -440,7 +440,7 @@ class ActorDocSpec extends AkkaSpec(""" case message => val target = testActor //#forward - target forward message + target.forward(message) //#forward } } @@ -451,8 +451,7 @@ class ActorDocSpec extends AkkaSpec(""" //#creating-indirectly import akka.actor.IndirectActorProducer - class DependencyInjector(applicationContext: AnyRef, beanName: String) - extends IndirectActorProducer { + class DependencyInjector(applicationContext: AnyRef, beanName: String) extends IndirectActorProducer { override def actorClass = classOf[Actor] override def produce = @@ -463,9 +462,7 @@ class ActorDocSpec extends AkkaSpec(""" //#obtain-fresh-Actor-instance-from-DI-framework } - val actorRef = system.actorOf( - Props(classOf[DependencyInjector], applicationContext, "hello"), - "helloBean") + val actorRef = system.actorOf(Props(classOf[DependencyInjector], 
applicationContext, "hello"), "helloBean") //#creating-indirectly } val actorRef = { @@ -633,7 +630,7 @@ class ActorDocSpec extends AkkaSpec(""" "using Identify" in { new AnyRef { //#identify - import akka.actor.{ Actor, Props, Identify, ActorIdentity, Terminated } + import akka.actor.{ Actor, ActorIdentity, Identify, Props, Terminated } class Follower extends Actor { val identifyId = 1 @@ -691,11 +688,11 @@ class ActorDocSpec extends AkkaSpec(""" val f: Future[Result] = for { x <- ask(actorA, Request).mapTo[Int] // call pattern directly - s <- (actorB ask Request).mapTo[String] // call by implicit conversion + s <- actorB.ask(Request).mapTo[String] // call by implicit conversion d <- (actorC ? Request).mapTo[Double] // call by symbolic name } yield Result(x, s, d) - f pipeTo actorD // .. or .. + f.pipeTo(actorD) // .. or .. pipe(f) to actorD //#ask-pipeTo } @@ -738,13 +735,12 @@ class ActorDocSpec extends AkkaSpec(""" "using CoordinatedShutdown" in { val someActor = system.actorOf(Props(classOf[Replier], this)) //#coordinated-shutdown-addTask - CoordinatedShutdown(system).addTask( - CoordinatedShutdown.PhaseBeforeServiceUnbind, "someTaskName") { () => - import akka.pattern.ask - import system.dispatcher - implicit val timeout = Timeout(5.seconds) - (someActor ? "stop").map(_ => Done) - } + CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "someTaskName") { () => + import akka.pattern.ask + import system.dispatcher + implicit val timeout = Timeout(5.seconds) + (someActor ? 
"stop").map(_ => Done) + } //#coordinated-shutdown-addTask //#coordinated-shutdown-jvm-hook diff --git a/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala b/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala index a0a4d24f5c..e9777ad273 100644 --- a/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala +++ b/akka-docs/src/test/scala/docs/actor/BlockingDispatcherSample.scala @@ -82,8 +82,7 @@ object BlockingDispatcherSample { object SeparateDispatcherSample { def main(args: Array[String]) = { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" //#my-blocking-dispatcher-config my-blocking-dispatcher { type = Dispatcher @@ -94,8 +93,7 @@ object SeparateDispatcherSample { throughput = 1 } //#my-blocking-dispatcher-config - """ - ) + """) val system = ActorSystem("SeparateDispatcherSample", config) try { diff --git a/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala b/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala index 865e49797d..b647903034 100644 --- a/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/FSMDocSpec.scala @@ -56,7 +56,7 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { //#when-syntax when(Idle) { case Event(SetTarget(ref), Uninitialized) => - stay using Todo(ref, Vector.empty) + stay.using(Todo(ref, Vector.empty)) } //#when-syntax @@ -73,7 +73,7 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { when(Active, stateTimeout = 1 second) { case Event(Flush | StateTimeout, t: Todo) => - goto(Idle) using t.copy(queue = Vector.empty) + goto(Idle).using(t.copy(queue = Vector.empty)) } //#when-syntax @@ -81,7 +81,7 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { whenUnhandled { // common code for both states case Event(Queue(obj), t @ Todo(_, v)) => - goto(Active) using t.copy(queue = v :+ obj) + goto(Active).using(t.copy(queue = v :+ obj)) case Event(e, s) => 
log.warning("received unhandled request {} in state {}/{}", e, stateName, s) @@ -110,7 +110,7 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { //#modifier-syntax when(SomeState) { case Event(msg, _) => - goto(Processing) using (newData) forMax (5 seconds) replying (WillDo) + goto(Processing).using(newData).forMax(5 seconds).replying(WillDo) } //#modifier-syntax @@ -140,8 +140,8 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { //#transform-syntax when(SomeState)(transform { - case Event(bytes: ByteString, read) => stay using (read + bytes.length) - } using { + case Event(bytes: ByteString, read) => stay.using(read + bytes.length) + }.using { case s @ FSM.State(state, read, timeout, stopReason, replies) if read > 1000 => goto(Processing) }) @@ -154,8 +154,8 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { } when(SomeState)(transform { - case Event(bytes: ByteString, read) => stay using (read + bytes.length) - } using processingTrigger) + case Event(bytes: ByteString, read) => stay.using(read + bytes.length) + }.using(processingTrigger)) //#alt-transform-syntax //#termination-syntax @@ -187,7 +187,8 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { onTermination { case StopEvent(FSM.Failure(_), state, data) => val lastEvents = getLog.mkString("\n\t") - log.warning("Failure in state " + state + " with data " + data + "\n" + + log.warning( + "Failure in state " + state + " with data " + data + "\n" + "Events leading up to this point:\n\t" + lastEvents) } // ... diff --git a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala index e4e65418b2..32f68d3f56 100644 --- a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala +++ b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSample.scala @@ -105,9 +105,11 @@ class Worker extends Actor with ActorLogging { counterService ! 
Increment(1) // Send current progress to the initial sender - counterService ? GetCurrentCount map { - case CurrentCount(_, count) => Progress(100.0 * count / totalCount) - } pipeTo progressListener.get + (counterService ? GetCurrentCount) + .map { + case CurrentCount(_, count) => Progress(100.0 * count / totalCount) + } + .pipeTo(progressListener.get) } } @@ -135,9 +137,7 @@ class CounterService extends Actor { // Restart the storage child when StorageException is thrown. // After 3 restarts within 5 seconds it will be stopped. - override val supervisorStrategy = OneForOneStrategy( - maxNrOfRetries = 3, - withinTimeRange = 5 seconds) { + override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 5 seconds) { case _: Storage.StorageException => Restart } @@ -162,7 +162,7 @@ class CounterService extends Actor { def initStorage(): Unit = { storage = Some(context.watch(context.actorOf(Props[Storage], name = "storage"))) // Tell the counter, if any, to use the new storage - counter foreach { _ ! UseStorage(storage) } + counter.foreach { _ ! UseStorage(storage) } // We need the initial value to be able to operate storage.get ! Get(key) } @@ -179,7 +179,7 @@ class CounterService extends Actor { for ((replyTo, msg) <- backlog) c.tell(msg, sender = replyTo) backlog = IndexedSeq.empty - case msg: Increment => forwardOrPlaceInBacklog(msg) + case msg: Increment => forwardOrPlaceInBacklog(msg) case msg: GetCurrentCount => forwardOrPlaceInBacklog(msg) @@ -188,7 +188,7 @@ class CounterService extends Actor { // We receive Terminated because we watch the child, see initStorage. storage = None // Tell the counter that there is no storage for the moment - counter foreach { _ ! UseStorage(None) } + counter.foreach { _ ! UseStorage(None) } // Try to re-establish storage after while context.system.scheduler.scheduleOnce(10 seconds, self, Reconnect) @@ -202,11 +202,10 @@ class CounterService extends Actor { // the counter. 
Before that we place the messages in a backlog, to be sent // to the counter when it is initialized. counter match { - case Some(c) => c forward msg + case Some(c) => c.forward(msg) case None => if (backlog.size >= MaxBacklog) - throw new ServiceUnavailable( - "CounterService not available, lack of initial value") + throw new ServiceUnavailable("CounterService not available, lack of initial value") backlog :+= (sender() -> msg) } } @@ -249,7 +248,7 @@ class Counter(key: String, initialValue: Long) extends Actor { def storeCount(): Unit = { // Delegate dangerous work, to protect our valuable state. // We can continue without storage. - storage foreach { _ ! Store(Entry(key, count)) } + storage.foreach { _ ! Store(Entry(key, count)) } } } diff --git a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala index b327d880c8..966638c920 100644 --- a/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/FaultHandlingDocSpec.scala @@ -11,8 +11,8 @@ import org.scalatest.{ WordSpec, WordSpecLike } //#testkit import com.typesafe.config.{ Config, ConfigFactory } -import org.scalatest.{ Matchers, BeforeAndAfterAll } -import akka.testkit.{ TestActors, TestKit, ImplicitSender, EventFilter } +import org.scalatest.{ BeforeAndAfterAll, Matchers } +import akka.testkit.{ EventFilter, ImplicitSender, TestActors, TestKit } //#testkit object FaultHandlingDocSpec { @@ -101,12 +101,17 @@ object FaultHandlingDocSpec { """) } //#testkit -class FaultHandlingDocSpec(_system: ActorSystem) extends TestKit(_system) - with ImplicitSender with WordSpecLike with Matchers with BeforeAndAfterAll { +class FaultHandlingDocSpec(_system: ActorSystem) + extends TestKit(_system) + with ImplicitSender + with WordSpecLike + with Matchers + with BeforeAndAfterAll { - def this() = this(ActorSystem( - "FaultHandlingDocSpec", - ConfigFactory.parseString(""" + def this() = + this( + 
ActorSystem("FaultHandlingDocSpec", + ConfigFactory.parseString(""" akka { loggers = ["akka.testkit.TestEventListener"] loglevel = "WARNING" @@ -127,7 +132,7 @@ class FaultHandlingDocSpec(_system: ActorSystem) extends TestKit(_system) supervisor ! Props[Child] val child = expectMsgType[ActorRef] // retrieve answer from TestKit’s testActor //#create - EventFilter.warning(occurrences = 1) intercept { + EventFilter.warning(occurrences = 1).intercept { //#resume child ! 42 // set state to 42 child ! "get" @@ -138,21 +143,21 @@ class FaultHandlingDocSpec(_system: ActorSystem) extends TestKit(_system) expectMsg(42) //#resume } - EventFilter[NullPointerException](occurrences = 1) intercept { + EventFilter[NullPointerException](occurrences = 1).intercept { //#restart child ! new NullPointerException // crash it harder child ! "get" expectMsg(0) //#restart } - EventFilter[IllegalArgumentException](occurrences = 1) intercept { + EventFilter[IllegalArgumentException](occurrences = 1).intercept { //#stop watch(child) // have testActor watch “child” child ! new IllegalArgumentException // break it expectMsgPF() { case Terminated(`child`) => () } //#stop } - EventFilter[Exception]("CRASH", occurrences = 2) intercept { + EventFilter[Exception]("CRASH", occurrences = 2).intercept { //#escalate-kill supervisor ! 
Props[Child] // create new child val child2 = expectMsgType[ActorRef] diff --git a/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala b/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala index 3bf1a69206..475a91de47 100644 --- a/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/InitializationDocSpec.scala @@ -4,8 +4,8 @@ package docs.actor -import akka.actor.{ Props, Actor } -import akka.testkit.{ ImplicitSender, AkkaSpec } +import akka.actor.{ Actor, Props } +import akka.testkit.{ AkkaSpec, ImplicitSender } object InitializationDocSpec { @@ -45,7 +45,7 @@ object InitializationDocSpec { } def initialized: Receive = { - case "U OK?" => initializeMe foreach { sender() ! _ } + case "U OK?" => initializeMe.foreach { sender() ! _ } } //#messageInit diff --git a/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala b/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala index 3790ed39b4..e3ae5353f2 100644 --- a/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/SchedulerDocSpec.scala @@ -54,11 +54,7 @@ class SchedulerDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { //This will schedule to send the Tick-message //to the tickActor after 0ms repeating every 50ms val cancellable = - system.scheduler.schedule( - 0 milliseconds, - 50 milliseconds, - tickActor, - Tick) + system.scheduler.schedule(0 milliseconds, 50 milliseconds, tickActor, Tick) //This cancels further Ticks to be sent cancellable.cancel() diff --git a/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala b/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala index 2f004cc2e5..663345c72c 100644 --- a/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/SharedMutableStateDocSpec.scala @@ -52,8 +52,9 @@ class SharedMutableStateDocSpec { // Very bad: shared mutable state will cause 
your // application to break in weird ways Future { state = "This will race" } - ((echoActor ? Message("With this other one")).mapTo[Message]) - .foreach { received => state = received.msg } + ((echoActor ? Message("With this other one")).mapTo[Message]).foreach { received => + state = received.msg + } // Very bad: shared mutable object allows // the other actor to mutate your own state, @@ -67,7 +68,7 @@ class SharedMutableStateDocSpec { // Example of correct approach // Completely safe: "self" is OK to close over // and it's an ActorRef, which is thread-safe - Future { expensiveCalculation() } foreach { self ! _ } + Future { expensiveCalculation() }.foreach { self ! _ } // Completely safe: we close over a fixed value // and it's an ActorRef, which is thread-safe diff --git a/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala b/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala index 0eb135f6f4..c9461704a9 100644 --- a/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/TypedActorDocSpec.scala @@ -11,7 +11,7 @@ import akka.actor.{ ActorContext, ActorRef, TypedActor, TypedProps } import akka.routing.RoundRobinGroup import akka.testkit._ -import scala.concurrent.{ Future, Await } +import scala.concurrent.{ Await, Future } import scala.concurrent.duration._ //#imports @@ -123,9 +123,7 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { //#typed-actor-create1 //#typed-actor-create2 val otherSquarer: Squarer = - TypedActor(system).typedActorOf(TypedProps( - classOf[Squarer], - new SquarerImpl("foo")), "name") + TypedActor(system).typedActorOf(TypedProps(classOf[Squarer], new SquarerImpl("foo")), "name") //#typed-actor-create2 //#typed-actor-calls @@ -165,10 +163,7 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { val actorRefToRemoteActor: ActorRef = system.deadLetters //#typed-actor-remote val typedActor: Foo with Bar = - TypedActor(system). 
- typedActorOf( - TypedProps[FooBar], - actorRefToRemoteActor) + TypedActor(system).typedActorOf(TypedProps[FooBar], actorRefToRemoteActor) //Use "typedActor" as a FooBar //#typed-actor-remote } @@ -205,7 +200,7 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { // prepare routees val routees: List[HasName] = List.fill(5) { namedActor() } - val routeePaths = routees map { r => + val routeePaths = routees.map { r => TypedActor(system).getActorRefFor(r).path.toStringWithoutAddress } @@ -222,7 +217,7 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { println("actor was: " + typedRouter.name()) // name-164 //#typed-router - routees foreach { TypedActor(system).poisonPill(_) } + routees.foreach { TypedActor(system).poisonPill(_) } TypedActor(system).poisonPill(router) } } diff --git a/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala b/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala index b996bbc4fb..24a0ed64f9 100644 --- a/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala +++ b/akka-docs/src/test/scala/docs/actor/UnnestedReceives.scala @@ -36,10 +36,10 @@ class UnnestedReceives extends Actor { def receive = { case 'Replay => //Our first message should be a 'Replay message, all others are invalid - allOldMessages() foreach process //Process all old messages/events + allOldMessages().foreach(process) //Process all old messages/events become { //Switch behavior to look for the GoAhead signal case 'GoAhead => //When we get the GoAhead signal we process all our buffered messages/events - queue foreach process + queue.foreach(process) queue.clear become { //Then we change behavior to process incoming messages/events as they arrive case msg => process(msg) diff --git a/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala b/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala index 98307f6127..40af6ebf90 100644 --- 
a/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala +++ b/akka-docs/src/test/scala/docs/actor/io/dns/DnsCompileOnlyDocSpec.scala @@ -34,16 +34,16 @@ object DnsCompileOnlyDocSpec { { //#actor-api-async - val resolved: Future[DnsProtocol.Resolved] = (IO(Dns) ? DnsProtocol.Resolve("google.com")).mapTo[DnsProtocol.Resolved] + val resolved: Future[DnsProtocol.Resolved] = + (IO(Dns) ? DnsProtocol.Resolve("google.com")).mapTo[DnsProtocol.Resolved] //#actor-api-async } { //#srv - val resolved: Future[DnsProtocol.Resolved] = (IO(Dns) ? DnsProtocol.Resolve("your-service", Srv)) - .mapTo[DnsProtocol.Resolved] + val resolved: Future[DnsProtocol.Resolved] = + (IO(Dns) ? DnsProtocol.Resolve("your-service", Srv)).mapTo[DnsProtocol.Resolved] //#srv } } - diff --git a/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala b/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala index 9845fe514c..be92611830 100644 --- a/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala +++ b/akka-docs/src/test/scala/docs/agent/AgentDocSpec.scala @@ -52,12 +52,12 @@ class AgentDocSpec extends AkkaSpec { //#send // send a value, enqueues this change // of the value of the Agent - agent send 7 + agent.send(7) // send a function, enqueues this change // to the value of the Agent - agent send (_ + 1) - agent send (_ * 2) + agent.send(_ + 1) + agent.send(_ * 2) //#send def longRunningOrBlockingFunction = (i: Int) => i * 1 // Just for the example code @@ -66,7 +66,7 @@ class AgentDocSpec extends AkkaSpec { // the ExecutionContext you want to run the function on implicit val ec = someExecutionContext() // sendOff a function - agent sendOff longRunningOrBlockingFunction + agent.sendOff(longRunningOrBlockingFunction) //#send-off Await.result(agent.future, 5 seconds) should be(16) @@ -76,11 +76,11 @@ class AgentDocSpec extends AkkaSpec { val agent = Agent(0)(ExecutionContext.global) //#alter // alter a value - val f1: Future[Int] = agent alter 7 + val f1: Future[Int] = agent.alter(7) 
// alter a function - val f2: Future[Int] = agent alter (_ + 1) - val f3: Future[Int] = agent alter (_ * 2) + val f2: Future[Int] = agent.alter(_ + 1) + val f3: Future[Int] = agent.alter(_ * 2) //#alter def longRunningOrBlockingFunction = (i: Int) => i * 1 // Just for the example code @@ -90,7 +90,7 @@ class AgentDocSpec extends AkkaSpec { // the ExecutionContext you want to run the function on implicit val ec = someExecutionContext() // alterOff a function - val f4: Future[Int] = agent alterOff longRunningOrBlockingFunction + val f4: Future[Int] = agent.alterOff(longRunningOrBlockingFunction) //#alter-off Await.result(f4, 5 seconds) should be(16) @@ -107,8 +107,8 @@ class AgentDocSpec extends AkkaSpec { atomic { txn => if (from.get < amount) false else { - from send (_ - amount) - to send (_ + amount) + from.send(_ - amount) + to.send(_ + amount) true } } @@ -142,7 +142,7 @@ class AgentDocSpec extends AkkaSpec { val agent3 = for (value <- agent1) yield value + 1 // or using map directly - val agent4 = agent1 map (_ + 1) + val agent4 = agent1.map(_ + 1) // uses flatMap val agent5 = for { diff --git a/akka-docs/src/test/scala/docs/camel/Consumers.scala b/akka-docs/src/test/scala/docs/camel/Consumers.scala index 9c9057bcf6..b3cca5bbf5 100644 --- a/akka-docs/src/test/scala/docs/camel/Consumers.scala +++ b/akka-docs/src/test/scala/docs/camel/Consumers.scala @@ -15,7 +15,7 @@ object Consumers { def endpointUri = "file:data/input/actor" def receive = { - case msg: CamelMessage => println("received %s" format msg.bodyAs[String]) + case msg: CamelMessage => println("received %s".format(msg.bodyAs[String])) } } //#Consumer1 @@ -28,7 +28,7 @@ object Consumers { def endpointUri = "jetty:http://localhost:8877/camel/default" def receive = { - case msg: CamelMessage => sender() ! ("Hello %s" format msg.bodyAs[String]) + case msg: CamelMessage => sender() ! 
("Hello %s".format(msg.bodyAs[String])) } } //#Consumer2 @@ -65,7 +65,7 @@ object Consumers { def endpointUri = "jetty:http://localhost:8877/camel/default" override def replyTimeout = 500 millis def receive = { - case msg: CamelMessage => sender() ! ("Hello %s" format msg.bodyAs[String]) + case msg: CamelMessage => sender() ! ("Hello %s".format(msg.bodyAs[String])) } } //#Consumer4 diff --git a/akka-docs/src/test/scala/docs/camel/CustomRoute.scala b/akka-docs/src/test/scala/docs/camel/CustomRoute.scala index ee930790aa..59ec0ef35d 100644 --- a/akka-docs/src/test/scala/docs/camel/CustomRoute.scala +++ b/akka-docs/src/test/scala/docs/camel/CustomRoute.scala @@ -12,21 +12,20 @@ import language.existentials object CustomRoute { object Sample1 { //#CustomRoute - import akka.actor.{ Props, ActorSystem, Actor, ActorRef } - import akka.camel.{ CamelMessage, CamelExtension } + import akka.actor.{ Actor, ActorRef, ActorSystem, Props } + import akka.camel.{ CamelExtension, CamelMessage } import org.apache.camel.builder.RouteBuilder import akka.camel._ class Responder extends Actor { def receive = { case msg: CamelMessage => - sender() ! (msg.mapBody { - body: String => "received %s" format body + sender() ! (msg.mapBody { body: String => + "received %s".format(body) }) } } - class CustomRouteBuilder(system: ActorSystem, responder: ActorRef) - extends RouteBuilder { + class CustomRouteBuilder(system: ActorSystem, responder: ActorRef) extends RouteBuilder { def configure: Unit = { from("jetty:http://localhost:8877/camel/custom").to(responder) } @@ -47,10 +46,10 @@ object CustomRoute { class ErrorThrowingConsumer(override val endpointUri: String) extends Consumer { def receive = { - case msg: CamelMessage => throw new Exception("error: %s" format msg.body) + case msg: CamelMessage => throw new Exception("error: %s".format(msg.body)) } - override def onRouteDefinition = (rd) => rd.onException(classOf[Exception]). 
- handled(true).transform(Builder.exceptionMessage).end + override def onRouteDefinition = + (rd) => rd.onException(classOf[Exception]).handled(true).transform(Builder.exceptionMessage).end final override def preRestart(reason: Throwable, message: Option[Any]): Unit = { sender() ! Failure(reason) diff --git a/akka-docs/src/test/scala/docs/camel/Introduction.scala b/akka-docs/src/test/scala/docs/camel/Introduction.scala index 0460beed78..ad78f8a986 100644 --- a/akka-docs/src/test/scala/docs/camel/Introduction.scala +++ b/akka-docs/src/test/scala/docs/camel/Introduction.scala @@ -5,7 +5,7 @@ package docs.camel //#imports -import akka.actor.{ Props, ActorSystem } +import akka.actor.{ ActorSystem, Props } import akka.camel.CamelExtension import language.postfixOps @@ -50,7 +50,7 @@ object Introduction { def baz(): Unit = { //#Producer import akka.actor.Actor - import akka.camel.{ Producer, Oneway } + import akka.camel.{ Oneway, Producer } import akka.actor.{ ActorSystem, Props } class Orders extends Actor with Producer with Oneway { @@ -99,16 +99,12 @@ object Introduction { val camel = CamelExtension(system) val actorRef = system.actorOf(Props[MyEndpoint]) // get a future reference to the activation of the endpoint of the Consumer Actor - val activationFuture = camel.activationFutureFor(actorRef)( - timeout = 10 seconds, - executor = system.dispatcher) + val activationFuture = camel.activationFutureFor(actorRef)(timeout = 10 seconds, executor = system.dispatcher) //#CamelActivation //#CamelDeactivation system.stop(actorRef) // get a future reference to the deactivation of the endpoint of the Consumer Actor - val deactivationFuture = camel.deactivationFutureFor(actorRef)( - timeout = 10 seconds, - executor = system.dispatcher) + val deactivationFuture = camel.deactivationFutureFor(actorRef)(timeout = 10 seconds, executor = system.dispatcher) //#CamelDeactivation } diff --git a/akka-docs/src/test/scala/docs/camel/Producers.scala 
b/akka-docs/src/test/scala/docs/camel/Producers.scala index f6ff60e87e..ff6ffe9b5a 100644 --- a/akka-docs/src/test/scala/docs/camel/Producers.scala +++ b/akka-docs/src/test/scala/docs/camel/Producers.scala @@ -11,8 +11,8 @@ object Producers { object Sample1 { //#Producer1 import akka.actor.Actor - import akka.actor.{ Props, ActorSystem } - import akka.camel.{ Producer, CamelMessage } + import akka.actor.{ ActorSystem, Props } + import akka.camel.{ CamelMessage, Producer } import akka.util.Timeout class Producer1 extends Actor with Producer { @@ -32,8 +32,8 @@ object Producers { object Sample2 { //#RouteResponse import akka.actor.{ Actor, ActorRef } - import akka.camel.{ Producer, CamelMessage } - import akka.actor.{ Props, ActorSystem } + import akka.camel.{ CamelMessage, Producer } + import akka.actor.{ ActorSystem, Props } class ResponseReceiver extends Actor { def receive = { @@ -45,12 +45,11 @@ object Producers { class Forwarder(uri: String, target: ActorRef) extends Actor with Producer { def endpointUri = uri - override def routeResponse(msg: Any): Unit = { target forward msg } + override def routeResponse(msg: Any): Unit = { target.forward(msg) } } val system = ActorSystem("some-system") val receiver = system.actorOf(Props[ResponseReceiver]) - val forwardResponse = system.actorOf( - Props(classOf[Forwarder], this, "http://localhost:8080/news/akka", receiver)) + val forwardResponse = system.actorOf(Props(classOf[Forwarder], this, "http://localhost:8080/news/akka", receiver)) // the Forwarder sends out a request to the web page and forwards the response to // the ResponseReceiver forwardResponse ! 
"some request" @@ -59,13 +58,13 @@ object Producers { object Sample3 { //#TransformOutgoingMessage import akka.actor.Actor - import akka.camel.{ Producer, CamelMessage } + import akka.camel.{ CamelMessage, Producer } class Transformer(uri: String) extends Actor with Producer { def endpointUri = uri - def upperCase(msg: CamelMessage) = msg.mapBody { - body: String => body.toUpperCase + def upperCase(msg: CamelMessage) = msg.mapBody { body: String => + body.toUpperCase } override def transformOutgoingMessage(msg: Any) = msg match { @@ -76,7 +75,7 @@ object Producers { } object Sample4 { //#Oneway - import akka.actor.{ Actor, Props, ActorSystem } + import akka.actor.{ Actor, ActorSystem, Props } import akka.camel.Producer class OnewaySender(uri: String) extends Actor with Producer { @@ -92,9 +91,9 @@ object Producers { } object Sample5 { //#Correlate - import akka.camel.{ Producer, CamelMessage } + import akka.camel.{ CamelMessage, Producer } import akka.actor.Actor - import akka.actor.{ Props, ActorSystem } + import akka.actor.{ ActorSystem, Props } class Producer2 extends Actor with Producer { def endpointUri = "activemq:FOO.BAR" diff --git a/akka-docs/src/test/scala/docs/camel/PublishSubscribe.scala b/akka-docs/src/test/scala/docs/camel/PublishSubscribe.scala index c39b9ecf0c..ba70dbda91 100644 --- a/akka-docs/src/test/scala/docs/camel/PublishSubscribe.scala +++ b/akka-docs/src/test/scala/docs/camel/PublishSubscribe.scala @@ -7,13 +7,13 @@ package docs.camel object PublishSubscribe { //#PubSub import akka.actor.{ Actor, ActorRef, ActorSystem, Props } - import akka.camel.{ Producer, CamelMessage, Consumer } + import akka.camel.{ CamelMessage, Consumer, Producer } class Subscriber(name: String, uri: String) extends Actor with Consumer { def endpointUri = uri def receive = { - case msg: CamelMessage => println("%s received: %s" format (name, msg.body)) + case msg: CamelMessage => println("%s received: %s".format(name, msg.body)) } } @@ -43,6 +43,7 @@ object 
PublishSubscribe { val jmsSubscriber1 = system.actorOf(Props(classOf[Subscriber], "jms-subscriber-1", jmsUri)) val jmsSubscriber2 = system.actorOf(Props(classOf[Subscriber], "jms-subscriber-2", jmsUri)) val jmsPublisher = system.actorOf(Props(classOf[Publisher], "jms-publisher", jmsUri)) - val jmsPublisherBridge = system.actorOf(Props(classOf[PublisherBridge], "jetty:http://0.0.0.0:8877/camel/pub/jms", jmsPublisher)) + val jmsPublisherBridge = + system.actorOf(Props(classOf[PublisherBridge], "jetty:http://0.0.0.0:8877/camel/pub/jms", jmsPublisher)) //#PubSub } diff --git a/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala b/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala index b6aacd3aca..ded0aa4d05 100644 --- a/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala +++ b/akka-docs/src/test/scala/docs/circuitbreaker/CircuitBreakerDocSpec.scala @@ -22,11 +22,8 @@ class DangerousActor extends Actor with ActorLogging { import context.dispatcher val breaker = - new CircuitBreaker( - context.system.scheduler, - maxFailures = 5, - callTimeout = 10.seconds, - resetTimeout = 1.minute).onOpen(notifyMeOnOpen()) + new CircuitBreaker(context.system.scheduler, maxFailures = 5, callTimeout = 10.seconds, resetTimeout = 1.minute) + .onOpen(notifyMeOnOpen()) def notifyMeOnOpen(): Unit = log.warning("My CircuitBreaker is now open, and will not close for one minute") @@ -37,7 +34,7 @@ class DangerousActor extends Actor with ActorLogging { def receive = { case "is my middle name" => - breaker.withCircuitBreaker(Future(dangerousCall)) pipeTo sender() + breaker.withCircuitBreaker(Future(dangerousCall)).pipeTo(sender()) case "block for me" => sender() ! 
breaker.withSyncCircuitBreaker(dangerousCall) } @@ -49,11 +46,8 @@ class TellPatternActor(recipient: ActorRef) extends Actor with ActorLogging { import context.dispatcher val breaker = - new CircuitBreaker( - context.system.scheduler, - maxFailures = 5, - callTimeout = 10.seconds, - resetTimeout = 1.minute).onOpen(notifyMeOnOpen()) + new CircuitBreaker(context.system.scheduler, maxFailures = 5, callTimeout = 10.seconds, resetTimeout = 1.minute) + .onOpen(notifyMeOnOpen()) def notifyMeOnOpen(): Unit = log.warning("My CircuitBreaker is now open, and will not close for one minute") @@ -88,11 +82,7 @@ class EvenNoFailureActor extends Actor { } val breaker = - new CircuitBreaker( - context.system.scheduler, - maxFailures = 5, - callTimeout = 10.seconds, - resetTimeout = 1.minute) + new CircuitBreaker(context.system.scheduler, maxFailures = 5, callTimeout = 10.seconds, resetTimeout = 1.minute) // this call will return 8888 and increase failure count at the same time breaker.withCircuitBreaker(Future(8888), evenNumberAsFailure) diff --git a/akka-docs/src/test/scala/docs/cluster/FactorialBackend.scala b/akka-docs/src/test/scala/docs/cluster/FactorialBackend.scala index 497f04e0a5..556f994957 100644 --- a/akka-docs/src/test/scala/docs/cluster/FactorialBackend.scala +++ b/akka-docs/src/test/scala/docs/cluster/FactorialBackend.scala @@ -20,7 +20,11 @@ class FactorialBackend extends Actor with ActorLogging { def receive = { case (n: Int) => - Future(factorial(n)) map { result => (n, result) } pipeTo sender() + Future(factorial(n)) + .map { result => + (n, result) + } + .pipeTo(sender()) } def factorial(n: Int): BigInt = { @@ -38,9 +42,10 @@ object FactorialBackend { def main(args: Array[String]): Unit = { // Override the configuration of the port when specified as program argument val port = if (args.isEmpty) "0" else args(0) - val config = ConfigFactory.parseString(s"akka.remote.netty.tcp.port=$port"). 
- withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")). - withFallback(ConfigFactory.load("factorial")) + val config = ConfigFactory + .parseString(s"akka.remote.netty.tcp.port=$port") + .withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")) + .withFallback(ConfigFactory.load("factorial")) val system = ActorSystem("ClusterSystem", config) system.actorOf(Props[FactorialBackend], name = "factorialBackend") diff --git a/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala b/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala index 7cb1c224f8..3975f6044d 100644 --- a/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala +++ b/akka-docs/src/test/scala/docs/cluster/FactorialFrontend.scala @@ -19,9 +19,7 @@ import scala.concurrent.Await //#frontend class FactorialFrontend(upToN: Int, repeat: Boolean) extends Actor with ActorLogging { - val backend = context.actorOf( - FromConfig.props(), - name = "factorialBackendRouter") + val backend = context.actorOf(FromConfig.props(), name = "factorialBackendRouter") override def preStart(): Unit = { sendJobs() @@ -44,7 +42,7 @@ class FactorialFrontend(upToN: Int, repeat: Boolean) extends Actor with ActorLog def sendJobs(): Unit = { log.info("Starting batch of factorials up to [{}]", upToN) - 1 to upToN foreach { backend ! _ } + (1 to upToN).foreach { backend ! _ } } } //#frontend @@ -53,16 +51,14 @@ object FactorialFrontend { def main(args: Array[String]): Unit = { val upToN = 200 - val config = ConfigFactory.parseString("akka.cluster.roles = [frontend]"). 
- withFallback(ConfigFactory.load("factorial")) + val config = + ConfigFactory.parseString("akka.cluster.roles = [frontend]").withFallback(ConfigFactory.load("factorial")) val system = ActorSystem("ClusterSystem", config) system.log.info("Factorials will start when 2 backend members in the cluster.") //#registerOnUp - Cluster(system) registerOnMemberUp { - system.actorOf( - Props(classOf[FactorialFrontend], upToN, true), - name = "factorialFrontend") + Cluster(system).registerOnMemberUp { + system.actorOf(Props(classOf[FactorialFrontend], upToN, true), name = "factorialFrontend") } //#registerOnUp @@ -78,11 +74,11 @@ abstract class FactorialFrontend2 extends Actor { import akka.cluster.metrics.HeapMetricsSelector val backend = context.actorOf( - ClusterRouterGroup( - AdaptiveLoadBalancingGroup(HeapMetricsSelector), - ClusterRouterGroupSettings( - totalInstances = 100, routeesPaths = List("/user/factorialBackend"), - allowLocalRoutees = true, useRoles = Set("backend"))).props(), + ClusterRouterGroup(AdaptiveLoadBalancingGroup(HeapMetricsSelector), + ClusterRouterGroupSettings(totalInstances = 100, + routeesPaths = List("/user/factorialBackend"), + allowLocalRoutees = true, + useRoles = Set("backend"))).props(), name = "factorialBackendRouter2") //#router-lookup-in-code @@ -97,10 +93,11 @@ abstract class FactorialFrontend3 extends Actor { import akka.cluster.metrics.SystemLoadAverageMetricsSelector val backend = context.actorOf( - ClusterRouterPool(AdaptiveLoadBalancingPool( - SystemLoadAverageMetricsSelector), ClusterRouterPoolSettings( - totalInstances = 100, maxInstancesPerNode = 3, - allowLocalRoutees = false, useRoles = Set("backend"))).props(Props[FactorialBackend]), + ClusterRouterPool(AdaptiveLoadBalancingPool(SystemLoadAverageMetricsSelector), + ClusterRouterPoolSettings(totalInstances = 100, + maxInstancesPerNode = 3, + allowLocalRoutees = false, + useRoles = Set("backend"))).props(Props[FactorialBackend]), name = "factorialBackendRouter3") 
//#router-deploy-in-code } diff --git a/akka-docs/src/test/scala/docs/cluster/MetricsListener.scala b/akka-docs/src/test/scala/docs/cluster/MetricsListener.scala index 9eb944c8d4..0a880728e2 100644 --- a/akka-docs/src/test/scala/docs/cluster/MetricsListener.scala +++ b/akka-docs/src/test/scala/docs/cluster/MetricsListener.scala @@ -28,7 +28,7 @@ class MetricsListener extends Actor with ActorLogging { def receive = { case ClusterMetricsChanged(clusterMetrics) => - clusterMetrics.filter(_.address == selfAddress) foreach { nodeMetrics => + clusterMetrics.filter(_.address == selfAddress).foreach { nodeMetrics => logHeap(nodeMetrics) logCpu(nodeMetrics) } diff --git a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala index 8b1470cb85..8290be767f 100644 --- a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala +++ b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener.scala @@ -16,8 +16,7 @@ class SimpleClusterListener extends Actor with ActorLogging { // subscribe to cluster changes, re-subscribe when restart override def preStart(): Unit = { //#subscribe - cluster.subscribe(self, initialStateMode = InitialStateAsEvents, - classOf[MemberEvent], classOf[UnreachableMember]) + cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember]) //#subscribe } override def postStop(): Unit = cluster.unsubscribe(self) @@ -28,9 +27,7 @@ class SimpleClusterListener extends Actor with ActorLogging { case UnreachableMember(member) => log.info("Member detected as unreachable: {}", member) case MemberRemoved(member, previousStatus) => - log.info( - "Member is Removed: {} after {}", - member.address, previousStatus) + log.info("Member is Removed: {} after {}", member.address, previousStatus) case _: MemberEvent => // ignore } } diff --git a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala 
b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala index 9dde5ec65c..d915167813 100644 --- a/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala +++ b/akka-docs/src/test/scala/docs/cluster/SimpleClusterListener2.scala @@ -40,9 +40,7 @@ class SimpleClusterListener2 extends Actor with ActorLogging { case UnreachableMember(member) => log.info("Member detected as unreachable: {}", member) case MemberRemoved(member, previousStatus) => - log.info( - "Member is Removed: {} after {}", - member.address, previousStatus) + log.info("Member is Removed: {} after {}", member.address, previousStatus) case _: MemberEvent => // ignore } } diff --git a/akka-docs/src/test/scala/docs/cluster/TransformationBackend.scala b/akka-docs/src/test/scala/docs/cluster/TransformationBackend.scala index d7cd697e85..0f16ad256d 100644 --- a/akka-docs/src/test/scala/docs/cluster/TransformationBackend.scala +++ b/akka-docs/src/test/scala/docs/cluster/TransformationBackend.scala @@ -31,14 +31,14 @@ class TransformationBackend extends Actor { def receive = { case TransformationJob(text) => sender() ! TransformationResult(text.toUpperCase) case state: CurrentClusterState => - state.members.filter(_.status == MemberStatus.Up) foreach register + state.members.filter(_.status == MemberStatus.Up).foreach(register) case MemberUp(m) => register(m) } def register(member: Member): Unit = if (member.hasRole("frontend")) context.actorSelection(RootActorPath(member.address) / "user" / "frontend") ! - BackendRegistration + BackendRegistration } //#backend @@ -46,9 +46,10 @@ object TransformationBackend { def main(args: Array[String]): Unit = { // Override the configuration of the port when specified as program argument val port = if (args.isEmpty) "0" else args(0) - val config = ConfigFactory.parseString(s"akka.remote.netty.tcp.port=$port"). - withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")). 
- withFallback(ConfigFactory.load()) + val config = ConfigFactory + .parseString(s"akka.remote.netty.tcp.port=$port") + .withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]")) + .withFallback(ConfigFactory.load()) val system = ActorSystem("ClusterSystem", config) system.actorOf(Props[TransformationBackend], name = "backend") diff --git a/akka-docs/src/test/scala/docs/cluster/TransformationFrontend.scala b/akka-docs/src/test/scala/docs/cluster/TransformationFrontend.scala index 381dd977a0..336d6b0d92 100644 --- a/akka-docs/src/test/scala/docs/cluster/TransformationFrontend.scala +++ b/akka-docs/src/test/scala/docs/cluster/TransformationFrontend.scala @@ -29,10 +29,10 @@ class TransformationFrontend extends Actor { case job: TransformationJob => jobCounter += 1 - backends(jobCounter % backends.size) forward job + backends(jobCounter % backends.size).forward(job) case BackendRegistration if !backends.contains(sender()) => - context watch sender() + context.watch(sender()) backends = backends :+ sender() case Terminated(a) => @@ -45,9 +45,10 @@ object TransformationFrontend { def main(args: Array[String]): Unit = { // Override the configuration of the port when specified as program argument val port = if (args.isEmpty) "0" else args(0) - val config = ConfigFactory.parseString(s"akka.remote.netty.tcp.port=$port"). - withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]")). - withFallback(ConfigFactory.load()) + val config = ConfigFactory + .parseString(s"akka.remote.netty.tcp.port=$port") + .withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]")) + .withFallback(ConfigFactory.load()) val system = ActorSystem("ClusterSystem", config) val frontend = system.actorOf(Props[TransformationFrontend], name = "frontend") @@ -56,8 +57,9 @@ object TransformationFrontend { import system.dispatcher system.scheduler.schedule(2.seconds, 2.seconds) { implicit val timeout = Timeout(5 seconds) - (frontend ? 
TransformationJob("hello-" + counter.incrementAndGet())) - .foreach { result => println(result) } + (frontend ? TransformationJob("hello-" + counter.incrementAndGet())).foreach { result => + println(result) + } } } diff --git a/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala b/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala index 964b7e0f14..1bdcf36aa8 100644 --- a/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala +++ b/akka-docs/src/test/scala/docs/cluster/singleton/ClusterSingletonSupervision.scala @@ -10,7 +10,7 @@ class SupervisorActor(childProps: Props, override val supervisorStrategy: Superv val child = context.actorOf(childProps, "supervised-child") def receive = { - case msg => child forward msg + case msg => child.forward(msg) } } //#singleton-supervisor-actor @@ -23,10 +23,9 @@ abstract class ClusterSingletonSupervision extends Actor { import akka.actor.{ PoisonPill, Props } import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings } context.system.actorOf( - ClusterSingletonManager.props( - singletonProps = Props(classOf[SupervisorActor], props, supervisorStrategy), - terminationMessage = PoisonPill, - settings = ClusterSingletonManagerSettings(context.system)), + ClusterSingletonManager.props(singletonProps = Props(classOf[SupervisorActor], props, supervisorStrategy), + terminationMessage = PoisonPill, + settings = ClusterSingletonManagerSettings(context.system)), name = name) //#singleton-supervisor-actor-usage } diff --git a/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala b/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala index 46cd0120b1..4d15615267 100644 --- a/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala +++ b/akka-docs/src/test/scala/docs/config/ConfigDocSpec.scala @@ -34,7 +34,8 @@ class ConfigDocSpec extends WordSpec with Matchers { } "deployment section" in { - val conf = 
ConfigFactory.parseString(""" + val conf = + ConfigFactory.parseString(""" #//#deployment-section akka.actor.deployment { diff --git a/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala b/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala index d134c4ed87..7aa1900ae7 100644 --- a/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala +++ b/akka-docs/src/test/scala/docs/ddata/DistributedDataDocSpec.scala @@ -82,7 +82,7 @@ object DistributedDataDocSpec { } else { // remove log.info("Removing: {}", s) - replicator ! Update(DataKey, ORSet.empty[String], WriteLocal)(_ remove s) + replicator ! Update(DataKey, ORSet.empty[String], WriteLocal)(_.remove(s)) } case _: UpdateResponse[_] => // ignore @@ -131,13 +131,13 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { //#update-response1 case UpdateSuccess(Counter1Key, req) => // ok //#update-response1 - case unexpected => fail("Unexpected response: " + unexpected) + case unexpected => fail("Unexpected response: " + unexpected) } probe.expectMsgType[UpdateResponse[_]] match { //#update-response2 - case UpdateSuccess(Set1Key, req) => // ok - case UpdateTimeout(Set1Key, req) => + case UpdateSuccess(Set1Key, req) => // ok + case UpdateTimeout(Set1Key, req) => // write to 3 nodes failed within 1.second //#update-response2 case UpdateSuccess(Set2Key, None) => @@ -200,7 +200,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val value = g.get(Counter1Key).value case NotFound(Counter1Key, req) => // key counter1 does not exist //#get-response1 - case unexpected => fail("Unexpected response: " + unexpected) + case unexpected => fail("Unexpected response: " + unexpected) } probe.expectMsgType[GetResponse[_]] match { @@ -209,7 +209,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val elements = g.get(Set1Key).elements case GetFailure(Set1Key, req) => // read from 3 nodes failed within 1.second - case 
NotFound(Set1Key, req) => // key set1 does not exist + case NotFound(Set1Key, req) => // key set1 does not exist //#get-response2 case g @ GetSuccess(Set2Key, None) => val elements = g.get(Set2Key).elements @@ -292,7 +292,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val c0 = PNCounter.empty val c1 = c0 :+ 1 val c2 = c1 :+ 7 - val c3: PNCounter = c2 decrement 2 + val c3: PNCounter = c2.decrement(2) println(c3.value) // 6 //#pncounter } @@ -328,7 +328,7 @@ class DistributedDataDocSpec extends AkkaSpec(DistributedDataDocSpec.config) { val s0 = ORSet.empty[String] val s1 = s0 :+ "a" val s2 = s1 :+ "b" - val s3 = s2 remove "a" + val s3 = s2.remove("a") println(s3.elements) // b //#orset } diff --git a/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala b/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala index d73af75547..5a07e93186 100644 --- a/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala +++ b/akka-docs/src/test/scala/docs/ddata/ShoppingCart.scala @@ -42,10 +42,11 @@ class ShoppingCart(userId: String) extends Actor { val DataKey = LWWMapKey[String, LineItem]("cart-" + userId) - def receive = receiveGetCart - .orElse[Any, Unit](receiveAddItem) - .orElse[Any, Unit](receiveRemoveItem) - .orElse[Any, Unit](receiveOther) + def receive = + receiveGetCart + .orElse[Any, Unit](receiveAddItem) + .orElse[Any, Unit](receiveRemoveItem) + .orElse[Any, Unit](receiveOther) //#get-cart def receiveGetCart: Receive = { @@ -69,8 +70,8 @@ class ShoppingCart(userId: String) extends Actor { //#add-item def receiveAddItem: Receive = { case cmd @ AddItem(item) => - val update = Update(DataKey, LWWMap.empty[String, LineItem], writeMajority, Some(cmd)) { - cart => updateCart(cart, item) + val update = Update(DataKey, LWWMap.empty[String, LineItem], writeMajority, Some(cmd)) { cart => + updateCart(cart, item) } replicator ! 
update } @@ -109,7 +110,7 @@ class ShoppingCart(userId: String) extends Actor { def receiveOther: Receive = { case _: UpdateSuccess[_] | _: UpdateTimeout[_] => // UpdateTimeout, will eventually be replicated - case e: UpdateFailure[_] => throw new IllegalStateException("Unexpected failure: " + e) + case e: UpdateFailure[_] => throw new IllegalStateException("Unexpected failure: " + e) } } diff --git a/akka-docs/src/test/scala/docs/ddata/TwoPhaseSet.scala b/akka-docs/src/test/scala/docs/ddata/TwoPhaseSet.scala index 16cce2c4c9..45231abe7b 100644 --- a/akka-docs/src/test/scala/docs/ddata/TwoPhaseSet.scala +++ b/akka-docs/src/test/scala/docs/ddata/TwoPhaseSet.scala @@ -8,10 +8,7 @@ import akka.cluster.ddata.ReplicatedData import akka.cluster.ddata.GSet //#twophaseset -case class TwoPhaseSet( - adds: GSet[String] = GSet.empty, - removals: GSet[String] = GSet.empty) - extends ReplicatedData { +case class TwoPhaseSet(adds: GSet[String] = GSet.empty, removals: GSet[String] = GSet.empty) extends ReplicatedData { type T = TwoPhaseSet def add(element: String): TwoPhaseSet = @@ -20,11 +17,9 @@ case class TwoPhaseSet( def remove(element: String): TwoPhaseSet = copy(removals = removals.add(element)) - def elements: Set[String] = adds.elements diff removals.elements + def elements: Set[String] = adds.elements.diff(removals.elements) override def merge(that: TwoPhaseSet): TwoPhaseSet = - copy( - adds = this.adds.merge(that.adds), - removals = this.removals.merge(that.removals)) + copy(adds = this.adds.merge(that.adds), removals = this.removals.merge(that.removals)) } //#twophaseset diff --git a/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala b/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala index a7fb8c8757..7b5d67609b 100644 --- a/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala +++ b/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer.scala @@ -15,8 +15,7 @@ import 
akka.serialization.Serializer import docs.ddata.TwoPhaseSet import docs.ddata.protobuf.msg.TwoPhaseSetMessages -class TwoPhaseSetSerializer(val system: ExtendedActorSystem) - extends Serializer with SerializationSupport { +class TwoPhaseSetSerializer(val system: ExtendedActorSystem) extends Serializer with SerializationSupport { override def includeManifest: Boolean = false @@ -24,8 +23,7 @@ class TwoPhaseSetSerializer(val system: ExtendedActorSystem) override def toBinary(obj: AnyRef): Array[Byte] = obj match { case m: TwoPhaseSet => twoPhaseSetToProto(m).toByteArray - case _ => throw new IllegalArgumentException( - s"Can't serialize object of type ${obj.getClass}") + case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass}") } override def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { @@ -63,13 +61,11 @@ class TwoPhaseSetSerializer(val system: ExtendedActorSystem) } //#serializer -class TwoPhaseSetSerializerWithCompression(system: ExtendedActorSystem) - extends TwoPhaseSetSerializer(system) { +class TwoPhaseSetSerializerWithCompression(system: ExtendedActorSystem) extends TwoPhaseSetSerializer(system) { //#compression override def toBinary(obj: AnyRef): Array[Byte] = obj match { case m: TwoPhaseSet => compress(twoPhaseSetToProto(m)) - case _ => throw new IllegalArgumentException( - s"Can't serialize object of type ${obj.getClass}") + case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass}") } override def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { @@ -77,4 +73,3 @@ class TwoPhaseSetSerializerWithCompression(system: ExtendedActorSystem) } //#compression } - diff --git a/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala b/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala index 202d1e0f3b..3301f6e917 100644 --- a/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala +++ 
b/akka-docs/src/test/scala/docs/ddata/protobuf/TwoPhaseSetSerializer2.scala @@ -13,8 +13,7 @@ import akka.serialization.Serializer import docs.ddata.TwoPhaseSet import docs.ddata.protobuf.msg.TwoPhaseSetMessages -class TwoPhaseSetSerializer2(val system: ExtendedActorSystem) - extends Serializer with SerializationSupport { +class TwoPhaseSetSerializer2(val system: ExtendedActorSystem) extends Serializer with SerializationSupport { override def includeManifest: Boolean = false @@ -24,8 +23,7 @@ class TwoPhaseSetSerializer2(val system: ExtendedActorSystem) override def toBinary(obj: AnyRef): Array[Byte] = obj match { case m: TwoPhaseSet => twoPhaseSetToProto(m).toByteArray - case _ => throw new IllegalArgumentException( - s"Can't serialize object of type ${obj.getClass}") + case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass}") } override def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { diff --git a/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala index dc91edd2d8..88d8146572 100644 --- a/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/src/test/scala/docs/dispatcher/DispatcherDocSpec.scala @@ -225,21 +225,21 @@ object DispatcherDocSpec { // We inherit, in this case, from UnboundedStablePriorityMailbox // and seed it with the priority generator class MyPrioMailbox(settings: ActorSystem.Settings, config: Config) - extends UnboundedStablePriorityMailbox( - // Create a new PriorityGenerator, lower prio means more important - PriorityGenerator { - // 'highpriority messages should be treated first if possible - case 'highpriority => 0 + extends UnboundedStablePriorityMailbox( + // Create a new PriorityGenerator, lower prio means more important + PriorityGenerator { + // 'highpriority messages should be treated first if possible + case 'highpriority => 0 - // 'lowpriority messages should be treated 
last if possible - case 'lowpriority => 2 + // 'lowpriority messages should be treated last if possible + case 'lowpriority => 2 - // PoisonPill when no other left - case PoisonPill => 3 + // PoisonPill when no other left + case PoisonPill => 3 - // We default to 1, which is in between high and low - case otherwise => 1 - }) + // We default to 1, which is in between high and low + case otherwise => 1 + }) //#prio-mailbox //#control-aware-mailbox-messages @@ -258,13 +258,11 @@ object DispatcherDocSpec { import akka.dispatch.RequiresMessageQueue import akka.dispatch.BoundedMessageQueueSemantics - class MyBoundedActor extends MyActor - with RequiresMessageQueue[BoundedMessageQueueSemantics] + class MyBoundedActor extends MyActor with RequiresMessageQueue[BoundedMessageQueueSemantics] //#required-mailbox-class //#require-mailbox-on-actor - class MySpecialActor extends Actor - with RequiresMessageQueue[MyUnboundedMessageQueueSemantics] { + class MySpecialActor extends Actor with RequiresMessageQueue[MyUnboundedMessageQueueSemantics] { //#require-mailbox-on-actor def receive = { case _ => @@ -373,8 +371,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { case x => log.info(x.toString) } } - val a = system.actorOf(Props(classOf[Logger], this).withDispatcher( - "prio-dispatcher")) + val a = system.actorOf(Props(classOf[Logger], this).withDispatcher("prio-dispatcher")) /* * Logs: @@ -410,8 +407,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { case x => log.info(x.toString) } } - val a = system.actorOf(Props(classOf[Logger], this).withDispatcher( - "control-aware-dispatcher")) + val a = system.actorOf(Props(classOf[Logger], this).withDispatcher("control-aware-dispatcher")) /* * Logs: @@ -427,8 +423,7 @@ class DispatcherDocSpec extends AkkaSpec(DispatcherDocSpec.config) { } "require custom mailbox on dispatcher" in { - val myActor = system.actorOf(Props[MyActor].withDispatcher( - "custom-dispatcher")) + val myActor = 
system.actorOf(Props[MyActor].withDispatcher("custom-dispatcher")) } "require custom mailbox on actor" in { diff --git a/akka-docs/src/test/scala/docs/dispatcher/MyUnboundedMailbox.scala b/akka-docs/src/test/scala/docs/dispatcher/MyUnboundedMailbox.scala index 322ebbcc35..4795b90677 100644 --- a/akka-docs/src/test/scala/docs/dispatcher/MyUnboundedMailbox.scala +++ b/akka-docs/src/test/scala/docs/dispatcher/MyUnboundedMailbox.scala @@ -17,8 +17,7 @@ import scala.Option object MyUnboundedMailbox { // This is the MessageQueue implementation - class MyMessageQueue extends MessageQueue - with MyUnboundedMessageQueueSemantics { + class MyMessageQueue extends MessageQueue with MyUnboundedMessageQueueSemantics { private final val queue = new ConcurrentLinkedQueue[Envelope]() @@ -37,8 +36,7 @@ object MyUnboundedMailbox { } // This is the Mailbox implementation -class MyUnboundedMailbox extends MailboxType - with ProducesMessageQueue[MyUnboundedMailbox.MyMessageQueue] { +class MyUnboundedMailbox extends MailboxType with ProducesMessageQueue[MyUnboundedMailbox.MyMessageQueue] { import MyUnboundedMailbox._ @@ -49,9 +47,7 @@ class MyUnboundedMailbox extends MailboxType } // The create method is called to create the MessageQueue - final override def create( - owner: Option[ActorRef], - system: Option[ActorSystem]): MessageQueue = + final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new MyMessageQueue() } //#mailbox-implementation-example diff --git a/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala b/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala index b7b1c7996c..36c563a64c 100644 --- a/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala +++ b/akka-docs/src/test/scala/docs/event/EventBusDocSpec.scala @@ -6,7 +6,7 @@ package docs.event import scala.concurrent.duration._ import akka.testkit.AkkaSpec -import akka.actor.{ ActorSystem, ActorRef } +import akka.actor.{ ActorRef, ActorSystem } import 
akka.testkit.TestProbe object EventBusDocSpec { @@ -127,7 +127,10 @@ object EventBusDocSpec { final case class Notification(ref: ActorRef, id: Int) - class ActorBusImpl(val system: ActorSystem) extends ActorEventBus with ActorClassifier with ManagedActorClassification { + class ActorBusImpl(val system: ActorSystem) + extends ActorEventBus + with ActorClassifier + with ManagedActorClassification { type Event = Notification // is used for extracting the classifier from the incoming events diff --git a/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala b/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala index 4187690d3b..16917cab13 100644 --- a/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala +++ b/akka-docs/src/test/scala/docs/event/LoggingDocSpec.scala @@ -4,7 +4,7 @@ package docs.event -import akka.actor.{ Actor, Props, DeadLetter } +import akka.actor.{ Actor, DeadLetter, Props } import akka.testkit.AkkaSpec object LoggingDocSpec { @@ -18,8 +18,7 @@ object LoggingDocSpec { log.debug("Starting") } override def preRestart(reason: Throwable, message: Option[Any]): Unit = { - log.error(reason, "Restarting due to [{}] when processing [{}]", - reason.getMessage, message.getOrElse("")) + log.error(reason, "Restarting due to [{}] when processing [{}]", reason.getMessage, message.getOrElse("")) } def receive = { case "test" => log.info("Received test") diff --git a/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala b/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala index 5381c0a5cc..85750d66c1 100644 --- a/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/extension/ExtensionDocSpec.scala @@ -27,9 +27,7 @@ import akka.actor.ExtensionId import akka.actor.ExtensionIdProvider import akka.actor.ExtendedActorSystem -object CountExtension - extends ExtensionId[CountExtensionImpl] - with ExtensionIdProvider { +object CountExtension extends ExtensionId[CountExtensionImpl] with 
ExtensionIdProvider { //The lookup method is required by ExtensionIdProvider, // so we return ourselves here, this allows us // to configure our extension to be loaded when diff --git a/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala b/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala index a9f329f9a3..cf10e53b32 100644 --- a/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/extension/SettingsExtensionDocSpec.scala @@ -23,9 +23,7 @@ import akka.testkit.AkkaSpec class SettingsImpl(config: Config) extends Extension { val DbUri: String = config.getString("myapp.db.uri") val CircuitBreakerTimeout: Duration = - Duration( - config.getMilliseconds("myapp.circuit-breaker.timeout"), - TimeUnit.MILLISECONDS) + Duration(config.getMilliseconds("myapp.circuit-breaker.timeout"), TimeUnit.MILLISECONDS) } //#extension diff --git a/akka-docs/src/test/scala/docs/faq/Faq.scala b/akka-docs/src/test/scala/docs/faq/Faq.scala index 4e95a26333..9f98f5370a 100644 --- a/akka-docs/src/test/scala/docs/faq/Faq.scala +++ b/akka-docs/src/test/scala/docs/faq/Faq.scala @@ -21,14 +21,15 @@ object MyActor { class MyActor extends Actor { import MyActor._ def receive = { - case message: Message => message match { - case BarMessage(bar) => sender() ! BazMessage("Got " + bar) - // warning here: - // "match may not be exhaustive. It would fail on the following input: FooMessage(_)" - //#exhaustiveness-check - case FooMessage(_) => // avoid the warning in our build logs - //#exhaustiveness-check - } + case message: Message => + message match { + case BarMessage(bar) => sender() ! BazMessage("Got " + bar) + // warning here: + // "match may not be exhaustive. 
It would fail on the following input: FooMessage(_)" + //#exhaustiveness-check + case FooMessage(_) => // avoid the warning in our build logs + //#exhaustiveness-check + } } } //#exhaustiveness-check diff --git a/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala b/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala index 724b6a8180..6938a60c79 100644 --- a/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala +++ b/akka-docs/src/test/scala/docs/future/FutureDocSpec.scala @@ -46,7 +46,7 @@ object FutureDocSpec { def receive = { case _ => val future = target ? "some message" - future pipeTo sender() // use the pipe pattern + future.pipeTo(sender()) // use the pipe pattern } } //#pipe-to-usage @@ -88,7 +88,7 @@ object FutureDocSpec { case Get => // user's historical activities are retrieved // via the separate repository - repository.queryHistoricalActivities(userId) pipeTo sender() + repository.queryHistoricalActivities(userId).pipeTo(sender()) } } @@ -98,10 +98,7 @@ object FutureDocSpec { //#pipe-to-user-activity-actor //#pipe-to-proxy-actor - class UserProxyActor( - userData: ActorRef, - userActivities: ActorRef - ) extends Actor { + class UserProxyActor(userData: ActorRef, userActivities: ActorRef) extends Actor { import UserProxyActor._ import akka.pattern.{ ask, pipe } implicit val ec: ExecutionContext = context.dispatcher @@ -110,9 +107,9 @@ object FutureDocSpec { def receive = { case GetUserData => - (userData ? UserDataActor.Get) pipeTo sender() + (userData ? UserDataActor.Get).pipeTo(sender()) case GetUserActivities => - (userActivities ? UserActivityActor.Get) pipeTo sender() + (userActivities ? 
UserActivityActor.Get).pipeTo(sender()) } } //#pipe-to-proxy-actor @@ -188,7 +185,7 @@ class FutureDocSpec extends AkkaSpec { val future = Future { "Hello" + "World" } - future foreach println + future.foreach(println) //#future-eval Await.result(future, 3 seconds) should be("HelloWorld") } @@ -198,10 +195,10 @@ class FutureDocSpec extends AkkaSpec { val f1 = Future { "Hello" + "World" } - val f2 = f1 map { x => + val f2 = f1.map { x => x.length } - f2 foreach println + f2.foreach(println) //#map val result = Await.result(f2, 3 seconds) result should be(10) @@ -214,12 +211,12 @@ class FutureDocSpec extends AkkaSpec { "Hello" + "World" } val f2 = Future.successful(3) - val f3 = f1 map { x => - f2 map { y => + val f3 = f1.map { x => + f2.map { y => x.length * y } } - f3 foreach println + f3.foreach(println) //#wrong-nested-map Await.ready(f3, 3 seconds) } @@ -230,12 +227,12 @@ class FutureDocSpec extends AkkaSpec { "Hello" + "World" } val f2 = Future.successful(3) - val f3 = f1 flatMap { x => - f2 map { y => + val f3 = f1.flatMap { x => + f2.map { y => x.length * y } } - f3 foreach println + f3.foreach(println) //#flat-map val result = Await.result(f3, 3 seconds) result should be(30) @@ -246,14 +243,14 @@ class FutureDocSpec extends AkkaSpec { val future1 = Future.successful(4) val future2 = future1.filter(_ % 2 == 0) - future2 foreach println + future2.foreach(println) val failedFilter = future1.filter(_ % 2 == 1).recover { // When filter fails, it will have a java.util.NoSuchElementException case m: NoSuchElementException => 0 } - failedFilter foreach println + failedFilter.foreach(println) //#filter val result = Await.result(future2, 3 seconds) result should be(4) @@ -273,7 +270,7 @@ class FutureDocSpec extends AkkaSpec { // Note that the execution of futures a, b, and c // are not done in parallel. 
- f foreach println + f.foreach(println) //#for-comprehension val result = Await.result(f, 3 seconds) result should be(24) @@ -323,7 +320,7 @@ class FutureDocSpec extends AkkaSpec { c <- ask(actor3, (a + b)).mapTo[Int] } yield c - f3 foreach println + f3.foreach(println) //#composing val result = Await.result(f3, 3 seconds).asInstanceOf[Int] result should be(3) @@ -341,7 +338,7 @@ class FutureDocSpec extends AkkaSpec { // Find the sum of the odd numbers val oddSum = futureList.map(_.sum) - oddSum foreach println + oddSum.foreach(println) //#sequence-ask Await.result(oddSum, 3 seconds).asInstanceOf[Int] should be(10000) } @@ -350,7 +347,7 @@ class FutureDocSpec extends AkkaSpec { //#sequence val futureList = Future.sequence((1 to 100).toList.map(x => Future(x * 2 - 1))) val oddSum = futureList.map(_.sum) - oddSum foreach println + oddSum.foreach(println) //#sequence Await.result(oddSum, 3 seconds).asInstanceOf[Int] should be(10000) } @@ -359,7 +356,7 @@ class FutureDocSpec extends AkkaSpec { //#traverse val futureList = Future.traverse((1 to 100).toList)(x => Future(x * 2 - 1)) val oddSum = futureList.map(_.sum) - oddSum foreach println + oddSum.foreach(println) //#traverse Await.result(oddSum, 3 seconds).asInstanceOf[Int] should be(10000) } @@ -369,7 +366,7 @@ class FutureDocSpec extends AkkaSpec { // Create a sequence of Futures val futures = for (i <- 1 to 1000) yield Future(i * 2) val futureSum = Future.fold(futures)(0)(_ + _) - futureSum foreach println + futureSum.foreach(println) //#fold Await.result(futureSum, 3 seconds) should be(1001000) } @@ -379,7 +376,7 @@ class FutureDocSpec extends AkkaSpec { // Create a sequence of Futures val futures = for (i <- 1 to 1000) yield Future(i * 2) val futureSum = Future.reduce(futures)(_ + _) - futureSum foreach println + futureSum.foreach(println) //#reduce Await.result(futureSum, 3 seconds) should be(1001000) } @@ -389,10 +386,10 @@ class FutureDocSpec extends AkkaSpec { val actor = system.actorOf(Props[MyActor]) val 
msg1 = -1 //#recover - val future = akka.pattern.ask(actor, msg1) recover { + val future = akka.pattern.ask(actor, msg1).recover { case e: ArithmeticException => 0 } - future foreach println + future.foreach(println) //#recover Await.result(future, 3 seconds) should be(0) } @@ -402,12 +399,12 @@ class FutureDocSpec extends AkkaSpec { val actor = system.actorOf(Props[MyActor]) val msg1 = -1 //#try-recover - val future = akka.pattern.ask(actor, msg1) recoverWith { + val future = akka.pattern.ask(actor, msg1).recoverWith { case e: ArithmeticException => Future.successful(0) case foo: IllegalArgumentException => Future.failed[Int](new IllegalStateException("All br0ken!")) } - future foreach println + future.foreach(println) //#try-recover Await.result(future, 3 seconds) should be(0) } @@ -416,8 +413,8 @@ class FutureDocSpec extends AkkaSpec { val future1 = Future { "foo" } val future2 = Future { "bar" } //#zip - val future3 = future1 zip future2 map { case (a, b) => a + " " + b } - future3 foreach println + val future3 = future1.zip(future2).map { case (a, b) => a + " " + b } + future3.foreach(println) //#zip Await.result(future3, 3 seconds) should be("foo bar") } @@ -428,12 +425,14 @@ class FutureDocSpec extends AkkaSpec { def log(cause: Throwable) = () def watchSomeTV(): Unit = () //#and-then - val result = Future { loadPage(url) } andThen { - case Failure(exception) => log(exception) - } andThen { - case _ => watchSomeTV() - } - result foreach println + val result = Future { loadPage(url) } + .andThen { + case Failure(exception) => log(exception) + } + .andThen { + case _ => watchSomeTV() + } + result.foreach(println) //#and-then Await.result(result, 3 seconds) should be("foo bar") } @@ -443,8 +442,8 @@ class FutureDocSpec extends AkkaSpec { val future2 = Future { "bar" } val future3 = Future { "pigdog" } //#fallback-to - val future4 = future1 fallbackTo future2 fallbackTo future3 - future4 foreach println + val future4 = 
future1.fallbackTo(future2).fallbackTo(future3) + future4.foreach(println) //#fallback-to Await.result(future4, 3 seconds) should be("foo") } @@ -454,7 +453,7 @@ class FutureDocSpec extends AkkaSpec { def doSomethingOnSuccess(r: String) = () def doSomethingOnFailure(t: Throwable) = () //#onComplete - future onComplete { + future.onComplete { case Success(result) => doSomethingOnSuccess(result) case Failure(failure) => doSomethingOnFailure(failure) } @@ -484,10 +483,10 @@ class FutureDocSpec extends AkkaSpec { // TODO after is unfortunately shadowed by ScalaTest, fix as part of #3759 // import akka.pattern.after - val delayed = akka.pattern.after(200 millis, using = system.scheduler)(Future.failed( - new IllegalStateException("OHNOES"))) + val delayed = + akka.pattern.after(200 millis, using = system.scheduler)(Future.failed(new IllegalStateException("OHNOES"))) val future = Future { Thread.sleep(1000); "foo" } - val result = Future firstCompletedOf Seq(future, delayed) + val result = Future.firstCompletedOf(Seq(future, delayed)) //#after intercept[IllegalStateException] { Await.result(result, 2 second) } } @@ -504,10 +503,7 @@ class FutureDocSpec extends AkkaSpec { } else Future.successful(5) } //Return a new future that will retry up to 10 times - val retried = akka.pattern.retry( - () => attempt(), - 10, - 100 milliseconds) + val retried = akka.pattern.retry(() => attempt(), 10, 100 milliseconds) //#retry Await.result(retried, 1 second) should ===(5) diff --git a/akka-docs/src/test/scala/docs/io/EchoServer.scala b/akka-docs/src/test/scala/docs/io/EchoServer.scala index b126ca7c44..c452e003ea 100644 --- a/akka-docs/src/test/scala/docs/io/EchoServer.scala +++ b/akka-docs/src/test/scala/docs/io/EchoServer.scala @@ -50,7 +50,7 @@ class EchoManager(handlerClass: Class[_]) extends Actor with ActorLogging { } // do not restart - override def postRestart(thr: Throwable): Unit = context stop self + override def postRestart(thr: Throwable): Unit = context.stop(self) def 
receive = { case Bound(localAddress) => @@ -58,7 +58,7 @@ class EchoManager(handlerClass: Class[_]) extends Actor with ActorLogging { case CommandFailed(Bind(_, local, _, _, _)) => log.warning(s"cannot bind to [$local]") - context stop self + context.stop(self) //#echo-manager case Connected(remote, local) => @@ -78,14 +78,13 @@ object EchoHandler { Props(classOf[EchoHandler], connection, remote) } -class EchoHandler(connection: ActorRef, remote: InetSocketAddress) - extends Actor with ActorLogging { +class EchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor with ActorLogging { import Tcp._ import EchoHandler._ // sign death pact: this actor terminates when connection breaks - context watch connection + context.watch(connection) // start out in optimistic write-through mode def receive = writing @@ -101,11 +100,11 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) case CommandFailed(Write(_, Ack(ack))) => connection ! ResumeWriting - context become buffering(ack) + context.become(buffering(ack)) case PeerClosed => - if (storage.isEmpty) context stop self - else context become closing + if (storage.isEmpty) context.stop(self) + else context.become(closing) } //#writing @@ -129,10 +128,10 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) } else { // then return to NACK-based again writeAll() - context become (if (peerClosed) closing else writing) + context.become(if (peerClosed) closing else writing) } - } else if (peerClosed) context stop self - else context become writing + } else if (peerClosed) context.stop(self) + else context.become(writing) } } //#buffering @@ -153,7 +152,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) case Ack(ack) => acknowledge(ack) - if (storage.isEmpty) context stop self + if (storage.isEmpty) context.stop(self) } //#closing @@ -181,7 +180,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) if (stored > maxStored) { log.warning(s"drop 
connection to [$remote] (buffer overrun)") - context stop self + context.stop(self) } else if (stored > highWatermark) { log.debug(s"suspending reading at $currentOffset") @@ -199,7 +198,7 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) transferred += size storageOffset += 1 - storage = storage drop 1 + storage = storage.drop(1) if (suspended && stored < lowWatermark) { log.debug("resuming reading") @@ -224,13 +223,12 @@ class EchoHandler(connection: ActorRef, remote: InetSocketAddress) //#echo-handler //#simple-echo-handler -class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) - extends Actor with ActorLogging { +class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor with ActorLogging { import Tcp._ // sign death pact: this actor terminates when connection breaks - context watch connection + context.watch(connection) case object Ack extends Event @@ -245,7 +243,7 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) case PeerClosed => closing = true }, discardOld = false) - case PeerClosed => context stop self + case PeerClosed => context.stop(self) } //#storage-omitted @@ -270,7 +268,7 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) if (stored > maxStored) { log.warning(s"drop connection to [$remote] (buffer overrun)") - context stop self + context.stop(self) } else if (stored > highWatermark) { log.debug(s"suspending reading") @@ -286,7 +284,7 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) stored -= size transferred += size - storage = storage drop 1 + storage = storage.drop(1) if (suspended && stored < lowWatermark) { log.debug("resuming reading") @@ -295,7 +293,7 @@ class SimpleEchoHandler(connection: ActorRef, remote: InetSocketAddress) } if (storage.isEmpty) { - if (closing) context stop self + if (closing) context.stop(self) else context.unbecome() } else connection ! 
Write(storage(0), Ack) } diff --git a/akka-docs/src/test/scala/docs/io/IODocSpec.scala b/akka-docs/src/test/scala/docs/io/IODocSpec.scala index b942c336b6..125a561b25 100644 --- a/akka-docs/src/test/scala/docs/io/IODocSpec.scala +++ b/akka-docs/src/test/scala/docs/io/IODocSpec.scala @@ -39,7 +39,7 @@ class Server extends Actor { context.parent ! b //#do-some-logging-or-setup - case CommandFailed(_: Bind) => context stop self + case CommandFailed(_: Bind) => context.stop(self) case c @ Connected(remote, local) => //#server @@ -58,7 +58,7 @@ class SimplisticHandler extends Actor { import Tcp._ def receive = { case Received(data) => sender() ! Write(data) - case PeerClosed => context stop self + case PeerClosed => context.stop(self) } } //#simplistic-handler @@ -79,13 +79,13 @@ class Client(remote: InetSocketAddress, listener: ActorRef) extends Actor { def receive = { case CommandFailed(_: Connect) => listener ! "connect failed" - context stop self + context.stop(self) case c @ Connected(remote, local) => listener ! c val connection = sender() connection ! Register(self) - context become { + context.become { case data: ByteString => connection ! Write(data) case CommandFailed(w: Write) => @@ -97,7 +97,7 @@ class Client(remote: InetSocketAddress, listener: ActorRef) extends Actor { connection ! Close case _: ConnectionClosed => listener ! 
"connection closed" - context stop self + context.stop(self) } } } @@ -108,7 +108,7 @@ class IODocSpec extends AkkaSpec { class Parent extends Actor { context.actorOf(Props[Server], "server") def receive = { - case msg => testActor forward msg + case msg => testActor.forward(msg) } } diff --git a/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala b/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala index a996f55892..1294f2d772 100644 --- a/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala +++ b/akka-docs/src/test/scala/docs/io/ReadBackPressure.scala @@ -4,11 +4,11 @@ package docs.io -import akka.actor.{ ActorRef, ActorLogging, Props, Actor, ActorSystem } +import akka.actor.{ Actor, ActorLogging, ActorRef, ActorSystem, Props } import akka.io.Tcp._ -import akka.io.{ Tcp, IO } +import akka.io.{ IO, Tcp } import java.net.InetSocketAddress -import akka.testkit.{ ImplicitSender, TestProbe, AkkaSpec } +import akka.testkit.{ AkkaSpec, ImplicitSender, TestProbe } import akka.util.ByteString import scala.concurrent.Await diff --git a/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala b/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala index 9bce625578..172592bfdb 100644 --- a/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala +++ b/akka-docs/src/test/scala/docs/io/ScalaUdpMulticastSpec.scala @@ -15,13 +15,17 @@ import org.scalatest.BeforeAndAfterAll import akka.testkit.SocketUtil import scala.collection.JavaConverters._ -class ScalaUdpMulticastSpec extends TestKit(ActorSystem("ScalaUdpMulticastSpec")) with WordSpecLike with BeforeAndAfterAll { +class ScalaUdpMulticastSpec + extends TestKit(ActorSystem("ScalaUdpMulticastSpec")) + with WordSpecLike + with BeforeAndAfterAll { "listener" should { "send message back to sink" in { val ipv6ifaces = - NetworkInterface.getNetworkInterfaces.asScala.toSeq.filter(iface => - iface.supportsMulticast && + NetworkInterface.getNetworkInterfaces.asScala.toSeq.filter( + iface => + 
iface.supportsMulticast && iface.isUp && iface.getInetAddresses.asScala.exists(_.isInstanceOf[Inet6Address])) @@ -70,4 +74,3 @@ class ScalaUdpMulticastSpec extends TestKit(ActorSystem("ScalaUdpMulticastSpec") } } - diff --git a/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala b/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala index 36adeb3100..ed32624bee 100644 --- a/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala +++ b/akka-docs/src/test/scala/docs/io/UdpDocSpec.scala @@ -50,7 +50,7 @@ object ScalaUdpDocSpec { def receive = { case Udp.Bound(local) => //#listener - nextActor forward local + nextActor.forward(local) //#listener context.become(ready(sender())) } diff --git a/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala b/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala index 6ce9f4a284..49096958f2 100644 --- a/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala +++ b/akka-docs/src/test/scala/docs/pattern/BackoffSupervisorDocSpec.scala @@ -19,12 +19,11 @@ class BackoffSupervisorDocSpec { val childProps = Props(classOf[EchoActor]) val supervisor = BackoffSupervisor.props( - BackoffOpts.onStop( - childProps, - childName = "myEcho", - minBackoff = 3.seconds, - maxBackoff = 30.seconds, - randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly + BackoffOpts.onStop(childProps, + childName = "myEcho", + minBackoff = 3.seconds, + maxBackoff = 30.seconds, + randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly )) system.actorOf(supervisor, name = "echoSupervisor") @@ -39,12 +38,11 @@ class BackoffSupervisorDocSpec { val childProps = Props(classOf[EchoActor]) val supervisor = BackoffSupervisor.props( - BackoffOpts.onFailure( - childProps, - childName = "myEcho", - minBackoff = 3.seconds, - maxBackoff = 30.seconds, - randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly + BackoffOpts.onFailure(childProps, + childName = "myEcho", + minBackoff = 3.seconds, + maxBackoff = 
30.seconds, + randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly )) system.actorOf(supervisor, name = "echoSupervisor") @@ -59,13 +57,14 @@ class BackoffSupervisorDocSpec { //#backoff-custom-stop val supervisor = BackoffSupervisor.props( - BackoffOpts.onStop( - childProps, - childName = "myEcho", - minBackoff = 3.seconds, - maxBackoff = 30.seconds, - randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly - ).withManualReset // the child must send BackoffSupervisor.Reset to its parent + BackoffOpts + .onStop(childProps, + childName = "myEcho", + minBackoff = 3.seconds, + maxBackoff = 30.seconds, + randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly + ) + .withManualReset // the child must send BackoffSupervisor.Reset to its parent .withDefaultStoppingStrategy // Stop at any Exception thrown ) //#backoff-custom-stop @@ -81,18 +80,18 @@ class BackoffSupervisorDocSpec { //#backoff-custom-fail val supervisor = BackoffSupervisor.props( - BackoffOpts.onFailure( - childProps, - childName = "myEcho", - minBackoff = 3.seconds, - maxBackoff = 30.seconds, - randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly - ).withAutoReset(10.seconds) // reset if the child does not throw any errors within 10 seconds - .withSupervisorStrategy( - OneForOneStrategy() { - case _: MyException => SupervisorStrategy.Restart - case _ => SupervisorStrategy.Escalate - })) + BackoffOpts + .onFailure(childProps, + childName = "myEcho", + minBackoff = 3.seconds, + maxBackoff = 30.seconds, + randomFactor = 0.2 // adds 20% "noise" to vary the intervals slightly + ) + .withAutoReset(10.seconds) // reset if the child does not throw any errors within 10 seconds + .withSupervisorStrategy(OneForOneStrategy() { + case _: MyException => SupervisorStrategy.Restart + case _ => SupervisorStrategy.Escalate + })) //#backoff-custom-fail system.actorOf(supervisor, name = "echoSupervisor") @@ -110,13 +109,10 @@ class BackoffSupervisorDocSpec { val 
childProps = Props(classOf[EchoActor]) //#backoff-sharded - val supervisor = BackoffSupervisor.props(BackoffOpts.onStop( - childProps, - childName = "myEcho", - minBackoff = 3.seconds, - maxBackoff = 30.seconds, - randomFactor = 0.2 - ).withFinalStopMessage(_ == StopMessage)) + val supervisor = BackoffSupervisor.props( + BackoffOpts + .onStop(childProps, childName = "myEcho", minBackoff = 3.seconds, maxBackoff = 30.seconds, randomFactor = 0.2) + .withFinalStopMessage(_ == StopMessage)) //#backoff-sharded //#backoff-sharded-passivation diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala index 2214e9410a..175832a8c2 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceDocSpec.scala @@ -51,7 +51,7 @@ object PersistenceDocSpec { case RecoveryCompleted => // perform init after recovery, before any other messages //... - case evt => //... + case evt => //... 
} override def receiveCommand: Receive = { @@ -97,13 +97,8 @@ object PersistenceDocSpec { import PersistAsync.MyPersistentActor //#backoff val childProps = Props[MyPersistentActor]() - val props = BackoffSupervisor.props( - BackoffOpts.onStop( - childProps, - childName = "myActor", - minBackoff = 3.seconds, - maxBackoff = 30.seconds, - randomFactor = 0.2)) + val props = BackoffSupervisor.props(BackoffOpts + .onStop(childProps, childName = "myActor", minBackoff = 3.seconds, maxBackoff = 30.seconds, randomFactor = 0.2)) context.actorOf(props, name = "mySupervisor") //#backoff } @@ -122,8 +117,7 @@ object PersistenceDocSpec { case class MsgSent(s: String) extends Evt case class MsgConfirmed(deliveryId: Long) extends Evt - class MyPersistentActor(destination: ActorSelection) - extends PersistentActor with AtLeastOnceDelivery { + class MyPersistentActor(destination: ActorSelection) extends PersistentActor with AtLeastOnceDelivery { override def persistenceId: String = "persistence-id" @@ -186,9 +180,9 @@ object PersistenceDocSpec { override def persistenceId = "my-stable-persistence-id" //#snapshot-criteria - override def recovery = Recovery(fromSnapshot = SnapshotSelectionCriteria( - maxSequenceNr = 457L, - maxTimestamp = System.currentTimeMillis)) + override def recovery = + Recovery( + fromSnapshot = SnapshotSelectionCriteria(maxSequenceNr = 457L, maxTimestamp = System.currentTimeMillis)) //#snapshot-criteria //#snapshot-offer @@ -220,8 +214,12 @@ object PersistenceDocSpec { override def receiveCommand: Receive = { case c: String => { sender() ! c - persistAsync(s"evt-$c-1") { e => sender() ! e } - persistAsync(s"evt-$c-2") { e => sender() ! e } + persistAsync(s"evt-$c-1") { e => + sender() ! e + } + persistAsync(s"evt-$c-2") { e => + sender() ! e + } } } } @@ -255,9 +253,15 @@ object PersistenceDocSpec { override def receiveCommand: Receive = { case c: String => { sender() ! c - persistAsync(s"evt-$c-1") { e => sender() ! 
e } - persistAsync(s"evt-$c-2") { e => sender() ! e } - deferAsync(s"evt-$c-3") { e => sender() ! e } + persistAsync(s"evt-$c-1") { e => + sender() ! e + } + persistAsync(s"evt-$c-2") { e => + sender() ! e + } + deferAsync(s"evt-$c-3") { e => + sender() ! e + } } } } @@ -293,9 +297,15 @@ object PersistenceDocSpec { override def receiveCommand: Receive = { case c: String => { sender() ! c - persist(s"evt-$c-1") { e => sender() ! e } - persist(s"evt-$c-2") { e => sender() ! e } - defer(s"evt-$c-3") { e => sender() ! e } + persist(s"evt-$c-1") { e => + sender() ! e + } + persist(s"evt-$c-2") { e => + sender() ! e + } + defer(s"evt-$c-3") { e => + sender() ! e + } } } } @@ -365,11 +375,15 @@ object PersistenceDocSpec { sender() ! c persistAsync(c + "-outer-1") { outer => sender() ! outer - persistAsync(c + "-inner-1") { inner => sender() ! inner } + persistAsync(c + "-inner-1") { inner => + sender() ! inner + } } persistAsync(c + "-outer-2") { outer => sender() ! outer - persistAsync(c + "-inner-2") { inner => sender() ! inner } + persistAsync(c + "-inner-2") { inner => + sender() ! inner + } } } //#nested-persistAsync-persistAsync diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala index 45be47b509..4f87e01267 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceEventAdapterDocSpec.scala @@ -81,7 +81,10 @@ class PersistenceEventAdapterDocSpec(config: String) extends AkkaSpec(config) { } override def receiveCommand: Receive = { - case c => persist(c) { e => p.ref ! e } + case c => + persist(c) { e => + p.ref ! e + } } }) @@ -113,7 +116,10 @@ class PersistenceEventAdapterDocSpec(config: String) extends AkkaSpec(config) { } override def receiveCommand: Receive = { - case c => persist(c) { e => p.ref ! e } + case c => + persist(c) { e => + p.ref ! 
e + } } }) diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala index cc8b5347a8..819ce0a156 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceMultiDocSpec.scala @@ -2,7 +2,7 @@ * Copyright (C) 2009-2019 Lightbend Inc. */ -import akka.persistence.{ RuntimePluginConfig, PersistentActor } +import akka.persistence.{ PersistentActor, RuntimePluginConfig } import com.typesafe.config.ConfigFactory object PersistenceMultiDocSpec { @@ -71,16 +71,22 @@ object PersistenceMultiDocSpec { override def snapshotPluginId = s"snapshot-store-plugin-$runtimeDistinction" // Configuration which contains the journal plugin id defined above - override def journalPluginConfig = ConfigFactory.empty().withValue( - s"journal-plugin-$runtimeDistinction", - context.system.settings.config.getValue("journal-plugin") // or a very different configuration coming from an external service. - ) + override def journalPluginConfig = + ConfigFactory + .empty() + .withValue(s"journal-plugin-$runtimeDistinction", + context.system.settings.config + .getValue("journal-plugin") // or a very different configuration coming from an external service. + ) // Configuration which contains the snapshot store plugin id defined above - override def snapshotPluginConfig = ConfigFactory.empty().withValue( - s"snapshot-plugin-$runtimeDistinction", - context.system.settings.config.getValue("snapshot-store-plugin") // or a very different configuration coming from an external service. - ) + override def snapshotPluginConfig = + ConfigFactory + .empty() + .withValue(s"snapshot-plugin-$runtimeDistinction", + context.system.settings.config + .getValue("snapshot-store-plugin") // or a very different configuration coming from an external service. 
+ ) } diff --git a/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala index 52929e704f..794ae83b2b 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistencePluginDocSpec.scala @@ -93,7 +93,10 @@ class PersistencePluginDocSpec extends WordSpec { //#snapshot-store-plugin-config """ - val system = ActorSystem("PersistencePluginDocSpec", ConfigFactory.parseString(providerConfig).withFallback(ConfigFactory.parseString(PersistencePluginDocSpec.config))) + val system = ActorSystem("PersistencePluginDocSpec", + ConfigFactory + .parseString(providerConfig) + .withFallback(ConfigFactory.parseString(PersistencePluginDocSpec.config))) try { Persistence(system) } finally { @@ -159,21 +162,16 @@ class MyJournal extends AsyncWriteJournal { //#sync-journal-plugin-api def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = ??? - def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, - toSequenceNr: Long, max: Long)( - replayCallback: (PersistentRepr) => Unit): Future[Unit] = ??? - def asyncReadHighestSequenceNr( - persistenceId: String, - fromSequenceNr: Long): Future[Long] = ??? + def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( + replayCallback: (PersistentRepr) => Unit): Future[Unit] = ??? + def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = ??? // optionally override: override def receivePluginInternal: Receive = super.receivePluginInternal } class MySnapshotStore extends SnapshotStore { - def loadAsync( - persistenceId: String, - criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = ??? + def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = ??? 
def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] = ??? def deleteAsync(metadata: SnapshotMetadata): Future[Unit] = ??? def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = ??? @@ -187,9 +185,9 @@ object PersistenceTCKDoc { import akka.persistence.journal.JournalSpec //#journal-tck-scala - class MyJournalSpec extends JournalSpec( - config = ConfigFactory.parseString( - """akka.persistence.journal.plugin = "my.journal.plugin"""")) { + class MyJournalSpec + extends JournalSpec( + config = ConfigFactory.parseString("""akka.persistence.journal.plugin = "my.journal.plugin"""")) { override def supportsRejectingNonSerializableObjects: CapabilityFlag = false // or CapabilityFlag.off @@ -203,9 +201,9 @@ object PersistenceTCKDoc { import akka.persistence.snapshot.SnapshotStoreSpec //#snapshot-store-tck-scala - class MySnapshotStoreSpec extends SnapshotStoreSpec( - config = ConfigFactory.parseString( - """ + class MySnapshotStoreSpec + extends SnapshotStoreSpec( + config = ConfigFactory.parseString(""" akka.persistence.snapshot-store.plugin = "my.snapshot-store.plugin" """)) { @@ -221,26 +219,24 @@ object PersistenceTCKDoc { import org.iq80.leveldb.util.FileUtils //#journal-tck-before-after-scala - class MyJournalSpec extends JournalSpec( - config = ConfigFactory.parseString( - """ + class MyJournalSpec + extends JournalSpec(config = ConfigFactory.parseString(""" akka.persistence.journal.plugin = "my.journal.plugin" """)) { override def supportsRejectingNonSerializableObjects: CapabilityFlag = true // or CapabilityFlag.on - val storageLocations = List( - new File(system.settings.config.getString("akka.persistence.journal.leveldb.dir")), - new File(config.getString("akka.persistence.snapshot-store.local.dir"))) + val storageLocations = List(new File(system.settings.config.getString("akka.persistence.journal.leveldb.dir")), + new File(config.getString("akka.persistence.snapshot-store.local.dir"))) override def 
beforeAll(): Unit = { super.beforeAll() - storageLocations foreach FileUtils.deleteRecursively + storageLocations.foreach(FileUtils.deleteRecursively) } override def afterAll(): Unit = { - storageLocations foreach FileUtils.deleteRecursively + storageLocations.foreach(FileUtils.deleteRecursively) super.afterAll() } diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala index cc5f6b7a2e..cac287fcdd 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceSchemaEvolutionDocSpec.scala @@ -95,7 +95,8 @@ class ProtobufReadOptional { .setRow(s.row) .setLetter(s.letter) .setSeatType(s.seatType.code) - .build().toByteArray + .build() + .toByteArray } // -- fromBinary helpers -- @@ -142,11 +143,12 @@ class RenamePlainJson { marshaller.toJson(event) override def fromJournal(event: Any, manifest: String): EventSeq = event match { - case json: JsObject => EventSeq(marshaller.fromJson(manifest match { - case V1 => rename(json, "code", "seatNr") - case V2 => json // pass-through - case unknown => throw new IllegalArgumentException(s"Unknown manifest: $unknown") - })) + case json: JsObject => + EventSeq(marshaller.fromJson(manifest match { + case V1 => rename(json, "code", "seatNr") + case V2 => json // pass-through + case unknown => throw new IllegalArgumentException(s"Unknown manifest: $unknown") + })) case _ => val c = event.getClass throw new IllegalArgumentException("Can only work with JSON, was: %s".format(c)) @@ -190,8 +192,7 @@ object SimplestCustomSerializer { // serialize the object override def toBinary(obj: AnyRef): Array[Byte] = obj match { case p: Person => s"""${p.name}|${p.surname}""".getBytes(Utf8) - case _ => throw new IllegalArgumentException( - s"Unable to serialize to bytes, clazz was: ${obj.getClass}!") + case _ => throw new 
IllegalArgumentException(s"Unable to serialize to bytes, clazz was: ${obj.getClass}!") } // deserialize the object, using the manifest to indicate which logic to apply @@ -201,8 +202,9 @@ object SimplestCustomSerializer { val nameAndSurname = new String(bytes, Utf8) val Array(name, surname) = nameAndSurname.split("[|]") Person(name, surname) - case _ => throw new NotSerializableException( - s"Unable to deserialize from bytes, manifest was: $manifest! Bytes length: " + + case _ => + throw new NotSerializableException( + s"Unable to deserialize from bytes, manifest was: $manifest! Bytes length: " + bytes.length) } @@ -251,9 +253,7 @@ class UserEventsAdapter extends EventAdapter { case UserDetailsChanged(null, address) => EventSeq(UserAddressChanged(address)) case UserDetailsChanged(name, null) => EventSeq(UserNameChanged(name)) case UserDetailsChanged(name, address) => - EventSeq( - UserNameChanged(name), - UserAddressChanged(address)) + EventSeq(UserNameChanged(name), UserAddressChanged(address)) case event: V2 => EventSeq(event) } @@ -270,8 +270,7 @@ class RemovedEventsAwareSerializer extends SerializerWithStringManifest { val utf8 = Charset.forName("UTF-8") override def identifier: Int = 8337 - val SkipEventManifestsEvents = Set( - "docs.persistence.CustomerBlinked" // ... + val SkipEventManifestsEvents = Set("docs.persistence.CustomerBlinked" // ... ) override def manifest(o: AnyRef): String = o.getClass.getName @@ -377,8 +376,7 @@ class JsonDataModelAdapter extends EventAdapter { case json: JsObject => EventSeq(marshaller.fromJson(json)) case _ => - throw new IllegalArgumentException( - "Unable to fromJournal a non-JSON object! Was: " + event.getClass) + throw new IllegalArgumentException("Unable to fromJournal a non-JSON object! 
Was: " + event.getClass) } } //#detach-models-adapter-json diff --git a/akka-docs/src/test/scala/docs/persistence/PersistenceSerializerDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/PersistenceSerializerDocSpec.scala index 07bb5dc854..fc75c1c5a8 100644 --- a/akka-docs/src/test/scala/docs/persistence/PersistenceSerializerDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/PersistenceSerializerDocSpec.scala @@ -8,7 +8,7 @@ import com.typesafe.config._ import scala.concurrent.duration._ import org.scalatest.WordSpec import akka.actor.ActorSystem -import akka.serialization.{ Serializer, SerializationExtension } +import akka.serialization.{ SerializationExtension, Serializer } import akka.testkit.TestKit class PersistenceSerializerDocSpec extends WordSpec { diff --git a/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala index dedc4582cf..c373cf523d 100644 --- a/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/query/LeveldbPersistenceQueryDocSpec.scala @@ -48,16 +48,14 @@ class LeveldbPersistenceQueryDocSpec(config: String) extends AkkaSpec(config) { import akka.persistence.query.PersistenceQuery import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal - val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal]( - LeveldbReadJournal.Identifier) + val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) //#get-read-journal } "demonstrate EventsByPersistenceId" in { //#EventsByPersistenceId implicit val mat = ActorMaterializer()(system) - val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal]( - LeveldbReadJournal.Identifier) + val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) val src: 
Source[EventEnvelope, NotUsed] = queries.eventsByPersistenceId("some-persistence-id", 0L, Long.MaxValue) @@ -69,8 +67,7 @@ class LeveldbPersistenceQueryDocSpec(config: String) extends AkkaSpec(config) { "demonstrate AllPersistenceIds" in { //#AllPersistenceIds implicit val mat = ActorMaterializer()(system) - val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal]( - LeveldbReadJournal.Identifier) + val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) val src: Source[String, NotUsed] = queries.persistenceIds() //#AllPersistenceIds @@ -79,8 +76,7 @@ class LeveldbPersistenceQueryDocSpec(config: String) extends AkkaSpec(config) { "demonstrate EventsByTag" in { //#EventsByTag implicit val mat = ActorMaterializer()(system) - val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal]( - LeveldbReadJournal.Identifier) + val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) val src: Source[EventEnvelope, NotUsed] = queries.eventsByTag(tag = "green", offset = Sequence(0L)) diff --git a/akka-docs/src/test/scala/docs/persistence/query/MyEventsByTagPublisher.scala b/akka-docs/src/test/scala/docs/persistence/query/MyEventsByTagPublisher.scala index 317a6ba95a..9df0f04658 100644 --- a/akka-docs/src/test/scala/docs/persistence/query/MyEventsByTagPublisher.scala +++ b/akka-docs/src/test/scala/docs/persistence/query/MyEventsByTagPublisher.scala @@ -20,7 +20,7 @@ object MyEventsByTagPublisher { //#events-by-tag-publisher class MyEventsByTagPublisher(tag: String, offset: Long, refreshInterval: FiniteDuration) - extends ActorPublisher[EventEnvelope] { + extends ActorPublisher[EventEnvelope] { private case object Continue @@ -31,8 +31,7 @@ class MyEventsByTagPublisher(tag: String, offset: Long, refreshInterval: FiniteD var buf = Vector.empty[EventEnvelope] import context.dispatcher - val continueTask = context.system.scheduler.schedule( - 
refreshInterval, refreshInterval, self, Continue) + val continueTask = context.system.scheduler.schedule(refreshInterval, refreshInterval, self, Continue) override def postStop(): Unit = { continueTask.cancel() @@ -48,8 +47,7 @@ class MyEventsByTagPublisher(tag: String, offset: Long, refreshInterval: FiniteD } object Select { - private def statement() = connection.prepareStatement( - """ + private def statement() = connection.prepareStatement(""" SELECT id, persistent_repr FROM journal WHERE tag = ? AND id > ? ORDER BY id LIMIT ? @@ -64,8 +62,7 @@ class MyEventsByTagPublisher(tag: String, offset: Long, refreshInterval: FiniteD val rs = s.executeQuery() val b = Vector.newBuilder[(Long, Array[Byte])] - while (rs.next()) - b += (rs.getLong(1) -> rs.getBytes(2)) + while (rs.next()) b += (rs.getLong(1) -> rs.getBytes(2)) b.result() } finally s.close() } @@ -94,9 +91,9 @@ class MyEventsByTagPublisher(tag: String, offset: Long, refreshInterval: FiniteD if (totalDemand <= Int.MaxValue) { val (use, keep) = buf.splitAt(totalDemand.toInt) buf = keep - use foreach onNext + use.foreach(onNext) } else { - buf foreach onNext + buf.foreach(onNext) buf = Vector.empty } } diff --git a/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala b/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala index a52f2303ca..2a63b8de9a 100644 --- a/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala +++ b/akka-docs/src/test/scala/docs/persistence/query/PersistenceQueryDocSpec.scala @@ -34,8 +34,7 @@ object PersistenceQueryDocSpec { //#advanced-journal-query-types //#my-read-journal - class MyReadJournalProvider(system: ExtendedActorSystem, config: Config) - extends ReadJournalProvider { + class MyReadJournalProvider(system: ExtendedActorSystem, config: Config) extends ReadJournalProvider { override val scaladslReadJournal: MyScaladslReadJournal = new MyScaladslReadJournal(system, config) @@ -45,11 +44,11 @@ object 
PersistenceQueryDocSpec { } class MyScaladslReadJournal(system: ExtendedActorSystem, config: Config) - extends akka.persistence.query.scaladsl.ReadJournal - with akka.persistence.query.scaladsl.EventsByTagQuery - with akka.persistence.query.scaladsl.EventsByPersistenceIdQuery - with akka.persistence.query.scaladsl.PersistenceIdsQuery - with akka.persistence.query.scaladsl.CurrentPersistenceIdsQuery { + extends akka.persistence.query.scaladsl.ReadJournal + with akka.persistence.query.scaladsl.EventsByTagQuery + with akka.persistence.query.scaladsl.EventsByPersistenceIdQuery + with akka.persistence.query.scaladsl.PersistenceIdsQuery + with akka.persistence.query.scaladsl.CurrentPersistenceIdsQuery { private val refreshInterval: FiniteDuration = config.getDuration("refresh-interval", MILLISECONDS).millis @@ -65,20 +64,18 @@ object PersistenceQueryDocSpec { * in the returned stream. This means that you can use the offset that is returned in `EventEnvelope` * as the `offset` parameter in a subsequent query. 
*/ - override def eventsByTag( - tag: String, offset: Offset): Source[EventEnvelope, NotUsed] = offset match { + override def eventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] = offset match { case Sequence(offsetValue) => val props = MyEventsByTagPublisher.props(tag, offsetValue, refreshInterval) - Source.actorPublisher[EventEnvelope](props) - .mapMaterializedValue(_ => NotUsed) + Source.actorPublisher[EventEnvelope](props).mapMaterializedValue(_ => NotUsed) case NoOffset => eventsByTag(tag, Sequence(0L)) //recursive case _ => throw new IllegalArgumentException("LevelDB does not support " + offset.getClass.getName + " offsets") } - override def eventsByPersistenceId( - persistenceId: String, fromSequenceNr: Long, - toSequenceNr: Long): Source[EventEnvelope, NotUsed] = { + override def eventsByPersistenceId(persistenceId: String, + fromSequenceNr: Long, + toSequenceNr: Long): Source[EventEnvelope, NotUsed] = { // implement in a similar way as eventsByTag ??? } @@ -105,21 +102,19 @@ object PersistenceQueryDocSpec { } class MyJavadslReadJournal(scaladslReadJournal: MyScaladslReadJournal) - extends akka.persistence.query.javadsl.ReadJournal - with akka.persistence.query.javadsl.EventsByTagQuery - with akka.persistence.query.javadsl.EventsByPersistenceIdQuery - with akka.persistence.query.javadsl.PersistenceIdsQuery - with akka.persistence.query.javadsl.CurrentPersistenceIdsQuery { + extends akka.persistence.query.javadsl.ReadJournal + with akka.persistence.query.javadsl.EventsByTagQuery + with akka.persistence.query.javadsl.EventsByPersistenceIdQuery + with akka.persistence.query.javadsl.PersistenceIdsQuery + with akka.persistence.query.javadsl.CurrentPersistenceIdsQuery { - override def eventsByTag( - tag: String, offset: Offset = Sequence(0L)): javadsl.Source[EventEnvelope, NotUsed] = + override def eventsByTag(tag: String, offset: Offset = Sequence(0L)): javadsl.Source[EventEnvelope, NotUsed] = scaladslReadJournal.eventsByTag(tag, 
offset).asJava - override def eventsByPersistenceId( - persistenceId: String, fromSequenceNr: Long = 0L, - toSequenceNr: Long = Long.MaxValue): javadsl.Source[EventEnvelope, NotUsed] = - scaladslReadJournal.eventsByPersistenceId( - persistenceId, fromSequenceNr, toSequenceNr).asJava + override def eventsByPersistenceId(persistenceId: String, + fromSequenceNr: Long = 0L, + toSequenceNr: Long = Long.MaxValue): javadsl.Source[EventEnvelope, NotUsed] = + scaladslReadJournal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava override def persistenceIds(): javadsl.Source[String, NotUsed] = scaladslReadJournal.persistenceIds().asJava @@ -129,8 +124,7 @@ object PersistenceQueryDocSpec { // possibility to add more plugin specific queries - def byTagsWithMeta( - tags: java.util.Set[String]): javadsl.Source[RichEvent, QueryMetadata] = { + def byTagsWithMeta(tags: java.util.Set[String]): javadsl.Source[RichEvent, QueryMetadata] = { import scala.collection.JavaConverters._ scaladslReadJournal.byTagsWithMeta(tags.asScala.toSet).asJava } @@ -199,8 +193,7 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { import PersistenceQueryDocSpec._ def this() { - this( - """ + this(""" akka.persistence.query.my-read-journal { class = "docs.persistence.query.PersistenceQueryDocSpec$MyReadJournalProvider" refresh-interval = 3s @@ -214,8 +207,7 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { //#basic-usage // obtain read journal by plugin id val readJournal = - PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]( - "akka.persistence.query.my-read-journal") + PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]("akka.persistence.query.my-read-journal") // issue query to journal val source: Source[EventEnvelope, NotUsed] = @@ -223,7 +215,9 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { // materialize stream, consuming events implicit val mat = ActorMaterializer() - source.runForeach { event => 
println("Event: " + event) } + source.runForeach { event => + println("Event: " + event) + } //#basic-usage //#all-persistence-ids-live @@ -262,11 +256,14 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { query .mapMaterializedValue { meta => - println(s"The query is: " + + println( + s"The query is: " + s"ordered deterministically: ${meta.deterministicOrder}, " + s"infinite: ${meta.infinite}") } - .map { event => println(s"Event payload: ${event.payload}") } + .map { event => + println(s"Event payload: ${event.payload}") + } .runWith(Sink.ignore) //#advanced-journal-query-usage @@ -296,8 +293,12 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { bidProjection.latestOffset.foreach { startFromOffset => readJournal .eventsByTag("bid", Sequence(startFromOffset)) - .mapAsync(8) { envelope => (writer ? envelope.event).map(_ => envelope.offset) } - .mapAsync(1) { offset => bidProjection.saveProgress(offset) } + .mapAsync(8) { envelope => + (writer ? envelope.event).map(_ => envelope.offset) + } + .mapAsync(1) { offset => + bidProjection.saveProgress(offset) + } .runWith(Sink.ignore) } //#projection-into-different-store-actor-run @@ -305,8 +306,7 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { class RunWithAsyncFunction { val readJournal = - PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]( - "akka.persistence.query.my-read-journal") + PersistenceQuery(system).readJournalFor[MyScaladslReadJournal]("akka.persistence.query.my-read-journal") //#projection-into-different-store-simple-classes trait ExampleStore { @@ -319,10 +319,11 @@ class PersistenceQueryDocSpec(s: String) extends AkkaSpec(s) { readJournal .eventsByTag("bid", NoOffset) - .mapAsync(1) { e => store.save(e) } + .mapAsync(1) { e => + store.save(e) + } .runWith(Sink.ignore) //#projection-into-different-store-simple } } - diff --git a/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala 
b/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala index 1c74604352..ccb871b06e 100644 --- a/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala +++ b/akka-docs/src/test/scala/docs/remoting/RemoteDeploymentDocSpec.scala @@ -4,10 +4,10 @@ package docs.remoting -import akka.actor.{ ExtendedActorSystem, ActorSystem, Actor, ActorRef } +import akka.actor.{ Actor, ActorRef, ActorSystem, ExtendedActorSystem } import akka.testkit.{ AkkaSpec, ImplicitSender } //#import -import akka.actor.{ Props, Deploy, Address, AddressFromURIString } +import akka.actor.{ Address, AddressFromURIString, Deploy, Props } import akka.remote.RemoteScope //#import @@ -29,14 +29,14 @@ class RemoteDeploymentDocSpec extends AkkaSpec(""" import RemoteDeploymentDocSpec._ val other = ActorSystem("remote", system.settings.config) - val address = other.asInstanceOf[ExtendedActorSystem].provider.getExternalAddressFor(Address("akka.tcp", "s", "host", 1)).get + val address = + other.asInstanceOf[ExtendedActorSystem].provider.getExternalAddressFor(Address("akka.tcp", "s", "host", 1)).get override def afterTermination(): Unit = { shutdown(other) } "demonstrate programmatic deployment" in { //#deploy - val ref = system.actorOf(Props[SampleActor]. - withDeploy(Deploy(scope = RemoteScope(address)))) + val ref = system.actorOf(Props[SampleActor].withDeploy(Deploy(scope = RemoteScope(address)))) //#deploy ref.path.address should be(address) ref ! 
"test" diff --git a/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala b/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala index 7ce4b54d04..3554750200 100644 --- a/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/routing/ConsistentHashingRouterDocSpec.scala @@ -55,13 +55,10 @@ class ConsistentHashingRouterDocSpec extends AkkaSpec with ImplicitSender { } val cache: ActorRef = - context.actorOf(ConsistentHashingPool(10, hashMapping = hashMapping). - props(Props[Cache]), name = "cache") + context.actorOf(ConsistentHashingPool(10, hashMapping = hashMapping).props(Props[Cache]), name = "cache") - cache ! ConsistentHashableEnvelope( - message = Entry("hello", "HELLO"), hashKey = "hello") - cache ! ConsistentHashableEnvelope( - message = Entry("hi", "HI"), hashKey = "hi") + cache ! ConsistentHashableEnvelope(message = Entry("hello", "HELLO"), hashKey = "hello") + cache ! ConsistentHashableEnvelope(message = Entry("hi", "HI"), hashKey = "hi") cache ! 
Get("hello") expectMsg(Some("HELLO")) diff --git a/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala b/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala index b71808b9ee..ee9e01c13d 100644 --- a/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/routing/CustomRouterDocSpec.scala @@ -80,9 +80,8 @@ import com.typesafe.config.Config final case class RedundancyGroup(routeePaths: immutable.Iterable[String], nbrCopies: Int) extends Group { - def this(config: Config) = this( - routeePaths = immutableSeq(config.getStringList("routees.paths")), - nbrCopies = config.getInt("nbr-copies")) + def this(config: Config) = + this(routeePaths = immutableSeq(config.getStringList("routees.paths")), nbrCopies = config.getInt("nbr-copies")) override def paths(system: ActorSystem): immutable.Iterable[String] = routeePaths @@ -105,16 +104,13 @@ class CustomRouterDocSpec extends AkkaSpec(CustomRouterDocSpec.config) with Impl val routees = for (n <- 1 to 7) yield TestRoutee(n) val r1 = logic.select("msg", routees) - r1.asInstanceOf[SeveralRoutees].routees should be( - Vector(TestRoutee(1), TestRoutee(2), TestRoutee(3))) + r1.asInstanceOf[SeveralRoutees].routees should be(Vector(TestRoutee(1), TestRoutee(2), TestRoutee(3))) val r2 = logic.select("msg", routees) - r2.asInstanceOf[SeveralRoutees].routees should be( - Vector(TestRoutee(4), TestRoutee(5), TestRoutee(6))) + r2.asInstanceOf[SeveralRoutees].routees should be(Vector(TestRoutee(4), TestRoutee(5), TestRoutee(6))) val r3 = logic.select("msg", routees) - r3.asInstanceOf[SeveralRoutees].routees should be( - Vector(TestRoutee(7), TestRoutee(1), TestRoutee(2))) + r3.asInstanceOf[SeveralRoutees].routees should be(Vector(TestRoutee(7), TestRoutee(1), TestRoutee(2))) //#unit-test-logic } @@ -125,18 +121,14 @@ class CustomRouterDocSpec extends AkkaSpec(CustomRouterDocSpec.config) with Impl val paths = for (n <- 1 to 10) yield ("/user/s" + n) val redundancy1: ActorRef 
= - system.actorOf( - RedundancyGroup(paths, nbrCopies = 3).props(), - name = "redundancy1") + system.actorOf(RedundancyGroup(paths, nbrCopies = 3).props(), name = "redundancy1") redundancy1 ! "important" //#usage-1 for (_ <- 1 to 3) expectMsg("important") //#usage-2 - val redundancy2: ActorRef = system.actorOf( - FromConfig.props(), - name = "redundancy2") + val redundancy2: ActorRef = system.actorOf(FromConfig.props(), name = "redundancy2") redundancy2 ! "very important" //#usage-2 diff --git a/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala b/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala index 7f1aa6a001..0bcbfa0e10 100644 --- a/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala +++ b/akka-docs/src/test/scala/docs/routing/RouterDocSpec.scala @@ -6,7 +6,7 @@ package docs.routing import scala.concurrent.duration._ import akka.testkit._ -import akka.actor.{ ActorRef, Props, Actor } +import akka.actor.{ Actor, ActorRef, Props } import akka.actor.Terminated import akka.routing.FromConfig import akka.routing.RoundRobinPool @@ -300,7 +300,7 @@ router-dispatcher {} var router = { val routees = Vector.fill(5) { val r = context.actorOf(Props[Worker]) - context watch r + context.watch(r) ActorRefRoutee(r) } Router(RoundRobinRoutingLogic(), routees) @@ -312,7 +312,7 @@ router-dispatcher {} case Terminated(a) => router = router.removeRoutee(a) val r = context.actorOf(Props[Worker]) - context watch r + context.watch(r) router = router.addRoutee(r) } } @@ -400,7 +400,8 @@ router-dispatcher {} for (i <- 1 to 100) router10b ! i val threads10b = Thread.getAllStackTraces.keySet.asScala.filter { _.getName contains "router10b" } val threads10bNr = threads10b.size - require(threads10bNr == 5, s"Expected 5 threads for router10b, had $threads10bNr! Got: ${threads10b.map(_.getName)}") + require(threads10bNr == 5, + s"Expected 5 threads for router10b, had $threads10bNr! 
Got: ${threads10b.map(_.getName)}") //#smallest-mailbox-pool-1 val router11: ActorRef = @@ -439,8 +440,7 @@ router-dispatcher {} //#scatter-gather-pool-2 val router18: ActorRef = - context.actorOf(ScatterGatherFirstCompletedPool(5, within = 10.seconds). - props(Props[Worker]), "router18") + context.actorOf(ScatterGatherFirstCompletedPool(5, within = 10.seconds).props(Props[Worker]), "router18") //#scatter-gather-pool-2 //#scatter-gather-group-1 @@ -450,9 +450,7 @@ router-dispatcher {} //#scatter-gather-group-2 val router20: ActorRef = - context.actorOf(ScatterGatherFirstCompletedGroup( - paths, - within = 10.seconds).props(), "router20") + context.actorOf(ScatterGatherFirstCompletedGroup(paths, within = 10.seconds).props(), "router20") //#scatter-gather-group-2 //#tail-chopping-pool-1 @@ -462,8 +460,7 @@ router-dispatcher {} //#tail-chopping-pool-2 val router22: ActorRef = - context.actorOf(TailChoppingPool(5, within = 10.seconds, interval = 20.millis). - props(Props[Worker]), "router22") + context.actorOf(TailChoppingPool(5, within = 10.seconds, interval = 20.millis).props(Props[Worker]), "router22") //#tail-chopping-pool-2 //#tail-chopping-group-1 @@ -473,9 +470,7 @@ router-dispatcher {} //#tail-chopping-group-2 val router24: ActorRef = - context.actorOf(TailChoppingGroup( - paths, - within = 10.seconds, interval = 20.millis).props(), "router24") + context.actorOf(TailChoppingGroup(paths, within = 10.seconds, interval = 20.millis).props(), "router24") //#tail-chopping-group-2 //#consistent-hashing-pool-1 @@ -485,9 +480,7 @@ router-dispatcher {} //#consistent-hashing-pool-2 val router26: ActorRef = - context.actorOf( - ConsistentHashingPool(5).props(Props[Worker]), - "router26") + context.actorOf(ConsistentHashingPool(5).props(Props[Worker]), "router26") //#consistent-hashing-pool-2 //#consistent-hashing-group-1 @@ -508,9 +501,7 @@ router-dispatcher {} //#resize-pool-2 val resizer = DefaultResizer(lowerBound = 2, upperBound = 15) val router30: ActorRef = - 
context.actorOf( - RoundRobinPool(5, Some(resizer)).props(Props[Worker]), - "router30") + context.actorOf(RoundRobinPool(5, Some(resizer)).props(Props[Worker]), "router30") //#resize-pool-2 //#optimal-size-exploring-resize-pool @@ -546,10 +537,10 @@ class RouterDocSpec extends AkkaSpec(RouterDocSpec.config) with ImplicitSender { "demonstrate dispatcher" in { //#dispatchers val router: ActorRef = system.actorOf( - // “head” router actor will run on "router-dispatcher" dispatcher - // Worker routees will run on "pool-dispatcher" dispatcher - RandomPool(5, routerDispatcher = "router-dispatcher").props(Props[Worker]), - name = "poolWithDispatcher") + // “head” router actor will run on "router-dispatcher" dispatcher + // Worker routees will run on "pool-dispatcher" dispatcher + RandomPool(5, routerDispatcher = "router-dispatcher").props(Props[Worker]), + name = "poolWithDispatcher") //#dispatchers } @@ -559,7 +550,7 @@ class RouterDocSpec extends AkkaSpec(RouterDocSpec.config) with ImplicitSender { import akka.routing.Broadcast router ! 
Broadcast("Watch out for Davy Jones' locker") //#broadcastDavyJonesWarning - receiveN(5, 5.seconds.dilated) should have length (5) + (receiveN(5, 5.seconds.dilated) should have).length(5) } "demonstrate PoisonPill" in { @@ -604,11 +595,9 @@ class RouterDocSpec extends AkkaSpec(RouterDocSpec.config) with ImplicitSender { //#remoteRoutees import akka.actor.{ Address, AddressFromURIString } import akka.remote.routing.RemoteRouterConfig - val addresses = Seq( - Address("akka.tcp", "remotesys", "otherhost", 1234), - AddressFromURIString("akka.tcp://othersys@anotherhost:1234")) - val routerRemote = system.actorOf( - RemoteRouterConfig(RoundRobinPool(5), addresses).props(Props[Echo])) + val addresses = Seq(Address("akka.tcp", "remotesys", "otherhost", 1234), + AddressFromURIString("akka.tcp://othersys@anotherhost:1234")) + val routerRemote = system.actorOf(RemoteRouterConfig(RoundRobinPool(5), addresses).props(Props[Echo])) //#remoteRoutees } @@ -617,11 +606,9 @@ class RouterDocSpec extends AkkaSpec(RouterDocSpec.config) with ImplicitSender { //#remoteRoutees-artery import akka.actor.{ Address, AddressFromURIString } import akka.remote.routing.RemoteRouterConfig - val addresses = Seq( - Address("akka", "remotesys", "otherhost", 1234), - AddressFromURIString("akka://othersys@anotherhost:1234")) - val routerRemote = system.actorOf( - RemoteRouterConfig(RoundRobinPool(5), addresses).props(Props[Echo])) + val addresses = + Seq(Address("akka", "remotesys", "otherhost", 1234), AddressFromURIString("akka://othersys@anotherhost:1234")) + val routerRemote = system.actorOf(RemoteRouterConfig(RoundRobinPool(5), addresses).props(Props[Echo])) //#remoteRoutees-artery } } diff --git a/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala b/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala index c5c892bd72..b956d79b57 100644 --- a/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala +++ 
b/akka-docs/src/test/scala/docs/serialization/SerializationDocSpec.scala @@ -43,9 +43,7 @@ package docs.serialization { // "fromBinary" deserializes the given array, // using the type hint (if any, see "includeManifest" above) - def fromBinary( - bytes: Array[Byte], - clazz: Option[Class[_]]): AnyRef = { + def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { // Put your code that deserializes here //#... null @@ -234,13 +232,13 @@ package docs.serialization { class ExternalAddressExt(system: ExtendedActorSystem) extends Extension { def addressFor(remoteAddr: Address): Address = - system.provider.getExternalAddressFor(remoteAddr) getOrElse - (throw new UnsupportedOperationException("cannot send to " + remoteAddr)) + system.provider + .getExternalAddressFor(remoteAddr) + .getOrElse(throw new UnsupportedOperationException("cannot send to " + remoteAddr)) } def serializeTo(ref: ActorRef, remote: Address): String = - ref.path.toSerializationFormatWithAddress(ExternalAddress(extendedSystem). - addressFor(remote)) + ref.path.toSerializationFormatWithAddress(ExternalAddress(extendedSystem).addressFor(remote)) //#external-address } @@ -262,8 +260,7 @@ package docs.serialization { } def serializeAkkaDefault(ref: ActorRef): String = - ref.path.toSerializationFormatWithAddress(ExternalAddress(theActorSystem). 
- addressForAkka) + ref.path.toSerializationFormatWithAddress(ExternalAddress(theActorSystem).addressForAkka) //#external-address-default } } diff --git a/akka-docs/src/test/scala/docs/stream/ActorPublisherDocSpec.scala b/akka-docs/src/test/scala/docs/stream/ActorPublisherDocSpec.scala index 29e2b1493f..ea123b9ce1 100644 --- a/akka-docs/src/test/scala/docs/stream/ActorPublisherDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/ActorPublisherDocSpec.scala @@ -55,11 +55,11 @@ object ActorPublisherDocSpec { if (totalDemand <= Int.MaxValue) { val (use, keep) = buf.splitAt(totalDemand.toInt) buf = keep - use foreach onNext + use.foreach(onNext) } else { val (use, keep) = buf.splitAt(Int.MaxValue) buf = keep - use foreach onNext + use.foreach(onNext) deliverBuf() } } @@ -80,7 +80,9 @@ class ActorPublisherDocSpec extends AkkaSpec { val jobManagerSource = Source.actorPublisher[JobManager.Job](JobManager.props) val ref = Flow[JobManager.Job] .map(_.payload.toUpperCase) - .map { elem => println(elem); elem } + .map { elem => + println(elem); elem + } .to(Sink.ignore) .runWith(jobManagerSource) diff --git a/akka-docs/src/test/scala/docs/stream/ActorSubscriberDocSpec.scala b/akka-docs/src/test/scala/docs/stream/ActorSubscriberDocSpec.scala index 55117418e1..c90b8f646c 100644 --- a/akka-docs/src/test/scala/docs/stream/ActorSubscriberDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/ActorSubscriberDocSpec.scala @@ -88,8 +88,7 @@ class ActorSubscriberDocSpec extends AkkaSpec { //#actor-subscriber-usage val N = 117 - val worker = Source(1 to N).map(WorkerPool.Msg(_, replyTo)) - .runWith(Sink.actorSubscriber(WorkerPool.props)) + val worker = Source(1 to N).map(WorkerPool.Msg(_, replyTo)).runWith(Sink.actorSubscriber(WorkerPool.props)) //#actor-subscriber-usage watch(worker) diff --git a/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala b/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala index d5ab488aa4..eecd7dcd4d 100644 --- 
a/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/CompositionDocSpec.scala @@ -20,11 +20,7 @@ class CompositionDocSpec extends AkkaSpec { "nonnested flow" in { //#non-nested-flow - Source.single(0) - .map(_ + 1) - .filter(_ != 0) - .map(_ - 2) - .to(Sink.fold(0)(_ + _)) + Source.single(0).map(_ + 1).filter(_ != 0).map(_ - 2).to(Sink.fold(0)(_ + _)) // ... where is the nesting? //#non-nested-flow @@ -33,17 +29,20 @@ class CompositionDocSpec extends AkkaSpec { "nested flow" in { //#nested-flow val nestedSource = - Source.single(0) // An atomic source + Source + .single(0) // An atomic source .map(_ + 1) // an atomic processing stage .named("nestedSource") // wraps up the current Source and gives it a name val nestedFlow = - Flow[Int].filter(_ != 0) // an atomic processing stage + Flow[Int] + .filter(_ != 0) // an atomic processing stage .map(_ - 2) // another atomic processing stage .named("nestedFlow") // wraps up the Flow, and gives it a name val nestedSink = - nestedFlow.to(Sink.fold(0)(_ + _)) // wire an atomic sink to the nestedFlow + nestedFlow + .to(Sink.fold(0)(_ + _)) // wire an atomic sink to the nestedFlow .named("nestedSink") // wrap it up // Create a RunnableGraph @@ -53,17 +52,20 @@ class CompositionDocSpec extends AkkaSpec { "reusing components" in { val nestedSource = - Source.single(0) // An atomic source + Source + .single(0) // An atomic source .map(_ + 1) // an atomic processing stage .named("nestedSource") // wraps up the current Source and gives it a name val nestedFlow = - Flow[Int].filter(_ != 0) // an atomic processing stage + Flow[Int] + .filter(_ != 0) // an atomic processing stage .map(_ - 2) // another atomic processing stage .named("nestedFlow") // wraps up the Flow, and gives it a name val nestedSink = - nestedFlow.to(Sink.fold(0)(_ + _)) // wire an atomic sink to the nestedFlow + nestedFlow + .to(Sink.fold(0)(_ + _)) // wire an atomic sink to the nestedFlow .named("nestedSink") 
// wrap it up //#reuse @@ -192,7 +194,9 @@ class CompositionDocSpec extends AkkaSpec { //#mat-combine-2 // Materializes to NotUsed (orange) - val flow2: Flow[Int, ByteString, NotUsed] = Flow[Int].map { i => ByteString(i.toString) } + val flow2: Flow[Int, ByteString, NotUsed] = Flow[Int].map { i => + ByteString(i.toString) + } // Materializes to Future[OutgoingConnection] (yellow) val flow3: Flow[ByteString, ByteString, Future[OutgoingConnection]] = @@ -217,9 +221,7 @@ class CompositionDocSpec extends AkkaSpec { def close() = p.trySuccess(None) } - def f( - p: Promise[Option[Int]], - rest: (Future[OutgoingConnection], Future[String])): Future[MyClass] = { + def f(p: Promise[Option[Int]], rest: (Future[OutgoingConnection], Future[String])): Future[MyClass] = { val connFuture = rest._1 connFuture.map(MyClass(p, _)) @@ -235,17 +237,17 @@ class CompositionDocSpec extends AkkaSpec { //#attributes-inheritance import Attributes._ val nestedSource = - Source.single(0) - .map(_ + 1) - .named("nestedSource") // Wrap, no inputBuffer set + Source.single(0).map(_ + 1).named("nestedSource") // Wrap, no inputBuffer set val nestedFlow = - Flow[Int].filter(_ != 0) + Flow[Int] + .filter(_ != 0) .via(Flow[Int].map(_ - 2).withAttributes(inputBuffer(4, 4))) // override .named("nestedFlow") // Wrap, no inputBuffer set val nestedSink = - nestedFlow.to(Sink.fold(0)(_ + _)) // wire an atomic sink to the nestedFlow + nestedFlow + .to(Sink.fold(0)(_ + _)) // wire an atomic sink to the nestedFlow .withAttributes(name("nestedSink") and inputBuffer(3, 3)) // override //#attributes-inheritance } diff --git a/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala index 94815ad7fa..f6e8af7b7c 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowDocSpec.scala @@ -152,11 +152,12 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { "various ways of transforming materialized 
values" in { import scala.concurrent.duration._ - val throttler = Flow.fromGraph(GraphDSL.create(Source.tick(1.second, 1.second, "test")) { implicit builder => tickSource => - import GraphDSL.Implicits._ - val zip = builder.add(ZipWith[String, Int, Int](Keep.right)) - tickSource ~> zip.in0 - FlowShape(zip.in1, zip.out) + val throttler = Flow.fromGraph(GraphDSL.create(Source.tick(1.second, 1.second, "test")) { + implicit builder => tickSource => + import GraphDSL.Implicits._ + val zip = builder.add(ZipWith[String, Int, Int](Keep.right)) + tickSource ~> zip.in0 + FlowShape(zip.in1, zip.out) }) //#flow-mat-combine @@ -225,10 +226,7 @@ class FlowDocSpec extends AkkaSpec with CompileOnlySpec { "defining asynchronous boundaries" in { //#flow-async - Source(List(1, 2, 3)) - .map(_ + 1).async - .map(_ * 2) - .to(Sink.ignore) + Source(List(1, 2, 3)).map(_ + 1).async.map(_ * 2).to(Sink.ignore) //#flow-async } @@ -261,11 +259,10 @@ object FlowDocSpec { final class RunWithMyself extends Actor { implicit val mat = ActorMaterializer() - Source.maybe - .runWith(Sink.onComplete { - case Success(done) => println(s"Completed: $done") - case Failure(ex) => println(s"Failed: ${ex.getMessage}") - }) + Source.maybe.runWith(Sink.onComplete { + case Success(done) => println(s"Completed: $done") + case Failure(ex) => println(s"Failed: ${ex.getMessage}") + }) def receive = { case "boom" => @@ -277,11 +274,10 @@ object FlowDocSpec { //#materializer-from-system-in-actor final class RunForever(implicit val mat: Materializer) extends Actor { - Source.maybe - .runWith(Sink.onComplete { - case Success(done) => println(s"Completed: $done") - case Failure(ex) => println(s"Failed: ${ex.getMessage}") - }) + Source.maybe.runWith(Sink.onComplete { + case Success(done) => println(s"Completed: $done") + case Failure(ex) => println(s"Failed: ${ex.getMessage}") + }) def receive = { case "boom" => diff --git a/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala 
b/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala index 1340d7915b..5b35c48717 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowErrorDocSpec.scala @@ -36,8 +36,7 @@ class FlowErrorDocSpec extends AkkaSpec { case _: ArithmeticException => Supervision.Resume case _ => Supervision.Stop } - implicit val materializer = ActorMaterializer( - ActorMaterializerSettings(system).withSupervisionStrategy(decider)) + implicit val materializer = ActorMaterializer(ActorMaterializerSettings(system).withSupervisionStrategy(decider)) val source = Source(0 to 5).map(100 / _) val result = source.runWith(Sink.fold(0)(_ + _)) // the element causing division by zero will be dropped @@ -55,7 +54,8 @@ class FlowErrorDocSpec extends AkkaSpec { case _ => Supervision.Stop } val flow = Flow[Int] - .filter(100 / _ < 50).map(elem => 100 / (5 - elem)) + .filter(100 / _ < 50) + .map(elem => 100 / (5 - elem)) .withAttributes(ActorAttributes.supervisionStrategy(decider)) val source = Source(0 to 5).via(flow) @@ -93,12 +93,14 @@ class FlowErrorDocSpec extends AkkaSpec { "demonstrate recover" in { implicit val materializer = ActorMaterializer() //#recover - Source(0 to 6).map(n => - if (n < 5) n.toString - else throw new RuntimeException("Boom!") - ).recover { - case _: RuntimeException => "stream truncated" - }.runForeach(println) + Source(0 to 6) + .map(n => + if (n < 5) n.toString + else throw new RuntimeException("Boom!")) + .recover { + case _: RuntimeException => "stream truncated" + } + .runForeach(println) //#recover /* @@ -111,7 +113,7 @@ Output: 4 stream truncated //#recover-output -*/ + */ } "demonstrate recoverWithRetries" in { @@ -119,12 +121,14 @@ stream truncated //#recoverWithRetries val planB = Source(List("five", "six", "seven", "eight")) - Source(0 to 10).map(n => - if (n < 5) n.toString - else throw new RuntimeException("Boom!") - ).recoverWithRetries(attempts = 1, { - case _: RuntimeException => 
planB - }).runForeach(println) + Source(0 to 10) + .map(n => + if (n < 5) n.toString + else throw new RuntimeException("Boom!")) + .recoverWithRetries(attempts = 1, { + case _: RuntimeException => planB + }) + .runForeach(println) //#recoverWithRetries /* @@ -140,7 +144,7 @@ six seven eight //#recoverWithRetries-output - */ + */ } } diff --git a/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala index 81a3192fad..72f92f98b2 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowParallelismDocSpec.scala @@ -6,7 +6,7 @@ package docs.stream import akka.NotUsed import akka.stream.FlowShape -import akka.stream.scaladsl.{ GraphDSL, Merge, Balance, Source, Flow } +import akka.stream.scaladsl.{ Balance, Flow, GraphDSL, Merge, Source } import akka.testkit.AkkaSpec class FlowParallelismDocSpec extends AkkaSpec { @@ -19,13 +19,17 @@ class FlowParallelismDocSpec extends AkkaSpec { //format: OFF //#pipelining - // Takes a scoop of batter and creates a pancake with one side cooked - val fryingPan1: Flow[ScoopOfBatter, HalfCookedPancake, NotUsed] = - Flow[ScoopOfBatter].map { batter => HalfCookedPancake() } + // Takes a scoop of batter and creates a pancake with one side cooked + val fryingPan1: Flow[ScoopOfBatter, HalfCookedPancake, NotUsed] = + Flow[ScoopOfBatter].map { batter => + HalfCookedPancake() + } - // Finishes a half-cooked pancake - val fryingPan2: Flow[HalfCookedPancake, Pancake, NotUsed] = - Flow[HalfCookedPancake].map { halfCooked => Pancake() } + // Finishes a half-cooked pancake + val fryingPan2: Flow[HalfCookedPancake, Pancake, NotUsed] = + Flow[HalfCookedPancake].map { halfCooked => + Pancake() + } //#pipelining //format: ON @@ -41,7 +45,9 @@ class FlowParallelismDocSpec extends AkkaSpec { "Demonstrate parallel processing" in { //#parallelism val fryingPan: Flow[ScoopOfBatter, Pancake, NotUsed] = - 
Flow[ScoopOfBatter].map { batter => Pancake() } + Flow[ScoopOfBatter].map { batter => + Pancake() + } val pancakeChef: Flow[ScoopOfBatter, Pancake, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit builder => val dispatchBatter = builder.add(Balance[ScoopOfBatter](2)) @@ -65,7 +71,6 @@ class FlowParallelismDocSpec extends AkkaSpec { //#parallel-pipeline val pancakeChef: Flow[ScoopOfBatter, Pancake, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit builder => - val dispatchBatter = builder.add(Balance[ScoopOfBatter](2)) val mergePancakes = builder.add(Merge[Pancake](2)) diff --git a/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala b/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala index a67fc7f4ec..2988a38e76 100644 --- a/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/FlowStreamRefsDocSpec.scala @@ -38,7 +38,7 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { val reply: Future[LogsOffer] = ref.map(LogsOffer(streamId, _)) // reply to sender - reply pipeTo sender() + reply.pipeTo(sender()) } def streamLogs(streamId: Long): Source[String, NotUsed] = ??? @@ -85,7 +85,7 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { val reply: Future[MeasurementsSinkReady] = ref.map(MeasurementsSinkReady(nodeId, _)) // reply to sender - reply pipeTo sender() + reply.pipeTo(sender()) } def logsSinkFor(nodeId: String): Sink[String, NotUsed] = ??? 
@@ -116,11 +116,14 @@ class FlowStreamRefsDocSpec extends AkkaSpec with CompileOnlySpec { import akka.stream.StreamRefAttributes // configuring Sink.sourceRef (notice that we apply the attributes to the Sink!): - Source.repeat("hello") + Source + .repeat("hello") .runWith(StreamRefs.sourceRef().addAttributes(StreamRefAttributes.subscriptionTimeout(5.seconds))) // configuring SinkRef.source: - StreamRefs.sinkRef().addAttributes(StreamRefAttributes.subscriptionTimeout(5.seconds)) + StreamRefs + .sinkRef() + .addAttributes(StreamRefAttributes.subscriptionTimeout(5.seconds)) .runWith(Sink.ignore) // not very interesting Sink, just an example //#attr-sub-timeout } diff --git a/akka-docs/src/test/scala/docs/stream/GraphCyclesSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphCyclesSpec.scala index 74db079e10..9ad76240a1 100644 --- a/akka-docs/src/test/scala/docs/stream/GraphCyclesSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/GraphCyclesSpec.scala @@ -4,7 +4,7 @@ package docs.stream -import akka.stream.{ ClosedShape, OverflowStrategy, ActorMaterializer } +import akka.stream.{ ActorMaterializer, ClosedShape, OverflowStrategy } import akka.stream.scaladsl._ import akka.testkit.AkkaSpec diff --git a/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala index bb79a3c056..fab5229d40 100644 --- a/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/GraphDSLDocSpec.scala @@ -97,10 +97,8 @@ class GraphDSLDocSpec extends AkkaSpec { //#graph-dsl-components-shape // A shape represents the input and output ports of a reusable // processing module - case class PriorityWorkerPoolShape[In, Out]( - jobsIn: Inlet[In], - priorityJobsIn: Inlet[In], - resultsOut: Outlet[Out]) extends Shape { + case class PriorityWorkerPoolShape[In, Out](jobsIn: Inlet[In], priorityJobsIn: Inlet[In], resultsOut: Outlet[Out]) + extends Shape { // It is important to provide the list of 
all input and output // ports with a stable order. Duplicates are not allowed. @@ -111,19 +109,16 @@ class GraphDSLDocSpec extends AkkaSpec { // A Shape must be able to create a copy of itself. Basically // it means a new instance with copies of the ports - override def deepCopy() = PriorityWorkerPoolShape( - jobsIn.carbonCopy(), - priorityJobsIn.carbonCopy(), - resultsOut.carbonCopy()) + override def deepCopy() = + PriorityWorkerPoolShape(jobsIn.carbonCopy(), priorityJobsIn.carbonCopy(), resultsOut.carbonCopy()) } //#graph-dsl-components-shape //#graph-dsl-components-create object PriorityWorkerPool { - def apply[In, Out]( - worker: Flow[In, Out, Any], - workerCount: Int): Graph[PriorityWorkerPoolShape[In, Out], NotUsed] = { + def apply[In, Out](worker: Flow[In, Out, Any], + workerCount: Int): Graph[PriorityWorkerPoolShape[In, Out], NotUsed] = { GraphDSL.create() { implicit b => import GraphDSL.Implicits._ @@ -143,10 +138,9 @@ class GraphDSLDocSpec extends AkkaSpec { // We now expose the input ports of the priorityMerge and the output // of the resultsMerge as our PriorityWorkerPool ports // -- all neatly wrapped in our domain specific Shape - PriorityWorkerPoolShape( - jobsIn = priorityMerge.in(0), - priorityJobsIn = priorityMerge.preferred, - resultsOut = resultsMerge.out) + PriorityWorkerPoolShape(jobsIn = priorityMerge.in(0), + priorityJobsIn = priorityMerge.preferred, + resultsOut = resultsMerge.out) } } @@ -160,28 +154,30 @@ class GraphDSLDocSpec extends AkkaSpec { val worker1 = Flow[String].map("step 1 " + _) val worker2 = Flow[String].map("step 2 " + _) - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + import GraphDSL.Implicits._ - val priorityPool1 = b.add(PriorityWorkerPool(worker1, 4)) - val priorityPool2 = b.add(PriorityWorkerPool(worker2, 2)) + val priorityPool1 = b.add(PriorityWorkerPool(worker1, 4)) + val priorityPool2 = 
b.add(PriorityWorkerPool(worker2, 2)) - Source(1 to 100).map("job: " + _) ~> priorityPool1.jobsIn - Source(1 to 100).map("priority job: " + _) ~> priorityPool1.priorityJobsIn + Source(1 to 100).map("job: " + _) ~> priorityPool1.jobsIn + Source(1 to 100).map("priority job: " + _) ~> priorityPool1.priorityJobsIn - priorityPool1.resultsOut ~> priorityPool2.jobsIn - Source(1 to 100).map("one-step, priority " + _) ~> priorityPool2.priorityJobsIn + priorityPool1.resultsOut ~> priorityPool2.jobsIn + Source(1 to 100).map("one-step, priority " + _) ~> priorityPool2.priorityJobsIn - priorityPool2.resultsOut ~> Sink.foreach(println) - ClosedShape - }).run() + priorityPool2.resultsOut ~> Sink.foreach(println) + ClosedShape + }) + .run() //#graph-dsl-components-use //#graph-dsl-components-shape2 import FanInShape.{ Init, Name } class PriorityWorkerPoolShape2[In, Out](_init: Init[Out] = Name("PriorityWorkerPool")) - extends FanInShape[Out](_init) { + extends FanInShape[Out](_init) { protected override def construct(i: Init[Out]) = new PriorityWorkerPoolShape2(i) val jobsIn = newInlet[In]("jobsIn") @@ -195,8 +191,9 @@ class GraphDSLDocSpec extends AkkaSpec { "access to materialized value" in { //#graph-dsl-matvalue import GraphDSL.Implicits._ - val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.create(Sink.fold[Int, Int](0)(_ + _)) { implicit builder => fold => - FlowShape(fold.in, builder.materializedValue.mapAsync(4)(identity).outlet) + val foldFlow: Flow[Int, Int, Future[Int]] = Flow.fromGraph(GraphDSL.create(Sink.fold[Int, Int](0)(_ + _)) { + implicit builder => fold => + FlowShape(fold.in, builder.materializedValue.mapAsync(4)(identity).outlet) }) //#graph-dsl-matvalue @@ -205,15 +202,16 @@ class GraphDSLDocSpec extends AkkaSpec { //#graph-dsl-matvalue-cycle import GraphDSL.Implicits._ // This cannot produce any value: - val cyclicFold: Source[Int, Future[Int]] = Source.fromGraph(GraphDSL.create(Sink.fold[Int, Int](0)(_ + _)) { implicit builder => fold => - 
// - Fold cannot complete until its upstream mapAsync completes - // - mapAsync cannot complete until the materialized Future produced by - // fold completes - // As a result this Source will never emit anything, and its materialited - // Future will never complete - builder.materializedValue.mapAsync(4)(identity) ~> fold - SourceShape(builder.materializedValue.mapAsync(4)(identity).outlet) - }) + val cyclicFold: Source[Int, Future[Int]] = + Source.fromGraph(GraphDSL.create(Sink.fold[Int, Int](0)(_ + _)) { implicit builder => fold => + // - Fold cannot complete until its upstream mapAsync completes + // - mapAsync cannot complete until the materialized Future produced by + // fold completes + // As a result this Source will never emit anything, and its materialited + // Future will never complete + builder.materializedValue.mapAsync(4)(identity) ~> fold + SourceShape(builder.materializedValue.mapAsync(4)(identity).outlet) + }) //#graph-dsl-matvalue-cycle } diff --git a/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala index 45d1fb943f..07ff90e6f8 100644 --- a/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/GraphStageDocSpec.scala @@ -149,9 +149,7 @@ class GraphStageDocSpec extends AkkaSpec { val stringLength = Flow.fromGraph(new Map[String, Int](_.length)) val result = - Source(Vector("one", "two", "three")) - .via(stringLength) - .runFold(Seq.empty[Int])((elem, acc) => elem :+ acc) + Source(Vector("one", "two", "three")).via(stringLength).runFold(Seq.empty[Int])((elem, acc) => elem :+ acc) Await.result(result, 3.seconds) should ===(Seq(3, 3, 5)) } @@ -188,9 +186,7 @@ class GraphStageDocSpec extends AkkaSpec { val evenFilter = Flow.fromGraph(new Filter[Int](_ % 2 == 0)) val result = - Source(Vector(1, 2, 3, 4, 5, 6)) - .via(evenFilter) - .runFold(Seq.empty[Int])((elem, acc) => elem :+ acc) + Source(Vector(1, 2, 3, 4, 5, 
6)).via(evenFilter).runFold(Seq.empty[Int])((elem, acc) => elem :+ acc) Await.result(result, 3.seconds) should ===(Seq(2, 4, 6)) } @@ -241,9 +237,7 @@ class GraphStageDocSpec extends AkkaSpec { val duplicator = Flow.fromGraph(new Duplicator[Int]) val result = - Source(Vector(1, 2, 3)) - .via(duplicator) - .runFold(Seq.empty[Int])((elem, acc) => elem :+ acc) + Source(Vector(1, 2, 3)).via(duplicator).runFold(Seq.empty[Int])((elem, acc) => elem :+ acc) Await.result(result, 3.seconds) should ===(Seq(1, 1, 2, 2, 3, 3)) } @@ -281,9 +275,7 @@ class GraphStageDocSpec extends AkkaSpec { val duplicator = Flow.fromGraph(new Duplicator[Int]) val result = - Source(Vector(1, 2, 3)) - .via(duplicator) - .runFold(Seq.empty[Int])((elem, acc) => elem :+ acc) + Source(Vector(1, 2, 3)).via(duplicator).runFold(Seq.empty[Int])((elem, acc) => elem :+ acc) Await.result(result, 3.seconds) should ===(Seq(1, 1, 2, 2, 3, 3)) @@ -293,11 +285,8 @@ class GraphStageDocSpec extends AkkaSpec { val sink = Sink.fold[List[Int], Int](List.empty[Int])((acc, n) => acc :+ n) //#graph-operator-chain - val resultFuture = Source(1 to 5) - .via(new Filter(_ % 2 == 0)) - .via(new Duplicator()) - .via(new Map(_ / 2)) - .runWith(sink) + val resultFuture = + Source(1 to 5).via(new Filter(_ % 2 == 0)).via(new Duplicator()).via(new Map(_ / 2)).runWith(sink) //#graph-operator-chain @@ -344,7 +333,8 @@ class GraphStageDocSpec extends AkkaSpec { val in = TestPublisher.probe[Int]() val out = TestSubscriber.probe[Int]() - Source.fromPublisher(in) + Source + .fromPublisher(in) .via(duplicator) .to(Sink.fromSubscriber(out)) .withAttributes(Attributes.inputBuffer(1, 1)) @@ -426,20 +416,21 @@ class GraphStageDocSpec extends AkkaSpec { val promise = Promise[A]() val logic = new GraphStageLogic(shape) { - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - promise.success(elem) - push(out, elem) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + 
promise.success(elem) + push(out, elem) - // replace handler with one that only forwards elements - setHandler(in, new InHandler { - override def onPush(): Unit = { - push(out, grab(in)) - } - }) - } - }) + // replace handler with one that only forwards elements + setHandler(in, new InHandler { + override def onPush(): Unit = { + push(out, grab(in)) + } + }) + } + }) setHandler(out, new OutHandler { override def onPull(): Unit = { @@ -455,9 +446,7 @@ class GraphStageDocSpec extends AkkaSpec { //#materialized // tests: - val flow = Source(Vector(1, 2, 3)) - .viaMat(new FirstValue)(Keep.right) - .to(Sink.ignore) + val flow = Source(Vector(1, 2, 3)).viaMat(new FirstValue)(Keep.right).to(Sink.ignore) val result: Future[Int] = flow.run() @@ -488,60 +477,58 @@ class GraphStageDocSpec extends AkkaSpec { pull(in) } - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - buffer.enqueue(elem) - if (downstreamWaiting) { - downstreamWaiting = false - val bufferedElem = buffer.dequeue() - push(out, bufferedElem) - } - if (!bufferFull) { - pull(in) - } - } + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + buffer.enqueue(elem) + if (downstreamWaiting) { + downstreamWaiting = false + val bufferedElem = buffer.dequeue() + push(out, bufferedElem) + } + if (!bufferFull) { + pull(in) + } + } - override def onUpstreamFinish(): Unit = { - if (buffer.nonEmpty) { - // emit the rest if possible - emitMultiple(out, buffer.toIterator) - } - completeStage() - } - }) + override def onUpstreamFinish(): Unit = { + if (buffer.nonEmpty) { + // emit the rest if possible + emitMultiple(out, buffer.toIterator) + } + completeStage() + } + }) - setHandler(out, new OutHandler { - override def onPull(): Unit = { - if (buffer.isEmpty) { - downstreamWaiting = true - } else { - val elem = buffer.dequeue - push(out, elem) - } - if (!bufferFull && !hasBeenPulled(in)) { - pull(in) - } - } - }) + setHandler(out, + new OutHandler { + 
override def onPull(): Unit = { + if (buffer.isEmpty) { + downstreamWaiting = true + } else { + val elem = buffer.dequeue + push(out, elem) + } + if (!bufferFull && !hasBeenPulled(in)) { + pull(in) + } + } + }) } } //#detached // tests: - val result1 = Source(Vector(1, 2, 3)) - .via(new TwoBuffer) - .runFold(Vector.empty[Int])((acc, n) => acc :+ n) + val result1 = Source(Vector(1, 2, 3)).via(new TwoBuffer).runFold(Vector.empty[Int])((acc, n) => acc :+ n) Await.result(result1, 3.seconds) should ===(Vector(1, 2, 3)) val subscriber = TestSubscriber.manualProbe[Int]() val publisher = TestPublisher.probe[Int]() val flow2 = - Source.fromPublisher(publisher) - .via(new TwoBuffer) - .to(Sink.fromSubscriber(subscriber)) + Source.fromPublisher(publisher).via(new TwoBuffer).to(Sink.fromSubscriber(subscriber)) val result2 = flow2.run() diff --git a/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala b/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala index a338ead444..1f68faf43c 100644 --- a/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/GraphStageLoggingDocSpec.scala @@ -44,12 +44,8 @@ class GraphStageLoggingDocSpec extends AkkaSpec("akka.loglevel = DEBUG") { "demonstrate logging in custom graphstage" in { val n = 10 EventFilter.debug(start = "Randomly generated", occurrences = n).intercept { - Source.fromGraph(new RandomLettersSource) - .take(n) - .runWith(Sink.ignore) - .futureValue + Source.fromGraph(new RandomLettersSource).take(n).runWith(Sink.ignore).futureValue } } } - diff --git a/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala b/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala index a43e44212e..5a9696629e 100644 --- a/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/HubsDocSpec.scala @@ -72,9 +72,7 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { //#pub-sub-1 // Obtain a Sink and Source which will 
publish and receive from the "bus" respectively. val (sink, source) = - MergeHub.source[String](perProducerBufferSize = 16) - .toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both) - .run() + MergeHub.source[String](perProducerBufferSize = 16).toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run() //#pub-sub-1 //#pub-sub-2 @@ -89,17 +87,15 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { // started stream as its "topic". We add two more features, external cancellation of // the registration and automatic cleanup for very slow subscribers. val busFlow: Flow[String, String, UniqueKillSwitch] = - Flow.fromSinkAndSource(sink, source) + Flow + .fromSinkAndSource(sink, source) .joinMat(KillSwitches.singleBidi[String, String])(Keep.right) .backpressureTimeout(3.seconds) //#pub-sub-3 //#pub-sub-4 val switch: UniqueKillSwitch = - Source.repeat("Hello world!") - .viaMat(busFlow)(Keep.right) - .to(Sink.foreach(println)) - .run() + Source.repeat("Hello world!").viaMat(busFlow)(Keep.right).to(Sink.foreach(println)).run() // Shut down externally switch.shutdown() @@ -109,17 +105,17 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { "demonstrate creating a dynamic partition hub" in compileOnlySpec { //#partition-hub // A simple producer that publishes a new "message-" every second - val producer = Source.tick(1.second, 1.second, "message") - .zipWith(Source(1 to 100))((a, b) => s"$a-$b") + val producer = Source.tick(1.second, 1.second, "message").zipWith(Source(1 to 100))((a, b) => s"$a-$b") // Attach a PartitionHub Sink to the producer. This will materialize to a // corresponding Source. 
// (We need to use toMat and Keep.right since by default the materialized // value to the left is used) val runnableGraph: RunnableGraph[Source[String, NotUsed]] = - producer.toMat(PartitionHub.sink( - (size, elem) => math.abs(elem.hashCode % size), - startAfterNrOfConsumers = 2, bufferSize = 256))(Keep.right) + producer.toMat( + PartitionHub.sink((size, elem) => math.abs(elem.hashCode % size), + startAfterNrOfConsumers = 2, + bufferSize = 256))(Keep.right) // By running/materializing the producer, we get back a Source, which // gives us access to the elements published by the producer. @@ -134,8 +130,7 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { "demonstrate creating a dynamic stateful partition hub" in compileOnlySpec { //#partition-hub-stateful // A simple producer that publishes a new "message-" every second - val producer = Source.tick(1.second, 1.second, "message") - .zipWith(Source(1 to 100))((a, b) => s"$a-$b") + val producer = Source.tick(1.second, 1.second, "message").zipWith(Source(1 to 100))((a, b) => s"$a-$b") // New instance of the partitioner function and its state is created // for each materialization of the PartitionHub. @@ -153,9 +148,8 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { // (We need to use toMat and Keep.right since by default the materialized // value to the left is used) val runnableGraph: RunnableGraph[Source[String, NotUsed]] = - producer.toMat(PartitionHub.statefulSink( - () => roundRobin(), - startAfterNrOfConsumers = 2, bufferSize = 256))(Keep.right) + producer.toMat(PartitionHub.statefulSink(() => roundRobin(), startAfterNrOfConsumers = 2, bufferSize = 256))( + Keep.right) // By running/materializing the producer, we get back a Source, which // gives us access to the elements published by the producer. @@ -174,15 +168,15 @@ class HubsDocSpec extends AkkaSpec with CompileOnlySpec { // ConsumerInfo.queueSize is the approximate number of buffered elements for a consumer. 
// Note that this is a moving target since the elements are consumed concurrently. val runnableGraph: RunnableGraph[Source[Int, NotUsed]] = - producer.toMat(PartitionHub.statefulSink( - () => (info, elem) => info.consumerIds.minBy(id => info.queueSize(id)), - startAfterNrOfConsumers = 2, bufferSize = 16))(Keep.right) + producer.toMat( + PartitionHub.statefulSink(() => (info, elem) => info.consumerIds.minBy(id => info.queueSize(id)), + startAfterNrOfConsumers = 2, + bufferSize = 16))(Keep.right) val fromProducer: Source[Int, NotUsed] = runnableGraph.run() fromProducer.runForeach(msg => println("consumer1: " + msg)) - fromProducer.throttle(10, 100.millis) - .runForeach(msg => println("consumer2: " + msg)) + fromProducer.throttle(10, 100.millis).runForeach(msg => println("consumer2: " + msg)) //#partition-hub-fastest } diff --git a/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala b/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala index 7b31fb3771..95d2fa0f22 100644 --- a/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/IntegrationDocSpec.scala @@ -161,24 +161,21 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { //#tweet-authors val authors: Source[Author, NotUsed] = - tweets - .filter(_.hashtags.contains(akkaTag)) - .map(_.author) + tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) //#tweet-authors //#email-addresses-mapAsync val emailAddresses: Source[String, NotUsed] = - authors - .mapAsync(4)(author => addressSystem.lookupEmail(author.handle)) - .collect { case Some(emailAddress) => emailAddress } + authors.mapAsync(4)(author => addressSystem.lookupEmail(author.handle)).collect { + case Some(emailAddress) => emailAddress + } //#email-addresses-mapAsync //#send-emails val sendEmails: RunnableGraph[NotUsed] = emailAddresses .mapAsync(4)(address => { - emailServer.send( - Email(to = address, title = "Akka", body = "I like your tweet")) + 
emailServer.send(Email(to = address, title = "Akka", body = "I like your tweet")) }) .to(Sink.ignore) @@ -208,19 +205,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val onErrorMessage = (ex: Throwable) => AckingReceiver.StreamFailure(ex) val probe = TestProbe() - val receiver = system.actorOf( - Props(new AckingReceiver(probe.ref, ackWith = AckMessage))) - val sink = Sink.actorRefWithAck( - receiver, - onInitMessage = InitMessage, - ackMessage = AckMessage, - onCompleteMessage = OnCompleteMessage, - onFailureMessage = onErrorMessage - ) + val receiver = system.actorOf(Props(new AckingReceiver(probe.ref, ackWith = AckMessage))) + val sink = Sink.actorRefWithAck(receiver, + onInitMessage = InitMessage, + ackMessage = AckMessage, + onCompleteMessage = OnCompleteMessage, + onFailureMessage = onErrorMessage) - words - .map(_.toLowerCase) - .runWith(sink) + words.map(_.toLowerCase).runWith(sink) probe.expectMsg("Stream initialized!") probe.expectMsg("hello") @@ -272,7 +264,8 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val emailAddresses: Source[String, NotUsed] = authors.via( - Flow[Author].mapAsync(4)(author => addressSystem.lookupEmail(author.handle)) + Flow[Author] + .mapAsync(4)(author => addressSystem.lookupEmail(author.handle)) .withAttributes(supervisionStrategy(resumingDecider))) //#email-addresses-mapAsync-supervision } @@ -287,29 +280,28 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) val emailAddresses: Source[String, NotUsed] = - authors - .mapAsyncUnordered(4)(author => addressSystem.lookupEmail(author.handle)) - .collect { case Some(emailAddress) => emailAddress } + authors.mapAsyncUnordered(4)(author => addressSystem.lookupEmail(author.handle)).collect { + case Some(emailAddress) => emailAddress + } val sendEmails: RunnableGraph[NotUsed] = emailAddresses .mapAsyncUnordered(4)(address => { - emailServer.send( - 
Email(to = address, title = "Akka", body = "I like your tweet")) + emailServer.send(Email(to = address, title = "Akka", body = "I like your tweet")) }) .to(Sink.ignore) sendEmails.run() //#external-service-mapAsyncUnordered - probe.receiveN(7).toSet should be(Set( - "rolandkuhn@somewhere.com", - "patriknw@somewhere.com", - "bantonsson@somewhere.com", - "drewhk@somewhere.com", - "ktosopl@somewhere.com", - "mmartynas@somewhere.com", - "akkateam@somewhere.com")) + probe.receiveN(7).toSet should be( + Set("rolandkuhn@somewhere.com", + "patriknw@somewhere.com", + "bantonsson@somewhere.com", + "drewhk@somewhere.com", + "ktosopl@somewhere.com", + "mmartynas@somewhere.com", + "akkateam@somewhere.com")) } "careful managed blocking with mapAsync" in { @@ -320,8 +312,9 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val authors = tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) val phoneNumbers = - authors.mapAsync(4)(author => addressSystem.lookupPhoneNumber(author.handle)) - .collect { case Some(phoneNo) => phoneNo } + authors.mapAsync(4)(author => addressSystem.lookupPhoneNumber(author.handle)).collect { + case Some(phoneNo) => phoneNo + } //#blocking-mapAsync val blockingExecutionContext = system.dispatchers.lookup("blocking-dispatcher") @@ -330,8 +323,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { phoneNumbers .mapAsync(4)(phoneNo => { Future { - smsServer.send( - TextMessage(to = phoneNo, body = "I like your tweet")) + smsServer.send(TextMessage(to = phoneNo, body = "I like your tweet")) }(blockingExecutionContext) }) .to(Sink.ignore) @@ -339,14 +331,14 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { sendTextMessages.run() //#blocking-mapAsync - probe.receiveN(7).toSet should be(Set( - "rolandkuhn".hashCode.toString, - "patriknw".hashCode.toString, - "bantonsson".hashCode.toString, - "drewhk".hashCode.toString, - "ktosopl".hashCode.toString, - "mmartynas".hashCode.toString, - 
"akkateam".hashCode.toString)) + probe.receiveN(7).toSet should be( + Set("rolandkuhn".hashCode.toString, + "patriknw".hashCode.toString, + "bantonsson".hashCode.toString, + "drewhk".hashCode.toString, + "ktosopl".hashCode.toString, + "mmartynas".hashCode.toString, + "akkateam".hashCode.toString)) } "careful managed blocking with map" in { @@ -357,8 +349,9 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val authors = tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) val phoneNumbers = - authors.mapAsync(4)(author => addressSystem.lookupPhoneNumber(author.handle)) - .collect { case Some(phoneNo) => phoneNo } + authors.mapAsync(4)(author => addressSystem.lookupPhoneNumber(author.handle)).collect { + case Some(phoneNo) => phoneNo + } //#blocking-map val send = Flow[String] @@ -392,9 +385,7 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { implicit val timeout = Timeout(3.seconds) val saveTweets: RunnableGraph[NotUsed] = - akkaTweets - .mapAsync(4)(tweet => database ? Save(tweet)) - .to(Sink.ignore) + akkaTweets.mapAsync(4)(tweet => database ? 
Save(tweet)).to(Sink.ignore) //#save-tweets saveTweets.run() @@ -419,8 +410,8 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { implicit val blockingExecutionContext = system.dispatchers.lookup("blocking-dispatcher") val service = new SometimesSlowService - implicit val materializer = ActorMaterializer( - ActorMaterializerSettings(system).withInputBuffer(initialSize = 4, maxSize = 4)) + implicit val materializer = + ActorMaterializer(ActorMaterializerSettings(system).withInputBuffer(initialSize = 4, maxSize = 4)) Source(List("a", "B", "C", "D", "e", "F", "g", "H", "i", "J")) .map(elem => { println(s"before: $elem"); elem }) @@ -451,8 +442,8 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { implicit val blockingExecutionContext = system.dispatchers.lookup("blocking-dispatcher") val service = new SometimesSlowService - implicit val materializer = ActorMaterializer( - ActorMaterializerSettings(system).withInputBuffer(initialSize = 4, maxSize = 4)) + implicit val materializer = + ActorMaterializer(ActorMaterializerSettings(system).withInputBuffer(initialSize = 4, maxSize = 4)) Source(List("a", "B", "C", "D", "e", "F", "g", "H", "i", "J")) .map(elem => { println(s"before: $elem"); elem }) @@ -460,17 +451,17 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { .runForeach(elem => println(s"after: $elem")) //#sometimes-slow-mapAsyncUnordered - probe.receiveN(10).toSet should be(Set( - "after: A", - "after: B", - "after: C", - "after: D", - "after: E", - "after: F", - "after: G", - "after: H", - "after: I", - "after: J")) + probe.receiveN(10).toSet should be( + Set("after: A", + "after: B", + "after: C", + "after: D", + "after: E", + "after: F", + "after: G", + "after: H", + "after: I", + "after: J")) } "illustrate use of source queue" in { @@ -488,14 +479,16 @@ class IntegrationDocSpec extends AkkaSpec(IntegrationDocSpec.config) { val source = Source(1 to 10) implicit val ec = system.dispatcher - 
source.mapAsync(1)(x => { - queue.offer(x).map { - case QueueOfferResult.Enqueued => println(s"enqueued $x") - case QueueOfferResult.Dropped => println(s"dropped $x") - case QueueOfferResult.Failure(ex) => println(s"Offer failed ${ex.getMessage}") - case QueueOfferResult.QueueClosed => println("Source Queue closed") - } - }).runWith(Sink.ignore) + source + .mapAsync(1)(x => { + queue.offer(x).map { + case QueueOfferResult.Enqueued => println(s"enqueued $x") + case QueueOfferResult.Dropped => println(s"dropped $x") + case QueueOfferResult.Failure(ex) => println(s"Offer failed ${ex.getMessage}") + case QueueOfferResult.QueueClosed => println("Source Queue closed") + } + }) + .runWith(Sink.ignore) //#source-queue } diff --git a/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala index 6085d95e95..7227f28e34 100644 --- a/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/QuickStartDocSpec.scala @@ -10,7 +10,7 @@ import akka.stream.scaladsl._ //#stream-imports //#other-imports -import akka.{ NotUsed, Done } +import akka.{ Done, NotUsed } import akka.actor.ActorSystem import akka.util.ByteString import scala.concurrent._ @@ -50,9 +50,7 @@ class QuickStartDocSpec extends WordSpec with BeforeAndAfterAll with ScalaFuture val factorials = source.scan(BigInt(1))((acc, next) => acc * next) val result: Future[IOResult] = - factorials - .map(num => ByteString(s"$num\n")) - .runWith(FileIO.toPath(Paths.get("factorials.txt"))) + factorials.map(num => ByteString(s"$num\n")).runWith(FileIO.toPath(Paths.get("factorials.txt"))) //#transform-source //#use-transformed-sink @@ -81,9 +79,7 @@ class QuickStartDocSpec extends WordSpec with BeforeAndAfterAll with ScalaFuture //#transform-sink def lineSink(filename: String): Sink[String, Future[IOResult]] = - Flow[String] - .map(s => ByteString(s + "\n")) - .toMat(FileIO.toPath(Paths.get(filename)))(Keep.right) + 
Flow[String].map(s => ByteString(s + "\n")).toMat(FileIO.toPath(Paths.get(filename)))(Keep.right) //#transform-sink } diff --git a/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala b/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala index 36d4465df8..540246259e 100644 --- a/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/RateTransformationDocSpec.scala @@ -22,20 +22,16 @@ class RateTransformationDocSpec extends AkkaSpec { "conflate should summarize" in { //#conflate-summarize - val statsFlow = Flow[Double] - .conflateWithSeed(immutable.Seq(_))(_ :+ _) - .map { s => - val μ = s.sum / s.size - val se = s.map(x => pow(x - μ, 2)) - val σ = sqrt(se.sum / se.size) - (σ, μ, s.size) - } + val statsFlow = Flow[Double].conflateWithSeed(immutable.Seq(_))(_ :+ _).map { s => + val μ = s.sum / s.size + val se = s.map(x => pow(x - μ, 2)) + val σ = sqrt(se.sum / se.size) + (σ, μ, s.size) + } //#conflate-summarize - val fut = Source.fromIterator(() => Iterator.continually(Random.nextGaussian)) - .via(statsFlow) - .grouped(10) - .runWith(Sink.head) + val fut = + Source.fromIterator(() => Iterator.continually(Random.nextGaussian)).via(statsFlow).grouped(10).runWith(Sink.head) fut.futureValue } @@ -51,25 +47,17 @@ class RateTransformationDocSpec extends AkkaSpec { .mapConcat(identity) //#conflate-sample - val fut = Source(1 to 1000) - .map(_.toDouble) - .via(sampleFlow) - .runWith(Sink.fold(Seq.empty[Double])(_ :+ _)) + val fut = Source(1 to 1000).map(_.toDouble).via(sampleFlow).runWith(Sink.fold(Seq.empty[Double])(_ :+ _)) fut.futureValue } "extrapolate should repeat last" in { //#extrapolate-last - val lastFlow = Flow[Double] - .extrapolate(Iterator.continually(_)) + val lastFlow = Flow[Double].extrapolate(Iterator.continually(_)) //#extrapolate-last - val (probe, fut) = TestSource.probe[Double] - .via(lastFlow) - .grouped(10) - .toMat(Sink.head)(Keep.both) - .run() + val (probe, 
fut) = TestSource.probe[Double].via(lastFlow).grouped(10).toMat(Sink.head)(Keep.both).run() probe.sendNext(1.0) val extrapolated = fut.futureValue @@ -80,14 +68,10 @@ class RateTransformationDocSpec extends AkkaSpec { "extrapolate should send seed first" in { //#extrapolate-seed val initial = 2.0 - val seedFlow = Flow[Double] - .extrapolate(Iterator.continually(_), Some(initial)) + val seedFlow = Flow[Double].extrapolate(Iterator.continually(_), Some(initial)) //#extrapolate-seed - val fut = TestSource.probe[Double] - .via(seedFlow) - .grouped(10) - .runWith(Sink.head) + val fut = TestSource.probe[Double].via(seedFlow).grouped(10).runWith(Sink.head) val extrapolated = fut.futureValue extrapolated.size shouldBe 10 @@ -96,17 +80,14 @@ class RateTransformationDocSpec extends AkkaSpec { "extrapolate should track drift" in { //#extrapolate-drift - val driftFlow = Flow[Double].map(_ -> 0) - .extrapolate[(Double, Int)] { case (i, _) => Iterator.from(1).map(i -> _) } + val driftFlow = Flow[Double].map(_ -> 0).extrapolate[(Double, Int)] { case (i, _) => Iterator.from(1).map(i -> _) } //#extrapolate-drift val latch = TestLatch(2) - val realDriftFlow = Flow[Double].map(d => { latch.countDown(); d -> 0; }) - .extrapolate[(Double, Int)] { case (d, _) => latch.countDown(); Iterator.from(1).map(d -> _) } + val realDriftFlow = Flow[Double].map(d => { latch.countDown(); d -> 0; }).extrapolate[(Double, Int)] { + case (d, _) => latch.countDown(); Iterator.from(1).map(d -> _) + } - val (pub, sub) = TestSource.probe[Double] - .via(realDriftFlow) - .toMat(TestSink.probe[(Double, Int)])(Keep.both) - .run() + val (pub, sub) = TestSource.probe[Double].via(realDriftFlow).toMat(TestSink.probe[(Double, Int)])(Keep.both).run() sub.request(1) pub.sendNext(1.0) @@ -122,17 +103,12 @@ class RateTransformationDocSpec extends AkkaSpec { "expand should track drift" in { //#expand-drift - val driftFlow = Flow[Double] - .expand(i => Iterator.from(0).map(i -> _)) + val driftFlow = Flow[Double].expand(i 
=> Iterator.from(0).map(i -> _)) //#expand-drift val latch = TestLatch(2) - val realDriftFlow = Flow[Double] - .expand(d => { latch.countDown(); Iterator.from(0).map(d -> _) }) + val realDriftFlow = Flow[Double].expand(d => { latch.countDown(); Iterator.from(0).map(d -> _) }) - val (pub, sub) = TestSource.probe[Double] - .via(realDriftFlow) - .toMat(TestSink.probe[(Double, Int)])(Keep.both) - .run() + val (pub, sub) = TestSource.probe[Double].via(realDriftFlow).toMat(TestSink.probe[(Double, Int)])(Keep.both).run() sub.request(1) pub.sendNext(1.0) diff --git a/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala b/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala index bec12ec8de..9d00d770fc 100644 --- a/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/ReactiveStreamsDocSpec.scala @@ -23,9 +23,7 @@ class ReactiveStreamsDocSpec extends AkkaSpec { trait Fixture { //#authors - val authors = Flow[Tweet] - .filter(_.hashtags.contains(akkaTag)) - .map(_.author) + val authors = Flow[Tweet].filter(_.hashtags.contains(akkaTag)).map(_.author) //#authors @@ -110,8 +108,7 @@ class ReactiveStreamsDocSpec extends AkkaSpec { //#source-fanoutPublisher val authorPublisher: Publisher[Author] = - Source.fromPublisher(tweets).via(authors) - .runWith(Sink.asPublisher(fanout = true)) + Source.fromPublisher(tweets).via(authors).runWith(Sink.asPublisher(fanout = true)) authorPublisher.subscribe(storage) authorPublisher.subscribe(alert) diff --git a/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala index 0676f229ce..6974b20478 100644 --- a/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/RestartDocSpec.scala @@ -35,18 +35,16 @@ class RestartDocSpec extends AkkaSpec with CompileOnlySpec { "demonstrate a restart with backoff source" in compileOnlySpec { //#restart-with-backoff-source - 
val restartSource = RestartSource.withBackoff( - minBackoff = 3.seconds, - maxBackoff = 30.seconds, - randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly - maxRestarts = 20 // limits the amount of restarts to 20 + val restartSource = RestartSource.withBackoff(minBackoff = 3.seconds, + maxBackoff = 30.seconds, + randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly + maxRestarts = 20 // limits the amount of restarts to 20 ) { () => // Create a source from a future of a source Source.fromFutureSource { // Make a single request with akka-http - Http().singleRequest(HttpRequest( - uri = "http://example.com/eventstream" - )) + Http() + .singleRequest(HttpRequest(uri = "http://example.com/eventstream")) // Unmarshall it as a source of server sent events .flatMap(Unmarshal(_).to[Source[ServerSentEvent, NotUsed]]) } diff --git a/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala b/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala index 34e4ea23a7..3c1d7c09bc 100644 --- a/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/SinkRecipeDocSpec.scala @@ -16,8 +16,7 @@ class SinkRecipeDocSpec extends RecipeSpec { //#forseachAsync-processing //def asyncProcessing(value: Int): Future[Unit] = _ - Source(1 to 100) - .runWith(Sink.foreachAsync(10)(asyncProcessing)) + Source(1 to 100).runWith(Sink.foreachAsync(10)(asyncProcessing)) //#forseachAsync-processing } } diff --git a/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala b/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala index bcf5f29155..e9bc6c60be 100644 --- a/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/StreamBuffersRateSpec.scala @@ -16,25 +16,30 @@ class StreamBuffersRateSpec extends AkkaSpec { def println(s: Any) = () //#pipelining Source(1 to 3) - .map { i => println(s"A: $i"); i }.async - .map { i => println(s"B: $i"); i 
}.async - .map { i => println(s"C: $i"); i }.async + .map { i => + println(s"A: $i"); i + } + .async + .map { i => + println(s"B: $i"); i + } + .async + .map { i => + println(s"C: $i"); i + } + .async .runWith(Sink.ignore) //#pipelining } "Demonstrate buffer sizes" in { //#materializer-buffer - val materializer = ActorMaterializer( - ActorMaterializerSettings(system) - .withInputBuffer( - initialSize = 64, - maxSize = 64)) + val materializer = + ActorMaterializer(ActorMaterializerSettings(system).withInputBuffer(initialSize = 64, maxSize = 64)) //#materializer-buffer //#section-buffer - val section = Flow[Int].map(_ * 2).async - .addAttributes(Attributes.inputBuffer(initial = 1, max = 1)) // the buffer size of this map is 1 + val section = Flow[Int].map(_ * 2).async.addAttributes(Attributes.inputBuffer(initial = 1, max = 1)) // the buffer size of this map is 1 val flow = section.via(Flow[Int].map(_ / 2)).async // the buffer size of this map is the default //#section-buffer } @@ -52,7 +57,8 @@ class StreamBuffersRateSpec extends AkkaSpec { Source.tick(initialDelay = 3.second, interval = 3.second, Tick()) ~> zipper.in0 - Source.tick(initialDelay = 1.second, interval = 1.second, "message!") + Source + .tick(initialDelay = 1.second, interval = 1.second, "message!") .conflateWithSeed(seed = (_) => 1)((count, _) => count + 1) ~> zipper.in1 zipper.out ~> Sink.foreach(println) diff --git a/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala b/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala index 4ec39bd90b..2fea704f05 100644 --- a/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/StreamTestKitDocSpec.scala @@ -84,8 +84,7 @@ class StreamTestKitDocSpec extends AkkaSpec { //#source-actorref val sinkUnderTest = Flow[Int].map(_.toString).toMat(Sink.fold("")(_ + _))(Keep.right) - val (ref, future) = Source.actorRef(8, OverflowStrategy.fail) - .toMat(sinkUnderTest)(Keep.both).run() + val (ref, 
future) = Source.actorRef(8, OverflowStrategy.fail).toMat(sinkUnderTest)(Keep.both).run() ref ! 1 ref ! 2 @@ -101,11 +100,7 @@ class StreamTestKitDocSpec extends AkkaSpec { //#test-sink-probe val sourceUnderTest = Source(1 to 4).filter(_ % 2 == 0).map(_ * 2) - sourceUnderTest - .runWith(TestSink.probe[Int]) - .request(2) - .expectNext(4, 8) - .expectComplete() + sourceUnderTest.runWith(TestSink.probe[Int]).request(2).expectNext(4, 8).expectComplete() //#test-sink-probe } @@ -113,10 +108,7 @@ class StreamTestKitDocSpec extends AkkaSpec { //#test-source-probe val sinkUnderTest = Sink.cancelled - TestSource.probe[Int] - .toMat(sinkUnderTest)(Keep.left) - .run() - .expectCancellation() + TestSource.probe[Int].toMat(sinkUnderTest)(Keep.left).run().expectCancellation() //#test-source-probe } @@ -124,9 +116,7 @@ class StreamTestKitDocSpec extends AkkaSpec { //#injecting-failure val sinkUnderTest = Sink.head[Int] - val (probe, future) = TestSource.probe[Int] - .toMat(sinkUnderTest)(Keep.both) - .run() + val (probe, future) = TestSource.probe[Int].toMat(sinkUnderTest)(Keep.both).run() probe.sendError(new Exception("boom")) Await.ready(future, 3.seconds) @@ -142,10 +132,7 @@ class StreamTestKitDocSpec extends AkkaSpec { pattern.after(10.millis * sleep, using = system.scheduler)(Future.successful(sleep)) } - val (pub, sub) = TestSource.probe[Int] - .via(flowUnderTest) - .toMat(TestSink.probe[Int])(Keep.both) - .run() + val (pub, sub) = TestSource.probe[Int].via(flowUnderTest).toMat(TestSink.probe[Int])(Keep.both).run() sub.request(n = 3) pub.sendNext(3) diff --git a/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala b/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala index 938b4db610..cc534414a1 100644 --- a/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/SubstreamDocSpec.scala @@ -21,23 +21,14 @@ class SubstreamDocSpec extends AkkaSpec { //#groupBy2 //#groupBy3 - Source(1 to 10) - .groupBy(3, _ % 3) - 
.mergeSubstreams - .runWith(Sink.ignore) + Source(1 to 10).groupBy(3, _ % 3).mergeSubstreams.runWith(Sink.ignore) //#groupBy3 //#groupBy4 - Source(1 to 10) - .groupBy(3, _ % 3) - .mergeSubstreamsWithParallelism(2) - .runWith(Sink.ignore) + Source(1 to 10).groupBy(3, _ % 3).mergeSubstreamsWithParallelism(2).runWith(Sink.ignore) //concatSubstreams is equivalent to mergeSubstreamsWithParallelism(1) - Source(1 to 10) - .groupBy(3, _ % 3) - .concatSubstreams - .runWith(Sink.ignore) + Source(1 to 10).groupBy(3, _ % 3).concatSubstreams.runWith(Sink.ignore) //#groupBy4 } @@ -51,8 +42,8 @@ class SubstreamDocSpec extends AkkaSpec { //#wordCount val text = "This is the first line.\n" + - "The second line.\n" + - "There is also the 3rd line\n" + "The second line.\n" + + "There is also the 3rd line\n" val charCount = Source(text.toList) .splitAfter { _ == '\n' } @@ -66,15 +57,11 @@ class SubstreamDocSpec extends AkkaSpec { "generate substreams by flatMapConcat and flatMapMerge" in { //#flatMapConcat - Source(1 to 2) - .flatMapConcat(i => Source(List.fill(3)(i))) - .runWith(Sink.ignore) + Source(1 to 2).flatMapConcat(i => Source(List.fill(3)(i))).runWith(Sink.ignore) //#flatMapConcat //#flatMapMerge - Source(1 to 2) - .flatMapMerge(2, i => Source(List.fill(3)(i))) - .runWith(Sink.ignore) + Source(1 to 2).flatMapMerge(2, i => Source(List.fill(3)(i))).runWith(Sink.ignore) //#flatMapMerge } } diff --git a/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala b/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala index ea7bf4320f..8a68f49ff7 100644 --- a/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/TwitterStreamQuickstartDocSpec.scala @@ -8,7 +8,7 @@ package docs.stream import akka.{ Done, NotUsed } import akka.actor.ActorSystem -import akka.stream.{ ClosedShape, ActorMaterializer, OverflowStrategy } +import akka.stream.{ ActorMaterializer, ClosedShape, OverflowStrategy } 
import akka.stream.scaladsl._ import scala.concurrent.Await import scala.concurrent.Future @@ -31,9 +31,13 @@ object TwitterStreamQuickstartDocSpec { final case class Hashtag(name: String) final case class Tweet(author: Author, timestamp: Long, body: String) { - def hashtags: Set[Hashtag] = body.split(" ").collect { - case t if t.startsWith("#") => Hashtag(t.replaceAll("[^#\\w]", "")) - }.toSet + def hashtags: Set[Hashtag] = + body + .split(" ") + .collect { + case t if t.startsWith("#") => Hashtag(t.replaceAll("[^#\\w]", "")) + } + .toSet } val akkaTag = Hashtag("#akka") @@ -50,16 +54,16 @@ object TwitterStreamQuickstartDocSpec { //#fiddle_code val tweets: Source[Tweet, NotUsed] = Source( Tweet(Author("rolandkuhn"), System.currentTimeMillis, "#akka rocks!") :: - Tweet(Author("patriknw"), System.currentTimeMillis, "#akka !") :: - Tweet(Author("bantonsson"), System.currentTimeMillis, "#akka !") :: - Tweet(Author("drewhk"), System.currentTimeMillis, "#akka !") :: - Tweet(Author("ktosopl"), System.currentTimeMillis, "#akka on the rocks!") :: - Tweet(Author("mmartynas"), System.currentTimeMillis, "wow #akka !") :: - Tweet(Author("akkateam"), System.currentTimeMillis, "#akka rocks!") :: - Tweet(Author("bananaman"), System.currentTimeMillis, "#bananas rock!") :: - Tweet(Author("appleman"), System.currentTimeMillis, "#apples rock!") :: - Tweet(Author("drama"), System.currentTimeMillis, "we compared #apples to #oranges!") :: - Nil) + Tweet(Author("patriknw"), System.currentTimeMillis, "#akka !") :: + Tweet(Author("bantonsson"), System.currentTimeMillis, "#akka !") :: + Tweet(Author("drewhk"), System.currentTimeMillis, "#akka !") :: + Tweet(Author("ktosopl"), System.currentTimeMillis, "#akka on the rocks!") :: + Tweet(Author("mmartynas"), System.currentTimeMillis, "wow #akka !") :: + Tweet(Author("akkateam"), System.currentTimeMillis, "#akka rocks!") :: + Tweet(Author("bananaman"), System.currentTimeMillis, "#bananas rock!") :: + Tweet(Author("appleman"), 
System.currentTimeMillis, "#apples rock!") :: + Tweet(Author("drama"), System.currentTimeMillis, "we compared #apples to #oranges!") :: + Nil) //#fiddle_code } @@ -91,9 +95,7 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { //#authors-filter-map val authors: Source[Author, NotUsed] = - tweets - .filter(_.hashtags.contains(akkaTag)) - .map(_.author) + tweets.filter(_.hashtags.contains(akkaTag)).map(_.author) //#first-sample //#authors-filter-map @@ -171,10 +173,7 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { } //#tweets-slow-consumption-dropHead - tweets - .buffer(10, OverflowStrategy.dropHead) - .map(slowComputation) - .runWith(Sink.ignore) + tweets.buffer(10, OverflowStrategy.dropHead).map(slowComputation).runWith(Sink.ignore) //#tweets-slow-consumption-dropHead } @@ -184,9 +183,9 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { //#backpressure-by-readline val completion: Future[Done] = - Source(1 to 10) - .map(i => { println(s"map => $i"); i }) - .runForeach { i => readLine(s"Element = $i; continue reading? [press enter]\n") } + Source(1 to 10).map(i => { println(s"map => $i"); i }).runForeach { i => + readLine(s"Element = $i; continue reading? 
[press enter]\n") + } Await.ready(completion, 1.minute) //#backpressure-by-readline @@ -200,9 +199,7 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { val sumSink: Sink[Int, Future[Int]] = Sink.fold[Int, Int](0)(_ + _) val counterGraph: RunnableGraph[Future[Int]] = - tweets - .via(count) - .toMat(sumSink)(Keep.right) + tweets.via(count).toMat(sumSink)(Keep.right) val sum: Future[Int] = counterGraph.run() @@ -222,10 +219,7 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { //#tweets-runnable-flow-materialized-twice val sumSink = Sink.fold[Int, Int](0)(_ + _) val counterRunnableGraph: RunnableGraph[Future[Int]] = - tweetsInMinuteFromNow - .filter(_.hashtags contains akkaTag) - .map(t => 1) - .toMat(sumSink)(Keep.right) + tweetsInMinuteFromNow.filter(_.hashtags contains akkaTag).map(t => 1).toMat(sumSink)(Keep.right) // materialize the stream once in the morning val morningTweetsCount: Future[Int] = counterRunnableGraph.run() @@ -236,7 +230,9 @@ class TwitterStreamQuickstartDocSpec extends AkkaSpec { val sum: Future[Int] = counterRunnableGraph.run() - sum.map { c => println(s"Total tweets processed: $c") } + sum.map { c => + println(s"Total tweets processed: $c") + } } } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala index c5f4a1713d..8a97df08b4 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeAdhocSource.scala @@ -19,11 +19,13 @@ class RecipeAdhocSource extends RecipeSpec { //#adhoc-source def adhocSource[T](source: Source[T, _], timeout: FiniteDuration, maxRetries: Int): Source[T, _] = Source.lazily( - () => source.backpressureTimeout(timeout).recoverWithRetries(maxRetries, { - case t: TimeoutException => - Source.lazily(() => source.backpressureTimeout(timeout)).mapMaterializedValue(_ => NotUsed) - }) - ) + () => + source + 
.backpressureTimeout(timeout) + .recoverWithRetries(maxRetries, { + case t: TimeoutException => + Source.lazily(() => source.backpressureTimeout(timeout)).mapMaterializedValue(_ => NotUsed) + })) //#adhoc-source "Recipe for adhoc source" must { @@ -36,18 +38,15 @@ class RecipeAdhocSource extends RecipeSpec { } "start the source when there is a demand" taggedAs TimingTest in { - val sink = adhocSource(Source.repeat("a"), 200.milliseconds, 3) - .runWith(TestSink.probe[String]) + val sink = adhocSource(Source.repeat("a"), 200.milliseconds, 3).runWith(TestSink.probe[String]) sink.requestNext("a") } "shut down the source when the next demand times out" taggedAs TimingTest in { val shutdown = Promise[Done]() - val sink = adhocSource( - Source.repeat("a").watchTermination() { (_, term) => - shutdown.completeWith(term) - }, 200.milliseconds, 3) - .runWith(TestSink.probe[String]) + val sink = adhocSource(Source.repeat("a").watchTermination() { (_, term) => + shutdown.completeWith(term) + }, 200.milliseconds, 3).runWith(TestSink.probe[String]) sink.requestNext("a") Thread.sleep(200) @@ -56,11 +55,9 @@ class RecipeAdhocSource extends RecipeSpec { "not shut down the source when there are still demands" taggedAs TimingTest in { val shutdown = Promise[Done]() - val sink = adhocSource( - Source.repeat("a").watchTermination() { (_, term) => - shutdown.completeWith(term) - }, 200.milliseconds, 3) - .runWith(TestSink.probe[String]) + val sink = adhocSource(Source.repeat("a").watchTermination() { (_, term) => + shutdown.completeWith(term) + }, 200.milliseconds, 3).runWith(TestSink.probe[String]) sink.requestNext("a") Thread.sleep(100) @@ -80,14 +77,11 @@ class RecipeAdhocSource extends RecipeSpec { val shutdown = Promise[Done]() val startedCount = new AtomicInteger(0) - val source = Source - .empty.mapMaterializedValue(_ => startedCount.incrementAndGet()) - .concat(Source.repeat("a")) + val source = Source.empty.mapMaterializedValue(_ => 
startedCount.incrementAndGet()).concat(Source.repeat("a")) val sink = adhocSource(source.watchTermination() { (_, term) => shutdown.completeWith(term) - }, 200.milliseconds, 3) - .runWith(TestSink.probe[String]) + }, 200.milliseconds, 3).runWith(TestSink.probe[String]) sink.requestNext("a") startedCount.get() should be(1) @@ -99,14 +93,11 @@ class RecipeAdhocSource extends RecipeSpec { val shutdown = Promise[Done]() val startedCount = new AtomicInteger(0) - val source = Source - .empty.mapMaterializedValue(_ => startedCount.incrementAndGet()) - .concat(Source.repeat("a")) + val source = Source.empty.mapMaterializedValue(_ => startedCount.incrementAndGet()).concat(Source.repeat("a")) val sink = adhocSource(source.watchTermination() { (_, term) => shutdown.completeWith(term) - }, 200.milliseconds, 3) - .runWith(TestSink.probe[String]) + }, 200.milliseconds, 3).runWith(TestSink.probe[String]) sink.requestNext("a") startedCount.get() should be(1) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala index 72f6202f87..c214349fe1 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeByteStrings.scala @@ -5,7 +5,7 @@ package docs.stream.cookbook import akka.NotUsed -import akka.stream.{ Attributes, Outlet, Inlet, FlowShape } +import akka.stream.{ Attributes, FlowShape, Inlet, Outlet } import akka.stream.scaladsl.{ Flow, Sink, Source } import akka.util.ByteString @@ -35,26 +35,27 @@ class RecipeByteStrings extends RecipeSpec { emitChunk() } }) - setHandler(in, new InHandler { - override def onPush(): Unit = { - val elem = grab(in) - buffer ++= elem - emitChunk() - } + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val elem = grab(in) + buffer ++= elem + emitChunk() + } - override def onUpstreamFinish(): Unit = { - if (buffer.isEmpty) completeStage() - else { - // 
There are elements left in buffer, so - // we keep accepting downstream pulls and push from buffer until emptied. - // - // It might be though, that the upstream finished while it was pulled, in which - // case we will not get an onPull from the downstream, because we already had one. - // In that case we need to emit from the buffer. - if (isAvailable(out)) emitChunk() - } - } - }) + override def onUpstreamFinish(): Unit = { + if (buffer.isEmpty) completeStage() + else { + // There are elements left in buffer, so + // we keep accepting downstream pulls and push from buffer until emptied. + // + // It might be though, that the upstream finished while it was pulled, in which + // case we will not get an onPull from the downstream, because we already had one. + // In that case we need to emit from the buffer. + if (isAvailable(out)) emitChunk() + } + } + }) private def emitChunk(): Unit = { if (buffer.isEmpty) { @@ -92,19 +93,21 @@ class RecipeByteStrings extends RecipeSpec { override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { private var count = 0 - setHandlers(in, out, new InHandler with OutHandler { + setHandlers(in, + out, + new InHandler with OutHandler { - override def onPull(): Unit = { - pull(in) - } + override def onPull(): Unit = { + pull(in) + } - override def onPush(): Unit = { - val chunk = grab(in) - count += chunk.size - if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes")) - else push(out, chunk) - } - }) + override def onPush(): Unit = { + val chunk = grab(in) + count += chunk.size + if (count > maximumBytes) failStage(new IllegalStateException("Too much bytes")) + else push(out, chunk) + } + }) } } @@ -114,8 +117,8 @@ class RecipeByteStrings extends RecipeSpec { val bytes1 = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9))) val bytes2 = Source(List(ByteString(1, 2), ByteString(3), ByteString(4, 5, 6), ByteString(7, 8, 9, 10))) - 
Await.result(bytes1.via(limiter).limit(10).runWith(Sink.seq), 3.seconds) - .fold(ByteString.empty)(_ ++ _) should be(ByteString(1, 2, 3, 4, 5, 6, 7, 8, 9)) + Await.result(bytes1.via(limiter).limit(10).runWith(Sink.seq), 3.seconds).fold(ByteString.empty)(_ ++ _) should be( + ByteString(1, 2, 3, 4, 5, 6, 7, 8, 9)) an[IllegalStateException] must be thrownBy { Await.result(bytes2.via(limiter).limit(10).runWith(Sink.seq), 3.seconds) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeCollectingMetrics.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeCollectingMetrics.scala index 8f10c3318d..43f6218e3f 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeCollectingMetrics.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeCollectingMetrics.scala @@ -4,7 +4,7 @@ package docs.stream.cookbook -import akka.stream.{ ActorMaterializerSettings, ActorMaterializer } +import akka.stream.{ ActorMaterializer, ActorMaterializerSettings } import scala.collection.immutable import scala.concurrent.Await diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala index 6c5c024e1d..32c76730dc 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDecompress.scala @@ -19,12 +19,10 @@ class RecipeDecompress extends RecipeSpec { //#decompress-gzip val compressed = - Source.single(ByteString.fromString("Hello World")) - .via(Compression.gzip) + Source.single(ByteString.fromString("Hello World")).via(Compression.gzip) //#decompress-gzip - val uncompressed = compressed.via(Compression.gunzip()) - .map(_.utf8String) + val uncompressed = compressed.via(Compression.gunzip()).map(_.utf8String) //#decompress-gzip Await.result(uncompressed.runWith(Sink.head), 3.seconds) should be("Hello World") diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala 
b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala index c9e9c14f1c..8dc4dc9c32 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDigest.scala @@ -17,7 +17,7 @@ class RecipeDigest extends RecipeSpec { import java.security.MessageDigest import akka.NotUsed - import akka.stream.{ Attributes, Outlet, Inlet, FlowShape } + import akka.stream.{ Attributes, FlowShape, Inlet, Outlet } import akka.stream.scaladsl.{ Sink, Source } import akka.util.ByteString @@ -56,15 +56,8 @@ class RecipeDigest extends RecipeSpec { //#calculating-digest Await.result(digest.runWith(Sink.head), 3.seconds) should be( - ByteString( - 0xba, 0x78, 0x16, 0xbf, - 0x8f, 0x01, 0xcf, 0xea, - 0x41, 0x41, 0x40, 0xde, - 0x5d, 0xae, 0x22, 0x23, - 0xb0, 0x03, 0x61, 0xa3, - 0x96, 0x17, 0x7a, 0x9c, - 0xb4, 0x10, 0xff, 0x61, - 0xf2, 0x00, 0x15, 0xad)) + ByteString(0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, 0xb0, + 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad)) } } } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala index 15fd0cb416..e6f5b69679 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeDroppyBroadcast.scala @@ -24,16 +24,17 @@ class RecipeDroppyBroadcast extends RecipeSpec { val mySink3 = Sink.fromSubscriber(sub3) //#droppy-bcast - val graph = RunnableGraph.fromGraph(GraphDSL.create(mySink1, mySink2, mySink3)((_, _, _)) { implicit b => (sink1, sink2, sink3) => - import GraphDSL.Implicits._ + val graph = RunnableGraph.fromGraph(GraphDSL.create(mySink1, mySink2, mySink3)((_, _, _)) { + implicit b => (sink1, sink2, sink3) => + import GraphDSL.Implicits._ - val bcast = b.add(Broadcast[Int](3)) - myElements 
~> bcast + val bcast = b.add(Broadcast[Int](3)) + myElements ~> bcast - bcast.buffer(10, OverflowStrategy.dropHead) ~> sink1 - bcast.buffer(10, OverflowStrategy.dropHead) ~> sink2 - bcast.buffer(10, OverflowStrategy.dropHead) ~> sink3 - ClosedShape + bcast.buffer(10, OverflowStrategy.dropHead) ~> sink1 + bcast.buffer(10, OverflowStrategy.dropHead) ~> sink2 + bcast.buffer(10, OverflowStrategy.dropHead) ~> sink3 + ClosedShape }) //#droppy-bcast diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala index 649b10789d..589757b30f 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeGlobalRateLimit.scala @@ -5,7 +5,7 @@ package docs.stream.cookbook import akka.NotUsed -import akka.actor.{ Props, ActorRef, Actor } +import akka.actor.{ Actor, ActorRef, Props } import akka.stream.ClosedShape import akka.stream.scaladsl._ import akka.stream.testkit._ @@ -25,26 +25,22 @@ class RecipeGlobalRateLimit extends RecipeSpec { case object ReplenishTokens - def props(maxAvailableTokens: Int, tokenRefreshPeriod: FiniteDuration, - tokenRefreshAmount: Int): Props = + def props(maxAvailableTokens: Int, tokenRefreshPeriod: FiniteDuration, tokenRefreshAmount: Int): Props = Props(new Limiter(maxAvailableTokens, tokenRefreshPeriod, tokenRefreshAmount)) } - class Limiter( - val maxAvailableTokens: Int, - val tokenRefreshPeriod: FiniteDuration, - val tokenRefreshAmount: Int) extends Actor { + class Limiter(val maxAvailableTokens: Int, val tokenRefreshPeriod: FiniteDuration, val tokenRefreshAmount: Int) + extends Actor { import Limiter._ import context.dispatcher import akka.actor.Status private var waitQueue = immutable.Queue.empty[ActorRef] private var permitTokens = maxAvailableTokens - private val replenishTimer = system.scheduler.schedule( - initialDelay = tokenRefreshPeriod, - interval = 
tokenRefreshPeriod, - receiver = self, - ReplenishTokens) + private val replenishTimer = system.scheduler.schedule(initialDelay = tokenRefreshPeriod, + interval = tokenRefreshPeriod, + receiver = self, + ReplenishTokens) override def receive: Receive = open @@ -69,13 +65,13 @@ class RecipeGlobalRateLimit extends RecipeSpec { val (toBeReleased, remainingQueue) = waitQueue.splitAt(permitTokens) waitQueue = remainingQueue permitTokens -= toBeReleased.size - toBeReleased foreach (_ ! MayPass) + toBeReleased.foreach(_ ! MayPass) if (permitTokens > 0) context.become(open) } override def postStop(): Unit = { replenishTimer.cancel() - waitQueue foreach (_ ! Status.Failure(new IllegalStateException("limiter stopped"))) + waitQueue.foreach(_ ! Status.Failure(new IllegalStateException("limiter stopped"))) } } //#global-limiter-actor @@ -104,13 +100,15 @@ class RecipeGlobalRateLimit extends RecipeSpec { val probe = TestSubscriber.manualProbe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ - val merge = b.add(Merge[String](2)) - source1 ~> merge ~> Sink.fromSubscriber(probe) - source2 ~> merge - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + import GraphDSL.Implicits._ + val merge = b.add(Merge[String](2)) + source1 ~> merge ~> Sink.fromSubscriber(probe) + source2 ~> merge + ClosedShape + }) + .run() probe.expectSubscription().request(1000) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala index 1bbcba3bcd..d1ca4482ad 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeHold.scala @@ -55,20 +55,22 @@ object HoldOps { private var currentValue: T = _ private var waitingFirstValue = true - setHandlers(in, out, new InHandler with OutHandler { - override def onPush(): Unit = { - currentValue = grab(in) - if 
(waitingFirstValue) { - waitingFirstValue = false - if (isAvailable(out)) push(out, currentValue) - } - pull(in) - } + setHandlers(in, + out, + new InHandler with OutHandler { + override def onPush(): Unit = { + currentValue = grab(in) + if (waitingFirstValue) { + waitingFirstValue = false + if (isAvailable(out)) push(out, currentValue) + } + pull(in) + } - override def onPull(): Unit = { - if (!waitingFirstValue) push(out, currentValue) - } - }) + override def onPull(): Unit = { + if (!waitingFirstValue) push(out, currentValue) + } + }) override def preStart(): Unit = { pull(in) @@ -90,9 +92,7 @@ class RecipeHold extends RecipeSpec { val source = Source.fromPublisher(pub) val sink = Sink.fromSubscriber(sub) - source.via(new HoldWithInitial(0)).to(sink) - .withAttributes(Attributes.inputBuffer(1, 1)) - .run() + source.via(new HoldWithInitial(0)).to(sink).withAttributes(Attributes.inputBuffer(1, 1)).run() val subscription = sub.expectSubscription() sub.expectNoMessage(100.millis) diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala index b0dfe241bb..06eab4f3b3 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeLoggingElements.scala @@ -20,7 +20,9 @@ class RecipeLoggingElements extends RecipeSpec { val mySource = Source(List("1", "2", "3")) //#println-debug - val loggedSource = mySource.map { elem => println(elem); elem } + val loggedSource = mySource.map { elem => + println(elem); elem + } //#println-debug loggedSource.runWith(Sink.ignore) @@ -33,14 +35,10 @@ class RecipeLoggingElements extends RecipeSpec { //#log-custom // customise log levels - mySource.log("before-map") - .withAttributes( - Attributes.logLevels( - onElement = Logging.WarningLevel, - onFinish = Logging.InfoLevel, - onFailure = Logging.DebugLevel - ) - ) + mySource + .log("before-map") + 
.withAttributes(Attributes + .logLevels(onElement = Logging.WarningLevel, onFinish = Logging.InfoLevel, onFailure = Logging.DebugLevel)) .map(analyse) // or provide custom logging adapter diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala index afce91828c..a03d33d1b8 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMissedTicks.scala @@ -25,13 +25,11 @@ class RecipeMissedTicks extends RecipeSpec { //#missed-ticks val missedTicks: Flow[Tick, Int, NotUsed] = - Flow[Tick].conflateWithSeed(seed = (_) => 0)( - (missedTicks, tick) => missedTicks + 1) + Flow[Tick].conflateWithSeed(seed = (_) => 0)((missedTicks, tick) => missedTicks + 1) //#missed-ticks val latch = TestLatch(3) val realMissedTicks: Flow[Tick, Int, NotUsed] = - Flow[Tick].conflateWithSeed(seed = (_) => 0)( - (missedTicks, tick) => { latch.countDown(); missedTicks + 1 }) + Flow[Tick].conflateWithSeed(seed = (_) => 0)((missedTicks, tick) => { latch.countDown(); missedTicks + 1 }) tickStream.via(realMissedTicks).to(sink).run() diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala index 258053d17b..55695c2164 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeMultiGroupBy.scala @@ -35,14 +35,13 @@ class RecipeMultiGroupBy extends RecipeSpec { topicsForMessage.map(msg -> _) } - val multiGroups = messageAndTopic - .groupBy(2, _._2).map { - case (msg, topic) => - // do what needs to be done - //#multi-groupby - (msg, topic) + val multiGroups = messageAndTopic.groupBy(2, _._2).map { + case (msg, topic) => + // do what needs to be done //#multi-groupby - } + (msg, topic) + //#multi-groupby + } //#multi-groupby val result = multiGroups 
@@ -52,9 +51,7 @@ class RecipeMultiGroupBy extends RecipeSpec { .limit(10) .runWith(Sink.seq) - Await.result(result, 3.seconds).toSet should be(Set( - "1[1: a, 1: b, all: c, all: d, 1: e]", - "2[all: c, all: d]")) + Await.result(result, 3.seconds).toSet should be(Set("1[1: a, 1: b, all: c, all: d, 1: e]", "2[all: c, all: d]")) } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala index 70098ea70a..dfffd4245e 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeParseLines.scala @@ -16,25 +16,22 @@ class RecipeParseLines extends RecipeSpec { "Recipe for parsing line from bytes" must { "work" in { - val rawData = Source(List( - ByteString("Hello World"), - ByteString("\r"), - ByteString("!\r"), - ByteString("\nHello Akka!\r\nHello Streams!"), - ByteString("\r\n\r\n"))) + val rawData = Source( + List(ByteString("Hello World"), + ByteString("\r"), + ByteString("!\r"), + ByteString("\nHello Akka!\r\nHello Streams!"), + ByteString("\r\n\r\n"))) //#parse-lines import akka.stream.scaladsl.Framing - val linesStream = rawData.via(Framing.delimiter( - ByteString("\r\n"), maximumFrameLength = 100, allowTruncation = true)) + val linesStream = rawData + .via(Framing.delimiter(ByteString("\r\n"), maximumFrameLength = 100, allowTruncation = true)) .map(_.utf8String) //#parse-lines - Await.result(linesStream.limit(10).runWith(Sink.seq), 3.seconds) should be(List( - "Hello World\r!", - "Hello Akka!", - "Hello Streams!", - "")) + Await.result(linesStream.limit(10).runWith(Sink.seq), 3.seconds) should be( + List("Hello World\r!", "Hello Akka!", "Hello Streams!", "")) } } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala index d1b883c07a..6641710428 100644 --- 
a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeReduceByKey.scala @@ -21,7 +21,7 @@ class RecipeReduceByKey extends RecipeSpec { //#word-count val counts: Source[(String, Int), NotUsed] = words - // split the words into separate streams first + // split the words into separate streams first .groupBy(MaximumDistinctWords, identity) //transform each element to pair with number of words in it .map(_ -> 1) @@ -31,13 +31,8 @@ class RecipeReduceByKey extends RecipeSpec { .mergeSubstreams //#word-count - Await.result(counts.limit(10).runWith(Sink.seq), 3.seconds).toSet should be(Set( - ("hello", 2), - ("world", 1), - ("and", 1), - ("universe", 1), - ("akka", 1), - ("rocks!", 1000))) + Await.result(counts.limit(10).runWith(Sink.seq), 3.seconds).toSet should be( + Set(("hello", 2), ("world", 1), ("and", 1), ("universe", 1), ("akka", 1), ("rocks!", 1000))) } "work generalized" in { @@ -45,10 +40,9 @@ class RecipeReduceByKey extends RecipeSpec { def words = Source(List("hello", "world", "and", "hello", "universe", "akka") ++ List.fill(1000)("rocks!")) //#reduce-by-key-general - def reduceByKey[In, K, Out]( - maximumGroupSize: Int, - groupKey: (In) => K, - map: (In) => Out)(reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = { + def reduceByKey[In, K, Out](maximumGroupSize: Int, + groupKey: (In) => K, + map: (In) => Out)(reduce: (Out, Out) => Out): Flow[In, (K, Out), NotUsed] = { Flow[In] .groupBy[K](maximumGroupSize, groupKey) @@ -58,19 +52,12 @@ class RecipeReduceByKey extends RecipeSpec { } val wordCounts = words.via( - reduceByKey( - MaximumDistinctWords, - groupKey = (word: String) => word, - map = (word: String) => 1)((left: Int, right: Int) => left + right)) + reduceByKey(MaximumDistinctWords, groupKey = (word: String) => word, map = (word: String) => 1)( + (left: Int, right: Int) => left + right)) //#reduce-by-key-general - Await.result(wordCounts.limit(10).runWith(Sink.seq), 
3.seconds).toSet should be(Set( - ("hello", 2), - ("world", 1), - ("and", 1), - ("universe", 1), - ("akka", 1), - ("rocks!", 1000))) + Await.result(wordCounts.limit(10).runWith(Sink.seq), 3.seconds).toSet should be( + Set(("hello", 2), ("world", 1), ("and", 1), ("universe", 1), ("akka", 1), ("rocks!", 1000))) } } diff --git a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala index d15851ca37..890d30e8af 100644 --- a/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala +++ b/akka-docs/src/test/scala/docs/stream/cookbook/RecipeWorkerPool.scala @@ -42,8 +42,8 @@ class RecipeWorkerPool extends RecipeSpec { val processedJobs: Source[Result, NotUsed] = myJobs.via(balancer(worker, 3)) //#worker-pool - Await.result(processedJobs.limit(10).runWith(Sink.seq), 3.seconds).toSet should be(Set( - "1 done", "2 done", "3 done", "4 done", "5 done")) + Await.result(processedJobs.limit(10).runWith(Sink.seq), 3.seconds).toSet should be( + Set("1 done", "2 done", "3 done", "4 done", "5 done")) } diff --git a/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala b/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala index 188b85e90e..29b73fd82e 100644 --- a/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/io/StreamFileDocSpec.scala @@ -52,25 +52,20 @@ class StreamFileDocSpec extends AkkaSpec(UnboundedMailboxConfig) { //#file-source - val foreach: Future[IOResult] = FileIO.fromPath(file) - .to(Sink.ignore) - .run() + val foreach: Future[IOResult] = FileIO.fromPath(file).to(Sink.ignore).run() //#file-source } "configure dispatcher in code" in { //#custom-dispatcher-code - FileIO.fromPath(file) - .withAttributes(ActorAttributes.dispatcher("custom-blocking-io-dispatcher")) + FileIO.fromPath(file).withAttributes(ActorAttributes.dispatcher("custom-blocking-io-dispatcher")) //#custom-dispatcher-code } "write 
data into a file" in { //#file-sink val text = Source.single("Hello Akka Stream!") - val result: Future[IOResult] = text - .map(t => ByteString(t)) - .runWith(FileIO.toPath(file)) + val result: Future[IOResult] = text.map(t => ByteString(t)).runWith(FileIO.toPath(file)) //#file-sink } } diff --git a/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala b/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala index 62871755a9..c8efe20895 100644 --- a/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala +++ b/akka-docs/src/test/scala/docs/stream/io/StreamTcpDocSpec.scala @@ -31,7 +31,7 @@ class StreamTcpDocSpec extends AkkaSpec { Tcp().bind("127.0.0.1", 8888).to(Sink.ignore).run() binding.map { b => - b.unbind() onComplete { + b.unbind().onComplete { case _ => // ... } } @@ -44,14 +44,11 @@ class StreamTcpDocSpec extends AkkaSpec { val connections: Source[IncomingConnection, Future[ServerBinding]] = Tcp().bind(host, port) - connections runForeach { connection => + connections.runForeach { connection => println(s"New connection from: ${connection.remoteAddress}") val echo = Flow[ByteString] - .via(Framing.delimiter( - ByteString("\n"), - maximumFrameLength = 256, - allowTruncation = true)) + .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = true)) .map(_.utf8String) .map(_ + "!!!\n") .map(ByteString(_)) @@ -71,32 +68,32 @@ class StreamTcpDocSpec extends AkkaSpec { import akka.stream.scaladsl.Framing val binding = //#welcome-banner-chat-server - connections.to(Sink.foreach { connection => + connections + .to(Sink.foreach { connection => + // server logic, parses incoming commands + val commandParser = Flow[String].takeWhile(_ != "BYE").map(_ + "!") - // server logic, parses incoming commands - val commandParser = Flow[String].takeWhile(_ != "BYE").map(_ + "!") + import connection._ + val welcomeMsg = s"Welcome to: $localAddress, you are: $remoteAddress!" 
+ val welcome = Source.single(welcomeMsg) - import connection._ - val welcomeMsg = s"Welcome to: $localAddress, you are: $remoteAddress!" - val welcome = Source.single(welcomeMsg) + val serverLogic = Flow[ByteString] + .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = true)) + .map(_.utf8String) + //#welcome-banner-chat-server + .map { command => + serverProbe.ref ! command; command + } + //#welcome-banner-chat-server + .via(commandParser) + // merge in the initial banner after parser + .merge(welcome) + .map(_ + "\n") + .map(ByteString(_)) - val serverLogic = Flow[ByteString] - .via(Framing.delimiter( - ByteString("\n"), - maximumFrameLength = 256, - allowTruncation = true)) - .map(_.utf8String) - //#welcome-banner-chat-server - .map { command => serverProbe.ref ! command; command } - //#welcome-banner-chat-server - .via(commandParser) - // merge in the initial banner after parser - .merge(welcome) - .map(_ + "\n") - .map(ByteString(_)) - - connection.handleWith(serverLogic) - }).run() + connection.handleWith(serverLogic) + }) + .run() //#welcome-banner-chat-server // make sure server is started before we connect @@ -108,7 +105,7 @@ class StreamTcpDocSpec extends AkkaSpec { def readLine(prompt: String): String = { input.get() match { case all @ cmd :: tail if input.compareAndSet(all, tail) => cmd - case _ => "q" + case _ => "q" } } @@ -124,15 +121,10 @@ class StreamTcpDocSpec extends AkkaSpec { //#repl-client val replParser = - Flow[String].takeWhile(_ != "q") - .concat(Source.single("BYE")) - .map(elem => ByteString(s"$elem\n")) + Flow[String].takeWhile(_ != "q").concat(Source.single("BYE")).map(elem => ByteString(s"$elem\n")) val repl = Flow[ByteString] - .via(Framing.delimiter( - ByteString("\n"), - maximumFrameLength = 256, - allowTruncation = true)) + .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = true)) .map(_.utf8String) .map(text => println("Server: " + text)) .map(_ => readLine("> 
")) diff --git a/akka-docs/src/test/scala/docs/stream/operators/SourceOrFlow.scala b/akka-docs/src/test/scala/docs/stream/operators/SourceOrFlow.scala index 08a67b271f..65de4ee779 100644 --- a/akka-docs/src/test/scala/docs/stream/operators/SourceOrFlow.scala +++ b/akka-docs/src/test/scala/docs/stream/operators/SourceOrFlow.scala @@ -17,12 +17,12 @@ object SourceOrFlow { //#log Flow[String] - //#log + //#log .log(name = "myStream") - .addAttributes(Attributes.logLevels( - onElement = Attributes.LogLevels.Off, - onFailure = Attributes.LogLevels.Error, - onFinish = Attributes.LogLevels.Info)) + .addAttributes( + Attributes.logLevels(onElement = Attributes.LogLevels.Off, + onFailure = Attributes.LogLevels.Error, + onFinish = Attributes.LogLevels.Info)) //#log } @@ -30,7 +30,8 @@ object SourceOrFlow { //#conflate import scala.concurrent.duration._ - Source.cycle(() => List(1, 10, 100, 1000).iterator) + Source + .cycle(() => List(1, 10, 100, 1000).iterator) .throttle(10, per = 1.second) // faster upstream .conflate((acc, el) => acc + el) // acc: Int, el: Int .throttle(1, per = 1.second) // slow downstream @@ -45,9 +46,10 @@ object SourceOrFlow { def sum(other: Summed) = Summed(this.i + other.i) } - Source.cycle(() => List(1, 10, 100, 1000).iterator) + Source + .cycle(() => List(1, 10, 100, 1000).iterator) .throttle(10, per = 1.second) // faster upstream - .conflateWithSeed(el => Summed(el))((acc, el) => acc sum Summed(el)) // (Summed, Int) => Summed + .conflateWithSeed(el => Summed(el))((acc, el) => acc.sum(Summed(el))) // (Summed, Int) => Summed .throttle(1, per = 1.second) // slow downstream //#conflateWithSeed } diff --git a/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala b/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala index e6a61bd9bd..6b3001107b 100644 --- a/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala +++ b/akka-docs/src/test/scala/docs/testkit/ParentChildSpec.scala @@ -20,7 +20,6 @@ import org.scalatest.BeforeAndAfterAll 
/** * Parent-Child examples */ - //#test-example class Parent extends Actor { val child = context.actorOf(Props[Child], "child") @@ -71,7 +70,6 @@ class GenericDependentParent(childMaker: ActorRefFactory => ActorRef) extends Ac /** * Test specification */ - class MockedChild extends Actor { def receive = { case "ping" => sender ! "pong" @@ -142,8 +140,8 @@ class ParentChildSpec extends WordSpec with Matchers with TestKitBase with Befor val parent = system.actorOf(Props(new Actor { val child = context.actorOf(Props(new Child), "child") def receive = { - case x if sender == child => proxy.ref forward x - case x => child forward x + case x if sender == child => proxy.ref.forward(x) + case x => child.forward(x) } })) diff --git a/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala b/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala index c09b28a320..ca8e843c06 100644 --- a/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala +++ b/akka-docs/src/test/scala/docs/testkit/PlainWordSpec.scala @@ -10,8 +10,12 @@ import akka.testkit.{ ImplicitSender, TestActors, TestKit } import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } //#implicit-sender -class MySpec() extends TestKit(ActorSystem("MySpec")) with ImplicitSender - with WordSpecLike with Matchers with BeforeAndAfterAll { +class MySpec() + extends TestKit(ActorSystem("MySpec")) + with ImplicitSender + with WordSpecLike + with Matchers + with BeforeAndAfterAll { //#implicit-sender override def afterAll: Unit = { diff --git a/akka-docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala b/akka-docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala index 3e6e989cae..c467ff769d 100644 --- a/akka-docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala +++ b/akka-docs/src/test/scala/docs/testkit/TestKitUsageSpec.scala @@ -19,7 +19,7 @@ import akka.actor.Actor import akka.actor.ActorRef import akka.actor.ActorSystem import akka.actor.Props -import akka.testkit.{ TestActors, DefaultTimeout, 
ImplicitSender, TestKit } +import akka.testkit.{ DefaultTimeout, ImplicitSender, TestActors, TestKit } import scala.concurrent.duration._ import scala.collection.immutable @@ -27,11 +27,12 @@ import scala.collection.immutable * a Test to show some TestKit examples */ class TestKitUsageSpec - extends TestKit(ActorSystem( - "TestKitUsageSpec", - ConfigFactory.parseString(TestKitUsageSpec.config))) - with DefaultTimeout with ImplicitSender - with WordSpecLike with Matchers with BeforeAndAfterAll { + extends TestKit(ActorSystem("TestKitUsageSpec", ConfigFactory.parseString(TestKitUsageSpec.config))) + with DefaultTimeout + with ImplicitSender + with WordSpecLike + with Matchers + with BeforeAndAfterAll { import TestKitUsageSpec._ val echoRef = system.actorOf(TestActors.echoActorProps) @@ -137,13 +138,12 @@ object TestKitUsageSpec { * like to test that the interesting value is received and that you cant * be bothered with the rest */ - class SequencingActor(next: ActorRef, head: immutable.Seq[String], - tail: immutable.Seq[String]) extends Actor { + class SequencingActor(next: ActorRef, head: immutable.Seq[String], tail: immutable.Seq[String]) extends Actor { def receive = { case msg => { - head foreach { next ! _ } + head.foreach { next ! _ } next ! msg - tail foreach { next ! _ } + tail.foreach { next ! 
_ } } } } diff --git a/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala b/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala index 7fca0ba83c..9f1cf594f3 100644 --- a/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/src/test/scala/docs/testkit/TestkitDocSpec.scala @@ -32,10 +32,10 @@ object TestKitDocSpec { class TestFsmActor extends Actor with FSM[Int, String] { startWith(1, "") when(1) { - case Event("go", _) => goto(2) using "go" + case Event("go", _) => goto(2).using("go") } when(2) { - case Event("back", _) => goto(1) using "back" + case Event("back", _) => goto(1).using("back") } } @@ -262,7 +262,7 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val target = system.actorOf(Props.empty) //#test-probe-watch val probe = TestProbe() - probe watch target + probe.watch(target) target ! PoisonPill probe.expectTerminated(target) //#test-probe-watch @@ -322,12 +322,13 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { import akka.testkit.EventFilter import com.typesafe.config.ConfigFactory - implicit val system = ActorSystem("testsystem", ConfigFactory.parseString(""" + implicit val system = ActorSystem("testsystem", + ConfigFactory.parseString(""" akka.loggers = ["akka.testkit.TestEventListener"] """)) try { val actor = system.actorOf(Props.empty) - EventFilter[ActorKilledException](occurrences = 1) intercept { + EventFilter[ActorKilledException](occurrences = 1).intercept { actor ! 
Kill } } finally { @@ -346,7 +347,8 @@ class TestKitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { //#put-your-test-code-here val probe = TestProbe() probe.send(testActor, "hello") - try expectMsg("hello") catch { case NonFatal(e) => system.terminate(); throw e } + try expectMsg("hello") + catch { case NonFatal(e) => system.terminate(); throw e } //#put-your-test-code-here shutdown(system) diff --git a/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala b/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala index d86b91af1f..4f051190a0 100644 --- a/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala +++ b/akka-docs/src/test/scala/tutorial_1/ActorHierarchyExperiments.scala @@ -8,7 +8,7 @@ package docs.tutorial_1 //#print-refs package com.example -import akka.actor.{ Actor, Props, ActorSystem } +import akka.actor.{ Actor, ActorSystem, Props } import scala.io.StdIn object PrintMyActorRefActor { diff --git a/akka-docs/src/test/scala/tutorial_4/Device.scala b/akka-docs/src/test/scala/tutorial_4/Device.scala index 1a8cc3645b..c4c5537583 100644 --- a/akka-docs/src/test/scala/tutorial_4/Device.scala +++ b/akka-docs/src/test/scala/tutorial_4/Device.scala @@ -31,10 +31,11 @@ class Device(groupId: String, deviceId: String) extends Actor with ActorLogging sender() ! 
DeviceManager.DeviceRegistered case DeviceManager.RequestTrackDevice(groupId, deviceId) => - log.warning( - "Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.", - groupId, deviceId, this.groupId, this.deviceId - ) + log.warning("Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.", + groupId, + deviceId, + this.groupId, + this.deviceId) case RecordTemperature(id, value) => log.info("Recorded temperature reading {} with {}", value, id) diff --git a/akka-docs/src/test/scala/tutorial_4/DeviceGroup.scala b/akka-docs/src/test/scala/tutorial_4/DeviceGroup.scala index 5dc03f1e32..0ca3de53e0 100644 --- a/akka-docs/src/test/scala/tutorial_4/DeviceGroup.scala +++ b/akka-docs/src/test/scala/tutorial_4/DeviceGroup.scala @@ -38,7 +38,7 @@ class DeviceGroup(groupId: String) extends Actor with ActorLogging { case trackMsg @ RequestTrackDevice(`groupId`, _) => deviceIdToActor.get(trackMsg.deviceId) match { case Some(deviceActor) => - deviceActor forward trackMsg + deviceActor.forward(trackMsg) case None => log.info("Creating device actor for {}", trackMsg.deviceId) val deviceActor = context.actorOf(Device.props(groupId, trackMsg.deviceId), s"device-${trackMsg.deviceId}") @@ -47,14 +47,11 @@ class DeviceGroup(groupId: String) extends Actor with ActorLogging { actorToDeviceId += deviceActor -> trackMsg.deviceId //#device-group-register deviceIdToActor += trackMsg.deviceId -> deviceActor - deviceActor forward trackMsg + deviceActor.forward(trackMsg) } case RequestTrackDevice(groupId, deviceId) => - log.warning( - "Ignoring TrackDevice request for {}. This actor is responsible for {}.", - groupId, this.groupId - ) + log.warning("Ignoring TrackDevice request for {}. 
This actor is responsible for {}.", groupId, this.groupId) //#device-group-register //#device-group-remove diff --git a/akka-docs/src/test/scala/tutorial_4/DeviceManager.scala b/akka-docs/src/test/scala/tutorial_4/DeviceManager.scala index c19eb7f3e7..8217bfd010 100644 --- a/akka-docs/src/test/scala/tutorial_4/DeviceManager.scala +++ b/akka-docs/src/test/scala/tutorial_4/DeviceManager.scala @@ -29,12 +29,12 @@ class DeviceManager extends Actor with ActorLogging { case trackMsg @ RequestTrackDevice(groupId, _) => groupIdToActor.get(groupId) match { case Some(ref) => - ref forward trackMsg + ref.forward(trackMsg) case None => log.info("Creating device group actor for {}", groupId) val groupActor = context.actorOf(DeviceGroup.props(groupId), "group-" + groupId) context.watch(groupActor) - groupActor forward trackMsg + groupActor.forward(trackMsg) groupIdToActor += groupId -> groupActor actorToGroupId += groupActor -> groupId } diff --git a/akka-docs/src/test/scala/tutorial_5/Device.scala b/akka-docs/src/test/scala/tutorial_5/Device.scala index 7b4202c475..0b1e41f4d4 100644 --- a/akka-docs/src/test/scala/tutorial_5/Device.scala +++ b/akka-docs/src/test/scala/tutorial_5/Device.scala @@ -31,10 +31,11 @@ class Device(groupId: String, deviceId: String) extends Actor with ActorLogging sender() ! 
DeviceManager.DeviceRegistered case DeviceManager.RequestTrackDevice(groupId, deviceId) => - log.warning( - "Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.", - groupId, deviceId, this.groupId, this.deviceId - ) + log.warning("Ignoring TrackDevice request for {}-{}.This actor is responsible for {}-{}.", + groupId, + deviceId, + this.groupId, + this.deviceId) case RecordTemperature(id, value) => log.info("Recorded temperature reading {} with {}", value, id) diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceGroup.scala b/akka-docs/src/test/scala/tutorial_5/DeviceGroup.scala index 73eb314f8c..9609d6f4b8 100644 --- a/akka-docs/src/test/scala/tutorial_5/DeviceGroup.scala +++ b/akka-docs/src/test/scala/tutorial_5/DeviceGroup.scala @@ -43,21 +43,18 @@ class DeviceGroup(groupId: String) extends Actor with ActorLogging { case trackMsg @ RequestTrackDevice(`groupId`, _) => deviceIdToActor.get(trackMsg.deviceId) match { case Some(ref) => - ref forward trackMsg + ref.forward(trackMsg) case None => log.info("Creating device actor for {}", trackMsg.deviceId) val deviceActor = context.actorOf(Device.props(groupId, trackMsg.deviceId), "device-" + trackMsg.deviceId) context.watch(deviceActor) - deviceActor forward trackMsg + deviceActor.forward(trackMsg) deviceIdToActor += trackMsg.deviceId -> deviceActor actorToDeviceId += deviceActor -> trackMsg.deviceId } case RequestTrackDevice(groupId, deviceId) => - log.warning( - "Ignoring TrackDevice request for {}. This actor is responsible for {}.", - groupId, this.groupId - ) + log.warning("Ignoring TrackDevice request for {}. This actor is responsible for {}.", groupId, this.groupId) case RequestDeviceList(requestId) => sender() ! ReplyDeviceList(requestId, deviceIdToActor.keySet) @@ -72,12 +69,9 @@ class DeviceGroup(groupId: String) extends Actor with ActorLogging { // ... 
other cases omitted case RequestAllTemperatures(requestId) => - context.actorOf(DeviceGroupQuery.props( - actorToDeviceId = actorToDeviceId, - requestId = requestId, - requester = sender(), - 3.seconds - )) + context.actorOf( + DeviceGroupQuery + .props(actorToDeviceId = actorToDeviceId, requestId = requestId, requester = sender(), 3.seconds)) } } diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala index 48abacc4ff..9f3036c593 100644 --- a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala +++ b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuery.scala @@ -13,22 +13,20 @@ import scala.concurrent.duration._ object DeviceGroupQuery { case object CollectionTimeout - def props( - actorToDeviceId: Map[ActorRef, String], - requestId: Long, - requester: ActorRef, - timeout: FiniteDuration - ): Props = { + def props(actorToDeviceId: Map[ActorRef, String], + requestId: Long, + requester: ActorRef, + timeout: FiniteDuration): Props = { Props(new DeviceGroupQuery(actorToDeviceId, requestId, requester, timeout)) } } -class DeviceGroupQuery( - actorToDeviceId: Map[ActorRef, String], - requestId: Long, - requester: ActorRef, - timeout: FiniteDuration -) extends Actor with ActorLogging { +class DeviceGroupQuery(actorToDeviceId: Map[ActorRef, String], + requestId: Long, + requester: ActorRef, + timeout: FiniteDuration) + extends Actor + with ActorLogging { import DeviceGroupQuery._ import context.dispatcher val queryTimeoutTimer = context.system.scheduler.scheduleOnce(timeout, self, CollectionTimeout) @@ -47,15 +45,10 @@ class DeviceGroupQuery( //#query-outline //#query-state override def receive: Receive = - waitingForReplies( - Map.empty, - actorToDeviceId.keySet - ) + waitingForReplies(Map.empty, actorToDeviceId.keySet) - def waitingForReplies( - repliesSoFar: Map[String, DeviceGroup.TemperatureReading], - stillWaiting: Set[ActorRef] - ): Receive = { + def waitingForReplies(repliesSoFar: 
Map[String, DeviceGroup.TemperatureReading], + stillWaiting: Set[ActorRef]): Receive = { case Device.RespondTemperature(0, valueOption) => val deviceActor = sender() val reading = valueOption match { @@ -79,12 +72,10 @@ class DeviceGroupQuery( //#query-state //#query-collect-reply - def receivedResponse( - deviceActor: ActorRef, - reading: DeviceGroup.TemperatureReading, - stillWaiting: Set[ActorRef], - repliesSoFar: Map[String, DeviceGroup.TemperatureReading] - ): Unit = { + def receivedResponse(deviceActor: ActorRef, + reading: DeviceGroup.TemperatureReading, + stillWaiting: Set[ActorRef], + repliesSoFar: Map[String, DeviceGroup.TemperatureReading]): Unit = { context.unwatch(deviceActor) val deviceId = actorToDeviceId(deviceActor) val newStillWaiting = stillWaiting - deviceActor diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala index 437180044f..d7ced2fc00 100644 --- a/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala +++ b/akka-docs/src/test/scala/tutorial_5/DeviceGroupQuerySpec.scala @@ -20,12 +20,11 @@ class DeviceGroupQuerySpec extends AkkaSpec { val device1 = TestProbe() val device2 = TestProbe() - val queryActor = system.actorOf(DeviceGroupQuery.props( - actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"), - requestId = 1, - requester = requester.ref, - timeout = 3.seconds - )) + val queryActor = system.actorOf( + DeviceGroupQuery.props(actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"), + requestId = 1, + requester = requester.ref, + timeout = 3.seconds)) device1.expectMsg(Device.ReadTemperature(requestId = 0)) device2.expectMsg(Device.ReadTemperature(requestId = 0)) @@ -33,13 +32,10 @@ class DeviceGroupQuerySpec extends AkkaSpec { queryActor.tell(Device.RespondTemperature(requestId = 0, Some(1.0)), device1.ref) queryActor.tell(Device.RespondTemperature(requestId = 0, Some(2.0)), device2.ref) - 
requester.expectMsg(DeviceGroup.RespondAllTemperatures( - requestId = 1, - temperatures = Map( - "device1" -> DeviceGroup.Temperature(1.0), - "device2" -> DeviceGroup.Temperature(2.0) - ) - )) + requester.expectMsg( + DeviceGroup.RespondAllTemperatures(requestId = 1, + temperatures = Map("device1" -> DeviceGroup.Temperature(1.0), + "device2" -> DeviceGroup.Temperature(2.0)))) } //#query-test-normal @@ -50,12 +46,11 @@ class DeviceGroupQuerySpec extends AkkaSpec { val device1 = TestProbe() val device2 = TestProbe() - val queryActor = system.actorOf(DeviceGroupQuery.props( - actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"), - requestId = 1, - requester = requester.ref, - timeout = 3.seconds - )) + val queryActor = system.actorOf( + DeviceGroupQuery.props(actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"), + requestId = 1, + requester = requester.ref, + timeout = 3.seconds)) device1.expectMsg(Device.ReadTemperature(requestId = 0)) device2.expectMsg(Device.ReadTemperature(requestId = 0)) @@ -63,13 +58,10 @@ class DeviceGroupQuerySpec extends AkkaSpec { queryActor.tell(Device.RespondTemperature(requestId = 0, None), device1.ref) queryActor.tell(Device.RespondTemperature(requestId = 0, Some(2.0)), device2.ref) - requester.expectMsg(DeviceGroup.RespondAllTemperatures( - requestId = 1, - temperatures = Map( - "device1" -> DeviceGroup.TemperatureNotAvailable, - "device2" -> DeviceGroup.Temperature(2.0) - ) - )) + requester.expectMsg( + DeviceGroup.RespondAllTemperatures(requestId = 1, + temperatures = Map("device1" -> DeviceGroup.TemperatureNotAvailable, + "device2" -> DeviceGroup.Temperature(2.0)))) } //#query-test-no-reading @@ -80,12 +72,11 @@ class DeviceGroupQuerySpec extends AkkaSpec { val device1 = TestProbe() val device2 = TestProbe() - val queryActor = system.actorOf(DeviceGroupQuery.props( - actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"), - requestId = 1, - requester = requester.ref, - 
timeout = 3.seconds - )) + val queryActor = system.actorOf( + DeviceGroupQuery.props(actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"), + requestId = 1, + requester = requester.ref, + timeout = 3.seconds)) device1.expectMsg(Device.ReadTemperature(requestId = 0)) device2.expectMsg(Device.ReadTemperature(requestId = 0)) @@ -93,13 +84,10 @@ class DeviceGroupQuerySpec extends AkkaSpec { queryActor.tell(Device.RespondTemperature(requestId = 0, Some(1.0)), device1.ref) device2.ref ! PoisonPill - requester.expectMsg(DeviceGroup.RespondAllTemperatures( - requestId = 1, - temperatures = Map( - "device1" -> DeviceGroup.Temperature(1.0), - "device2" -> DeviceGroup.DeviceNotAvailable - ) - )) + requester.expectMsg( + DeviceGroup.RespondAllTemperatures(requestId = 1, + temperatures = Map("device1" -> DeviceGroup.Temperature(1.0), + "device2" -> DeviceGroup.DeviceNotAvailable))) } //#query-test-stopped @@ -110,12 +98,11 @@ class DeviceGroupQuerySpec extends AkkaSpec { val device1 = TestProbe() val device2 = TestProbe() - val queryActor = system.actorOf(DeviceGroupQuery.props( - actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"), - requestId = 1, - requester = requester.ref, - timeout = 3.seconds - )) + val queryActor = system.actorOf( + DeviceGroupQuery.props(actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"), + requestId = 1, + requester = requester.ref, + timeout = 3.seconds)) device1.expectMsg(Device.ReadTemperature(requestId = 0)) device2.expectMsg(Device.ReadTemperature(requestId = 0)) @@ -124,13 +111,10 @@ class DeviceGroupQuerySpec extends AkkaSpec { queryActor.tell(Device.RespondTemperature(requestId = 0, Some(2.0)), device2.ref) device2.ref ! 
PoisonPill - requester.expectMsg(DeviceGroup.RespondAllTemperatures( - requestId = 1, - temperatures = Map( - "device1" -> DeviceGroup.Temperature(1.0), - "device2" -> DeviceGroup.Temperature(2.0) - ) - )) + requester.expectMsg( + DeviceGroup.RespondAllTemperatures(requestId = 1, + temperatures = Map("device1" -> DeviceGroup.Temperature(1.0), + "device2" -> DeviceGroup.Temperature(2.0)))) } //#query-test-stopped-later @@ -141,25 +125,21 @@ class DeviceGroupQuerySpec extends AkkaSpec { val device1 = TestProbe() val device2 = TestProbe() - val queryActor = system.actorOf(DeviceGroupQuery.props( - actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"), - requestId = 1, - requester = requester.ref, - timeout = 1.second - )) + val queryActor = system.actorOf( + DeviceGroupQuery.props(actorToDeviceId = Map(device1.ref -> "device1", device2.ref -> "device2"), + requestId = 1, + requester = requester.ref, + timeout = 1.second)) device1.expectMsg(Device.ReadTemperature(requestId = 0)) device2.expectMsg(Device.ReadTemperature(requestId = 0)) queryActor.tell(Device.RespondTemperature(requestId = 0, Some(1.0)), device1.ref) - requester.expectMsg(DeviceGroup.RespondAllTemperatures( - requestId = 1, - temperatures = Map( - "device1" -> DeviceGroup.Temperature(1.0), - "device2" -> DeviceGroup.DeviceTimedOut - ) - )) + requester.expectMsg( + DeviceGroup.RespondAllTemperatures(requestId = 1, + temperatures = Map("device1" -> DeviceGroup.Temperature(1.0), + "device2" -> DeviceGroup.DeviceTimedOut))) } //#query-test-timeout diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala b/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala index 0a58d0e7f2..fac49cfa60 100644 --- a/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala +++ b/akka-docs/src/test/scala/tutorial_5/DeviceGroupSpec.scala @@ -122,12 +122,10 @@ class DeviceGroupSpec extends AkkaSpec { groupActor.tell(DeviceGroup.RequestAllTemperatures(requestId = 0), probe.ref) 
probe.expectMsg( - DeviceGroup.RespondAllTemperatures( - requestId = 0, - temperatures = Map( - "device1" -> DeviceGroup.Temperature(1.0), - "device2" -> DeviceGroup.Temperature(2.0), - "device3" -> DeviceGroup.TemperatureNotAvailable))) + DeviceGroup.RespondAllTemperatures(requestId = 0, + temperatures = Map("device1" -> DeviceGroup.Temperature(1.0), + "device2" -> DeviceGroup.Temperature(2.0), + "device3" -> DeviceGroup.TemperatureNotAvailable))) } //#group-query-integration-test diff --git a/akka-docs/src/test/scala/tutorial_5/DeviceManager.scala b/akka-docs/src/test/scala/tutorial_5/DeviceManager.scala index f253e95e78..5f9823b3bf 100644 --- a/akka-docs/src/test/scala/tutorial_5/DeviceManager.scala +++ b/akka-docs/src/test/scala/tutorial_5/DeviceManager.scala @@ -26,12 +26,12 @@ class DeviceManager extends Actor with ActorLogging { case trackMsg @ RequestTrackDevice(groupId, _) => groupIdToActor.get(groupId) match { case Some(ref) => - ref forward trackMsg + ref.forward(trackMsg) case None => log.info("Creating device group actor for {}", groupId) val groupActor = context.actorOf(DeviceGroup.props(groupId), "group-" + groupId) context.watch(groupActor) - groupActor forward trackMsg + groupActor.forward(trackMsg) groupIdToActor += groupId -> groupActor actorToGroupId += groupActor -> groupId } diff --git a/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala b/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala index 6aca175a89..127d562cdf 100644 --- a/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala +++ b/akka-docs/src/test/scala/typed/tutorial_1/ActorHierarchyExperiments.scala @@ -87,9 +87,8 @@ object SupervisingActor { } class SupervisingActor(context: ActorContext[String]) extends AbstractBehavior[String] { - private val child = context.spawn( - Behaviors.supervise(SupervisedActor()).onFailure(SupervisorStrategy.restart), - name = "supervised-actor") + private val child = 
context.spawn(Behaviors.supervise(SupervisedActor()).onFailure(SupervisorStrategy.restart), + name = "supervised-actor") override def onMessage(msg: String): Behavior[String] = msg match { diff --git a/akka-docs/src/test/scala/typed/tutorial_3/Device.scala b/akka-docs/src/test/scala/typed/tutorial_3/Device.scala index 84312479e4..702aac395c 100644 --- a/akka-docs/src/test/scala/typed/tutorial_3/Device.scala +++ b/akka-docs/src/test/scala/typed/tutorial_3/Device.scala @@ -19,19 +19,18 @@ object Device { sealed trait DeviceMessage - final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) - extends DeviceMessage + final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) extends DeviceMessage final case class RespondTemperature(requestId: Long, value: Option[Double]) //#write-protocol final case class RecordTemperature(requestId: Long, value: Double, replyTo: ActorRef[TemperatureRecorded]) - extends DeviceMessage + extends DeviceMessage final case class TemperatureRecorded(requestId: Long) //#write-protocol } class Device(context: ActorContext[Device.DeviceMessage], groupId: String, deviceId: String) - extends AbstractBehavior[Device.DeviceMessage] { + extends AbstractBehavior[Device.DeviceMessage] { import Device._ var lastTemperatureReading: Option[Double] = None diff --git a/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala b/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala index 098b96a231..82818b0482 100644 --- a/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala +++ b/akka-docs/src/test/scala/typed/tutorial_3/DeviceInProgress.scala @@ -37,14 +37,13 @@ object DeviceInProgress2 { //#read-protocol-2 sealed trait DeviceMessage - final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) - extends DeviceMessage + final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) extends DeviceMessage final 
case class RespondTemperature(requestId: Long, value: Option[Double]) //#read-protocol-2 } class Device(context: ActorContext[Device.DeviceMessage], groupId: String, deviceId: String) - extends AbstractBehavior[Device.DeviceMessage] { + extends AbstractBehavior[Device.DeviceMessage] { import Device._ var lastTemperatureReading: Option[Double] = None @@ -75,8 +74,7 @@ object DeviceInProgress3 { object Device { //#write-protocol-1 sealed trait DeviceMessage - final case class RecordTemperature(value: Double) - extends DeviceMessage + final case class RecordTemperature(value: Double) extends DeviceMessage //#write-protocol-1 } } diff --git a/akka-docs/src/test/scala/typed/tutorial_4/Device.scala b/akka-docs/src/test/scala/typed/tutorial_4/Device.scala index 2873401aef..251812e2ac 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/Device.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/Device.scala @@ -19,12 +19,11 @@ object Device { sealed trait DeviceMessage - final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) - extends DeviceMessage + final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) extends DeviceMessage final case class RespondTemperature(requestId: Long, value: Option[Double]) final case class RecordTemperature(requestId: Long, value: Double, replyTo: ActorRef[TemperatureRecorded]) - extends DeviceMessage + extends DeviceMessage final case class TemperatureRecorded(requestId: Long) //#passivate-msg @@ -33,7 +32,7 @@ object Device { } class Device(context: ActorContext[Device.DeviceMessage], groupId: String, deviceId: String) - extends AbstractBehavior[Device.DeviceMessage] { + extends AbstractBehavior[Device.DeviceMessage] { import Device._ var lastTemperatureReading: Option[Double] = None diff --git a/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala b/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala index cc7c4a675f..c8f229633e 100644 --- 
a/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/DeviceGroup.scala @@ -20,10 +20,8 @@ object DeviceGroup { trait DeviceGroupMessage - private final case class DeviceTerminated( - device: ActorRef[Device.DeviceMessage], - groupId: String, - deviceId: String) extends DeviceGroupMessage + private final case class DeviceTerminated(device: ActorRef[Device.DeviceMessage], groupId: String, deviceId: String) + extends DeviceGroupMessage } //#device-group-register @@ -31,7 +29,7 @@ object DeviceGroup { //#device-group-remove class DeviceGroup(context: ActorContext[DeviceGroup.DeviceGroupMessage], groupId: String) - extends AbstractBehavior[DeviceGroup.DeviceGroupMessage] { + extends AbstractBehavior[DeviceGroup.DeviceGroupMessage] { import DeviceGroup._ import DeviceManager._ @@ -57,10 +55,7 @@ class DeviceGroup(context: ActorContext[DeviceGroup.DeviceGroupMessage], groupId this case RequestTrackDevice(gId, _, _) => - context.log.warning( - "Ignoring TrackDevice request for {}. This actor is responsible for {}.", - gId, groupId - ) + context.log.warning("Ignoring TrackDevice request for {}. 
This actor is responsible for {}.", gId, groupId) this //#device-group-register //#device-group-remove diff --git a/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala b/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala index 69e195abe3..98b86ba040 100644 --- a/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala +++ b/akka-docs/src/test/scala/typed/tutorial_4/DeviceManager.scala @@ -24,14 +24,16 @@ object DeviceManager { //#device-registration-msgs final case class RequestTrackDevice(groupId: String, deviceId: String, replyTo: ActorRef[DeviceRegistered]) - extends DeviceManagerMessage with DeviceGroupMessage + extends DeviceManagerMessage + with DeviceGroupMessage final case class DeviceRegistered(device: ActorRef[Device.DeviceMessage]) //#device-registration-msgs //#device-list-msgs final case class RequestDeviceList(requestId: Long, groupId: String, replyTo: ActorRef[ReplyDeviceList]) - extends DeviceManagerMessage with DeviceGroupMessage + extends DeviceManagerMessage + with DeviceGroupMessage final case class ReplyDeviceList(requestId: Long, ids: Set[String]) //#device-list-msgs @@ -41,7 +43,7 @@ object DeviceManager { } class DeviceManager(context: ActorContext[DeviceManager.DeviceManagerMessage]) - extends AbstractBehavior[DeviceManager.DeviceManagerMessage] { + extends AbstractBehavior[DeviceManager.DeviceManagerMessage] { import DeviceManager._ import DeviceGroup.DeviceGroupMessage diff --git a/akka-docs/src/test/scala/typed/tutorial_5/Device.scala b/akka-docs/src/test/scala/typed/tutorial_5/Device.scala index 043f6e1010..fd599a9715 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/Device.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/Device.scala @@ -19,19 +19,18 @@ object Device { sealed trait DeviceMessage - final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) - extends DeviceMessage + final case class ReadTemperature(requestId: Long, replyTo: ActorRef[RespondTemperature]) 
extends DeviceMessage final case class RespondTemperature(requestId: Long, deviceId: String, value: Option[Double]) final case class RecordTemperature(requestId: Long, value: Double, replyTo: ActorRef[TemperatureRecorded]) - extends DeviceMessage + extends DeviceMessage final case class TemperatureRecorded(requestId: Long) case object Passivate extends DeviceMessage } class Device(context: ActorContext[Device.DeviceMessage], groupId: String, deviceId: String) - extends AbstractBehavior[Device.DeviceMessage] { + extends AbstractBehavior[Device.DeviceMessage] { import Device._ var lastTemperatureReading: Option[Double] = None diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala index 0a2ba9fd6a..dfc267f602 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroup.scala @@ -19,16 +19,14 @@ object DeviceGroup { trait DeviceGroupMessage - private final case class DeviceTerminated( - device: ActorRef[Device.DeviceMessage], - groupId: String, - deviceId: String) extends DeviceGroupMessage + private final case class DeviceTerminated(device: ActorRef[Device.DeviceMessage], groupId: String, deviceId: String) + extends DeviceGroupMessage } //#query-added class DeviceGroup(context: ActorContext[DeviceGroup.DeviceGroupMessage], groupId: String) - extends AbstractBehavior[DeviceGroup.DeviceGroupMessage] { + extends AbstractBehavior[DeviceGroup.DeviceGroupMessage] { import DeviceGroup._ import DeviceManager._ @@ -55,10 +53,7 @@ class DeviceGroup(context: ActorContext[DeviceGroup.DeviceGroupMessage], groupId this case RequestTrackDevice(gId, _, _) => - context.log.warning( - "Ignoring TrackDevice request for {}. This actor is responsible for {}.", - gId, groupId - ) + context.log.warning("Ignoring TrackDevice request for {}. 
This actor is responsible for {}.", gId, groupId) this case RequestDeviceList(requestId, gId, replyTo) => @@ -79,12 +74,8 @@ class DeviceGroup(context: ActorContext[DeviceGroup.DeviceGroupMessage], groupId case RequestAllTemperatures(requestId, gId, replyTo) => if (gId == groupId) { - context.spawnAnonymous(DeviceGroupQuery( - deviceIdToActor, - requestId = requestId, - requester = replyTo, - 3.seconds - )) + context.spawnAnonymous( + DeviceGroupQuery(deviceIdToActor, requestId = requestId, requester = replyTo, 3.seconds)) this } else Behaviors.unhandled diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala index 192df2ed42..c6cfd3cdff 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuery.scala @@ -26,12 +26,10 @@ import typed.tutorial_5.DeviceManager.TemperatureReading //#query-outline object DeviceGroupQuery { - def apply( - deviceIdToActor: Map[String, ActorRef[Device.DeviceMessage]], - requestId: Long, - requester: ActorRef[RespondAllTemperatures], - timeout: FiniteDuration - ): Behavior[DeviceGroupQueryMessage] = { + def apply(deviceIdToActor: Map[String, ActorRef[Device.DeviceMessage]], + requestId: Long, + requester: ActorRef[RespondAllTemperatures], + timeout: FiniteDuration): Behavior[DeviceGroupQueryMessage] = { Behaviors.setup { context => Behaviors.withTimers { timers => new DeviceGroupQuery(deviceIdToActor, requestId, requester, timeout, context, timers) @@ -48,14 +46,13 @@ object DeviceGroupQuery { private final case class DeviceTerminated(deviceId: String) extends DeviceGroupQueryMessage } -class DeviceGroupQuery( - deviceIdToActor: Map[String, ActorRef[DeviceMessage]], - requestId: Long, - requester: ActorRef[RespondAllTemperatures], - timeout: FiniteDuration, - context: ActorContext[DeviceGroupQuery.DeviceGroupQueryMessage], - timers: 
TimerScheduler[DeviceGroupQuery.DeviceGroupQueryMessage]) - extends AbstractBehavior[DeviceGroupQuery.DeviceGroupQueryMessage] { +class DeviceGroupQuery(deviceIdToActor: Map[String, ActorRef[DeviceMessage]], + requestId: Long, + requester: ActorRef[RespondAllTemperatures], + timeout: FiniteDuration, + context: ActorContext[DeviceGroupQuery.DeviceGroupQueryMessage], + timers: TimerScheduler[DeviceGroupQuery.DeviceGroupQueryMessage]) + extends AbstractBehavior[DeviceGroupQuery.DeviceGroupQueryMessage] { import DeviceGroupQuery._ timers.startSingleTimer(CollectionTimeout, CollectionTimeout, timeout) diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala index 983922d230..6753c4e0ca 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupQuerySpec.scala @@ -29,12 +29,8 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike { val deviceIdToActor = Map("device1" -> device1.ref, "device2" -> device2.ref) - val queryActor = spawn(DeviceGroupQuery( - deviceIdToActor, - requestId = 1, - requester = requester.ref, - timeout = 3.seconds - )) + val queryActor = + spawn(DeviceGroupQuery(deviceIdToActor, requestId = 1, requester = requester.ref, timeout = 3.seconds)) device1.expectMessageType[Device.ReadTemperature] device2.expectMessageType[Device.ReadTemperature] @@ -42,13 +38,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike { queryActor ! WrappedRespondTemperature(Device.RespondTemperature(requestId = 0, "device1", Some(1.0))) queryActor ! 
WrappedRespondTemperature(Device.RespondTemperature(requestId = 0, "device2", Some(2.0))) - requester.expectMessage(RespondAllTemperatures( - requestId = 1, - temperatures = Map( - "device1" -> Temperature(1.0), - "device2" -> Temperature(2.0) - ) - )) + requester.expectMessage( + RespondAllTemperatures(requestId = 1, + temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0)))) } //#query-test-normal @@ -61,12 +53,8 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike { val deviceIdToActor = Map("device1" -> device1.ref, "device2" -> device2.ref) - val queryActor = spawn(DeviceGroupQuery( - deviceIdToActor, - requestId = 1, - requester = requester.ref, - timeout = 3.seconds - )) + val queryActor = + spawn(DeviceGroupQuery(deviceIdToActor, requestId = 1, requester = requester.ref, timeout = 3.seconds)) device1.expectMessageType[Device.ReadTemperature] device2.expectMessageType[Device.ReadTemperature] @@ -74,13 +62,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike { queryActor ! WrappedRespondTemperature(Device.RespondTemperature(requestId = 0, "device1", None)) queryActor ! 
WrappedRespondTemperature(Device.RespondTemperature(requestId = 0, "device2", Some(2.0))) - requester.expectMessage(RespondAllTemperatures( - requestId = 1, - temperatures = Map( - "device1" -> TemperatureNotAvailable, - "device2" -> Temperature(2.0) - ) - )) + requester.expectMessage( + RespondAllTemperatures(requestId = 1, + temperatures = Map("device1" -> TemperatureNotAvailable, "device2" -> Temperature(2.0)))) } //#query-test-no-reading @@ -93,12 +77,8 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike { val deviceIdToActor = Map("device1" -> device1.ref, "device2" -> device2.ref) - val queryActor = spawn(DeviceGroupQuery( - deviceIdToActor, - requestId = 1, - requester = requester.ref, - timeout = 3.seconds - )) + val queryActor = + spawn(DeviceGroupQuery(deviceIdToActor, requestId = 1, requester = requester.ref, timeout = 3.seconds)) device1.expectMessageType[Device.ReadTemperature] device2.expectMessageType[Device.ReadTemperature] @@ -107,13 +87,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike { device2.stop() - requester.expectMessage(RespondAllTemperatures( - requestId = 1, - temperatures = Map( - "device1" -> Temperature(2.0), - "device2" -> DeviceNotAvailable - ) - )) + requester.expectMessage( + RespondAllTemperatures(requestId = 1, + temperatures = Map("device1" -> Temperature(2.0), "device2" -> DeviceNotAvailable))) } //#query-test-stopped @@ -126,12 +102,8 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike { val deviceIdToActor = Map("device1" -> device1.ref, "device2" -> device2.ref) - val queryActor = spawn(DeviceGroupQuery( - deviceIdToActor, - requestId = 1, - requester = requester.ref, - timeout = 3.seconds - )) + val queryActor = + spawn(DeviceGroupQuery(deviceIdToActor, requestId = 1, requester = requester.ref, timeout = 3.seconds)) device1.expectMessageType[Device.ReadTemperature] device2.expectMessageType[Device.ReadTemperature] @@ -141,13 
+113,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike { device2.stop() - requester.expectMessage(RespondAllTemperatures( - requestId = 1, - temperatures = Map( - "device1" -> Temperature(1.0), - "device2" -> Temperature(2.0) - ) - )) + requester.expectMessage( + RespondAllTemperatures(requestId = 1, + temperatures = Map("device1" -> Temperature(1.0), "device2" -> Temperature(2.0)))) } //#query-test-stopped-later @@ -160,12 +128,8 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike { val deviceIdToActor = Map("device1" -> device1.ref, "device2" -> device2.ref) - val queryActor = spawn(DeviceGroupQuery( - deviceIdToActor, - requestId = 1, - requester = requester.ref, - timeout = 200.millis - )) + val queryActor = + spawn(DeviceGroupQuery(deviceIdToActor, requestId = 1, requester = requester.ref, timeout = 200.millis)) device1.expectMessageType[Device.ReadTemperature] device2.expectMessageType[Device.ReadTemperature] @@ -174,13 +138,9 @@ class DeviceGroupQuerySpec extends ScalaTestWithActorTestKit with WordSpecLike { // no reply from device2 - requester.expectMessage(RespondAllTemperatures( - requestId = 1, - temperatures = Map( - "device1" -> Temperature(1.0), - "device2" -> DeviceTimedOut - ) - )) + requester.expectMessage( + RespondAllTemperatures(requestId = 1, + temperatures = Map("device1" -> Temperature(1.0), "device2" -> DeviceTimedOut))) } //#query-test-timeout diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala index 05b251c988..1696da1f69 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceGroupSpec.scala @@ -131,12 +131,10 @@ class DeviceGroupSpec extends ScalaTestWithActorTestKit with WordSpecLike { val allTempProbe = createTestProbe[RespondAllTemperatures]() groupActor ! 
RequestAllTemperatures(requestId = 0, groupId = "group", allTempProbe.ref) allTempProbe.expectMessage( - RespondAllTemperatures( - requestId = 0, - temperatures = Map( - "device1" -> Temperature(1.0), - "device2" -> Temperature(2.0), - "device3" -> TemperatureNotAvailable))) + RespondAllTemperatures(requestId = 0, + temperatures = Map("device1" -> Temperature(1.0), + "device2" -> Temperature(2.0), + "device3" -> TemperatureNotAvailable))) } //#group-query-integration-test diff --git a/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala b/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala index 781fdff45c..6a11cf3445 100644 --- a/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala +++ b/akka-docs/src/test/scala/typed/tutorial_5/DeviceManager.scala @@ -23,12 +23,14 @@ object DeviceManager { sealed trait DeviceManagerMessage final case class RequestTrackDevice(groupId: String, deviceId: String, replyTo: ActorRef[DeviceRegistered]) - extends DeviceManagerMessage with DeviceGroupMessage + extends DeviceManagerMessage + with DeviceGroupMessage final case class DeviceRegistered(device: ActorRef[Device.DeviceMessage]) final case class RequestDeviceList(requestId: Long, groupId: String, replyTo: ActorRef[ReplyDeviceList]) - extends DeviceManagerMessage with DeviceGroupMessage + extends DeviceManagerMessage + with DeviceGroupMessage final case class ReplyDeviceList(requestId: Long, ids: Set[String]) @@ -39,7 +41,9 @@ object DeviceManager { import DeviceGroupQuery.DeviceGroupQueryMessage final case class RequestAllTemperatures(requestId: Long, groupId: String, replyTo: ActorRef[RespondAllTemperatures]) - extends DeviceGroupQueryMessage with DeviceGroupMessage with DeviceManagerMessage + extends DeviceGroupQueryMessage + with DeviceGroupMessage + with DeviceManagerMessage final case class RespondAllTemperatures(requestId: Long, temperatures: Map[String, TemperatureReading]) @@ -52,7 +56,7 @@ object DeviceManager { } class DeviceManager(context: 
ActorContext[DeviceManager.DeviceManagerMessage]) - extends AbstractBehavior[DeviceManager.DeviceManagerMessage] { + extends AbstractBehavior[DeviceManager.DeviceManagerMessage] { import DeviceManager._ import DeviceGroup.DeviceGroupMessage diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala index b220b7b917..af626e419e 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -6,7 +6,20 @@ package akka.remote.testconductor import language.postfixOps -import akka.actor.{ Actor, ActorRef, Address, DeadLetterSuppression, Deploy, FSM, LoggingFSM, NoSerializationVerificationNeeded, OneForOneStrategy, Props, Status, SupervisorStrategy } +import akka.actor.{ + Actor, + ActorRef, + Address, + DeadLetterSuppression, + Deploy, + FSM, + LoggingFSM, + NoSerializationVerificationNeeded, + OneForOneStrategy, + Props, + Status, + SupervisorStrategy +} import akka.AkkaException import akka.ConfigurationException import akka.event.LoggingReceive @@ -16,7 +29,13 @@ import akka.remote.transport.ThrottlerTransportAdapter.Direction import akka.util.Timeout import java.net.InetSocketAddress import java.util.concurrent.ConcurrentHashMap -import org.jboss.netty.channel.{ Channel, ChannelHandlerContext, ChannelStateEvent, MessageEvent, SimpleChannelUpstreamHandler } +import org.jboss.netty.channel.{ + Channel, + ChannelHandlerContext, + ChannelStateEvent, + MessageEvent, + SimpleChannelUpstreamHandler +} import RemoteConnection.getAddrString import scala.concurrent.Await import scala.concurrent.duration._ @@ -60,12 +79,16 @@ trait Conductor { this: TestConductorExt => * @param participants gives the number of participants which shall connect * before any of their startClient() operations complete. 
*/ - def startController(participants: Int, name: RoleName, controllerPort: InetSocketAddress): Future[InetSocketAddress] = { + def startController(participants: Int, + name: RoleName, + controllerPort: InetSocketAddress): Future[InetSocketAddress] = { if (_controller ne null) throw new RuntimeException("TestConductorServer was already started") _controller = system.actorOf(Props(classOf[Controller], participants, controllerPort), "controller") import Settings.BarrierTimeout import system.dispatcher - controller ? GetSockAddr flatMap { case sockAddr: InetSocketAddress => startClient(name, sockAddr) map (_ => sockAddr) } + (controller ? GetSockAddr).flatMap { + case sockAddr: InetSocketAddress => startClient(name, sockAddr).map(_ => sockAddr) + } } /** @@ -75,7 +98,7 @@ trait Conductor { this: TestConductorExt => */ def sockAddr: Future[InetSocketAddress] = { import Settings.QueryTimeout - controller ? GetSockAddr mapTo classTag[InetSocketAddress] + (controller ? GetSockAddr).mapTo(classTag[InetSocketAddress]) } /** @@ -103,7 +126,7 @@ trait Conductor { this: TestConductorExt => def throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Double): Future[Done] = { import Settings.QueryTimeout requireTestConductorTranport() - controller ? Throttle(node, target, direction, rateMBit.toFloat) mapTo classTag[Done] + (controller ? 
Throttle(node, target, direction, rateMBit.toFloat)).mapTo(classTag[Done]) } /** @@ -126,11 +149,13 @@ trait Conductor { this: TestConductorExt => private def requireTestConductorTranport(): Unit = { if (transport.provider.remoteSettings.Artery.Enabled) { if (!transport.provider.remoteSettings.Artery.Advanced.TestMode) - throw new ConfigurationException("To use this feature you must activate the test mode " + + throw new ConfigurationException( + "To use this feature you must activate the test mode " + "by specifying `testTransport(on = true)` in your MultiNodeConfig.") } else { if (!transport.defaultAddress.protocol.contains(".trttl.gremlin.")) - throw new ConfigurationException("To use this feature you must activate the failure injector adapters " + + throw new ConfigurationException( + "To use this feature you must activate the failure injector adapters " + "(trttl, gremlin) by specifying `testTransport(on = true)` in your MultiNodeConfig.") } } @@ -160,7 +185,7 @@ trait Conductor { this: TestConductorExt => */ def disconnect(node: RoleName, target: RoleName): Future[Done] = { import Settings.QueryTimeout - controller ? Disconnect(node, target, false) mapTo classTag[Done] + (controller ? Disconnect(node, target, false)).mapTo(classTag[Done]) } /** @@ -173,7 +198,7 @@ trait Conductor { this: TestConductorExt => */ def abort(node: RoleName, target: RoleName): Future[Done] = { import Settings.QueryTimeout - controller ? Disconnect(node, target, true) mapTo classTag[Done] + (controller ? Disconnect(node, target, true)).mapTo(classTag[Done]) } /** @@ -189,7 +214,9 @@ trait Conductor { this: TestConductorExt => import system.dispatcher // the recover is needed to handle ClientDisconnectedException exception, // which is normal during shutdown - controller ? Terminate(node, Right(exitValue)) mapTo classTag[Done] recover { case _: ClientDisconnectedException => Done } + (controller ? 
Terminate(node, Right(exitValue))).mapTo(classTag[Done]).recover { + case _: ClientDisconnectedException => Done + } } /** @@ -212,7 +239,9 @@ trait Conductor { this: TestConductorExt => import system.dispatcher // the recover is needed to handle ClientDisconnectedException exception, // which is normal during shutdown - controller ? Terminate(node, Left(abort)) mapTo classTag[Done] recover { case _: ClientDisconnectedException => Done } + (controller ? Terminate(node, Left(abort))).mapTo(classTag[Done]).recover { + case _: ClientDisconnectedException => Done + } } /** @@ -220,7 +249,7 @@ trait Conductor { this: TestConductorExt => */ def getNodes: Future[Iterable[RoleName]] = { import Settings.QueryTimeout - controller ? GetNodes mapTo classTag[Iterable[RoleName]] + (controller ? GetNodes).mapTo(classTag[Iterable[RoleName]]) } /** @@ -233,7 +262,7 @@ trait Conductor { this: TestConductorExt => */ def removeNode(node: RoleName): Future[Done] = { import Settings.QueryTimeout - controller ? Remove(node) mapTo classTag[Done] + (controller ? Remove(node)).mapTo(classTag[Done]) } } @@ -245,7 +274,8 @@ trait Conductor { this: TestConductorExt => * * INTERNAL API. */ -private[akka] class ConductorHandler(_createTimeout: Timeout, controller: ActorRef, log: LoggingAdapter) extends SimpleChannelUpstreamHandler { +private[akka] class ConductorHandler(_createTimeout: Timeout, controller: ActorRef, log: LoggingAdapter) + extends SimpleChannelUpstreamHandler { implicit val createTimeout = _createTimeout val clients = new ConcurrentHashMap[Channel, ActorRef]() @@ -253,7 +283,8 @@ private[akka] class ConductorHandler(_createTimeout: Timeout, controller: ActorR override def channelConnected(ctx: ChannelHandlerContext, event: ChannelStateEvent) = { val channel = event.getChannel log.debug("connection from {}", getAddrString(channel)) - val fsm: ActorRef = Await.result(controller ? 
Controller.CreateServerFSM(channel) mapTo classTag[ActorRef], Duration.Inf) + val fsm: ActorRef = + Await.result((controller ? Controller.CreateServerFSM(channel)).mapTo(classTag[ActorRef]), Duration.Inf) clients.put(channel, fsm) } @@ -303,7 +334,9 @@ private[akka] object ServerFSM { * * INTERNAL API. */ -private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) extends Actor with LoggingFSM[ServerFSM.State, Option[ActorRef]] { +private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) + extends Actor + with LoggingFSM[ServerFSM.State, Option[ActorRef]] { import ServerFSM._ import Controller._ @@ -345,7 +378,7 @@ private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) ex when(Ready) { case Event(d: Done, Some(s)) => s ! d - stay using None + stay.using(None) case Event(op: ServerOp, _) => controller ! op stay @@ -357,7 +390,7 @@ private[akka] class ServerFSM(val controller: ActorRef, val channel: Channel) ex stay case Event(ToClient(msg), None) => channel.write(msg) - stay using Some(sender()) + stay.using(Some(sender())) case Event(ToClient(msg), _) => log.warning("cannot send {} while waiting for previous ACK", msg) stay @@ -391,8 +424,12 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP import BarrierCoordinator._ val settings = TestConductor().Settings - val connection = RemoteConnection(Server, controllerPort, settings.ServerSocketWorkerPoolSize, - new ConductorHandler(settings.QueryTimeout, self, Logging(context.system, classOf[ConductorHandler].getName))) + val connection = RemoteConnection(Server, + controllerPort, + settings.ServerSocketWorkerPoolSize, + new ConductorHandler(settings.QueryTimeout, + self, + Logging(context.system, classOf[ConductorHandler].getName))) /* * Supervision of the BarrierCoordinator means to catch all his bad emotions @@ -420,15 +457,17 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP // map keeping 
unanswered queries for node addresses (enqueued upon GetAddress, serviced upon NodeInfo) var addrInterest = Map[RoleName, Set[ActorRef]]() - val generation = Iterator from 1 + val generation = Iterator.from(1) override def receive = LoggingReceive { case CreateServerFSM(channel) => - val (ip, port) = channel.getRemoteAddress match { case s: InetSocketAddress => (s.getAddress.getHostAddress, s.getPort) } + val (ip, port) = channel.getRemoteAddress match { + case s: InetSocketAddress => (s.getAddress.getHostAddress, s.getPort) + } val name = ip + ":" + port + "-server" + generation.next sender() ! context.actorOf(Props(classOf[ServerFSM], self, channel).withDeploy(Deploy.local), name) case c @ NodeInfo(name, address, fsm) => - barrier forward c + barrier.forward(c) if (nodes contains name) { if (initialParticipants > 0) { for (NodeInfo(_, _, client) <- nodes.values) client ! ToClient(BarrierResult("initial startup", false)) @@ -443,33 +482,33 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP initialParticipants = 0 } if (addrInterest contains name) { - addrInterest(name) foreach (_ ! ToClient(AddressReply(name, address))) + addrInterest(name).foreach(_ ! ToClient(AddressReply(name, address))) addrInterest -= name } } case c @ ClientDisconnected(name) => nodes -= name - barrier forward c + barrier.forward(c) case op: ServerOp => op match { - case _: EnterBarrier => barrier forward op - case _: FailBarrier => barrier forward op + case _: EnterBarrier => barrier.forward(op) + case _: FailBarrier => barrier.forward(op) case GetAddress(node) => if (nodes contains node) sender() ! ToClient(AddressReply(node, nodes(node).addr)) - else addrInterest += node -> ((addrInterest get node getOrElse Set()) + sender()) + else addrInterest += node -> ((addrInterest.get(node).getOrElse(Set())) + sender()) case _: Done => //FIXME what should happen? 
} case op: CommandOp => op match { case Throttle(node, target, direction, rateMBit) => val t = nodes(target) - nodes(node).fsm forward ToClient(ThrottleMsg(t.addr, direction, rateMBit)) + nodes(node).fsm.forward(ToClient(ThrottleMsg(t.addr, direction, rateMBit))) case Disconnect(node, target, abort) => val t = nodes(target) - nodes(node).fsm forward ToClient(DisconnectMsg(t.addr, abort)) + nodes(node).fsm.forward(ToClient(DisconnectMsg(t.addr, abort))) case Terminate(node, shutdownOrExit) => barrier ! BarrierCoordinator.RemoveClient(node) - nodes(node).fsm forward ToClient(TerminateMsg(shutdownOrExit)) + nodes(node).fsm.forward(ToClient(TerminateMsg(shutdownOrExit))) nodes -= node case Remove(node) => barrier ! BarrierCoordinator.RemoveClient(node) @@ -500,17 +539,28 @@ private[akka] object BarrierCoordinator { } final case class BarrierTimeout(data: Data) - extends RuntimeException("timeout while waiting for barrier '" + data.barrier + "'") with NoStackTrace with Printer + extends RuntimeException("timeout while waiting for barrier '" + data.barrier + "'") + with NoStackTrace + with Printer final case class FailedBarrier(data: Data) - extends RuntimeException("failing barrier '" + data.barrier + "'") with NoStackTrace with Printer + extends RuntimeException("failing barrier '" + data.barrier + "'") + with NoStackTrace + with Printer final case class DuplicateNode(data: Data, node: Controller.NodeInfo) - extends RuntimeException(node.toString) with NoStackTrace with Printer + extends RuntimeException(node.toString) + with NoStackTrace + with Printer final case class WrongBarrier(barrier: String, client: ActorRef, data: Data) - extends RuntimeException(data.clients.find(_.fsm == client).map(_.name.toString).getOrElse(client.toString) + - " tried to enter '" + barrier + "' while we were waiting for '" + data.barrier + "'") with NoStackTrace with Printer + extends RuntimeException( + data.clients.find(_.fsm == client).map(_.name.toString).getOrElse(client.toString) + 
+ " tried to enter '" + barrier + "' while we were waiting for '" + data.barrier + "'") + with NoStackTrace + with Printer final case class BarrierEmpty(data: Data, msg: String) extends RuntimeException(msg) with NoStackTrace with Printer final case class ClientLost(data: Data, client: RoleName) - extends RuntimeException("unannounced disconnect of " + client) with NoStackTrace with Printer + extends RuntimeException("unannounced disconnect of " + client) + with NoStackTrace + with Printer } /** @@ -525,7 +575,9 @@ private[akka] object BarrierCoordinator { * * INTERNAL API. */ -private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] { +private[akka] class BarrierCoordinator + extends Actor + with LoggingFSM[BarrierCoordinator.State, BarrierCoordinator.Data] { import BarrierCoordinator._ import Controller._ import FSM.`->` @@ -543,14 +595,14 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor whenUnhandled { case Event(n: NodeInfo, d @ Data(clients, _, _, _)) => if (clients.find(_.name == n.name).isDefined) throw new DuplicateNode(d, n) - stay using d.copy(clients = clients + n) + stay.using(d.copy(clients = clients + n)) case Event(ClientDisconnected(name), d @ Data(clients, _, arrived, _)) => if (arrived.isEmpty) - stay using d.copy(clients = clients.filterNot(_.name == name)) + stay.using(d.copy(clients = clients.filterNot(_.name == name))) else { - (clients find (_.name == name)) match { + clients.find(_.name == name) match { case None => stay - case Some(c) => throw ClientLost(d.copy(clients = clients - c, arrived = arrived filterNot (_ == c.fsm)), name) + case Some(c) => throw ClientLost(d.copy(clients = clients - c, arrived = arrived.filterNot(_ == c.fsm)), name) } } } @@ -558,18 +610,17 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor when(Idle) { case Event(EnterBarrier(name, timeout), d @ Data(clients, _, _, _)) => if (failed) 
- stay replying ToClient(BarrierResult(name, false)) + stay.replying(ToClient(BarrierResult(name, false))) else if (clients.map(_.fsm) == Set(sender())) - stay replying ToClient(BarrierResult(name, true)) + stay.replying(ToClient(BarrierResult(name, true))) else if (clients.find(_.fsm == sender()).isEmpty) - stay replying ToClient(BarrierResult(name, false)) + stay.replying(ToClient(BarrierResult(name, false))) else { - goto(Waiting) using d.copy(barrier = name, arrived = sender() :: Nil, - deadline = getDeadline(timeout)) + goto(Waiting).using(d.copy(barrier = name, arrived = sender() :: Nil, deadline = getDeadline(timeout))) } case Event(RemoveClient(name), d @ Data(clients, _, _, _)) => if (clients.isEmpty) throw BarrierEmpty(d, "cannot remove " + name + ": no client to remove") - stay using d.copy(clients = clients filterNot (_.name == name)) + stay.using(d.copy(clients = clients.filterNot(_.name == name))) } onTransition { @@ -589,10 +640,10 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor } else handleBarrier(d.copy(arrived = together)) case Event(RemoveClient(name), d @ Data(clients, barrier, arrived, _)) => - clients find (_.name == name) match { + clients.find(_.name == name) match { case None => stay case Some(client) => - handleBarrier(d.copy(clients = clients - client, arrived = arrived filterNot (_ == client.fsm))) + handleBarrier(d.copy(clients = clients - client, arrived = arrived.filterNot(_ == client.fsm))) } case Event(FailBarrier(name), d @ Data(_, barrier, _, _)) => if (name != barrier) throw WrongBarrier(name, sender(), d) @@ -606,12 +657,12 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor def handleBarrier(data: Data): State = { log.debug("handleBarrier({})", data) if (data.arrived.isEmpty) { - goto(Idle) using data.copy(barrier = "") + goto(Idle).using(data.copy(barrier = "")) } else if ((data.clients.map(_.fsm) -- data.arrived).isEmpty) { - data.arrived foreach (_ ! 
ToClient(BarrierResult(data.barrier, true))) - goto(Idle) using data.copy(barrier = "", arrived = Nil) + data.arrived.foreach(_ ! ToClient(BarrierResult(data.barrier, true))) + goto(Idle).using(data.copy(barrier = "", arrived = Nil)) } else { - stay using data + stay.using(data) } } @@ -620,4 +671,3 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor } } - diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala index 30149826d4..7ba3ec89aa 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -33,18 +33,25 @@ private[akka] sealed trait ConfirmedClientOp extends ClientOp */ private[akka] final case class Hello(name: String, addr: Address) extends NetworkOp -private[akka] final case class EnterBarrier(name: String, timeout: Option[FiniteDuration]) extends ServerOp with NetworkOp +private[akka] final case class EnterBarrier(name: String, timeout: Option[FiniteDuration]) + extends ServerOp + with NetworkOp private[akka] final case class FailBarrier(name: String) extends ServerOp with NetworkOp private[akka] final case class BarrierResult(name: String, success: Boolean) extends UnconfirmedClientOp with NetworkOp -private[akka] final case class Throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Float) extends CommandOp -private[akka] final case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) extends ConfirmedClientOp with NetworkOp +private[akka] final case class Throttle(node: RoleName, target: RoleName, direction: Direction, rateMBit: Float) + extends CommandOp +private[akka] final case class ThrottleMsg(target: Address, direction: Direction, rateMBit: Float) + extends ConfirmedClientOp + with NetworkOp private[akka] final case class 
Disconnect(node: RoleName, target: RoleName, abort: Boolean) extends CommandOp private[akka] final case class DisconnectMsg(target: Address, abort: Boolean) extends ConfirmedClientOp with NetworkOp private[akka] final case class Terminate(node: RoleName, shutdownOrExit: Either[Boolean, Int]) extends CommandOp -private[akka] final case class TerminateMsg(shutdownOrExit: Either[Boolean, Int]) extends ConfirmedClientOp with NetworkOp +private[akka] final case class TerminateMsg(shutdownOrExit: Either[Boolean, Int]) + extends ConfirmedClientOp + with NetworkOp private[akka] final case class GetAddress(node: RoleName) extends ServerOp with NetworkOp private[akka] final case class AddressReply(node: RoleName, addr: Address) extends UnconfirmedClientOp with NetworkOp @@ -80,7 +87,7 @@ private[akka] class MsgEncoder extends OneToOneEncoder { w.setHello(TCP.Hello.newBuilder.setName(name).setAddress(address)) case EnterBarrier(name, timeout) => val barrier = TCP.EnterBarrier.newBuilder.setName(name) - timeout foreach (t => barrier.setTimeout(t.toNanos)) + timeout.foreach(t => barrier.setTimeout(t.toNanos)) barrier.setOp(BarrierOp.Enter) w.setBarrier(barrier) case BarrierResult(name, success) => @@ -89,11 +96,17 @@ private[akka] class MsgEncoder extends OneToOneEncoder { case FailBarrier(name) => w.setBarrier(TCP.EnterBarrier.newBuilder.setName(name).setOp(BarrierOp.Fail)) case ThrottleMsg(target, dir, rate) => - w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target) - .setFailure(TCP.FailType.Throttle).setDirection(dir).setRateMBit(rate)) + w.setFailure( + TCP.InjectFailure.newBuilder + .setAddress(target) + .setFailure(TCP.FailType.Throttle) + .setDirection(dir) + .setRateMBit(rate)) case DisconnectMsg(target, abort) => - w.setFailure(TCP.InjectFailure.newBuilder.setAddress(target) - .setFailure(if (abort) TCP.FailType.Abort else TCP.FailType.Disconnect)) + w.setFailure( + TCP.InjectFailure.newBuilder + .setAddress(target) + .setFailure(if (abort) TCP.FailType.Abort 
else TCP.FailType.Disconnect)) case TerminateMsg(Right(exitValue)) => w.setFailure(TCP.InjectFailure.newBuilder.setFailure(TCP.FailType.Exit).setExitValue(exitValue)) case TerminateMsg(Left(false)) => @@ -134,9 +147,9 @@ private[akka] class MsgDecoder extends OneToOneDecoder { case BarrierOp.Succeeded => BarrierResult(barrier.getName, true) case BarrierOp.Failed => BarrierResult(barrier.getName, false) case BarrierOp.Fail => FailBarrier(barrier.getName) - case BarrierOp.Enter => EnterBarrier( - barrier.getName, - if (barrier.hasTimeout) Option(Duration.fromNanos(barrier.getTimeout)) else None) + case BarrierOp.Enter => + EnterBarrier(barrier.getName, + if (barrier.hasTimeout) Option(Duration.fromNanos(barrier.getTimeout)) else None) } } else if (w.hasFailure) { val f = w.getFailure diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala index 3e16558bc1..2487e2ed9c 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Extension.scala @@ -4,7 +4,7 @@ package akka.remote.testconductor -import akka.actor.{ Extension, ExtensionId, ExtensionIdProvider, ExtendedActorSystem, ActorContext, ActorSystem } +import akka.actor.{ ActorContext, ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider } import akka.remote.RemoteActorRefProvider import akka.util.Timeout import com.typesafe.config.Config @@ -65,10 +65,9 @@ class TestConductorExt(val system: ExtendedActorSystem) extends Extension with C val PacketSplitThreshold = config.getMillisDuration("packet-split-threshold") private def computeWPS(config: Config): Int = - ThreadPoolConfig.scaledPoolSize( - config.getInt("pool-size-min"), - config.getDouble("pool-size-factor"), - config.getInt("pool-size-max")) + ThreadPoolConfig.scaledPoolSize(config.getInt("pool-size-min"), + 
config.getDouble("pool-size-factor"), + config.getInt("pool-size-max")) val ServerSocketWorkerPoolSize = computeWPS(config.getConfig("netty.server-socket-worker-pool")) diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala index 2d5eb2ddc7..426bb57bd1 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/Player.scala @@ -8,17 +8,25 @@ import java.util.concurrent.TimeoutException import akka.actor._ import akka.remote.testconductor.RemoteConnection.getAddrString import scala.collection.immutable -import scala.concurrent.{ ExecutionContext, Await, Future } +import scala.concurrent.{ Await, ExecutionContext, Future } import scala.concurrent.duration._ import scala.util.control.NoStackTrace import scala.reflect.classTag import akka.util.Timeout -import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent, WriteCompletionEvent, ExceptionEvent } +import org.jboss.netty.channel.{ + Channel, + ChannelHandlerContext, + ChannelStateEvent, + ExceptionEvent, + MessageEvent, + SimpleChannelUpstreamHandler, + WriteCompletionEvent +} import akka.pattern.{ ask, AskTimeoutException } -import akka.event.{ LoggingAdapter, Logging } -import java.net.{ InetSocketAddress, ConnectException } -import akka.remote.transport.ThrottlerTransportAdapter.{ SetThrottle, TokenBucket, Blackhole, Unthrottled } -import akka.dispatch.{ UnboundedMessageQueueSemantics, RequiresMessageQueue } +import akka.event.{ Logging, LoggingAdapter } +import java.net.{ ConnectException, InetSocketAddress } +import akka.remote.transport.ThrottlerTransportAdapter.{ Blackhole, SetThrottle, TokenBucket, Unthrottled } +import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } import akka.util.ccompat._ object 
Player { @@ -34,12 +42,13 @@ object Player { case fsm: ActorRef => waiting = sender(); fsm ! SubscribeTransitionCallBack(self) case Transition(_, f: ClientFSM.State, t: ClientFSM.State) if f == Connecting && t == AwaitDone => // step 1, not there yet // // SI-5900 workaround - case Transition(_, f: ClientFSM.State, t: ClientFSM.State) if f == AwaitDone && t == Connected => // SI-5900 workaround - waiting ! Done; context stop self + case Transition(_, f: ClientFSM.State, t: ClientFSM.State) + if f == AwaitDone && t == Connected => // SI-5900 workaround + waiting ! Done; context.stop(self) case t: Transition[_] => - waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t)); context stop self + waiting ! Status.Failure(new RuntimeException("unexpected transition: " + t)); context.stop(self) case CurrentState(_, s: ClientFSM.State) if s == Connected => // SI-5900 workaround - waiting ! Done; context stop self + waiting ! Done; context.stop(self) case _: CurrentState[_] => } @@ -61,7 +70,8 @@ trait Player { this: TestConductorExt => case null => throw new IllegalStateException("TestConductor client not yet started") case _ if system.whenTerminated.isCompleted => - throw new IllegalStateException("TestConductor unavailable because system is terminated; you need to startNewSystem() before this point") + throw new IllegalStateException( + "TestConductor unavailable because system is terminated; you need to startNewSystem() before this point") case x => x } @@ -79,7 +89,7 @@ trait Player { this: TestConductorExt => if (_client ne null) throw new IllegalStateException("TestConductorClient already started") _client = system.actorOf(Props(classOf[ClientFSM], name, controllerAddr), "TestConductorClient") val a = system.actorOf(Player.waiterProps) - a ? client mapTo classTag[Done] + (a ? 
client).mapTo(classTag[Done]) } /** @@ -95,7 +105,7 @@ trait Player { this: TestConductorExt => def enter(timeout: Timeout, name: immutable.Seq[String]): Unit = { system.log.debug("entering barriers " + name.mkString("(", ", ", ")")) val stop = Deadline.now + timeout.duration - name foreach { b => + name.foreach { b => val barrierTimeout = stop.timeLeft if (barrierTimeout < Duration.Zero) { client ! ToServer(FailBarrier(b)) @@ -119,7 +129,7 @@ trait Player { this: TestConductorExt => */ def getAddressFor(name: RoleName): Future[Address] = { import Settings.QueryTimeout - client ? ToServer(GetAddress(name)) mapTo classTag[Address] + (client ? ToServer(GetAddress(name))).mapTo(classTag[Address]) } } @@ -154,24 +164,30 @@ private[akka] object ClientFSM { * * INTERNAL API. */ -private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) extends Actor - with LoggingFSM[ClientFSM.State, ClientFSM.Data] with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) + extends Actor + with LoggingFSM[ClientFSM.State, ClientFSM.Data] + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import ClientFSM._ val settings = TestConductor().Settings - val handler = new PlayerHandler(controllerAddr, settings.ClientReconnects, settings.ReconnectBackoff, - settings.ClientSocketWorkerPoolSize, self, Logging(context.system, classOf[PlayerHandler].getName), - context.system.scheduler)(context.dispatcher) + val handler = new PlayerHandler(controllerAddr, + settings.ClientReconnects, + settings.ReconnectBackoff, + settings.ClientSocketWorkerPoolSize, + self, + Logging(context.system, classOf[PlayerHandler].getName), + context.system.scheduler)(context.dispatcher) startWith(Connecting, Data(None, None)) when(Connecting, stateTimeout = settings.ConnectTimeout) { case Event(msg: ClientOp, _) => - stay replying Status.Failure(new IllegalStateException("not connected yet")) + 
stay.replying(Status.Failure(new IllegalStateException("not connected yet"))) case Event(Connected(channel), _) => channel.write(Hello(name.name, TestConductor().address)) - goto(AwaitDone) using Data(Some(channel), None) + goto(AwaitDone).using(Data(Some(channel), None)) case Event(e: ConnectionFailure, _) => log.error(e, "ConnectionFailure") goto(Failed) @@ -188,7 +204,7 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) log.error("received {} instead of Done", msg) goto(Failed) case Event(msg: ServerOp, _) => - stay replying Status.Failure(new IllegalStateException("not connected yet")) + stay.replying(Status.Failure(new IllegalStateException("not connected yet"))) case Event(StateTimeout, _) => log.error("connect timeout to TestConductor") goto(Failed) @@ -208,7 +224,7 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) case GetAddress(node) => Some(node.name -> sender()) case _ => None } - stay using d.copy(runningOp = token) + stay.using(d.copy(runningOp = token)) case Event(ToServer(op), Data(channel, Some((token, _)))) => log.error("cannot write {} while waiting for {}", op, token) stay @@ -218,33 +234,40 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) runningOp match { case Some((barrier, requester)) => val response = - if (b != barrier) Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) + if (b != barrier) + Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) else if (!success) Status.Failure(new RuntimeException("barrier failed: " + b)) else b requester ! response case None => log.warning("did not expect {}", op) } - stay using d.copy(runningOp = None) + stay.using(d.copy(runningOp = None)) case AddressReply(node, address) => runningOp match { case Some((_, requester)) => requester ! 
address case None => log.warning("did not expect {}", op) } - stay using d.copy(runningOp = None) + stay.using(d.copy(runningOp = None)) case t: ThrottleMsg => import context.dispatcher // FIXME is this the right EC for the future below? - val mode = if (t.rateMBit < 0.0f) Unthrottled - else if (t.rateMBit == 0.0f) Blackhole - // Conversion needed as the TokenBucket measures in octets: 125000 Octets/s = 1Mbit/s - // FIXME: Initial capacity should be carefully chosen - else TokenBucket(capacity = 1000, tokensPerSecond = t.rateMBit * 125000.0, nanoTimeOfLastSend = 0, availableTokens = 0) + val mode = + if (t.rateMBit < 0.0f) Unthrottled + else if (t.rateMBit == 0.0f) Blackhole + // Conversion needed as the TokenBucket measures in octets: 125000 Octets/s = 1Mbit/s + // FIXME: Initial capacity should be carefully chosen + else + TokenBucket(capacity = 1000, + tokensPerSecond = t.rateMBit * 125000.0, + nanoTimeOfLastSend = 0, + availableTokens = 0) val cmdFuture = TestConductor().transport.managementCommand(SetThrottle(t.target, t.direction, mode)) - cmdFuture foreach { + cmdFuture.foreach { case true => self ! ToServer(Done) - case _ => throw new RuntimeException("Throttle was requested from the TestConductor, but no transport " + + case _ => + throw new RuntimeException("Throttle was requested from the TestConductor, but no transport " + "adapters available that support throttling. 
Specify `testTransport(on = true)` in your MultiNodeConfig") } stay @@ -266,7 +289,7 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) when(Failed) { case Event(msg: ClientOp, _) => - stay replying Status.Failure(new RuntimeException("cannot do " + msg + " while Failed")) + stay.replying(Status.Failure(new RuntimeException("cannot do " + msg + " while Failed"))) case Event(msg: NetworkOp, _) => log.warning("ignoring network message {} while Failed", msg) stay @@ -285,15 +308,14 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) * * INTERNAL API. */ -private[akka] class PlayerHandler( - server: InetSocketAddress, - private var reconnects: Int, - backoff: FiniteDuration, - poolSize: Int, - fsm: ActorRef, - log: LoggingAdapter, - scheduler: Scheduler)(implicit executor: ExecutionContext) - extends SimpleChannelUpstreamHandler { +private[akka] class PlayerHandler(server: InetSocketAddress, + private var reconnects: Int, + backoff: FiniteDuration, + poolSize: Int, + fsm: ActorRef, + log: LoggingAdapter, + scheduler: Scheduler)(implicit executor: ExecutionContext) + extends SimpleChannelUpstreamHandler { import ClientFSM._ @@ -301,11 +323,16 @@ private[akka] class PlayerHandler( var nextAttempt: Deadline = _ - override def channelOpen(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} open", event.getChannel) - override def channelClosed(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} closed", event.getChannel) - override def channelBound(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} bound", event.getChannel) - override def channelUnbound(ctx: ChannelHandlerContext, event: ChannelStateEvent) = log.debug("channel {} unbound", event.getChannel) - override def writeComplete(ctx: ChannelHandlerContext, event: WriteCompletionEvent) = log.debug("channel {} written {}", event.getChannel, event.getWrittenAmount) + override 
def channelOpen(ctx: ChannelHandlerContext, event: ChannelStateEvent) = + log.debug("channel {} open", event.getChannel) + override def channelClosed(ctx: ChannelHandlerContext, event: ChannelStateEvent) = + log.debug("channel {} closed", event.getChannel) + override def channelBound(ctx: ChannelHandlerContext, event: ChannelStateEvent) = + log.debug("channel {} bound", event.getChannel) + override def channelUnbound(ctx: ChannelHandlerContext, event: ChannelStateEvent) = + log.debug("channel {} unbound", event.getChannel) + override def writeComplete(ctx: ChannelHandlerContext, event: WriteCompletionEvent) = + log.debug("channel {} written {}", event.getChannel, event.getWrittenAmount) override def exceptionCaught(ctx: ChannelHandlerContext, event: ExceptionEvent) = { log.debug("channel {} exception {}", event.getChannel, event.getCause) @@ -347,4 +374,3 @@ private[akka] class PlayerHandler( } } } - diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala index 4ef7001fac..7b2d8a9708 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -4,7 +4,13 @@ package akka.remote.testconductor -import org.jboss.netty.channel.{ Channel, ChannelPipeline, ChannelPipelineFactory, ChannelUpstreamHandler, DefaultChannelPipeline } +import org.jboss.netty.channel.{ + Channel, + ChannelPipeline, + ChannelPipelineFactory, + ChannelUpstreamHandler, + DefaultChannelPipeline +} import org.jboss.netty.channel.socket.nio.{ NioClientSocketChannelFactory, NioServerSocketChannelFactory } import org.jboss.netty.bootstrap.{ ClientBootstrap, ServerBootstrap } import org.jboss.netty.handler.codec.frame.{ LengthFieldBasedFrameDecoder, LengthFieldPrepender } @@ -12,7 +18,7 @@ import java.net.InetSocketAddress import 
java.util.concurrent.Executors import akka.event.Logging import akka.util.Helpers -import org.jboss.netty.handler.codec.oneone.{ OneToOneEncoder, OneToOneDecoder } +import org.jboss.netty.handler.codec.oneone.{ OneToOneDecoder, OneToOneEncoder } import org.jboss.netty.channel.ChannelHandlerContext import akka.protobuf.Message import org.jboss.netty.buffer.ChannelBuffer @@ -53,8 +59,8 @@ private[akka] class TestConductorPipelineFactory(handler: ChannelUpstreamHandler val encap = List(new LengthFieldPrepender(4), new LengthFieldBasedFrameDecoder(10000, 0, 4, 0, 4)) val proto = List(new ProtobufEncoder, new ProtobufDecoder(TestConductorProtocol.Wrapper.getDefaultInstance)) val msg = List(new MsgEncoder, new MsgDecoder) - (encap ::: proto ::: msg ::: handler :: Nil).foldLeft(new DefaultChannelPipeline) { - (pipe, handler) => pipe.addLast(Logging.simpleName(handler.getClass), handler); pipe + (encap ::: proto ::: msg ::: handler :: Nil).foldLeft(new DefaultChannelPipeline) { (pipe, handler) => + pipe.addLast(Logging.simpleName(handler.getClass), handler); pipe } } } @@ -63,10 +69,12 @@ private[akka] class TestConductorPipelineFactory(handler: ChannelUpstreamHandler * INTERNAL API. */ private[akka] sealed trait Role + /** * INTERNAL API. */ private[akka] case object Client extends Role + /** * INTERNAL API. 
*/ @@ -79,15 +87,15 @@ private[akka] object RemoteConnection { def apply(role: Role, sockaddr: InetSocketAddress, poolSize: Int, handler: ChannelUpstreamHandler): Channel = { role match { case Client => - val socketfactory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool, Executors.newCachedThreadPool, - poolSize) + val socketfactory = + new NioClientSocketChannelFactory(Executors.newCachedThreadPool, Executors.newCachedThreadPool, poolSize) val bootstrap = new ClientBootstrap(socketfactory) bootstrap.setPipelineFactory(new TestConductorPipelineFactory(handler)) bootstrap.setOption("tcpNoDelay", true) bootstrap.connect(sockaddr).getChannel case Server => - val socketfactory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool, Executors.newCachedThreadPool, - poolSize) + val socketfactory = + new NioServerSocketChannelFactory(Executors.newCachedThreadPool, Executors.newCachedThreadPool, poolSize) val bootstrap = new ServerBootstrap(socketfactory) bootstrap.setPipelineFactory(new TestConductorPipelineFactory(handler)) bootstrap.setOption("reuseAddress", !Helpers.isWindows) @@ -102,5 +110,7 @@ private[akka] object RemoteConnection { } def shutdown(channel: Channel) = - try channel.close() finally try channel.getFactory.shutdown() finally channel.getFactory.releaseExternalResources() + try channel.close() + finally try channel.getFactory.shutdown() + finally channel.getFactory.releaseExternalResources() } diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala index 9aa1128321..ac7d5aeffc 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -48,8 +48,8 @@ abstract class MultiNodeConfig { * Register a config override for a specific participant. 
*/ def nodeConfig(roles: RoleName*)(configs: Config*): Unit = { - val c = configs.reduceLeft(_ withFallback _) - _nodeConf ++= roles map { _ -> c } + val c = configs.reduceLeft(_.withFallback(_)) + _nodeConf ++= roles.map { _ -> c } } /** @@ -83,14 +83,14 @@ abstract class MultiNodeConfig { * filled. */ def role(name: String): RoleName = { - if (_roles exists (_.name == name)) throw new IllegalArgumentException("non-unique role name " + name) + if (_roles.exists(_.name == name)) throw new IllegalArgumentException("non-unique role name " + name) val r = RoleName(name) _roles :+= r r } def deployOn(role: RoleName, deployment: String): Unit = - _deployments += role -> ((_deployments get role getOrElse Vector()) :+ deployment) + _deployments += role -> ((_deployments.get(role).getOrElse(Vector())) :+ deployment) def deployOnAll(deployment: String): Unit = _allDeploy :+= deployment @@ -108,18 +108,20 @@ abstract class MultiNodeConfig { private[akka] def config: Config = { val transportConfig = - if (_testTransport) ConfigFactory.parseString( - """ + if (_testTransport) ConfigFactory.parseString(""" akka.remote.netty.tcp.applied-adapters = [trttl, gremlin] akka.remote.artery.advanced.test-mode = on """) else ConfigFactory.empty - val configs = (_nodeConf get myself).toList ::: _commonConf.toList ::: transportConfig :: MultiNodeSpec.nodeConfig :: MultiNodeSpec.baseConfig :: Nil - configs reduceLeft (_ withFallback _) + val configs = _nodeConf + .get(myself) + .toList ::: _commonConf.toList ::: transportConfig :: MultiNodeSpec.nodeConfig :: MultiNodeSpec.baseConfig :: Nil + configs.reduceLeft(_.withFallback(_)) } - private[testkit] def deployments(node: RoleName): immutable.Seq[String] = (_deployments get node getOrElse Nil) ++ _allDeploy + private[testkit] def deployments(node: RoleName): immutable.Seq[String] = + (_deployments.get(node).getOrElse(Nil)) ++ _allDeploy private[testkit] def roles: immutable.Seq[RoleName] = _roles @@ -134,8 +136,8 @@ object MultiNodeSpec { * 
-Dmultinode.max-nodes=4 * }}} */ - val maxNodes: Int = Option(Integer.getInteger("multinode.max-nodes")) getOrElse - (throw new IllegalStateException("need system property multinode.max-nodes to be set")) + val maxNodes: Int = Option(Integer.getInteger("multinode.max-nodes")) + .getOrElse(throw new IllegalStateException("need system property multinode.max-nodes to be set")) require(maxNodes > 0, "multinode.max-nodes must be greater than 0") @@ -177,8 +179,8 @@ object MultiNodeSpec { * -Dmultinode.server-host=server.example.com * }}} */ - val serverName: String = Option(System.getProperty("multinode.server-host")) getOrElse - (throw new IllegalStateException("need system property multinode.server-host to be set")) + val serverName: String = Option(System.getProperty("multinode.server-host")) + .getOrElse(throw new IllegalStateException("need system property multinode.server-host to be set")) require(serverName != "", "multinode.server-host must not be empty") @@ -202,19 +204,20 @@ object MultiNodeSpec { * -Dmultinode.index=0 * }}} */ - val selfIndex = Option(Integer.getInteger("multinode.index")) getOrElse - (throw new IllegalStateException("need system property multinode.index to be set")) + val selfIndex = Option(Integer.getInteger("multinode.index")) + .getOrElse(throw new IllegalStateException("need system property multinode.index to be set")) require(selfIndex >= 0 && selfIndex < maxNodes, "multinode.index is out of bounds: " + selfIndex) - private[testkit] val nodeConfig = mapToConfig(Map( - "akka.actor.provider" -> "remote", - "akka.remote.artery.canonical.hostname" -> selfName, - "akka.remote.netty.tcp.hostname" -> selfName, - "akka.remote.netty.tcp.port" -> selfPort, - "akka.remote.artery.canonical.port" -> selfPort)) + private[testkit] val nodeConfig = mapToConfig( + Map("akka.actor.provider" -> "remote", + "akka.remote.artery.canonical.hostname" -> selfName, + "akka.remote.netty.tcp.hostname" -> selfName, + "akka.remote.netty.tcp.port" -> selfPort, + 
"akka.remote.artery.canonical.port" -> selfPort)) - private[testkit] val baseConfig: Config = ConfigFactory.parseString(""" + private[testkit] val baseConfig: Config = + ConfigFactory.parseString(""" akka { loggers = ["akka.testkit.TestEventListener"] loglevel = "WARNING" @@ -244,7 +247,7 @@ object MultiNodeSpec { val s = Thread.currentThread.getStackTrace.map(_.getClassName).drop(1).dropWhile(_.matches(pattern)) val reduced = s.lastIndexWhere(_ == clazz.getName) match { case -1 => s - case z => s drop (z + 1) + case z => s.drop(z + 1) } reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_") } @@ -258,8 +261,12 @@ object MultiNodeSpec { * `AskTimeoutException: sending to terminated ref breaks promises`. Using lazy * val is fine. */ -abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: immutable.Seq[RoleName], deployments: RoleName => Seq[String]) - extends TestKit(_system) with MultiNodeSpecCallbacks { +abstract class MultiNodeSpec(val myself: RoleName, + _system: ActorSystem, + _roles: immutable.Seq[RoleName], + deployments: RoleName => Seq[String]) + extends TestKit(_system) + with MultiNodeSpecCallbacks { import MultiNodeSpec._ @@ -324,8 +331,8 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: def verifySystemShutdown: Boolean = false /* - * Test Class Interface - */ + * Test Class Interface + */ /** * Override this method to do something when the whole test is starting up. 
@@ -352,7 +359,8 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: * }}} */ def initialParticipants: Int - require(initialParticipants > 0, "initialParticipants must be a 'def' or early initializer, and it must be greater zero") + require(initialParticipants > 0, + "initialParticipants must be a 'def' or early initializer, and it must be greater zero") require(initialParticipants <= maxNodes, "not enough nodes to run this test") /** @@ -382,9 +390,8 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: * the innermost enclosing `within` block or the default `BarrierTimeout` */ def enterBarrier(name: String*): Unit = - testConductor.enter( - Timeout.durationToTimeout(remainingOr(testConductor.Settings.BarrierTimeout.duration)), - name.to(immutable.Seq)) + testConductor.enter(Timeout.durationToTimeout(remainingOr(testConductor.Settings.BarrierTimeout.duration)), + name.to(immutable.Seq)) /** * Query the controller for the transport address of the given node (by role name) and @@ -401,7 +408,7 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: def mute(clazz: Class[_]): Unit = sys.eventStream.publish(Mute(DeadLettersFilter(clazz)(occurrences = Int.MaxValue))) if (messageClasses.isEmpty) mute(classOf[AnyRef]) - else messageClasses foreach mute + else messageClasses.foreach(mute) } /* @@ -430,18 +437,17 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: lazy val addr = node(role).address.toString } - private val replacements = roles map (r => Replacement("@" + r.name + "@", r)) + private val replacements = roles.map(r => Replacement("@" + r.name + "@", r)) protected def injectDeployments(sys: ActorSystem, role: RoleName): Unit = { val deployer = sys.asInstanceOf[ExtendedActorSystem].provider.deployer - deployments(role) foreach { str => + deployments(role).foreach { str => val deployString = replacements.foldLeft(str) { case (base, r @ 
Replacement(tag, _)) => base.indexOf(tag) match { case -1 => base case start => - val replaceWith = try - r.addr + val replaceWith = try r.addr catch { case NonFatal(e) => // might happen if all test cases are ignored (excluded) and @@ -455,9 +461,10 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: } } import scala.collection.JavaConverters._ - ConfigFactory.parseString(deployString).root.asScala foreach { - case (key, value: ConfigObject) => deployer.parseConfig(key, value.toConfig) foreach deployer.deploy - case (key, x) => throw new IllegalArgumentException(s"key $key must map to deployment section, not simple value $x") + ConfigFactory.parseString(deployString).root.asScala.foreach { + case (key, value: ConfigObject) => deployer.parseConfig(key, value.toConfig).foreach(deployer.deploy) + case (key, x) => + throw new IllegalArgumentException(s"key $key must map to deployment section, not simple value $x") } } } @@ -481,7 +488,8 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: * system. */ protected def startNewSystem(): ActorSystem = { - val config = ConfigFactory.parseString(s"akka.remote.netty.tcp{port=${myAddress.port.get}\nhostname=${myAddress.host.get}}") + val config = ConfigFactory + .parseString(s"akka.remote.netty.tcp{port=${myAddress.port.get}\nhostname=${myAddress.host.get}}") .withFallback(system.settings.config) val sys = ActorSystem(system.name, config) injectDeployments(sys, myself) @@ -505,6 +513,7 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: * }}} */ trait MultiNodeSpecCallbacks { + /** * Call this before the start of the test run. NOT before every test case. 
*/ diff --git a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/PerfFlamesSupport.scala b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/PerfFlamesSupport.scala index a768be4705..fb8ae74ec5 100644 --- a/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/PerfFlamesSupport.scala +++ b/akka-multi-node-testkit/src/main/scala/akka/remote/testkit/PerfFlamesSupport.scala @@ -27,7 +27,7 @@ private[akka] trait PerfFlamesSupport { _: MultiNodeSpec => import scala.concurrent.ExecutionContext.Implicits.global val afterDelay = akka.pattern.after(delay, system.scheduler)(Future.successful("GO!")) - afterDelay onComplete { it => + afterDelay.onComplete { it => import java.lang.management._ val name = ManagementFactory.getRuntimeMXBean.getName val pid = name.substring(0, name.indexOf('@')).toInt diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala index b04dfaf967..fab8ad6b06 100644 --- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -8,7 +8,7 @@ import akka.actor.ActorSystem import java.util.{ Dictionary, Properties } import org.osgi.framework._ import org.osgi.service.log.LogService -import com.typesafe.config.{ ConfigFactory, Config } +import com.typesafe.config.{ Config, ConfigFactory } /** * Abstract bundle activator implementation to bootstrap and configure an actor system in an @@ -40,9 +40,11 @@ abstract class ActorSystemActivator extends BundleActivator { * @param context the BundleContext */ def start(context: BundleContext): Unit = { - system = Some(OsgiActorSystemFactory(context, getActorSystemConfiguration(context)).createActorSystem(Option(getActorSystemName(context)))) - system foreach (addLogServiceListener(context, _)) - system foreach (configure(context, _)) + system = Some( + OsgiActorSystemFactory(context, getActorSystemConfiguration(context)) + 
.createActorSystem(Option(getActorSystemName(context)))) + system.foreach(addLogServiceListener(context, _)) + system.foreach(configure(context, _)) } /** @@ -82,8 +84,8 @@ abstract class ActorSystemActivator extends BundleActivator { * @param context the BundleContext */ def stop(context: BundleContext): Unit = { - registration foreach (_.unregister()) - system foreach (_.terminate()) + registration.foreach(_.unregister()) + system.foreach(_.terminate()) } /** @@ -99,8 +101,8 @@ abstract class ActorSystemActivator extends BundleActivator { registration.foreach(_.unregister()) //Cleanup val properties = new Properties() properties.put("name", system.name) - registration = Some(context.registerService(classOf[ActorSystem].getName, system, - properties.asInstanceOf[Dictionary[String, Any]])) + registration = Some( + context.registerService(classOf[ActorSystem].getName, system, properties.asInstanceOf[Dictionary[String, Any]])) } /** diff --git a/akka-osgi/src/main/scala/akka/osgi/BundleDelegatingClassLoader.scala b/akka-osgi/src/main/scala/akka/osgi/BundleDelegatingClassLoader.scala index fa89687af6..a2dfe882f8 100644 --- a/akka-osgi/src/main/scala/akka/osgi/BundleDelegatingClassLoader.scala +++ b/akka-osgi/src/main/scala/akka/osgi/BundleDelegatingClassLoader.scala @@ -6,7 +6,7 @@ package akka.osgi import java.net.URL import java.util.Enumeration -import org.osgi.framework.{ BundleContext, Bundle } +import org.osgi.framework.{ Bundle, BundleContext } import scala.util.Try import org.osgi.framework.wiring.{ BundleRevision, BundleWire, BundleWiring } import scala.collection.JavaConverters._ @@ -22,7 +22,8 @@ object BundleDelegatingClassLoader { /* * Create a bundle delegating ClassLoader for the bundle context's bundle */ - def apply(context: BundleContext): BundleDelegatingClassLoader = new BundleDelegatingClassLoader(context.getBundle, null) + def apply(context: BundleContext): BundleDelegatingClassLoader = + new BundleDelegatingClassLoader(context.getBundle, null) 
def apply(context: BundleContext, fallBackCLassLoader: Option[ClassLoader]): BundleDelegatingClassLoader = new BundleDelegatingClassLoader(context.getBundle, fallBackCLassLoader.orNull) @@ -32,17 +33,19 @@ object BundleDelegatingClassLoader { * A bundle delegating ClassLoader implementation - this will try to load classes and resources from the bundle * and the bundles transitive dependencies. If there's a ClassLoader specified, that will be used as a fallback. */ -class BundleDelegatingClassLoader(bundle: Bundle, fallBackClassLoader: ClassLoader) extends ClassLoader(fallBackClassLoader) { +class BundleDelegatingClassLoader(bundle: Bundle, fallBackClassLoader: ClassLoader) + extends ClassLoader(fallBackClassLoader) { private val bundles = findTransitiveBundles(bundle).toList override def findClass(name: String): Class[_] = { @tailrec def find(remaining: List[Bundle]): Class[_] = { if (remaining.isEmpty) throw new ClassNotFoundException(name) - else Try { remaining.head.loadClass(name) } match { - case Success(cls) => cls - case Failure(_) => find(remaining.tail) - } + else + Try { remaining.head.loadClass(name) } match { + case Success(cls) => cls + case Failure(_) => find(remaining.tail) + } } find(bundles) } @@ -50,17 +53,18 @@ class BundleDelegatingClassLoader(bundle: Bundle, fallBackClassLoader: ClassLoad override def findResource(name: String): URL = { @tailrec def find(remaining: List[Bundle]): URL = { if (remaining.isEmpty) getParent.getResource(name) - else Option { remaining.head.getResource(name) } match { - case Some(r) => r - case None => find(remaining.tail) - } + else + Option { remaining.head.getResource(name) } match { + case Some(r) => r + case None => find(remaining.tail) + } } find(bundles) } override def findResources(name: String): Enumeration[URL] = { - val resources = bundles.flatMap { - bundle => Option(bundle.getResources(name)).map { _.asScala.toList }.getOrElse(Nil) + val resources = bundles.flatMap { bundle => + 
Option(bundle.getResources(name)).map { _.asScala.toList }.getOrElse(Nil) } java.util.Collections.enumeration(resources.asJava) } @@ -80,11 +84,11 @@ class BundleDelegatingClassLoader(bundle: Bundle, fallBackClassLoader: ClassLoad else { val requiredWires: List[BundleWire] = wiring.getRequiredWires(BundleRevision.PACKAGE_NAMESPACE).asScala.toList - requiredWires.flatMap { - wire => Option(wire.getProviderWiring) map { _.getBundle } + requiredWires.flatMap { wire => + Option(wire.getProviderWiring).map { _.getBundle } }.toSet } - process(processed + b, rest ++ (direct diff processed)) + process(processed + b, rest ++ (direct.diff(processed))) } } } diff --git a/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala b/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala index 2777f9a840..a27855c030 100644 --- a/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala +++ b/akka-osgi/src/main/scala/akka/osgi/DefaultOSGiLogger.scala @@ -29,6 +29,7 @@ class DefaultOSGiLogger extends DefaultLogger { //the Default Logger needs to be aware of the LogService which is published on the EventStream context.system.eventStream.subscribe(self, classOf[LogService]) context.system.eventStream.unsubscribe(self, UnregisteringLogService.getClass) + /** * Logs every already received LogEvent and set the logger ready to log every incoming LogEvent. 
* @@ -71,9 +72,12 @@ class DefaultOSGiLogger extends DefaultLogger { def logMessage(logService: LogService, event: LogEvent): Unit = { event match { case error: Logging.Error if error.cause != NoCause => - logService.log(event.level.asInt, messageFormat.format(timestamp(event), event.thread.getName, event.logSource, event.message), error.cause) + logService.log(event.level.asInt, + messageFormat.format(timestamp(event), event.thread.getName, event.logSource, event.message), + error.cause) case _ => - logService.log(event.level.asInt, messageFormat.format(timestamp(event), event.thread.getName, event.logSource, event.message)) + logService.log(event.level.asInt, + messageFormat.format(timestamp(event), event.thread.getName, event.logSource, event.message)) } } diff --git a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala index 71aa1d687d..01e297935c 100644 --- a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala +++ b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala @@ -5,14 +5,16 @@ package akka.osgi import akka.actor.ActorSystem -import com.typesafe.config.{ ConfigFactory, Config } +import com.typesafe.config.{ Config, ConfigFactory } import org.osgi.framework.BundleContext /** * Factory class to create ActorSystem implementations in an OSGi environment. 
This mainly involves dealing with * bundle classloaders appropriately to ensure that configuration files and classes get loaded properly */ -class OsgiActorSystemFactory(val context: BundleContext, val fallbackClassLoader: Option[ClassLoader], config: Config = ConfigFactory.empty) { +class OsgiActorSystemFactory(val context: BundleContext, + val fallbackClassLoader: Option[ClassLoader], + config: Config = ConfigFactory.empty) { /* * Classloader that delegates to the bundle for which the factory is creating an ActorSystem @@ -38,7 +40,10 @@ class OsgiActorSystemFactory(val context: BundleContext, val fallbackClassLoader * Configuration files found in akka-actor bundle */ def actorSystemConfig(context: BundleContext): Config = { - config.withFallback(ConfigFactory.load(classloader).withFallback(ConfigFactory.defaultReference(OsgiActorSystemFactory.akkaActorClassLoader))) + config.withFallback( + ConfigFactory + .load(classloader) + .withFallback(ConfigFactory.defaultReference(OsgiActorSystemFactory.akkaActorClassLoader))) } /** @@ -51,6 +56,7 @@ class OsgiActorSystemFactory(val context: BundleContext, val fallbackClassLoader } object OsgiActorSystemFactory { + /** * Class loader of akka-actor bundle. 
*/ @@ -59,5 +65,6 @@ object OsgiActorSystemFactory { /* * Create an [[OsgiActorSystemFactory]] instance to set up Akka in an OSGi environment */ - def apply(context: BundleContext, config: Config): OsgiActorSystemFactory = new OsgiActorSystemFactory(context, Some(akkaActorClassLoader), config) + def apply(context: BundleContext, config: Config): OsgiActorSystemFactory = + new OsgiActorSystemFactory(context, Some(akkaActorClassLoader), config) } diff --git a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala index 233ca655c6..12e9290964 100644 --- a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala +++ b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala @@ -14,7 +14,7 @@ import scala.concurrent.duration._ import scala.collection.immutable import akka.util.Timeout import de.kalpatec.pojosr.framework.launch.BundleDescriptor -import test.{ RuntimeNameActorSystemActivator, TestActivators, PingPongActorSystemActivator } +import test.{ PingPongActorSystemActivator, RuntimeNameActorSystemActivator, TestActivators } import test.PingPong._ import PojoSRTestSupport.bundle import org.scalatest.Matchers @@ -34,8 +34,8 @@ class PingPongActorSystemActivatorTest extends WordSpec with Matchers with PojoS import ActorSystemActivatorTest._ - val testBundles: immutable.Seq[BundleDescriptor] = buildTestBundles(List( - bundle(TEST_BUNDLE_NAME).withActivator(classOf[PingPongActorSystemActivator]))) + val testBundles: immutable.Seq[BundleDescriptor] = buildTestBundles( + List(bundle(TEST_BUNDLE_NAME).withActivator(classOf[PingPongActorSystemActivator]))) "PingPongActorSystemActivator" must { @@ -74,7 +74,8 @@ class RuntimeNameActorSystemActivatorTest extends WordSpec with Matchers with Po "register an ActorSystem and add the bundle id to the system name" in { filterErrors() { - serviceForType[ActorSystem].name should 
be(TestActivators.ACTOR_SYSTEM_NAME_PATTERN.format(bundleForName(TEST_BUNDLE_NAME).getBundleId)) + serviceForType[ActorSystem].name should be( + TestActivators.ACTOR_SYSTEM_NAME_PATTERN.format(bundleForName(TEST_BUNDLE_NAME).getBundleId)) } } } diff --git a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala index 7ce6c63b43..908b89450f 100644 --- a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala +++ b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala @@ -4,7 +4,7 @@ package akka.osgi -import de.kalpatec.pojosr.framework.launch.{ BundleDescriptor, PojoServiceRegistryFactory, ClasspathScanner } +import de.kalpatec.pojosr.framework.launch.{ BundleDescriptor, ClasspathScanner, PojoServiceRegistryFactory } import scala.collection.JavaConverters._ import org.apache.commons.io.IOUtils.copy @@ -14,7 +14,7 @@ import java.net.URL import java.util.jar.JarInputStream import java.io._ import org.scalatest.{ BeforeAndAfterAll, Suite } -import java.util.{ UUID, Date, ServiceLoader, HashMap } +import java.util.{ Date, HashMap, ServiceLoader, UUID } import scala.reflect.ClassTag import scala.collection.immutable import scala.concurrent.duration._ @@ -47,7 +47,12 @@ trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { val oldErr = System.err System.setErr(new PrintStream(bufferedLoadingErrors)) try { - ServiceLoader.load(classOf[PojoServiceRegistryFactory]).iterator.next.newPojoServiceRegistry(config).getBundleContext + ServiceLoader + .load(classOf[PojoServiceRegistryFactory]) + .iterator + .next + .newPojoServiceRegistry(config) + .getBundleContext } catch { case e: Throwable => oldErr.write(bufferedLoadingErrors.toByteArray); throw e } finally { @@ -62,7 +67,9 @@ trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { * Convenience method to find a bundle by symbolic name */ def bundleForName(name: String) = - context.getBundles.find(_.getSymbolicName == 
name).getOrElse(fail("Unable to find bundle with symbolic name %s".format(name))) + context.getBundles + .find(_.getSymbolicName == name) + .getOrElse(fail("Unable to find bundle with symbolic name %s".format(name))) /** * Convenience method to find a service by interface. If the service is not already available in the OSGi Service @@ -75,27 +82,30 @@ trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { def awaitReference[T](serviceType: Class[T], wait: FiniteDuration): ServiceReference[T] = { - @tailrec def poll(step: Duration, deadline: Deadline): ServiceReference[T] = context.getServiceReference(serviceType.getName) match { - case null => - if (deadline.isOverdue()) fail("Gave up waiting for service of type %s".format(serviceType)) - else { - Thread.sleep((step min deadline.timeLeft max Duration.Zero).toMillis) - poll(step, deadline) - } - case some => some.asInstanceOf[ServiceReference[T]] - } + @tailrec def poll(step: Duration, deadline: Deadline): ServiceReference[T] = + context.getServiceReference(serviceType.getName) match { + case null => + if (deadline.isOverdue()) fail("Gave up waiting for service of type %s".format(serviceType)) + else { + Thread.sleep((step min deadline.timeLeft max Duration.Zero).toMillis) + poll(step, deadline) + } + case some => some.asInstanceOf[ServiceReference[T]] + } poll(wait, Deadline.now + MaxWaitDuration) } protected def buildTestBundles(builders: immutable.Seq[BundleDescriptorBuilder]): immutable.Seq[BundleDescriptor] = - builders map (_.build) + builders.map(_.build) def filterErrors()(block: => Unit): Unit = - try block catch { case e: Throwable => System.err.write(bufferedLoadingErrors.toByteArray); throw e } + try block + catch { case e: Throwable => System.err.write(bufferedLoadingErrors.toByteArray); throw e } } object PojoSRTestSupport { + /** * Convenience method to define additional test bundles */ @@ -140,7 +150,9 @@ class BundleDescriptorBuilder(name: String) { */ def build: BundleDescriptor = { val 
file: File = tinybundleToJarFile(name) - new BundleDescriptor(getClass().getClassLoader(), new URL("jar:" + file.toURI().toString() + "!/"), extractHeaders(file)) + new BundleDescriptor(getClass().getClassLoader(), + new URL("jar:" + file.toURI().toString() + "!/"), + extractHeaders(file)) } def extractHeaders(file: File): HashMap[String, String] = { @@ -158,9 +170,9 @@ class BundleDescriptorBuilder(name: String) { def tinybundleToJarFile(name: String): File = { val file = new File("target/%s-%tQ.jar".format(name, new Date())) val fos = new FileOutputStream(file) - try copy(tinybundle.build(), fos) finally fos.close() + try copy(tinybundle.build(), fos) + finally fos.close() file } } - diff --git a/akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala b/akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala index 0a48832a55..ee2244e9e8 100644 --- a/akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala +++ b/akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala @@ -5,7 +5,7 @@ package akka.osgi.test import akka.osgi.ActorSystemActivator -import akka.actor.{ Props, ActorSystem } +import akka.actor.{ ActorSystem, Props } import PingPong._ import org.osgi.framework.BundleContext diff --git a/akka-osgi/src/test/scala/docs/osgi/Activator.scala b/akka-osgi/src/test/scala/docs/osgi/Activator.scala index 7d957c5b2f..281394b5df 100644 --- a/akka-osgi/src/test/scala/docs/osgi/Activator.scala +++ b/akka-osgi/src/test/scala/docs/osgi/Activator.scala @@ -11,7 +11,7 @@ class SomeActor extends akka.actor.Actor { } //#Activator -import akka.actor.{ Props, ActorSystem } +import akka.actor.{ ActorSystem, Props } import org.osgi.framework.BundleContext import akka.osgi.ActorSystemActivator diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala index 49ab4dfa1a..7e8d29b93c 100644 --- 
a/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/EventEnvelope.scala @@ -8,8 +8,4 @@ package akka.persistence.query * Event wrapper adding meta data for the events in the result stream of * [[akka.persistence.query.scaladsl.EventsByTagQuery]] query, or similar queries. */ -final case class EventEnvelope( - offset: Offset, - persistenceId: String, - sequenceNr: Long, - event: Any) +final case class EventEnvelope(offset: Offset, persistenceId: String, sequenceNr: Long, event: Any) diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/Offset.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/Offset.scala index eeec035c64..579994c7dd 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/Offset.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/Offset.scala @@ -51,6 +51,7 @@ final case class TimeBasedUUID(value: UUID) extends Offset with Ordered[TimeBase * Used when retrieving all events. */ final case object NoOffset extends Offset { + /** * Java API: */ diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala index 495406e1c7..b26d99d33f 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/PersistenceQuery.scala @@ -16,6 +16,7 @@ import scala.reflect.ClassTag * Persistence extension for queries. */ object PersistenceQuery extends ExtensionId[PersistenceQuery] with ExtensionIdProvider { + /** * Java API. 
*/ @@ -35,8 +36,11 @@ object PersistenceQuery extends ExtensionId[PersistenceQuery] with ExtensionIdPr } class PersistenceQuery(system: ExtendedActorSystem) - extends PersistencePlugin[scaladsl.ReadJournal, javadsl.ReadJournal, ReadJournalProvider](system)(ClassTag(classOf[ReadJournalProvider]), PersistenceQuery.pluginProvider) - with Extension { + extends PersistencePlugin[scaladsl.ReadJournal, javadsl.ReadJournal, ReadJournalProvider](system)( + ClassTag(classOf[ReadJournalProvider]), + PersistenceQuery.pluginProvider) + with Extension { + /** * Scala API: Returns the [[akka.persistence.query.scaladsl.ReadJournal]] specified by the given * read journal configuration entry. @@ -58,10 +62,12 @@ class PersistenceQuery(system: ExtendedActorSystem) * Java API: Returns the [[akka.persistence.query.javadsl.ReadJournal]] specified by the given * read journal configuration entry. */ - final def getReadJournalFor[T <: javadsl.ReadJournal](clazz: Class[T], readJournalPluginId: String, readJournalPluginConfig: Config): T = + final def getReadJournalFor[T <: javadsl.ReadJournal](clazz: Class[T], + readJournalPluginId: String, + readJournalPluginConfig: Config): T = pluginFor(readJournalPluginId, readJournalPluginConfig).javadslPlugin.asInstanceOf[T] - final def getReadJournalFor[T <: javadsl.ReadJournal](clazz: Class[T], readJournalPluginId: String): T = getReadJournalFor[T](clazz, readJournalPluginId, ConfigFactory.empty()) + final def getReadJournalFor[T <: javadsl.ReadJournal](clazz: Class[T], readJournalPluginId: String): T = + getReadJournalFor[T](clazz, readJournalPluginId, ConfigFactory.empty()) } - diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala index 41fc3a035e..9c52b10436 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala +++ 
b/akka-persistence-query/src/main/scala/akka/persistence/query/ReadJournalProvider.scala @@ -18,6 +18,7 @@ package akka.persistence.query * */ trait ReadJournalProvider { + /** * The `ReadJournal` implementation for the Scala API. * This corresponds to the instance that is returned by [[PersistenceQuery#readJournalFor]]. diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala index 2cbfa5c922..cad6d56395 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByPersistenceIdQuery.scala @@ -19,7 +19,8 @@ trait CurrentEventsByPersistenceIdQuery extends ReadJournal { * the "result set". Events that are stored after the query is completed are * not included in the event stream. 
*/ - def currentEventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, + def currentEventsByPersistenceId(persistenceId: String, + fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] } diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByTagQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByTagQuery.scala index 9e689ba2e4..2cd1d03705 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByTagQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/CurrentEventsByTagQuery.scala @@ -20,4 +20,3 @@ trait CurrentEventsByTagQuery extends ReadJournal { */ def currentEventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] } - diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala index fd2dcd5dd2..f1cc95650f 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/EventsByPersistenceIdQuery.scala @@ -26,7 +26,8 @@ trait EventsByPersistenceIdQuery extends ReadJournal { * Corresponding query that is completed when it reaches the end of the currently * stored events is provided by [[CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]]. 
*/ - def eventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, + def eventsByPersistenceId(persistenceId: String, + fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] } diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/ReadJournal.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/ReadJournal.scala index 2fddd1a0b5..6ec9745b89 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/ReadJournal.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/javadsl/ReadJournal.scala @@ -27,4 +27,3 @@ package akka.persistence.query.javadsl * For Scala API see [[akka.persistence.query.scaladsl.ReadJournal]]. */ trait ReadJournal - diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsPublisher.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsPublisher.scala index 162b0d615f..60217cf262 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsPublisher.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsPublisher.scala @@ -29,7 +29,9 @@ private[akka] object AllPersistenceIdsPublisher { */ // FIXME needs a be rewritten as a GraphStage (since 2.5.0) private[akka] class AllPersistenceIdsPublisher(liveQuery: Boolean, maxBufSize: Int, writeJournalPluginId: String) - extends ActorPublisher[String] with DeliveryBuffer[String] with ActorLogging { + extends ActorPublisher[String] + with DeliveryBuffer[String] + with ActorLogging { val journal: ActorRef = Persistence(context.system).journalFor(writeJournalPluginId) diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/DeliveryBuffer.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/DeliveryBuffer.scala index 
bcf92de37b..1497795915 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/DeliveryBuffer.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/DeliveryBuffer.scala @@ -22,9 +22,9 @@ private[akka] trait DeliveryBuffer[T] { _: ActorPublisher[T] => } else if (totalDemand <= Int.MaxValue) { val (use, keep) = buf.splitAt(totalDemand.toInt) buf = keep - use foreach onNext + use.foreach(onNext) } else { - buf foreach onNext + buf.foreach(onNext) buf = Vector.empty } } diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala index 1981be3fd0..c07f31c714 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdPublisher.scala @@ -18,15 +18,28 @@ import akka.persistence.query.{ EventEnvelope, Sequence } * INTERNAL API */ private[akka] object EventsByPersistenceIdPublisher { - def props(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, refreshInterval: Option[FiniteDuration], - maxBufSize: Int, writeJournalPluginId: String): Props = { + def props(persistenceId: String, + fromSequenceNr: Long, + toSequenceNr: Long, + refreshInterval: Option[FiniteDuration], + maxBufSize: Int, + writeJournalPluginId: String): Props = { refreshInterval match { case Some(interval) => - Props(new LiveEventsByPersistenceIdPublisher(persistenceId, fromSequenceNr, toSequenceNr, interval, - maxBufSize, writeJournalPluginId)) + Props( + new LiveEventsByPersistenceIdPublisher(persistenceId, + fromSequenceNr, + toSequenceNr, + interval, + maxBufSize, + writeJournalPluginId)) case None => - Props(new CurrentEventsByPersistenceIdPublisher(persistenceId, 
fromSequenceNr, toSequenceNr, - maxBufSize, writeJournalPluginId)) + Props( + new CurrentEventsByPersistenceIdPublisher(persistenceId, + fromSequenceNr, + toSequenceNr, + maxBufSize, + writeJournalPluginId)) } } @@ -40,10 +53,13 @@ private[akka] object EventsByPersistenceIdPublisher { * INTERNAL API */ // FIXME needs a be rewritten as a GraphStage (since 2.5.0) -private[akka] abstract class AbstractEventsByPersistenceIdPublisher( - val persistenceId: String, val fromSequenceNr: Long, - val maxBufSize: Int, val writeJournalPluginId: String) - extends ActorPublisher[EventEnvelope] with DeliveryBuffer[EventEnvelope] with ActorLogging { +private[akka] abstract class AbstractEventsByPersistenceIdPublisher(val persistenceId: String, + val fromSequenceNr: Long, + val maxBufSize: Int, + val writeJournalPluginId: String) + extends ActorPublisher[EventEnvelope] + with DeliveryBuffer[EventEnvelope] + with ActorLogging { import EventsByPersistenceIdPublisher._ val journal: ActorRef = Persistence(context.system).journalFor(writeJournalPluginId) @@ -81,18 +97,21 @@ private[akka] abstract class AbstractEventsByPersistenceIdPublisher( def replay(): Unit = { val limit = maxBufSize - buf.size - log.debug("request replay for persistenceId [{}] from [{}] to [{}] limit [{}]", persistenceId, currSeqNo, toSequenceNr, limit) + log.debug("request replay for persistenceId [{}] from [{}] to [{}] limit [{}]", + persistenceId, + currSeqNo, + toSequenceNr, + limit) journal ! 
ReplayMessages(currSeqNo, toSequenceNr, limit, persistenceId, self) context.become(replaying(limit)) } def replaying(limit: Int): Receive = { case ReplayedMessage(p) => - buf :+= EventEnvelope( - offset = Sequence(p.sequenceNr), - persistenceId = persistenceId, - sequenceNr = p.sequenceNr, - event = p.payload) + buf :+= EventEnvelope(offset = Sequence(p.sequenceNr), + persistenceId = persistenceId, + sequenceNr = p.sequenceNr, + event = p.payload) currSeqNo = p.sequenceNr + 1 deliverBuf() @@ -121,12 +140,13 @@ private[akka] abstract class AbstractEventsByPersistenceIdPublisher( * INTERNAL API */ // FIXME needs a be rewritten as a GraphStage (since 2.5.0) -private[akka] class LiveEventsByPersistenceIdPublisher( - persistenceId: String, fromSequenceNr: Long, override val toSequenceNr: Long, - refreshInterval: FiniteDuration, - maxBufSize: Int, writeJournalPluginId: String) - extends AbstractEventsByPersistenceIdPublisher( - persistenceId, fromSequenceNr, maxBufSize, writeJournalPluginId) { +private[akka] class LiveEventsByPersistenceIdPublisher(persistenceId: String, + fromSequenceNr: Long, + override val toSequenceNr: Long, + refreshInterval: FiniteDuration, + maxBufSize: Int, + writeJournalPluginId: String) + extends AbstractEventsByPersistenceIdPublisher(persistenceId, fromSequenceNr, maxBufSize, writeJournalPluginId) { import EventsByPersistenceIdPublisher._ val tickTask: Cancellable = @@ -158,11 +178,12 @@ private[akka] class LiveEventsByPersistenceIdPublisher( /** * INTERNAL API */ -private[akka] class CurrentEventsByPersistenceIdPublisher( - persistenceId: String, fromSequenceNr: Long, var toSeqNr: Long, - maxBufSize: Int, writeJournalPluginId: String) - extends AbstractEventsByPersistenceIdPublisher( - persistenceId, fromSequenceNr, maxBufSize, writeJournalPluginId) { +private[akka] class CurrentEventsByPersistenceIdPublisher(persistenceId: String, + fromSequenceNr: Long, + var toSeqNr: Long, + maxBufSize: Int, + writeJournalPluginId: String) + extends 
AbstractEventsByPersistenceIdPublisher(persistenceId, fromSequenceNr, maxBufSize, writeJournalPluginId) { import EventsByPersistenceIdPublisher._ override def toSequenceNr: Long = toSeqNr diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala index ba8e5f0193..1886b7f38b 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/EventsByTagPublisher.scala @@ -20,15 +20,17 @@ import akka.persistence.journal.leveldb.LeveldbJournal.ReplayedTaggedMessage * INTERNAL API */ private[akka] object EventsByTagPublisher { - def props(tag: String, fromOffset: Long, toOffset: Long, refreshInterval: Option[FiniteDuration], - maxBufSize: Int, writeJournalPluginId: String): Props = { + def props(tag: String, + fromOffset: Long, + toOffset: Long, + refreshInterval: Option[FiniteDuration], + maxBufSize: Int, + writeJournalPluginId: String): Props = { refreshInterval match { case Some(interval) => - Props(new LiveEventsByTagPublisher(tag, fromOffset, toOffset, interval, - maxBufSize, writeJournalPluginId)) + Props(new LiveEventsByTagPublisher(tag, fromOffset, toOffset, interval, maxBufSize, writeJournalPluginId)) case None => - Props(new CurrentEventsByTagPublisher(tag, fromOffset, toOffset, - maxBufSize, writeJournalPluginId)) + Props(new CurrentEventsByTagPublisher(tag, fromOffset, toOffset, maxBufSize, writeJournalPluginId)) } } @@ -42,10 +44,13 @@ private[akka] object EventsByTagPublisher { * INTERNAL API */ // FIXME needs a be rewritten as a GraphStage -private[akka] abstract class AbstractEventsByTagPublisher( - val tag: String, val fromOffset: Long, - val maxBufSize: Int, val writeJournalPluginId: String) - extends ActorPublisher[EventEnvelope] with 
DeliveryBuffer[EventEnvelope] with ActorLogging { +private[akka] abstract class AbstractEventsByTagPublisher(val tag: String, + val fromOffset: Long, + val maxBufSize: Int, + val writeJournalPluginId: String) + extends ActorPublisher[EventEnvelope] + with DeliveryBuffer[EventEnvelope] + with ActorLogging { import EventsByTagPublisher._ val journal: ActorRef = Persistence(context.system).journalFor(writeJournalPluginId) @@ -90,11 +95,10 @@ private[akka] abstract class AbstractEventsByTagPublisher( def replaying(limit: Int): Receive = { case ReplayedTaggedMessage(p, _, offset) => - buf :+= EventEnvelope( - offset = Sequence(offset), - persistenceId = p.persistenceId, - sequenceNr = p.sequenceNr, - event = p.payload) + buf :+= EventEnvelope(offset = Sequence(offset), + persistenceId = p.persistenceId, + sequenceNr = p.sequenceNr, + event = p.payload) currOffset = offset deliverBuf() @@ -123,12 +127,13 @@ private[akka] abstract class AbstractEventsByTagPublisher( * INTERNAL API */ // FIXME needs a be rewritten as a GraphStage (since 2.5.0) -private[akka] class LiveEventsByTagPublisher( - tag: String, fromOffset: Long, override val toOffset: Long, - refreshInterval: FiniteDuration, - maxBufSize: Int, writeJournalPluginId: String) - extends AbstractEventsByTagPublisher( - tag, fromOffset, maxBufSize, writeJournalPluginId) { +private[akka] class LiveEventsByTagPublisher(tag: String, + fromOffset: Long, + override val toOffset: Long, + refreshInterval: FiniteDuration, + maxBufSize: Int, + writeJournalPluginId: String) + extends AbstractEventsByTagPublisher(tag, fromOffset, maxBufSize, writeJournalPluginId) { import EventsByTagPublisher._ val tickTask: Cancellable = @@ -161,11 +166,12 @@ private[akka] class LiveEventsByTagPublisher( * INTERNAL API */ // FIXME needs a be rewritten as a GraphStage (since 2.5.0) -private[akka] class CurrentEventsByTagPublisher( - tag: String, fromOffset: Long, var _toOffset: Long, - maxBufSize: Int, writeJournalPluginId: String) - extends 
AbstractEventsByTagPublisher( - tag, fromOffset, maxBufSize, writeJournalPluginId) { +private[akka] class CurrentEventsByTagPublisher(tag: String, + fromOffset: Long, + var _toOffset: Long, + maxBufSize: Int, + writeJournalPluginId: String) + extends AbstractEventsByTagPublisher(tag, fromOffset, maxBufSize, writeJournalPluginId) { import EventsByTagPublisher._ override def toOffset: Long = _toOffset diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala index 1ddd68ec18..4a0128ef1f 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/javadsl/LeveldbReadJournal.scala @@ -26,10 +26,13 @@ import akka.stream.javadsl.Source * */ class LeveldbReadJournal(scaladslReadJournal: akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal) - extends ReadJournal - with PersistenceIdsQuery with CurrentPersistenceIdsQuery - with EventsByPersistenceIdQuery with CurrentEventsByPersistenceIdQuery - with EventsByTagQuery with CurrentEventsByTagQuery { + extends ReadJournal + with PersistenceIdsQuery + with CurrentPersistenceIdsQuery + with EventsByPersistenceIdQuery + with CurrentEventsByPersistenceIdQuery + with EventsByTagQuery + with CurrentEventsByTagQuery { /** * `persistenceIds` is used for retrieving all `persistenceIds` of all @@ -86,7 +89,8 @@ class LeveldbReadJournal(scaladslReadJournal: akka.persistence.query.journal.lev * The stream is completed with failure if there is a failure in executing the query in the * backend journal. 
*/ - override def eventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, + override def eventsByPersistenceId(persistenceId: String, + fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] = scaladslReadJournal.eventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava @@ -95,7 +99,8 @@ class LeveldbReadJournal(scaladslReadJournal: akka.persistence.query.journal.lev * is completed immediately when it reaches the end of the "result set". Events that are * stored after the query is completed are not included in the event stream. */ - override def currentEventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, + override def currentEventsByPersistenceId(persistenceId: String, + fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] = scaladslReadJournal.currentEventsByPersistenceId(persistenceId, fromSequenceNr, toSequenceNr).asJava @@ -152,6 +157,7 @@ class LeveldbReadJournal(scaladslReadJournal: akka.persistence.query.journal.lev } object LeveldbReadJournal { + /** * The default identifier for [[LeveldbReadJournal]] to be used with * [[akka.persistence.query.PersistenceQuery#getReadJournalFor]]. 
@@ -161,4 +167,3 @@ object LeveldbReadJournal { */ final val Identifier = akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal.Identifier } - diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala index 8756ac7e2b..1479ea5ee0 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/journal/leveldb/scaladsl/LeveldbReadJournal.scala @@ -9,7 +9,11 @@ import java.net.URLEncoder import akka.NotUsed import akka.actor.ExtendedActorSystem import akka.event.Logging -import akka.persistence.query.journal.leveldb.{ AllPersistenceIdsPublisher, EventsByPersistenceIdPublisher, EventsByTagPublisher } +import akka.persistence.query.journal.leveldb.{ + AllPersistenceIdsPublisher, + EventsByPersistenceIdPublisher, + EventsByTagPublisher +} import akka.persistence.query.scaladsl.{ ReadJournal, _ } import akka.persistence.query.{ EventEnvelope, NoOffset, Offset, Sequence } import akka.stream.scaladsl.Source @@ -32,10 +36,14 @@ import scala.concurrent.duration._ * absolute path corresponding to the identifier, which is `"akka.persistence.query.journal.leveldb"` * for the default [[LeveldbReadJournal#Identifier]]. See `reference.conf`. 
*/ -class LeveldbReadJournal(system: ExtendedActorSystem, config: Config) extends ReadJournal - with PersistenceIdsQuery with CurrentPersistenceIdsQuery - with EventsByPersistenceIdQuery with CurrentEventsByPersistenceIdQuery - with EventsByTagQuery with CurrentEventsByTagQuery { +class LeveldbReadJournal(system: ExtendedActorSystem, config: Config) + extends ReadJournal + with PersistenceIdsQuery + with CurrentPersistenceIdsQuery + with EventsByPersistenceIdQuery + with CurrentEventsByPersistenceIdQuery + with EventsByTagQuery + with CurrentEventsByTagQuery { private val refreshInterval = Some(config.getDuration("refresh-interval", MILLISECONDS).millis) private val writeJournalPluginId: String = config.getString("write-plugin") @@ -61,7 +69,8 @@ class LeveldbReadJournal(system: ExtendedActorSystem, config: Config) extends Re */ override def persistenceIds(): Source[String, NotUsed] = { // no polling for this query, the write journal will push all changes, i.e. no refreshInterval - Source.actorPublisher[String](AllPersistenceIdsPublisher.props(liveQuery = true, maxBufSize, writeJournalPluginId)) + Source + .actorPublisher[String](AllPersistenceIdsPublisher.props(liveQuery = true, maxBufSize, writeJournalPluginId)) .mapMaterializedValue(_ => NotUsed) .named("allPersistenceIds") } @@ -72,7 +81,8 @@ class LeveldbReadJournal(system: ExtendedActorSystem, config: Config) extends Re * actors that are created after the query is completed are not included in the stream. 
*/ override def currentPersistenceIds(): Source[String, NotUsed] = { - Source.actorPublisher[String](AllPersistenceIdsPublisher.props(liveQuery = false, maxBufSize, writeJournalPluginId)) + Source + .actorPublisher[String](AllPersistenceIdsPublisher.props(liveQuery = false, maxBufSize, writeJournalPluginId)) .mapMaterializedValue(_ => NotUsed) .named("currentPersistenceIds") } @@ -103,10 +113,13 @@ class LeveldbReadJournal(system: ExtendedActorSystem, config: Config) extends Re * The stream is completed with failure if there is a failure in executing the query in the * backend journal. */ - override def eventsByPersistenceId(persistenceId: String, fromSequenceNr: Long = 0L, + override def eventsByPersistenceId(persistenceId: String, + fromSequenceNr: Long = 0L, toSequenceNr: Long = Long.MaxValue): Source[EventEnvelope, NotUsed] = { - Source.actorPublisher[EventEnvelope](EventsByPersistenceIdPublisher.props(persistenceId, fromSequenceNr, toSequenceNr, - refreshInterval, maxBufSize, writeJournalPluginId)).mapMaterializedValue(_ => NotUsed) + Source + .actorPublisher[EventEnvelope](EventsByPersistenceIdPublisher + .props(persistenceId, fromSequenceNr, toSequenceNr, refreshInterval, maxBufSize, writeJournalPluginId)) + .mapMaterializedValue(_ => NotUsed) .named("eventsByPersistenceId-" + persistenceId) } @@ -115,10 +128,13 @@ class LeveldbReadJournal(system: ExtendedActorSystem, config: Config) extends Re * is completed immediately when it reaches the end of the "result set". Events that are * stored after the query is completed are not included in the event stream. 
*/ - override def currentEventsByPersistenceId(persistenceId: String, fromSequenceNr: Long = 0L, + override def currentEventsByPersistenceId(persistenceId: String, + fromSequenceNr: Long = 0L, toSequenceNr: Long = Long.MaxValue): Source[EventEnvelope, NotUsed] = { - Source.actorPublisher[EventEnvelope](EventsByPersistenceIdPublisher.props(persistenceId, fromSequenceNr, toSequenceNr, - None, maxBufSize, writeJournalPluginId)).mapMaterializedValue(_ => NotUsed) + Source + .actorPublisher[EventEnvelope](EventsByPersistenceIdPublisher + .props(persistenceId, fromSequenceNr, toSequenceNr, None, maxBufSize, writeJournalPluginId)) + .mapMaterializedValue(_ => NotUsed) .named("currentEventsByPersistenceId-" + persistenceId) } @@ -164,13 +180,15 @@ class LeveldbReadJournal(system: ExtendedActorSystem, config: Config) extends Re override def eventsByTag(tag: String, offset: Offset = Sequence(0L)): Source[EventEnvelope, NotUsed] = offset match { case seq: Sequence => - Source.actorPublisher[EventEnvelope](EventsByTagPublisher.props(tag, seq.value, Long.MaxValue, - refreshInterval, maxBufSize, writeJournalPluginId)) + Source + .actorPublisher[EventEnvelope](EventsByTagPublisher + .props(tag, seq.value, Long.MaxValue, refreshInterval, maxBufSize, writeJournalPluginId)) .mapMaterializedValue(_ => NotUsed) .named("eventsByTag-" + URLEncoder.encode(tag, ByteString.UTF_8)) case NoOffset => eventsByTag(tag, Sequence(0L)) //recursive case _ => - throw new IllegalArgumentException("LevelDB does not support " + Logging.simpleName(offset.getClass) + " offsets") + throw new IllegalArgumentException( + "LevelDB does not support " + Logging.simpleName(offset.getClass) + " offsets") } /** @@ -181,17 +199,21 @@ class LeveldbReadJournal(system: ExtendedActorSystem, config: Config) extends Re override def currentEventsByTag(tag: String, offset: Offset = Sequence(0L)): Source[EventEnvelope, NotUsed] = offset match { case seq: Sequence => - 
Source.actorPublisher[EventEnvelope](EventsByTagPublisher.props(tag, seq.value, Long.MaxValue, - None, maxBufSize, writeJournalPluginId)).mapMaterializedValue(_ => NotUsed) + Source + .actorPublisher[EventEnvelope]( + EventsByTagPublisher.props(tag, seq.value, Long.MaxValue, None, maxBufSize, writeJournalPluginId)) + .mapMaterializedValue(_ => NotUsed) .named("currentEventsByTag-" + URLEncoder.encode(tag, ByteString.UTF_8)) case NoOffset => currentEventsByTag(tag, Sequence(0L)) //recursive case _ => - throw new IllegalArgumentException("LevelDB does not support " + Logging.simpleName(offset.getClass) + " offsets") + throw new IllegalArgumentException( + "LevelDB does not support " + Logging.simpleName(offset.getClass) + " offsets") } } object LeveldbReadJournal { + /** * The default identifier for [[LeveldbReadJournal]] to be used with * [[akka.persistence.query.PersistenceQuery#readJournalFor]]. @@ -201,4 +223,3 @@ object LeveldbReadJournal { */ final val Identifier = "akka.persistence.query.journal.leveldb" } - diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala index ac0f864280..aad344e388 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByPersistenceIdQuery.scala @@ -19,7 +19,8 @@ trait CurrentEventsByPersistenceIdQuery extends ReadJournal { * the "result set". Events that are stored after the query is completed are * not included in the event stream. 
*/ - def currentEventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, + def currentEventsByPersistenceId(persistenceId: String, + fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] } diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByTagQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByTagQuery.scala index 710b11c514..cbfeb7b7a4 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByTagQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/CurrentEventsByTagQuery.scala @@ -21,4 +21,3 @@ trait CurrentEventsByTagQuery extends ReadJournal { def currentEventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] } - diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala index 04260d699e..f34d4b5501 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByPersistenceIdQuery.scala @@ -26,7 +26,8 @@ trait EventsByPersistenceIdQuery extends ReadJournal { * Corresponding query that is completed when it reaches the end of the currently * stored events is provided by [[CurrentEventsByPersistenceIdQuery#currentEventsByPersistenceId]]. 
*/ - def eventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, + def eventsByPersistenceId(persistenceId: String, + fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed] } diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByTagQuery.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByTagQuery.scala index 2b1c604c45..ec99fa2324 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByTagQuery.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/EventsByTagQuery.scala @@ -39,4 +39,3 @@ trait EventsByTagQuery extends ReadJournal { def eventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] } - diff --git a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/ReadJournal.scala b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/ReadJournal.scala index e374d68b72..c6c3e2c8ac 100644 --- a/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/ReadJournal.scala +++ b/akka-persistence-query/src/main/scala/akka/persistence/query/scaladsl/ReadJournal.scala @@ -26,4 +26,3 @@ package akka.persistence.query.scaladsl * For Java API see [[akka.persistence.query.javadsl.ReadJournal]]. 
*/ trait ReadJournal - diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/DummyReadJournal.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/DummyReadJournal.scala index 08ac28c865..9ffcfacf5f 100644 --- a/akka-persistence-query/src/test/scala/akka/persistence/query/DummyReadJournal.scala +++ b/akka-persistence-query/src/test/scala/akka/persistence/query/DummyReadJournal.scala @@ -22,14 +22,15 @@ object DummyReadJournal { final val Identifier = "akka.persistence.query.journal.dummy" } -class DummyReadJournalForJava(readJournal: DummyReadJournal) extends javadsl.ReadJournal with javadsl.PersistenceIdsQuery { +class DummyReadJournalForJava(readJournal: DummyReadJournal) + extends javadsl.ReadJournal + with javadsl.PersistenceIdsQuery { override def persistenceIds(): akka.stream.javadsl.Source[String, NotUsed] = readJournal.persistenceIds().asJava } object DummyReadJournalProvider { - final val config: Config = ConfigFactory.parseString( - s""" + final val config: Config = ConfigFactory.parseString(s""" ${DummyReadJournal.Identifier} { class = "${classOf[DummyReadJournalProvider].getCanonicalName}" } @@ -64,9 +65,9 @@ class DummyReadJournalProvider2(sys: ExtendedActorSystem) extends DummyReadJourn class DummyReadJournalProvider3(sys: ExtendedActorSystem, conf: Config) extends DummyReadJournalProvider -class DummyReadJournalProvider4(sys: ExtendedActorSystem, conf: Config, confPath: String) extends DummyReadJournalProvider +class DummyReadJournalProvider4(sys: ExtendedActorSystem, conf: Config, confPath: String) + extends DummyReadJournalProvider class DummyReadJournalProvider5(sys: ExtendedActorSystem) extends DummyReadJournalProvider class CustomDummyReadJournalProvider5(sys: ExtendedActorSystem) extends DummyReadJournalProvider("custom") - diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/PersistenceQuerySpec.scala 
b/akka-persistence-query/src/test/scala/akka/persistence/query/PersistenceQuerySpec.scala index feaf1b2991..021e89fffe 100644 --- a/akka-persistence-query/src/test/scala/akka/persistence/query/PersistenceQuerySpec.scala +++ b/akka-persistence-query/src/test/scala/akka/persistence/query/PersistenceQuerySpec.scala @@ -39,24 +39,30 @@ class PersistenceQuerySpec extends WordSpecLike with Matchers with BeforeAndAfte "be found by full config key" in { withActorSystem() { system => val readJournalPluginConfig: Config = ConfigFactory.parseString(customReadJournalPluginConfig) - PersistenceQuery.get(system).readJournalFor[DummyReadJournal]( - DummyReadJournal.Identifier, readJournalPluginConfig) + PersistenceQuery + .get(system) + .readJournalFor[DummyReadJournal](DummyReadJournal.Identifier, readJournalPluginConfig) // other combinations of constructor parameters - PersistenceQuery.get(system).readJournalFor[DummyReadJournal]( - DummyReadJournal.Identifier + "2", readJournalPluginConfig) - PersistenceQuery.get(system).readJournalFor[DummyReadJournal]( - DummyReadJournal.Identifier + "3", readJournalPluginConfig) - PersistenceQuery.get(system).readJournalFor[DummyReadJournal]( - DummyReadJournal.Identifier + "4", readJournalPluginConfig) + PersistenceQuery + .get(system) + .readJournalFor[DummyReadJournal](DummyReadJournal.Identifier + "2", readJournalPluginConfig) + PersistenceQuery + .get(system) + .readJournalFor[DummyReadJournal](DummyReadJournal.Identifier + "3", readJournalPluginConfig) + PersistenceQuery + .get(system) + .readJournalFor[DummyReadJournal](DummyReadJournal.Identifier + "4", readJournalPluginConfig) // config key existing within both the provided readJournalPluginConfig // and the actorSystem config. The journal must be created from the provided config then. 
- val dummyReadJournal5 = PersistenceQuery.get(system).readJournalFor[DummyReadJournal]( - DummyReadJournal.Identifier + "5", readJournalPluginConfig) + val dummyReadJournal5 = PersistenceQuery + .get(system) + .readJournalFor[DummyReadJournal](DummyReadJournal.Identifier + "5", readJournalPluginConfig) dummyReadJournal5.dummyValue should equal("custom") // config key directly coming from the provided readJournalPluginConfig, // and does not exist within the actorSystem config - PersistenceQuery.get(system).readJournalFor[DummyReadJournal]( - DummyReadJournal.Identifier + "6", readJournalPluginConfig) + PersistenceQuery + .get(system) + .readJournalFor[DummyReadJournal](DummyReadJournal.Identifier + "6", readJournalPluginConfig) } } @@ -81,7 +87,8 @@ class PersistenceQuerySpec extends WordSpecLike with Matchers with BeforeAndAfte .withFallback(ConfigFactory.load()) val sys = ActorSystem(s"sys-${systemCounter.incrementAndGet()}", config) - try block(sys) finally Await.ready(sys.terminate(), 10.seconds) + try block(sys) + finally Await.ready(sys.terminate(), 10.seconds) } } @@ -98,4 +105,3 @@ object ExampleQueryModels { class PrefixStringWithPAdapter extends ReadEventAdapter { override def fromJournal(event: Any, manifest: String) = EventSeq.single("p-" + event) } - diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsSpec.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsSpec.scala index da399105bd..081b56100d 100644 --- a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsSpec.scala +++ b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/AllPersistenceIdsSpec.scala @@ -23,8 +23,7 @@ object AllPersistenceIdsSpec { """ } -class AllPersistenceIdsSpec extends AkkaSpec(AllPersistenceIdsSpec.config) - with Cleanup with ImplicitSender { +class AllPersistenceIdsSpec extends 
AkkaSpec(AllPersistenceIdsSpec.config) with Cleanup with ImplicitSender { implicit val mat = ActorMaterializer()(system) @@ -47,9 +46,7 @@ class AllPersistenceIdsSpec extends AkkaSpec(AllPersistenceIdsSpec.config) val src = queries.currentPersistenceIds() val probe = src.runWith(TestSink.probe[String]) probe.within(10.seconds) { - probe.request(5) - .expectNextUnordered("a", "b", "c") - .expectComplete() + probe.request(5).expectNextUnordered("a", "b", "c").expectComplete() } } @@ -61,8 +58,7 @@ class AllPersistenceIdsSpec extends AkkaSpec(AllPersistenceIdsSpec.config) val src = queries.persistenceIds() val probe = src.runWith(TestSink.probe[String]) probe.within(10.seconds) { - probe.request(5) - .expectNextUnorderedN(List("a", "b", "c", "d")) + probe.request(5).expectNextUnorderedN(List("a", "b", "c", "d")) system.actorOf(TestActor.props("e")) ! "e1" probe.expectNext("e") diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/Cleanup.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/Cleanup.scala index e61d5bbb8f..49d717bd71 100644 --- a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/Cleanup.scala +++ b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/Cleanup.scala @@ -9,10 +9,10 @@ import java.io.File import org.apache.commons.io.FileUtils trait Cleanup { this: AkkaSpec => - val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) + val storageLocations = + List("akka.persistence.journal.leveldb.dir", + "akka.persistence.journal.leveldb-shared.store.dir", + "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) override protected def atStartup(): Unit = { storageLocations.foreach(FileUtils.deleteDirectory) diff --git 
a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdSpec.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdSpec.scala index 0577b8ff3b..1a2ca49c23 100644 --- a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdSpec.scala +++ b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/EventsByPersistenceIdSpec.scala @@ -25,8 +25,7 @@ object EventsByPersistenceIdSpec { """ } -class EventsByPersistenceIdSpec extends AkkaSpec(EventsByPersistenceIdSpec.config) - with Cleanup with ImplicitSender { +class EventsByPersistenceIdSpec extends AkkaSpec(EventsByPersistenceIdSpec.config) with Cleanup with ImplicitSender { implicit val mat = ActorMaterializer()(system) @@ -57,7 +56,9 @@ class EventsByPersistenceIdSpec extends AkkaSpec(EventsByPersistenceIdSpec.confi val ref = setup("a") val src = queries.currentEventsByPersistenceId("a", 0L, Long.MaxValue) - src.map(_.event).runWith(TestSink.probe[Any]) + src + .map(_.event) + .runWith(TestSink.probe[Any]) .request(2) .expectNext("a-1", "a-2") .expectNoMessage(500.millis) @@ -69,28 +70,19 @@ class EventsByPersistenceIdSpec extends AkkaSpec(EventsByPersistenceIdSpec.confi "find existing events up to a sequence number" in { val ref = setup("b") val src = queries.currentEventsByPersistenceId("b", 0L, 2L) - src.map(_.event).runWith(TestSink.probe[Any]) - .request(5) - .expectNext("b-1", "b-2") - .expectComplete() + src.map(_.event).runWith(TestSink.probe[Any]).request(5).expectNext("b-1", "b-2").expectComplete() } "not see new events after demand request" in { val ref = setup("f") val src = queries.currentEventsByPersistenceId("f", 0L, Long.MaxValue) - val probe = src.map(_.event).runWith(TestSink.probe[Any]) - .request(2) - .expectNext("f-1", "f-2") - .expectNoMessage(100.millis) + val probe = + 
src.map(_.event).runWith(TestSink.probe[Any]).request(2).expectNext("f-1", "f-2").expectNoMessage(100.millis) ref ! "f-4" expectMsg("f-4-done") - probe - .expectNoMessage(100.millis) - .request(5) - .expectNext("f-3") - .expectComplete() // f-4 not seen + probe.expectNoMessage(100.millis).request(5).expectNext("f-3").expectComplete() // f-4 not seen } "return empty stream for cleaned journal from 0 to MaxLong" in { @@ -157,9 +149,7 @@ class EventsByPersistenceIdSpec extends AkkaSpec(EventsByPersistenceIdSpec.confi "find new events" in { val ref = setup("c") val src = queries.eventsByPersistenceId("c", 0L, Long.MaxValue) - val probe = src.map(_.event).runWith(TestSink.probe[Any]) - .request(5) - .expectNext("c-1", "c-2", "c-3") + val probe = src.map(_.event).runWith(TestSink.probe[Any]).request(5).expectNext("c-1", "c-2", "c-3") ref ! "c-4" expectMsg("c-4-done") @@ -170,9 +160,7 @@ class EventsByPersistenceIdSpec extends AkkaSpec(EventsByPersistenceIdSpec.confi "find new events up to a sequence number" in { val ref = setup("d") val src = queries.eventsByPersistenceId("d", 0L, 4L) - val probe = src.map(_.event).runWith(TestSink.probe[Any]) - .request(5) - .expectNext("d-1", "d-2", "d-3") + val probe = src.map(_.event).runWith(TestSink.probe[Any]).request(5).expectNext("d-1", "d-2", "d-3") ref ! "d-4" expectMsg("d-4-done") @@ -183,19 +171,13 @@ class EventsByPersistenceIdSpec extends AkkaSpec(EventsByPersistenceIdSpec.confi "find new events after demand request" in { val ref = setup("e") val src = queries.eventsByPersistenceId("e", 0L, Long.MaxValue) - val probe = src.map(_.event).runWith(TestSink.probe[Any]) - .request(2) - .expectNext("e-1", "e-2") - .expectNoMessage(100.millis) + val probe = + src.map(_.event).runWith(TestSink.probe[Any]).request(2).expectNext("e-1", "e-2").expectNoMessage(100.millis) ref ! 
"e-4" expectMsg("e-4-done") - probe - .expectNoMessage(100.millis) - .request(5) - .expectNext("e-3") - .expectNext("e-4") + probe.expectNoMessage(100.millis).request(5).expectNext("e-3").expectNext("e-4") } } diff --git a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/EventsByTagSpec.scala b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/EventsByTagSpec.scala index e8b4555c42..f17e68c361 100644 --- a/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/EventsByTagSpec.scala +++ b/akka-persistence-query/src/test/scala/akka/persistence/query/journal/leveldb/EventsByTagSpec.scala @@ -48,8 +48,7 @@ class ColorTagger extends WriteEventAdapter { override def manifest(event: Any): String = "" } -class EventsByTagSpec extends AkkaSpec(EventsByTagSpec.config) - with Cleanup with ImplicitSender { +class EventsByTagSpec extends AkkaSpec(EventsByTagSpec.config) with Cleanup with ImplicitSender { implicit val mat = ActorMaterializer()(system) @@ -75,7 +74,8 @@ class EventsByTagSpec extends AkkaSpec(EventsByTagSpec.config) expectMsg(s"a green leaf-done") val greenSrc = queries.currentEventsByTag(tag = "green", offset = NoOffset) - greenSrc.runWith(TestSink.probe[Any]) + greenSrc + .runWith(TestSink.probe[Any]) .request(2) .expectNext(EventEnvelope(Sequence(1L), "a", 2L, "a green apple")) .expectNext(EventEnvelope(Sequence(2L), "a", 3L, "a green banana")) @@ -85,7 +85,8 @@ class EventsByTagSpec extends AkkaSpec(EventsByTagSpec.config) .expectComplete() val blackSrc = queries.currentEventsByTag(tag = "black", offset = Sequence(0L)) - blackSrc.runWith(TestSink.probe[Any]) + blackSrc + .runWith(TestSink.probe[Any]) .request(5) .expectNext(EventEnvelope(Sequence(1L), "b", 1L, "a black car")) .expectComplete() @@ -95,7 +96,8 @@ class EventsByTagSpec extends AkkaSpec(EventsByTagSpec.config) val c = system.actorOf(TestActor.props("c")) val greenSrc = queries.currentEventsByTag(tag = "green", offset = 
Sequence(0L)) - val probe = greenSrc.runWith(TestSink.probe[Any]) + val probe = greenSrc + .runWith(TestSink.probe[Any]) .request(2) .expectNext(EventEnvelope(Sequence(1L), "a", 2L, "a green apple")) .expectNext(EventEnvelope(Sequence(2L), "a", 3L, "a green banana")) @@ -113,7 +115,8 @@ class EventsByTagSpec extends AkkaSpec(EventsByTagSpec.config) "find events from offset (exclusive)" in { val greenSrc = queries.currentEventsByTag(tag = "green", offset = Sequence(2L)) - val probe = greenSrc.runWith(TestSink.probe[Any]) + val probe = greenSrc + .runWith(TestSink.probe[Any]) .request(10) // note that banana is not included, since exclusive offset .expectNext(EventEnvelope(Sequence(3L), "b", 2L, "a green leaf")) @@ -127,7 +130,8 @@ class EventsByTagSpec extends AkkaSpec(EventsByTagSpec.config) val d = system.actorOf(TestActor.props("d")) val blackSrc = queries.eventsByTag(tag = "black", offset = NoOffset) - val probe = blackSrc.runWith(TestSink.probe[Any]) + val probe = blackSrc + .runWith(TestSink.probe[Any]) .request(2) .expectNext(EventEnvelope(Sequence(1L), "b", 1L, "a black car")) .expectNoMessage(100.millis) @@ -146,7 +150,8 @@ class EventsByTagSpec extends AkkaSpec(EventsByTagSpec.config) "find events from offset (exclusive)" in { val greenSrc = queries.eventsByTag(tag = "green", offset = Sequence(2L)) - val probe = greenSrc.runWith(TestSink.probe[Any]) + val probe = greenSrc + .runWith(TestSink.probe[Any]) .request(10) // note that banana is not included, since exclusive offset .expectNext(EventEnvelope(Sequence(3L), "b", 2L, "a green leaf")) diff --git a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala index c005351347..c9f91fe10c 100644 --- a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala +++ 
b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/PersistencePluginProxySpec.scala @@ -7,12 +7,12 @@ package akka.persistence.journal.leveldb import akka.actor._ import akka.persistence._ import akka.persistence.journal.PersistencePluginProxy -import akka.testkit.{ TestProbe, AkkaSpec } +import akka.testkit.{ AkkaSpec, TestProbe } import com.typesafe.config.ConfigFactory object PersistencePluginProxySpec { - lazy val config = ConfigFactory.parseString( - """ + lazy val config = + ConfigFactory.parseString(""" akka { actor { provider = remote @@ -42,8 +42,8 @@ object PersistencePluginProxySpec { } """) - lazy val startTargetConfig = ConfigFactory.parseString( - """ + lazy val startTargetConfig = + ConfigFactory.parseString(""" |akka.extensions = ["akka.persistence.journal.PersistencePluginProxyExtension"] |akka.persistence { | journal.proxy.start-target-journal = on @@ -51,12 +51,18 @@ object PersistencePluginProxySpec { |} """.stripMargin) - def targetAddressConfig(system: ActorSystem) = ConfigFactory.parseString( - s""" + def targetAddressConfig(system: ActorSystem) = + ConfigFactory.parseString(s""" |akka.extensions = ["akka.persistence.Persistence"] |akka.persistence.journal.auto-start-journals = [""] - |akka.persistence.journal.proxy.target-journal-address = "${system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress}" - |akka.persistence.snapshot-store.proxy.target-snapshot-store-address = "${system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress}" + |akka.persistence.journal.proxy.target-journal-address = "${system + .asInstanceOf[ExtendedActorSystem] + .provider + .getDefaultAddress}" + |akka.persistence.snapshot-store.proxy.target-snapshot-store-address = "${system + .asInstanceOf[ExtendedActorSystem] + .provider + .getDefaultAddress}" """.stripMargin) class ExamplePersistentActor(probe: ActorRef, name: String) extends NamedPersistentActor(name) { @@ -77,17 +83,19 @@ object PersistencePluginProxySpec { val p = 
context.actorOf(Props(classOf[ExamplePersistentActor], probe, context.system.name)) def receive = { - case m => p forward m + case m => p.forward(m) } } } -class PersistencePluginProxySpec extends AkkaSpec(PersistencePluginProxySpec.startTargetConfig withFallback PersistencePluginProxySpec.config) with Cleanup { +class PersistencePluginProxySpec + extends AkkaSpec(PersistencePluginProxySpec.startTargetConfig.withFallback(PersistencePluginProxySpec.config)) + with Cleanup { import PersistencePluginProxySpec._ val systemA = ActorSystem("SysA", config) - val systemB = ActorSystem("SysB", targetAddressConfig(system) withFallback PersistencePluginProxySpec.config) + val systemB = ActorSystem("SysB", targetAddressConfig(system).withFallback(PersistencePluginProxySpec.config)) override protected def afterTermination(): Unit = { shutdown(systemA) @@ -95,7 +103,7 @@ class PersistencePluginProxySpec extends AkkaSpec(PersistencePluginProxySpec.sta super.afterTermination() } - "A persistence proxy" can { + "A persistence proxy".can { "be shared by multiple actor systems" in { val address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress diff --git a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala index 8c1de5986d..723e0b349d 100644 --- a/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala +++ b/akka-persistence-shared/src/test/scala/akka/persistence/journal/leveldb/SharedLeveldbJournalSpec.scala @@ -6,7 +6,7 @@ package akka.persistence.journal.leveldb import akka.actor._ import akka.persistence._ -import akka.testkit.{ TestProbe, AkkaSpec } +import akka.testkit.{ AkkaSpec, TestProbe } object SharedLeveldbJournalSpec { val config = @@ -45,9 +45,10 @@ object SharedLeveldbJournalSpec { case payload => probe ! 
payload } override def receiveCommand = { - case payload => persist(payload) { _ => - probe ! payload - } + case payload => + persist(payload) { _ => + probe ! payload + } } } @@ -56,7 +57,7 @@ object SharedLeveldbJournalSpec { def receive = { case ActorIdentity(1, Some(store)) => SharedLeveldbJournal.setStore(store, context.system) - case m => p forward m + case m => p.forward(m) } override def preStart(): Unit = @@ -77,7 +78,7 @@ class SharedLeveldbJournalSpec extends AkkaSpec(SharedLeveldbJournalSpec.config) super.afterTermination() } - "A LevelDB store" can { + "A LevelDB store".can { "be shared by multiple actor systems" in { val probeA = new TestProbe(systemA) diff --git a/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala b/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala index 421b57717a..b2256e9b6a 100644 --- a/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala +++ b/akka-persistence-shared/src/test/scala/akka/persistence/serialization/SerializerSpec.scala @@ -13,13 +13,13 @@ import akka.serialization._ import akka.testkit._ import akka.util.ByteString.UTF_8 import com.typesafe.config._ -import org.apache.commons.codec.binary.Hex.{ encodeHex, decodeHex } +import org.apache.commons.codec.binary.Hex.{ decodeHex, encodeHex } import scala.concurrent.Await import scala.concurrent.duration.Duration object SerializerSpecConfigs { - val customSerializers = ConfigFactory.parseString( - """ + val customSerializers = + ConfigFactory.parseString(""" akka.actor { serializers { my-payload = "akka.persistence.serialization.MyPayloadSerializer" @@ -40,8 +40,7 @@ object SerializerSpecConfigs { } """) - val remote = ConfigFactory.parseString( - """ + val remote = ConfigFactory.parseString(""" akka { actor { provider = remote @@ -101,11 +100,11 @@ class SnapshotSerializerPersistenceSpec extends AkkaSpec(customSerializers) { // println(s"encoded snapshot: 
" + String.valueOf(encodeHex(serializer.toBinary(snapshot)))) val oldSnapshot = // 32 bytes per line "a8000000aced00057372002d616b6b612e70657273697374656e63652e736572" + - "69616c697a6174696f6e2e536e617073686f7448656164657200000000000000" + - "0102000249000c73657269616c697a657249644c00086d616e69666573747400" + - "0e4c7363616c612f4f7074696f6e3b7870000000047372000b7363616c612e4e" + - "6f6e6524465024f653ca94ac0200007872000c7363616c612e4f7074696f6ee3" + - "6024a8328a45e90200007870616263" + "69616c697a6174696f6e2e536e617073686f7448656164657200000000000000" + + "0102000249000c73657269616c697a657249644c00086d616e69666573747400" + + "0e4c7363616c612f4f7074696f6e3b7870000000047372000b7363616c612e4e" + + "6f6e6524465024f653ca94ac0200007872000c7363616c612e4f7074696f6ee3" + + "6024a8328a45e90200007870616263" val bytes = decodeHex(oldSnapshot.toCharArray) val cause = intercept[NotSerializableException] { @@ -125,11 +124,11 @@ class SnapshotSerializerPersistenceSpec extends AkkaSpec(customSerializers) { // println(s"encoded snapshot: " + String.valueOf(encodeHex(serializer.toBinary(snapshot)))) val oldSnapshot = // 32 bytes per line "a8000000aced00057372002d616b6b612e70657273697374656e63652e736572" + - "69616c697a6174696f6e2e536e617073686f7448656164657200000000000000" + - "0102000249000c73657269616c697a657249644c00086d616e69666573747400" + - "0e4c7363616c612f4f7074696f6e3b7870000000047372000b7363616c612e4e" + - "6f6e6524465024f653ca94ac0200007872000c7363616c612e4f7074696f6efe" + - "6937fddb0e66740200007870616263" + "69616c697a6174696f6e2e536e617073686f7448656164657200000000000000" + + "0102000249000c73657269616c697a657249644c00086d616e69666573747400" + + "0e4c7363616c612f4f7074696f6e3b7870000000047372000b7363616c612e4e" + + "6f6e6524465024f653ca94ac0200007872000c7363616c612e4f7074696f6efe" + + "6937fddb0e66740200007870616263" val bytes = decodeHex(oldSnapshot.toCharArray) val cause = intercept[NotSerializableException] { @@ -222,12 +221,12 @@ class 
MessageSerializerPersistenceSpec extends AkkaSpec(customSerializers) { // val oldData = "0a3e08c7da04120d4f6c645061796c6f61642841291a2" + - "9616b6b612e70657273697374656e63652e7365726961" + - "6c697a6174696f6e2e4f6c645061796c6f6164100d1a0" + - "2703120015a45616b6b613a2f2f4d6573736167655365" + - "7269616c697a657250657273697374656e63655370656" + - "32f73797374656d2f746573744163746f722d31233133" + - "3137373931343033" + "9616b6b612e70657273697374656e63652e7365726961" + + "6c697a6174696f6e2e4f6c645061796c6f6164100d1a0" + + "2703120015a45616b6b613a2f2f4d6573736167655365" + + "7269616c697a657250657273697374656e63655370656" + + "32f73797374656d2f746573744163746f722d31233133" + + "3137373931343033" // now the system is updated, OldPayload is replaced by MyPayload, and the // OldPayloadSerializer is adjusted to migrate OldPayload @@ -247,12 +246,12 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(customSerializers) { // println(s"encoded persistent: " + String.valueOf(encodeHex(serializer.toBinary(persistent)))) val oldData = "0a3208c3da0412022e611a28616b6b612e70657273697374656e63652e73657269616c697a" + - "6174696f6e2e4d795061796c6f6164100d1a027031200130033a0263313a02633240014a0c" + - "0a02703212026332180e20005244616b6b613a2f2f4d65737361676553657269616c697a65" + - "7250657273697374656e6365537065632f73797374656d2f746573744163746f7232232d34" + - "34373233313933375a44616b6b613a2f2f4d65737361676553657269616c697a6572506572" + - "73697374656e6365537065632f73797374656d2f746573744163746f7232232d3434373233" + - "31393337" + "6174696f6e2e4d795061796c6f6164100d1a027031200130033a0263313a02633240014a0c" + + "0a02703212026332180e20005244616b6b613a2f2f4d65737361676553657269616c697a65" + + "7250657273697374656e6365537065632f73797374656d2f746573744163746f7232232d34" + + "34373233313933375a44616b6b613a2f2f4d65737361676553657269616c697a6572506572" + + "73697374656e6365537065632f73797374656d2f746573744163746f7232232d3434373233" + + "31393337" val bytes = 
decodeHex(oldData.toCharArray) val expected = PersistentRepr(MyPayload(".a."), 13, "p1", "", true, Actor.noSender) @@ -277,10 +276,9 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(customSerializers) { } "handle a few unconfirmed" in { - val unconfirmed = Vector( - UnconfirmedDelivery(deliveryId = 1, destination = testActor.path, "a"), - UnconfirmedDelivery(deliveryId = 2, destination = testActor.path, "b"), - UnconfirmedDelivery(deliveryId = 3, destination = testActor.path, 42)) + val unconfirmed = Vector(UnconfirmedDelivery(deliveryId = 1, destination = testActor.path, "a"), + UnconfirmedDelivery(deliveryId = 2, destination = testActor.path, "b"), + UnconfirmedDelivery(deliveryId = 3, destination = testActor.path, 42)) val snap = AtLeastOnceDeliverySnapshot(17, unconfirmed) val serializer = serialization.findSerializerFor(snap) diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala b/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala index de7964631a..a822ce83e0 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/CapabilityFlags.scala @@ -12,7 +12,9 @@ sealed abstract class CapabilityFlag { .find { el => val clazz = Class.forName(el.getClassName) clazz.getDeclaredMethod(el.getMethodName).getReturnType == classOf[CapabilityFlag] - } map { _.getMethodName } getOrElse "[unknown]" + } + .map { _.getMethodName } + .getOrElse("[unknown]") def name: String = capturedStack def value: Boolean @@ -55,6 +57,7 @@ trait JournalCapabilityFlags extends CapabilityFlags { //#snapshot-store-flags trait SnapshotStoreCapabilityFlags extends CapabilityFlags { + /** * When `true` enables tests which check if the snapshot store properly serialize and * deserialize snapshots. 
diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/PluginSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/PluginSpec.scala index 2216c334dd..efcc0da915 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/PluginSpec.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/PluginSpec.scala @@ -12,7 +12,12 @@ import com.typesafe.config._ import org.scalatest._ import java.util.UUID -abstract class PluginSpec(val config: Config) extends TestKitBase with WordSpecLike with Matchers with BeforeAndAfterAll with BeforeAndAfterEach { +abstract class PluginSpec(val config: Config) + extends TestKitBase + with WordSpecLike + with Matchers + with BeforeAndAfterAll + with BeforeAndAfterEach { private val counter = new AtomicInteger(0) private var _extension: Persistence = _ diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/TestSerializer.scala b/akka-persistence-tck/src/main/scala/akka/persistence/TestSerializer.scala index 1bb2ff2868..80ae0a01df 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/TestSerializer.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/TestSerializer.scala @@ -40,8 +40,7 @@ class TestSerializer(system: ExtendedActorSystem) extends SerializerWithStringMa throw new IllegalStateException("currentTransportInformation was not set") case t => if (t.system ne system) - throw new IllegalStateException( - s"wrong system in currentTransportInformation, ${t.system} != $system") + throw new IllegalStateException(s"wrong system in currentTransportInformation, ${t.system} != $system") if (t.address != system.provider.getDefaultAddress) throw new IllegalStateException( s"wrong address in currentTransportInformation, ${t.address} != ${system.provider.getDefaultAddress}") diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalPerfSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalPerfSpec.scala index 
f58834ad31..30650ae405 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalPerfSpec.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalPerfSpec.scala @@ -13,8 +13,9 @@ import scala.concurrent.duration._ import com.typesafe.config.Config object JournalPerfSpec { - class BenchActor(override val persistenceId: String, replyTo: ActorRef, replyAfter: Int) extends PersistentActor - with ActorLogging { + class BenchActor(override val persistenceId: String, replyTo: ActorRef, replyAfter: Int) + extends PersistentActor + with ActorLogging { var counter = 0 @@ -84,7 +85,9 @@ abstract class JournalPerfSpec(config: Config) extends JournalSpec(config) { system.actorOf(Props(classOf[BenchActor], pid, testProbe.ref, replyAfter)) def feedAndExpectLast(actor: ActorRef, mode: String, cmnds: immutable.Seq[Int]): Unit = { - cmnds foreach { c => actor ! Cmd(mode, c) } + cmnds.foreach { c => + actor ! Cmd(mode, c) + } testProbe.expectMsg(awaitDuration, cmnds.last) } diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala index d6449c68dd..bb9ef95a04 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/journal/JournalSpec.scala @@ -17,8 +17,7 @@ import akka.testkit._ import com.typesafe.config._ object JournalSpec { - val config: Config = ConfigFactory.parseString( - s""" + val config: Config = ConfigFactory.parseString(s""" akka.persistence.publish-plugin-commands = on akka.actor { serializers { @@ -43,8 +42,11 @@ object JournalSpec { * @see [[akka.persistence.journal.JournalPerfSpec]] * @see [[akka.persistence.japi.journal.JavaJournalPerfSpec]] */ -abstract class JournalSpec(config: Config) extends PluginSpec(config) with MayVerb - with OptionalTests with JournalCapabilityFlags { +abstract class JournalSpec(config: 
Config) + extends PluginSpec(config) + with MayVerb + with OptionalTests + with JournalCapabilityFlags { implicit lazy val system: ActorSystem = ActorSystem("JournalSpec", config.withFallback(JournalSpec.config)) @@ -82,9 +84,12 @@ abstract class JournalSpec(config: Config) extends PluginSpec(config) with MayVe def writeMessages(fromSnr: Int, toSnr: Int, pid: String, sender: ActorRef, writerUuid: String): Unit = { - def persistentRepr(sequenceNr: Long) = PersistentRepr( - payload = s"a-$sequenceNr", sequenceNr = sequenceNr, persistenceId = pid, - sender = sender, writerUuid = writerUuid) + def persistentRepr(sequenceNr: Long) = + PersistentRepr(payload = s"a-$sequenceNr", + sequenceNr = sequenceNr, + persistenceId = pid, + sender = sender, + writerUuid = writerUuid) val msgs = if (supportsAtomicPersistAllOfSeveralEvents) { @@ -105,7 +110,7 @@ abstract class JournalSpec(config: Config) extends PluginSpec(config) with MayVe journal ! WriteMessages(msgs, probe.ref, actorInstanceId) probe.expectMsg(WriteMessagesSuccessful) - fromSnr to toSnr foreach { i => + (fromSnr to toSnr).foreach { i => probe.expectMsgPF() { case WriteMessageSuccess(PersistentImpl(payload, `i`, `pid`, _, _, `sender`, `writerUuid`), _) => payload should be(s"a-${i}") @@ -116,42 +121,58 @@ abstract class JournalSpec(config: Config) extends PluginSpec(config) with MayVe "A journal" must { "replay all messages" in { journal ! ReplayMessages(1, Long.MaxValue, Long.MaxValue, pid, receiverProbe.ref) - 1 to 5 foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + (1 to 5).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe.expectMsg(RecoverySuccess(highestSequenceNr = 5L)) } "replay messages using a lower sequence number bound" in { journal ! 
ReplayMessages(3, Long.MaxValue, Long.MaxValue, pid, receiverProbe.ref) - 3 to 5 foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + (3 to 5).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe.expectMsg(RecoverySuccess(highestSequenceNr = 5L)) } "replay messages using an upper sequence number bound" in { journal ! ReplayMessages(1, 3, Long.MaxValue, pid, receiverProbe.ref) - 1 to 3 foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + (1 to 3).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe.expectMsg(RecoverySuccess(highestSequenceNr = 5L)) } "replay messages using a count limit" in { journal ! ReplayMessages(1, Long.MaxValue, 3, pid, receiverProbe.ref) - 1 to 3 foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + (1 to 3).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe.expectMsg(RecoverySuccess(highestSequenceNr = 5L)) } "replay messages using a lower and upper sequence number bound" in { journal ! ReplayMessages(2, 3, Long.MaxValue, pid, receiverProbe.ref) - 2 to 3 foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + (2 to 3).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe.expectMsg(RecoverySuccess(highestSequenceNr = 5L)) } "replay messages using a lower and upper sequence number bound and a count limit" in { journal ! ReplayMessages(2, 5, 2, pid, receiverProbe.ref) - 2 to 3 foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + (2 to 3).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe.expectMsg(RecoverySuccess(highestSequenceNr = 5L)) } "replay a single if lower sequence number bound equals upper sequence number bound" in { journal ! 
ReplayMessages(2, 2, Long.MaxValue, pid, receiverProbe.ref) - 2 to 2 foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + (2 to 2).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe.expectMsg(RecoverySuccess(highestSequenceNr = 5L)) } "replay a single message if count limit equals 1" in { journal ! ReplayMessages(2, 4, 1, pid, receiverProbe.ref) - 2 to 2 foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + (2 to 2).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe.expectMsg(RecoverySuccess(highestSequenceNr = 5L)) } "not replay messages if count limit equals 0" in { @@ -177,27 +198,35 @@ abstract class JournalSpec(config: Config) extends PluginSpec(config) with MayVe receiverProbe2.expectMsg(DeleteMessagesSuccess(cmd.toSequenceNr)) journal ! ReplayMessages(1, Long.MaxValue, Long.MaxValue, pid, receiverProbe.ref) - List(4, 5) foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + List(4, 5).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe2.expectNoMsg(200.millis) } "not reset highestSequenceNr after message deletion" in { journal ! ReplayMessages(0, Long.MaxValue, Long.MaxValue, pid, receiverProbe.ref) - 1 to 5 foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + (1 to 5).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe.expectMsg(RecoverySuccess(highestSequenceNr = 5L)) journal ! DeleteMessagesTo(pid, 3L, receiverProbe.ref) receiverProbe.expectMsg(DeleteMessagesSuccess(3L)) journal ! ReplayMessages(0, Long.MaxValue, Long.MaxValue, pid, receiverProbe.ref) - 4 to 5 foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + (4 to 5).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe.expectMsg(RecoverySuccess(highestSequenceNr = 5L)) } "not reset highestSequenceNr after journal cleanup" in { journal ! 
ReplayMessages(0, Long.MaxValue, Long.MaxValue, pid, receiverProbe.ref) - 1 to 5 foreach { i => receiverProbe.expectMsg(replayedMessage(i)) } + (1 to 5).foreach { i => + receiverProbe.expectMsg(replayedMessage(i)) + } receiverProbe.expectMsg(RecoverySuccess(highestSequenceNr = 5L)) journal ! DeleteMessagesTo(pid, Long.MaxValue, receiverProbe.ref) @@ -208,7 +237,7 @@ abstract class JournalSpec(config: Config) extends PluginSpec(config) with MayVe } } - "A Journal optionally" may { + "A Journal optionally".may { optional(flag = supportsRejectingNonSerializableObjects) { "reject non-serializable events" in EventFilter[java.io.NotSerializableException]().intercept { @@ -218,8 +247,12 @@ abstract class JournalSpec(config: Config) extends PluginSpec(config) with MayVe } val msgs = (6 to 8).map { i => val event = if (i == 7) notSerializableEvent else s"b-$i" - AtomicWrite(PersistentRepr(payload = event, sequenceNr = i, persistenceId = pid, sender = Actor.noSender, - writerUuid = writerUuid)) + AtomicWrite( + PersistentRepr(payload = event, + sequenceNr = i, + persistenceId = pid, + sender = Actor.noSender, + writerUuid = writerUuid)) } val probe = TestProbe() @@ -229,14 +262,16 @@ abstract class JournalSpec(config: Config) extends PluginSpec(config) with MayVe val Pid = pid val WriterUuid = writerUuid probe.expectMsgPF() { - case WriteMessageSuccess(PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid), _) => payload should be(s"b-6") + case WriteMessageSuccess(PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid), _) => + payload should be(s"b-6") } probe.expectMsgPF() { case WriteMessageRejected(PersistentImpl(payload, 7L, Pid, _, _, Actor.noSender, WriterUuid), _, _) => payload should be(notSerializableEvent) } probe.expectMsgPF() { - case WriteMessageSuccess(PersistentImpl(payload, 8L, Pid, _, _, Actor.noSender, WriterUuid), _) => payload should be(s"b-8") + case WriteMessageSuccess(PersistentImpl(payload, 8L, Pid, _, _, Actor.noSender, 
WriterUuid), _) => + payload should be(s"b-8") } } } @@ -246,8 +281,12 @@ abstract class JournalSpec(config: Config) extends PluginSpec(config) with MayVe val probe = TestProbe() val event = TestPayload(probe.ref) val aw = - AtomicWrite(PersistentRepr(payload = event, sequenceNr = 6L, persistenceId = pid, sender = Actor.noSender, - writerUuid = writerUuid)) + AtomicWrite( + PersistentRepr(payload = event, + sequenceNr = 6L, + persistenceId = pid, + sender = Actor.noSender, + writerUuid = writerUuid)) journal ! WriteMessages(List(aw), probe.ref, actorInstanceId) @@ -255,12 +294,14 @@ abstract class JournalSpec(config: Config) extends PluginSpec(config) with MayVe val Pid = pid val WriterUuid = writerUuid probe.expectMsgPF() { - case WriteMessageSuccess(PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid), _) => payload should be(event) + case WriteMessageSuccess(PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid), _) => + payload should be(event) } journal ! ReplayMessages(6, Long.MaxValue, Long.MaxValue, pid, receiverProbe.ref) receiverProbe.expectMsgPF() { - case ReplayedMessage(PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid)) => payload should be(event) + case ReplayedMessage(PersistentImpl(payload, 6L, Pid, _, _, Actor.noSender, WriterUuid)) => + payload should be(event) } receiverProbe.expectMsgPF() { case RecoverySuccess(highestSequenceNr) => highestSequenceNr should be >= 6L diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/scalatest/MayVerb.scala b/akka-persistence-tck/src/main/scala/akka/persistence/scalatest/MayVerb.scala index b47b903e8a..15790a19c3 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/scalatest/MayVerb.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/scalatest/MayVerb.scala @@ -20,7 +20,8 @@ trait MayVerb { def mayVerbStacktraceContextFrames = 3 def optional(whenSkippedMessage: String)(body: => Unit): Unit = - try body catch { + try body + catch { 
case cause: Throwable => val shortTrace = cause.getStackTrace.take(mayVerbStacktraceContextFrames) throw new TestCanceledByFailure(whenSkippedMessage, shortTrace) @@ -58,7 +59,8 @@ trait MayVerb { } object MayVerb { - case class TestCanceledByFailure(msg: String, specialStackTrace: Array[StackTraceElement]) extends TestCanceledException(Some(msg), None, 2) { + case class TestCanceledByFailure(msg: String, specialStackTrace: Array[StackTraceElement]) + extends TestCanceledException(Some(msg), None, 2) { override def getStackTrace = specialStackTrace } } diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/scalatest/OptionalTests.scala b/akka-persistence-tck/src/main/scala/akka/persistence/scalatest/OptionalTests.scala index 102de862e3..5e3e4eba2e 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/scalatest/OptionalTests.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/scalatest/OptionalTests.scala @@ -13,12 +13,15 @@ trait OptionalTests { def optional(flag: CapabilityFlag)(test: => Unit) = { val msg = s"CapabilityFlag `${flag.name}` was turned `" + (if (flag.value) "on" else "off") + - "`. " + (if (!flag.value) "To enable the related tests override it with `CapabilityFlag.on` (or `true` in Scala)." else "") + "`. " + (if (!flag.value) "To enable the related tests override it with `CapabilityFlag.on` (or `true` in Scala)." + else "") info(msg) if (flag.value) - try test catch { + try test + catch { case ex: Exception => - throw new AssertionError("Implementation did not pass this spec. " + + throw new AssertionError( + "Implementation did not pass this spec. 
" + "If your journal will be (by definition) unable to abide the here tested rule, you can disable this test," + s"by overriding [${flag.name}] with CapabilityFlag.off in your test class.") } diff --git a/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala b/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala index be45072ce4..84ae83fb44 100644 --- a/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala +++ b/akka-persistence-tck/src/main/scala/akka/persistence/snapshot/SnapshotStoreSpec.scala @@ -15,8 +15,7 @@ import com.typesafe.config.ConfigFactory import com.typesafe.config.Config object SnapshotStoreSpec { - val config: Config = ConfigFactory.parseString( - s""" + val config: Config = ConfigFactory.parseString(s""" akka.persistence.publish-plugin-commands = on akka.actor { serializers { @@ -40,8 +39,11 @@ object SnapshotStoreSpec { * * @see [[akka.persistence.japi.snapshot.JavaSnapshotStoreSpec]] */ -abstract class SnapshotStoreSpec(config: Config) extends PluginSpec(config) - with MayVerb with OptionalTests with SnapshotStoreCapabilityFlags { +abstract class SnapshotStoreSpec(config: Config) + extends PluginSpec(config) + with MayVerb + with OptionalTests + with SnapshotStoreCapabilityFlags { implicit lazy val system = ActorSystem("SnapshotStoreSpec", config.withFallback(SnapshotStoreSpec.config)) private var senderProbe: TestProbe = _ @@ -59,7 +61,7 @@ abstract class SnapshotStoreSpec(config: Config) extends PluginSpec(config) extension.snapshotStoreFor(null) def writeSnapshots(): Seq[SnapshotMetadata] = { - 1 to 5 map { i => + (1 to 5).map { i => val metadata = SnapshotMetadata(pid, i + 10) snapshotStore.tell(SaveSnapshot(metadata, s"s-${i}"), senderProbe.ref) senderProbe.expectMsgPF() { case SaveSnapshotSuccess(md) => md } @@ -79,7 +81,8 @@ abstract class SnapshotStoreSpec(config: Config) extends PluginSpec(config) 
senderProbe.expectMsg(LoadSnapshotResult(None, Long.MaxValue)) } "not load a snapshot given non-matching timestamp criteria" in { - snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest.copy(maxTimestamp = 100), Long.MaxValue), senderProbe.ref) + snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest.copy(maxTimestamp = 100), Long.MaxValue), + senderProbe.ref) senderProbe.expectMsg(LoadSnapshotResult(None, Long.MaxValue)) } "not load a snapshot given non-matching sequence number criteria" in { @@ -99,9 +102,12 @@ abstract class SnapshotStoreSpec(config: Config) extends PluginSpec(config) senderProbe.expectMsg(LoadSnapshotResult(Some(SelectedSnapshot(metadata(2), s"s-3")), 13)) } "load the most recent snapshot matching upper sequence number and timestamp bounds" in { - snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria(13, metadata(2).timestamp), Long.MaxValue), senderProbe.ref) + snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria(13, metadata(2).timestamp), Long.MaxValue), + senderProbe.ref) senderProbe.expectMsg(LoadSnapshotResult(Some(SelectedSnapshot(metadata(2), s"s-3")), Long.MaxValue)) - snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest.copy(maxTimestamp = metadata(2).timestamp), 13), senderProbe.ref) + snapshotStore.tell( + LoadSnapshot(pid, SnapshotSelectionCriteria.Latest.copy(maxTimestamp = metadata(2).timestamp), 13), + senderProbe.ref) senderProbe.expectMsg(LoadSnapshotResult(Some(SelectedSnapshot(metadata(2), s"s-3")), 13)) } "delete a single snapshot identified by sequenceNr in snapshot metadata" in { @@ -128,9 +134,12 @@ abstract class SnapshotStoreSpec(config: Config) extends PluginSpec(config) sub.expectMsg(cmd) senderProbe.expectMsg(DeleteSnapshotsSuccess(criteria)) - snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria(md.sequenceNr, md.timestamp), Long.MaxValue), senderProbe.ref) + snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria(md.sequenceNr, 
md.timestamp), Long.MaxValue), + senderProbe.ref) senderProbe.expectMsg(LoadSnapshotResult(None, Long.MaxValue)) - snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria(metadata(3).sequenceNr, metadata(3).timestamp), Long.MaxValue), senderProbe.ref) + snapshotStore.tell( + LoadSnapshot(pid, SnapshotSelectionCriteria(metadata(3).sequenceNr, metadata(3).timestamp), Long.MaxValue), + senderProbe.ref) senderProbe.expectMsg(LoadSnapshotResult(Some(SelectedSnapshot(metadata(3), s"s-4")), Long.MaxValue)) } "not delete snapshots with non-matching upper timestamp bounds" in { @@ -144,7 +153,9 @@ abstract class SnapshotStoreSpec(config: Config) extends PluginSpec(config) sub.expectMsg(cmd) senderProbe.expectMsg(DeleteSnapshotsSuccess(criteria)) - snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria(metadata(3).sequenceNr, metadata(3).timestamp), Long.MaxValue), senderProbe.ref) + snapshotStore.tell( + LoadSnapshot(pid, SnapshotSelectionCriteria(metadata(3).sequenceNr, metadata(3).timestamp), Long.MaxValue), + senderProbe.ref) senderProbe.expectMsg(LoadSnapshotResult(Some(SelectedSnapshot(metadata(3), s"s-4")), Long.MaxValue)) } "save and overwrite snapshot with same sequence number" in { @@ -166,7 +177,7 @@ abstract class SnapshotStoreSpec(config: Config) extends PluginSpec(config) } } - "A snapshot store optionally" may { + "A snapshot store optionally".may { optional(flag = supportsSerialization) { "serialize snapshots" in { val probe = TestProbe() diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/PluginCleanup.scala b/akka-persistence-tck/src/test/scala/akka/persistence/PluginCleanup.scala index 1b1ff30cd5..3845ff92c4 100644 --- a/akka-persistence-tck/src/test/scala/akka/persistence/PluginCleanup.scala +++ b/akka-persistence-tck/src/test/scala/akka/persistence/PluginCleanup.scala @@ -9,9 +9,9 @@ import org.apache.commons.io.FileUtils import org.scalatest.BeforeAndAfterAll trait PluginCleanup extends BeforeAndAfterAll { _: PluginSpec => - 
val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) + val storageLocations = + List("akka.persistence.journal.leveldb.dir", "akka.persistence.snapshot-store.local.dir").map(s => + new File(system.settings.config.getString(s))) override def beforeAll(): Unit = { storageLocations.foreach(FileUtils.deleteDirectory) diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala index dce53eb94a..5f75d360e4 100644 --- a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala +++ b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalJavaSpec.scala @@ -7,12 +7,12 @@ package akka.persistence.journal.leveldb import akka.persistence.journal.JournalSpec import akka.persistence.{ PersistenceSpec, PluginCleanup } -class LeveldbJournalJavaSpec extends JournalSpec( - config = PersistenceSpec.config( - "leveldb", - "LeveldbJournalJavaSpec", - extraConfig = Some("akka.persistence.journal.leveldb.native = off"))) - with PluginCleanup { +class LeveldbJournalJavaSpec + extends JournalSpec( + config = PersistenceSpec.config("leveldb", + "LeveldbJournalJavaSpec", + extraConfig = Some("akka.persistence.journal.leveldb.native = off"))) + with PluginCleanup { override def supportsRejectingNonSerializableObjects = true diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativePerfSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativePerfSpec.scala index 9612914b9f..2e186fb9ef 100644 --- a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativePerfSpec.scala +++ 
b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativePerfSpec.scala @@ -9,12 +9,12 @@ import akka.persistence.{ PersistenceSpec, PluginCleanup } import org.scalatest.DoNotDiscover @DoNotDiscover // because only checking that compilation is OK with JournalPerfSpec -class LeveldbJournalNativePerfSpec extends JournalPerfSpec( - config = PersistenceSpec.config( - "leveldb", - "LeveldbJournalNativePerfSpec", - extraConfig = Some("akka.persistence.journal.leveldb.native = on"))) - with PluginCleanup { +class LeveldbJournalNativePerfSpec + extends JournalPerfSpec( + config = PersistenceSpec.config("leveldb", + "LeveldbJournalNativePerfSpec", + extraConfig = Some("akka.persistence.journal.leveldb.native = on"))) + with PluginCleanup { override def supportsRejectingNonSerializableObjects = true diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala index 2e429569eb..e2f0400213 100644 --- a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala +++ b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNativeSpec.scala @@ -7,12 +7,12 @@ package akka.persistence.journal.leveldb import akka.persistence.journal.JournalSpec import akka.persistence.{ PersistenceSpec, PluginCleanup } -class LeveldbJournalNativeSpec extends JournalSpec( - config = PersistenceSpec.config( - "leveldb", - "LeveldbJournalNativeSpec", - extraConfig = Some("akka.persistence.journal.leveldb.native = on"))) - with PluginCleanup { +class LeveldbJournalNativeSpec + extends JournalSpec( + config = PersistenceSpec.config("leveldb", + "LeveldbJournalNativeSpec", + extraConfig = Some("akka.persistence.journal.leveldb.native = on"))) + with PluginCleanup { override def supportsRejectingNonSerializableObjects = true diff --git 
a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala index e737994aff..d65f766e1d 100644 --- a/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala +++ b/akka-persistence-tck/src/test/scala/akka/persistence/journal/leveldb/LeveldbJournalNoAtomicPersistMultipleEventsSpec.scala @@ -7,12 +7,12 @@ package akka.persistence.journal.leveldb import akka.persistence.journal.JournalSpec import akka.persistence.{ PersistenceSpec, PluginCleanup } -class LeveldbJournalNoAtomicPersistMultipleEventsSpec extends JournalSpec( - config = PersistenceSpec.config( - "leveldb", - "LeveldbJournalNoAtomicPersistMultipleEventsSpec", - extraConfig = Some("akka.persistence.journal.leveldb.native = off"))) - with PluginCleanup { +class LeveldbJournalNoAtomicPersistMultipleEventsSpec + extends JournalSpec( + config = PersistenceSpec.config("leveldb", + "LeveldbJournalNoAtomicPersistMultipleEventsSpec", + extraConfig = Some("akka.persistence.journal.leveldb.native = off"))) + with PluginCleanup { /** * Setting to false to test the single message atomic write behavior of JournalSpec @@ -24,4 +24,3 @@ class LeveldbJournalNoAtomicPersistMultipleEventsSpec extends JournalSpec( override def supportsSerialization = true } - diff --git a/akka-persistence-tck/src/test/scala/akka/persistence/snapshot/local/LocalSnapshotStoreSpec.scala b/akka-persistence-tck/src/test/scala/akka/persistence/snapshot/local/LocalSnapshotStoreSpec.scala index 2b500a8b8e..4f6ec8cf70 100644 --- a/akka-persistence-tck/src/test/scala/akka/persistence/snapshot/local/LocalSnapshotStoreSpec.scala +++ b/akka-persistence-tck/src/test/scala/akka/persistence/snapshot/local/LocalSnapshotStoreSpec.scala @@ -9,14 +9,15 @@ import com.typesafe.config.ConfigFactory import 
akka.persistence.PluginCleanup import akka.persistence.snapshot.SnapshotStoreSpec -class LocalSnapshotStoreSpec extends SnapshotStoreSpec( - config = ConfigFactory.parseString( - """ +class LocalSnapshotStoreSpec + extends SnapshotStoreSpec( + config = + ConfigFactory.parseString(""" akka.test.timefactor = 3 akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local" akka.persistence.snapshot-store.local.dir = "target/snapshots" """)) - with PluginCleanup { + with PluginCleanup { override protected def supportsSerialization: CapabilityFlag = CapabilityFlag.on } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventAdapter.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventAdapter.scala index ea76ce8a9b..4551d70eca 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventAdapter.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventAdapter.scala @@ -7,10 +7,12 @@ package akka.persistence.typed import akka.annotation.InternalApi abstract class EventAdapter[E, P] { + /** * Type of the event to persist */ type Per = P + /** * Transform event on the way to the journal */ @@ -39,4 +41,3 @@ abstract class EventAdapter[E, P] { override def toJournal(e: E): Any = e override def fromJournal(p: Any): E = p.asInstanceOf[E] } - diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventRejectedException.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventRejectedException.scala index d66e5bc5e1..b3805d1d57 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventRejectedException.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/EventRejectedException.scala @@ -8,4 +8,4 @@ package akka.persistence.typed * Thrown if a journal rejects an event e.g. due to a serialization error. 
*/ final class EventRejectedException(persistenceId: PersistenceId, sequenceNr: Long, cause: Throwable) - extends RuntimeException(s"Rejected event, persistenceId [${persistenceId.id}], sequenceNr [$sequenceNr]", cause) + extends RuntimeException(s"Rejected event, persistenceId [${persistenceId.id}], sequenceNr [$sequenceNr]", cause) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/SideEffect.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/SideEffect.scala index ba7db97755..0c38c3adad 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/SideEffect.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/SideEffect.scala @@ -26,15 +26,15 @@ private[akka] class Callback[State](val sideEffect: State => Unit) extends SideE /** INTERNAL API */ @InternalApi -final private[akka] class ReplyEffectImpl[ReplyMessage, State](replyTo: ActorRef[ReplyMessage], replyWithMessage: State => ReplyMessage) - extends Callback[State](state => replyTo ! replyWithMessage(state)) { +final private[akka] class ReplyEffectImpl[ReplyMessage, State](replyTo: ActorRef[ReplyMessage], + replyWithMessage: State => ReplyMessage) + extends Callback[State](state => replyTo ! 
replyWithMessage(state)) { override def toString: String = "Reply" } /** INTERNAL API */ @InternalApi -final private[akka] class NoReplyEffectImpl[State] - extends Callback[State](_ => ()) { +final private[akka] class NoReplyEffectImpl[State] extends Callback[State](_ => ()) { override def toString: String = "NoReply" } @@ -47,6 +47,7 @@ private[akka] case object Stop extends SideEffect[Nothing] private[akka] case object UnstashAll extends SideEffect[Nothing] object SideEffect { + /** * Create a ChainedEffect that can be run after Effects */ @@ -68,4 +69,3 @@ object SideEffect { */ def unstashAll[State](): SideEffect[State] = UnstashAll.asInstanceOf[SideEffect[State]] } - diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala index 6e28dea12c..3cc5f56d97 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/BehaviorSetup.scala @@ -23,24 +23,22 @@ import akka.util.OptionVal * INTERNAL API: Carry state for the Persistent behavior implementation behaviors. 
*/ @InternalApi -private[akka] final class BehaviorSetup[C, E, S]( - val context: ActorContext[InternalProtocol], - val persistenceId: PersistenceId, - val emptyState: S, - val commandHandler: EventSourcedBehavior.CommandHandler[C, E, S], - val eventHandler: EventSourcedBehavior.EventHandler[S, E], - val writerIdentity: EventSourcedBehaviorImpl.WriterIdentity, - val recoveryCompleted: S => Unit, - val onRecoveryFailure: Throwable => Unit, - val onSnapshot: (SnapshotMetadata, Try[Done]) => Unit, - val tagger: E => Set[String], - val eventAdapter: EventAdapter[E, _], - val snapshotWhen: (S, E, Long) => Boolean, - val recovery: Recovery, - var holdingRecoveryPermit: Boolean, - val settings: EventSourcedSettings, - val stashState: StashState -) { +private[akka] final class BehaviorSetup[C, E, S](val context: ActorContext[InternalProtocol], + val persistenceId: PersistenceId, + val emptyState: S, + val commandHandler: EventSourcedBehavior.CommandHandler[C, E, S], + val eventHandler: EventSourcedBehavior.EventHandler[S, E], + val writerIdentity: EventSourcedBehaviorImpl.WriterIdentity, + val recoveryCompleted: S => Unit, + val onRecoveryFailure: Throwable => Unit, + val onSnapshot: (SnapshotMetadata, Try[Done]) => Unit, + val tagger: E => Set[String], + val eventAdapter: EventAdapter[E, _], + val snapshotWhen: (S, E, Long) => Boolean, + val recovery: Recovery, + var holdingRecoveryPermit: Boolean, + val settings: EventSourcedSettings, + val stashState: StashState) { import InternalProtocol.RecoveryTickEvent import akka.actor.typed.scaladsl.adapter._ @@ -56,7 +54,7 @@ private[akka] final class BehaviorSetup[C, E, S]( def log: Logger = { _log match { case OptionVal.Some(l) => l - case OptionVal.None => + case OptionVal.None => // lazy init if mdc changed val l = context.log.withMdc(mdc) _log = OptionVal.Some(l) @@ -83,11 +81,13 @@ private[akka] final class BehaviorSetup[C, E, S]( implicit val ec: ExecutionContext = context.executionContext val timer = if (snapshot) - 
context.system.scheduler.scheduleOnce(settings.recoveryEventTimeout, context.self.toUntyped, - RecoveryTickEvent(snapshot = true)) + context.system.scheduler + .scheduleOnce(settings.recoveryEventTimeout, context.self.toUntyped, RecoveryTickEvent(snapshot = true)) else - context.system.scheduler.schedule(settings.recoveryEventTimeout, settings.recoveryEventTimeout, - context.self.toUntyped, RecoveryTickEvent(snapshot = false)) + context.system.scheduler.schedule(settings.recoveryEventTimeout, + settings.recoveryEventTimeout, + context.self.toUntyped, + RecoveryTickEvent(snapshot = false)) recoveryTimer = OptionVal.Some(timer) } @@ -116,9 +116,6 @@ private[akka] object MDC { // format: ON def create(persistenceId: PersistenceId, phaseName: String): Map[String, Any] = { - Map( - "persistenceId" -> persistenceId.id, - "phase" -> phaseName - ) + Map("persistenceId" -> persistenceId.id, "phase" -> phaseName) } } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EffectImpl.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EffectImpl.scala index d31ca101e6..3d7ae90692 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EffectImpl.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EffectImpl.scala @@ -16,7 +16,9 @@ import akka.persistence.typed.scaladsl /** INTERNAL API */ @InternalApi -private[akka] abstract class EffectImpl[+Event, State] extends javadsl.ReplyEffect[Event, State] with scaladsl.ReplyEffect[Event, State] { +private[akka] abstract class EffectImpl[+Event, State] + extends javadsl.ReplyEffect[Event, State] + with scaladsl.ReplyEffect[Event, State] { /* All events that will be persisted in this effect */ override def events: immutable.Seq[Event] = Nil @@ -37,15 +39,16 @@ private[akka] abstract class EffectImpl[+Event, State] extends javadsl.ReplyEffe /** INTERNAL API */ @InternalApi private[akka] object CompositeEffect { - def 
apply[Event, State](effect: scaladsl.Effect[Event, State], sideEffects: SideEffect[State]): CompositeEffect[Event, State] = + def apply[Event, State](effect: scaladsl.Effect[Event, State], + sideEffects: SideEffect[State]): CompositeEffect[Event, State] = CompositeEffect[Event, State](effect, sideEffects :: Nil) } /** INTERNAL API */ @InternalApi -private[akka] final case class CompositeEffect[Event, State]( - persistingEffect: scaladsl.Effect[Event, State], - _sideEffects: immutable.Seq[SideEffect[State]]) extends EffectImpl[Event, State] { +private[akka] final case class CompositeEffect[Event, State](persistingEffect: scaladsl.Effect[Event, State], + _sideEffects: immutable.Seq[SideEffect[State]]) + extends EffectImpl[Event, State] { override val events: immutable.Seq[Event] = persistingEffect.events @@ -65,7 +68,8 @@ private[akka] case class Persist[Event, State](event: Event) extends EffectImpl[ /** INTERNAL API */ @InternalApi -private[akka] case class PersistAll[Event, State](override val events: immutable.Seq[Event]) extends EffectImpl[Event, State] +private[akka] case class PersistAll[Event, State](override val events: immutable.Seq[Event]) + extends EffectImpl[Event, State] /** INTERNAL API */ @InternalApi @@ -74,4 +78,3 @@ private[akka] case object Unhandled extends EffectImpl[Nothing, Nothing] /** INTERNAL API */ @InternalApi private[akka] case object Stash extends EffectImpl[Nothing, Nothing] - diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala index 4925491aeb..076d313880 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedBehaviorImpl.scala @@ -61,24 +61,24 @@ private[akka] object EventSourcedBehaviorImpl { @InternalApi private[akka] final 
case class EventSourcedBehaviorImpl[Command, Event, State]( - persistenceId: PersistenceId, - emptyState: State, - commandHandler: EventSourcedBehavior.CommandHandler[Command, Event, State], - eventHandler: EventSourcedBehavior.EventHandler[State, Event], - loggerClass: Class[_], - journalPluginId: Option[String] = None, - snapshotPluginId: Option[String] = None, - recoveryCompleted: State => Unit = ConstantFun.scalaAnyToUnit, - postStop: () => Unit = ConstantFun.unitToUnit, - preRestart: () => Unit = ConstantFun.unitToUnit, - tagger: Event => Set[String] = (_: Event) => Set.empty[String], - eventAdapter: EventAdapter[Event, Any] = NoOpEventAdapter.instance[Event], - snapshotWhen: (State, Event, Long) => Boolean = ConstantFun.scalaAnyThreeToFalse, - recovery: Recovery = Recovery(), - supervisionStrategy: SupervisorStrategy = SupervisorStrategy.stop, - onSnapshot: (SnapshotMetadata, Try[Done]) => Unit = ConstantFun.scalaAnyTwoToUnit, - onRecoveryFailure: Throwable => Unit = ConstantFun.scalaAnyToUnit -) extends EventSourcedBehavior[Command, Event, State] { + persistenceId: PersistenceId, + emptyState: State, + commandHandler: EventSourcedBehavior.CommandHandler[Command, Event, State], + eventHandler: EventSourcedBehavior.EventHandler[State, Event], + loggerClass: Class[_], + journalPluginId: Option[String] = None, + snapshotPluginId: Option[String] = None, + recoveryCompleted: State => Unit = ConstantFun.scalaAnyToUnit, + postStop: () => Unit = ConstantFun.unitToUnit, + preRestart: () => Unit = ConstantFun.unitToUnit, + tagger: Event => Set[String] = (_: Event) => Set.empty[String], + eventAdapter: EventAdapter[Event, Any] = NoOpEventAdapter.instance[Event], + snapshotWhen: (State, Event, Long) => Boolean = ConstantFun.scalaAnyThreeToFalse, + recovery: Recovery = Recovery(), + supervisionStrategy: SupervisorStrategy = SupervisorStrategy.stop, + onSnapshot: (SnapshotMetadata, Try[Done]) => Unit = ConstantFun.scalaAnyTwoToUnit, + onRecoveryFailure: Throwable => Unit = 
ConstantFun.scalaAnyToUnit) + extends EventSourcedBehavior[Command, Event, State] { import EventSourcedBehaviorImpl.WriterIdentity @@ -90,77 +90,83 @@ private[akka] final case class EventSourcedBehaviorImpl[Command, Event, State]( // stashState outside supervise because StashState should survive restarts due to persist failures val stashState = new StashState(settings) - Behaviors.supervise { - Behaviors.setup[Command] { _ => + Behaviors + .supervise { + Behaviors.setup[Command] { _ => + // the default impl needs context which isn't available until here, so we + // use the anyTwoToUnit as a marker to use the default + val actualOnSnapshot: (SnapshotMetadata, Try[Done]) => Unit = + if (onSnapshot == ConstantFun.scalaAnyTwoToUnit) + EventSourcedBehaviorImpl.defaultOnSnapshot[Command](ctx, _, _) + else onSnapshot - // the default impl needs context which isn't available until here, so we - // use the anyTwoToUnit as a marker to use the default - val actualOnSnapshot: (SnapshotMetadata, Try[Done]) => Unit = - if (onSnapshot == ConstantFun.scalaAnyTwoToUnit) EventSourcedBehaviorImpl.defaultOnSnapshot[Command](ctx, _, _) - else onSnapshot + val eventsourcedSetup = new BehaviorSetup(ctx.asInstanceOf[ActorContext[InternalProtocol]], + persistenceId, + emptyState, + commandHandler, + eventHandler, + WriterIdentity.newIdentity(), + recoveryCompleted, + onRecoveryFailure, + actualOnSnapshot, + tagger, + eventAdapter, + snapshotWhen, + recovery, + holdingRecoveryPermit = false, + settings = settings, + stashState = stashState) - val eventsourcedSetup = new BehaviorSetup( - ctx.asInstanceOf[ActorContext[InternalProtocol]], - persistenceId, - emptyState, - commandHandler, - eventHandler, - WriterIdentity.newIdentity(), - recoveryCompleted, - onRecoveryFailure, - actualOnSnapshot, - tagger, - eventAdapter, - snapshotWhen, - recovery, - holdingRecoveryPermit = false, - settings = settings, - stashState = stashState - ) + // needs to accept Any since we also can get messages from 
the journal + // not part of the protocol + val onStopInterceptor = new BehaviorInterceptor[Any, Any] { - // needs to accept Any since we also can get messages from the journal - // not part of the protocol - val onStopInterceptor = new BehaviorInterceptor[Any, Any] { - - import BehaviorInterceptor._ - def aroundReceive(ctx: typed.TypedActorContext[Any], msg: Any, target: ReceiveTarget[Any]): Behavior[Any] = { - target(ctx, msg) - } - - def aroundSignal(ctx: typed.TypedActorContext[Any], signal: Signal, target: SignalTarget[Any]): Behavior[Any] = { - if (signal == PostStop) { - eventsourcedSetup.cancelRecoveryTimer() - // clear stash to be GC friendly - stashState.clearStashBuffers() - signalPostStop(eventsourcedSetup.log) - } else if (signal == PreRestart) { - signalPreRestart(eventsourcedSetup.log) + import BehaviorInterceptor._ + def aroundReceive(ctx: typed.TypedActorContext[Any], + msg: Any, + target: ReceiveTarget[Any]): Behavior[Any] = { + target(ctx, msg) } - target(ctx, signal) - } - } - val widened = RequestingRecoveryPermit(eventsourcedSetup).widen[Any] { - case res: JournalProtocol.Response => InternalProtocol.JournalResponse(res) - case res: SnapshotProtocol.Response => InternalProtocol.SnapshotterResponse(res) - case RecoveryPermitter.RecoveryPermitGranted => InternalProtocol.RecoveryPermitGranted - case internal: InternalProtocol => internal // such as RecoveryTickEvent - case cmd: Command @unchecked => InternalProtocol.IncomingCommand(cmd) - } - Behaviors.intercept(onStopInterceptor)(widened).narrow[Command] - } - }.onFailure[JournalFailureException](supervisionStrategy) + def aroundSignal(ctx: typed.TypedActorContext[Any], + signal: Signal, + target: SignalTarget[Any]): Behavior[Any] = { + if (signal == PostStop) { + eventsourcedSetup.cancelRecoveryTimer() + // clear stash to be GC friendly + stashState.clearStashBuffers() + signalPostStop(eventsourcedSetup.log) + } else if (signal == PreRestart) { + signalPreRestart(eventsourcedSetup.log) + } + 
target(ctx, signal) + } + } + val widened = RequestingRecoveryPermit(eventsourcedSetup).widen[Any] { + case res: JournalProtocol.Response => InternalProtocol.JournalResponse(res) + case res: SnapshotProtocol.Response => InternalProtocol.SnapshotterResponse(res) + case RecoveryPermitter.RecoveryPermitGranted => InternalProtocol.RecoveryPermitGranted + case internal: InternalProtocol => internal // such as RecoveryTickEvent + case cmd: Command @unchecked => InternalProtocol.IncomingCommand(cmd) + } + Behaviors.intercept(onStopInterceptor)(widened).narrow[Command] + } + + } + .onFailure[JournalFailureException](supervisionStrategy) } def signalPostStop(log: Logger): Unit = { - try postStop() catch { + try postStop() + catch { case NonFatal(e) => log.warning("Exception in postStop: {}", e) } } def signalPreRestart(log: Logger): Unit = { - try preRestart() catch { + try preRestart() + catch { case NonFatal(e) => log.warning("Exception in preRestart: {}", e) } @@ -193,7 +199,8 @@ private[akka] final case class EventSourcedBehaviorImpl[Command, Event, State]( copy(snapshotPluginId = if (id != "") Some(id) else None) } - override def withSnapshotSelectionCriteria(selection: SnapshotSelectionCriteria): EventSourcedBehavior[Command, Event, State] = { + override def withSnapshotSelectionCriteria( + selection: SnapshotSelectionCriteria): EventSourcedBehavior[Command, Event, State] = { copy(recovery = Recovery(selection)) } @@ -203,10 +210,12 @@ private[akka] final case class EventSourcedBehaviorImpl[Command, Event, State]( override def eventAdapter(adapter: EventAdapter[Event, _]): EventSourcedBehavior[Command, Event, State] = copy(eventAdapter = adapter.asInstanceOf[EventAdapter[Event, Any]]) - override def onSnapshot(callback: (SnapshotMetadata, Try[Done]) => Unit): EventSourcedBehavior[Command, Event, State] = + override def onSnapshot( + callback: (SnapshotMetadata, Try[Done]) => Unit): EventSourcedBehavior[Command, Event, State] = copy(onSnapshot = callback) - override 
def onPersistFailure(backoffStrategy: BackoffSupervisorStrategy): EventSourcedBehavior[Command, Event, State] = + override def onPersistFailure( + backoffStrategy: BackoffSupervisorStrategy): EventSourcedBehavior[Command, Event, State] = copy(supervisionStrategy = backoffStrategy) override def onRecoveryFailure(callback: Throwable => Unit): EventSourcedBehavior[Command, Event, State] = diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala index f32a646574..06381f9acb 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/EventSourcedSettings.scala @@ -40,21 +40,18 @@ import com.typesafe.config.Config val recoveryEventTimeout: FiniteDuration = journalConfig.getDuration("recovery-event-timeout", TimeUnit.MILLISECONDS).millis - EventSourcedSettings( - stashCapacity = stashCapacity, - stashOverflowStrategy, - logOnStashing = logOnStashing, - recoveryEventTimeout, - journalPluginId, - snapshotPluginId - ) + EventSourcedSettings(stashCapacity = stashCapacity, + stashOverflowStrategy, + logOnStashing = logOnStashing, + recoveryEventTimeout, + journalPluginId, + snapshotPluginId) } private[akka] final def journalConfigFor(config: Config, journalPluginId: String): Config = { val defaultJournalPluginId = config.getString("akka.persistence.journal.plugin") val configPath = if (journalPluginId == "") defaultJournalPluginId else journalPluginId - config.getConfig(configPath) - .withFallback(config.getConfig(Persistence.JournalFallbackConfigPath)) + config.getConfig(configPath).withFallback(config.getConfig(Persistence.JournalFallbackConfigPath)) } } @@ -63,16 +60,16 @@ import com.typesafe.config.Config * INTERNAL API */ @InternalApi -private[akka] final case class EventSourcedSettings( - 
stashCapacity: Int, - stashOverflowStrategy: StashOverflowStrategy, - logOnStashing: Boolean, - recoveryEventTimeout: FiniteDuration, - journalPluginId: String, - snapshotPluginId: String) { +private[akka] final case class EventSourcedSettings(stashCapacity: Int, + stashOverflowStrategy: StashOverflowStrategy, + logOnStashing: Boolean, + recoveryEventTimeout: FiniteDuration, + journalPluginId: String, + snapshotPluginId: String) { require(journalPluginId != null, "journal plugin id must not be null; use empty string for 'default' journal") - require(snapshotPluginId != null, "snapshot plugin id must not be null; use empty string for 'default' snapshot store") + require(snapshotPluginId != null, + "snapshot plugin id must not be null; use empty string for 'default' snapshot store") } @@ -90,4 +87,3 @@ private[akka] object StashOverflowStrategy { case object Drop extends StashOverflowStrategy case object Fail extends StashOverflowStrategy } - diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalFailureException.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalFailureException.scala index 1794502f61..0227bf6ab4 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalFailureException.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalFailureException.scala @@ -15,5 +15,7 @@ import akka.persistence.typed.PersistenceId @InternalApi final private[akka] class JournalFailureException(msg: String, cause: Throwable) extends RuntimeException(msg, cause) { def this(persistenceId: PersistenceId, sequenceNr: Long, eventType: String, cause: Throwable) = - this(s"Failed to persist event type [$eventType] with sequence number [$sequenceNr] for persistenceId [${persistenceId.id}]", cause) + this( + s"Failed to persist event type [$eventType] with sequence number [$sequenceNr] for persistenceId [${persistenceId.id}]", + cause) } diff --git 
a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalInteractions.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalInteractions.scala index 7abc767f4c..b0ad200b0f 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalInteractions.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/JournalInteractions.scala @@ -28,45 +28,42 @@ private[akka] trait JournalInteractions[C, E, S] { // ---------- journal interactions --------- - protected def internalPersist( - state: Running.RunningState[S], - event: EventOrTagged): Running.RunningState[S] = { + protected def internalPersist(state: Running.RunningState[S], event: EventOrTagged): Running.RunningState[S] = { val newState = state.nextSequenceNr() val senderNotKnownBecauseAkkaTyped = null - val repr = PersistentRepr( - event, - persistenceId = setup.persistenceId.id, - sequenceNr = newState.seqNr, - writerUuid = setup.writerIdentity.writerUuid, - sender = senderNotKnownBecauseAkkaTyped - ) + val repr = PersistentRepr(event, + persistenceId = setup.persistenceId.id, + sequenceNr = newState.seqNr, + writerUuid = setup.writerIdentity.writerUuid, + sender = senderNotKnownBecauseAkkaTyped) val write = AtomicWrite(repr) :: Nil - setup.journal.tell(JournalProtocol.WriteMessages(write, setup.selfUntyped, setup.writerIdentity.instanceId), setup.selfUntyped) + setup.journal + .tell(JournalProtocol.WriteMessages(write, setup.selfUntyped, setup.writerIdentity.instanceId), setup.selfUntyped) newState } - protected def internalPersistAll( - events: immutable.Seq[EventOrTagged], - state: Running.RunningState[S]): Running.RunningState[S] = { + protected def internalPersistAll(events: immutable.Seq[EventOrTagged], + state: Running.RunningState[S]): Running.RunningState[S] = { if (events.nonEmpty) { var newState = state val writes = events.map { event => newState = newState.nextSequenceNr() - PersistentRepr( - event, 
- persistenceId = setup.persistenceId.id, - sequenceNr = newState.seqNr, - writerUuid = setup.writerIdentity.writerUuid, - sender = ActorRef.noSender) + PersistentRepr(event, + persistenceId = setup.persistenceId.id, + sequenceNr = newState.seqNr, + writerUuid = setup.writerIdentity.writerUuid, + sender = ActorRef.noSender) } val write = AtomicWrite(writes) - setup.journal.tell(JournalProtocol.WriteMessages(write :: Nil, setup.selfUntyped, setup.writerIdentity.instanceId), setup.selfUntyped) + setup.journal.tell( + JournalProtocol.WriteMessages(write :: Nil, setup.selfUntyped, setup.writerIdentity.instanceId), + setup.selfUntyped) newState } else state @@ -74,7 +71,11 @@ private[akka] trait JournalInteractions[C, E, S] { protected def replayEvents(fromSeqNr: Long, toSeqNr: Long): Unit = { setup.log.debug("Replaying messages: from: {}, to: {}", fromSeqNr, toSeqNr) - setup.journal ! ReplayMessages(fromSeqNr, toSeqNr, setup.recovery.replayMax, setup.persistenceId.id, setup.selfUntyped) + setup.journal ! 
ReplayMessages(fromSeqNr, + toSeqNr, + setup.recovery.replayMax, + setup.persistenceId.id, + setup.selfUntyped) } protected def requestRecoveryPermit(): Unit = { @@ -82,7 +83,8 @@ private[akka] trait JournalInteractions[C, E, S] { } /** Intended to be used in .onSignal(returnPermitOnStop) by behaviors */ - protected def returnPermitOnStop: PartialFunction[(ActorContext[InternalProtocol], Signal), Behavior[InternalProtocol]] = { + protected def returnPermitOnStop + : PartialFunction[(ActorContext[InternalProtocol], Signal), Behavior[InternalProtocol]] = { case (_, PostStop) => tryReturnRecoveryPermit("PostStop") Behaviors.stopped @@ -114,9 +116,9 @@ private[akka] trait JournalInteractions[C, E, S] { if (state.state == null) throw new IllegalStateException("A snapshot must not be a null state.") else - setup.snapshotStore.tell(SnapshotProtocol.SaveSnapshot( - SnapshotMetadata(setup.persistenceId.id, state.seqNr), - state.state), setup.selfUntyped) + setup.snapshotStore.tell( + SnapshotProtocol.SaveSnapshot(SnapshotMetadata(setup.persistenceId.id, state.seqNr), state.state), + setup.selfUntyped) } } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala index 9924627e6e..e2922140a5 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingEvents.scala @@ -37,18 +37,13 @@ import akka.persistence.typed.internal.Running.WithSeqNrAccessible private[akka] object ReplayingEvents { @InternalApi - private[akka] final case class ReplayingState[State]( - seqNr: Long, - state: State, - eventSeenInInterval: Boolean, - toSeqNr: Long, - receivedPoisonPill: Boolean - ) + private[akka] final case class ReplayingState[State](seqNr: Long, + state: State, + eventSeenInInterval: Boolean, + toSeqNr: Long, + 
receivedPoisonPill: Boolean) - def apply[C, E, S]( - setup: BehaviorSetup[C, E, S], - state: ReplayingState[S] - ): Behavior[InternalProtocol] = + def apply[C, E, S](setup: BehaviorSetup[C, E, S], state: ReplayingState[S]): Behavior[InternalProtocol] = Behaviors.setup { ctx => // protect against event recovery stalling forever because of journal overloaded and such setup.startRecoveryTimer(snapshot = false) @@ -58,10 +53,12 @@ private[akka] object ReplayingEvents { } @InternalApi -private[akka] final class ReplayingEvents[C, E, S]( - override val setup: BehaviorSetup[C, E, S], - var state: ReplayingState[S]) - extends AbstractBehavior[InternalProtocol] with JournalInteractions[C, E, S] with StashManagement[C, E, S] with WithSeqNrAccessible { +private[akka] final class ReplayingEvents[C, E, S](override val setup: BehaviorSetup[C, E, S], + var state: ReplayingState[S]) + extends AbstractBehavior[InternalProtocol] + with JournalInteractions[C, E, S] + with StashManagement[C, E, S] + with WithSeqNrAccessible { import InternalProtocol._ import ReplayingEvents.ReplayingState @@ -84,18 +81,16 @@ private[akka] final class ReplayingEvents[C, E, S]( this } - private def onJournalResponse( - response: JournalProtocol.Response): Behavior[InternalProtocol] = { + private def onJournalResponse(response: JournalProtocol.Response): Behavior[InternalProtocol] = { try { response match { case ReplayedMessage(repr) => val event = setup.eventAdapter.fromJournal(repr.payload.asInstanceOf[setup.eventAdapter.Per]) try { - state = state.copy( - seqNr = repr.sequenceNr, - state = setup.eventHandler(state.state, event), - eventSeenInInterval = true) + state = state.copy(seqNr = repr.sequenceNr, + state = setup.eventHandler(state.state, event), + eventSeenInInterval = true) this } catch { case NonFatal(ex) => onRecoveryFailure(ex, repr.sequenceNr, Some(event)) @@ -122,8 +117,7 @@ private[akka] final class ReplayingEvents[C, E, S]( private def onCommand(cmd: InternalProtocol): 
Behavior[InternalProtocol] = { // during recovery, stash all incoming commands if (state.receivedPoisonPill) { - if (setup.settings.logOnStashing) setup.log.debug( - "Discarding message [{}], because actor is to be stopped", cmd) + if (setup.settings.logOnStashing) setup.log.debug("Discarding message [{}], because actor is to be stopped", cmd) Behaviors.unhandled } else { stashInternal(cmd) @@ -137,7 +131,8 @@ private[akka] final class ReplayingEvents[C, E, S]( state = state.copy(eventSeenInInterval = false) this } else { - val msg = s"Replay timed out, didn't get event within ]${setup.settings.recoveryEventTimeout}], highest sequence number seen [${state.seqNr}]" + val msg = + s"Replay timed out, didn't get event within ]${setup.settings.recoveryEventTimeout}], highest sequence number seen [${state.seqNr}]" onRecoveryFailure(new RecoveryTimedOut(msg), state.seqNr, None) } } else { @@ -146,7 +141,8 @@ private[akka] final class ReplayingEvents[C, E, S]( } def onSnapshotterResponse(response: SnapshotProtocol.Response): Behavior[InternalProtocol] = { - setup.log.warning("Unexpected [{}] from SnapshotStore, already in replaying events state.", Logging.simpleName(response)) + setup.log + .warning("Unexpected [{}] from SnapshotStore, already in replaying events state.", Logging.simpleName(response)) Behaviors.unhandled // ignore the response } @@ -158,7 +154,9 @@ private[akka] final class ReplayingEvents[C, E, S]( * @param cause failure cause. 
* @param message the message that was being processed when the exception was thrown */ - protected def onRecoveryFailure(cause: Throwable, sequenceNr: Long, message: Option[Any]): Behavior[InternalProtocol] = { + protected def onRecoveryFailure(cause: Throwable, + sequenceNr: Long, + message: Option[Any]): Behavior[InternalProtocol] = { try { setup.onRecoveryFailure(cause) } catch { @@ -170,7 +168,7 @@ private[akka] final class ReplayingEvents[C, E, S]( val msg = message match { case Some(evt) => s"Exception during recovery while handling [${evt.getClass.getName}] with sequence number [$sequenceNr]. " + - s"PersistenceId [${setup.persistenceId.id}]" + s"PersistenceId [${setup.persistenceId.id}]" case None => s"Exception during recovery. Last known sequence number [$sequenceNr]. PersistenceId [${setup.persistenceId.id}]" } @@ -178,24 +176,22 @@ private[akka] final class ReplayingEvents[C, E, S]( throw new JournalFailureException(msg, cause) } - protected def onRecoveryCompleted(state: ReplayingState[S]): Behavior[InternalProtocol] = try { - tryReturnRecoveryPermit("replay completed successfully") - setup.recoveryCompleted(state.state) + protected def onRecoveryCompleted(state: ReplayingState[S]): Behavior[InternalProtocol] = + try { + tryReturnRecoveryPermit("replay completed successfully") + setup.recoveryCompleted(state.state) - if (state.receivedPoisonPill && isInternalStashEmpty && !isUnstashAllInProgress) - Behaviors.stopped - else { - val running = Running[C, E, S]( - setup, - Running.RunningState[S](state.seqNr, state.state, state.receivedPoisonPill) - ) + if (state.receivedPoisonPill && isInternalStashEmpty && !isUnstashAllInProgress) + Behaviors.stopped + else { + val running = + Running[C, E, S](setup, Running.RunningState[S](state.seqNr, state.state, state.receivedPoisonPill)) - tryUnstashOne(running) + tryUnstashOne(running) + } + } finally { + setup.cancelRecoveryTimer() } - } finally { - setup.cancelRecoveryTimer() - } override def 
currentSequenceNumber: Long = state.seqNr } - diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingSnapshot.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingSnapshot.scala index d28157e76c..8b6f32e4e3 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingSnapshot.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/ReplayingSnapshot.scala @@ -37,7 +37,8 @@ private[akka] object ReplayingSnapshot { @InternalApi private[akka] class ReplayingSnapshot[C, E, S](override val setup: BehaviorSetup[C, E, S]) - extends JournalInteractions[C, E, S] with StashManagement[C, E, S] { + extends JournalInteractions[C, E, S] + with StashManagement[C, E, S] { import InternalProtocol._ @@ -48,17 +49,19 @@ private[akka] class ReplayingSnapshot[C, E, S](override val setup: BehaviorSetup loadSnapshot(setup.recovery.fromSnapshot, setup.recovery.toSequenceNr) def stay(receivedPoisonPill: Boolean): Behavior[InternalProtocol] = { - Behaviors.receiveMessage[InternalProtocol] { - case SnapshotterResponse(r) => onSnapshotterResponse(r, receivedPoisonPill) - case JournalResponse(r) => onJournalResponse(r) - case RecoveryTickEvent(snapshot) => onRecoveryTick(snapshot) - case cmd: IncomingCommand[C] => - if (receivedPoisonPill) Behaviors.unhandled - else onCommand(cmd) - case RecoveryPermitGranted => Behaviors.unhandled // should not happen, we already have the permit - }.receiveSignal(returnPermitOnStop.orElse { - case (_, PoisonPill) => stay(receivedPoisonPill = true) - }) + Behaviors + .receiveMessage[InternalProtocol] { + case SnapshotterResponse(r) => onSnapshotterResponse(r, receivedPoisonPill) + case JournalResponse(r) => onJournalResponse(r) + case RecoveryTickEvent(snapshot) => onRecoveryTick(snapshot) + case cmd: IncomingCommand[C] => + if (receivedPoisonPill) Behaviors.unhandled + else onCommand(cmd) + case RecoveryPermitGranted => 
Behaviors.unhandled // should not happen, we already have the permit + } + .receiveSignal(returnPermitOnStop.orElse { + case (_, PoisonPill) => stay(receivedPoisonPill = true) + }) } stay(receivedPoisonPillInPreviousPhase) } @@ -87,7 +90,8 @@ private[akka] class ReplayingSnapshot[C, E, S](override val setup: BehaviorSetup private def onRecoveryTick(snapshot: Boolean): Behavior[InternalProtocol] = if (snapshot) { // we know we're in snapshotting mode; snapshot recovery timeout arrived - val ex = new RecoveryTimedOut(s"Recovery timed out, didn't get snapshot within ${setup.settings.recoveryEventTimeout}") + val ex = new RecoveryTimedOut( + s"Recovery timed out, didn't get snapshot within ${setup.settings.recoveryEventTimeout}") onRecoveryFailure(ex, None) } else Behaviors.same // ignore, since we received the snapshot already @@ -98,11 +102,13 @@ private[akka] class ReplayingSnapshot[C, E, S](override val setup: BehaviorSetup } def onJournalResponse(response: JournalProtocol.Response): Behavior[InternalProtocol] = { - setup.log.debug("Unexpected response from journal: [{}], may be due to an actor restart, ignoring...", response.getClass.getName) + setup.log.debug("Unexpected response from journal: [{}], may be due to an actor restart, ignoring...", + response.getClass.getName) Behaviors.unhandled } - def onSnapshotterResponse(response: SnapshotProtocol.Response, receivedPoisonPill: Boolean): Behavior[InternalProtocol] = { + def onSnapshotterResponse(response: SnapshotProtocol.Response, + receivedPoisonPill: Boolean): Behavior[InternalProtocol] = { response match { case LoadSnapshotResult(sso, toSnr) => var state: S = setup.emptyState @@ -124,13 +130,15 @@ private[akka] class ReplayingSnapshot[C, E, S](override val setup: BehaviorSetup } } - private def becomeReplayingEvents(state: S, lastSequenceNr: Long, toSnr: Long, receivedPoisonPill: Boolean): Behavior[InternalProtocol] = { + private def becomeReplayingEvents(state: S, + lastSequenceNr: Long, + toSnr: Long, + 
receivedPoisonPill: Boolean): Behavior[InternalProtocol] = { setup.cancelRecoveryTimer() ReplayingEvents[C, E, S]( setup, - ReplayingEvents.ReplayingState(lastSequenceNr, state, eventSeenInInterval = false, toSnr, receivedPoisonPill) - ) + ReplayingEvents.ReplayingState(lastSequenceNr, state, eventSeenInInterval = false, toSnr, receivedPoisonPill)) } } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RequestingRecoveryPermit.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RequestingRecoveryPermit.scala index 9b5eb25302..f02335cd8f 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RequestingRecoveryPermit.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/RequestingRecoveryPermit.scala @@ -29,33 +29,36 @@ private[akka] object RequestingRecoveryPermit { @InternalApi private[akka] class RequestingRecoveryPermit[C, E, S](override val setup: BehaviorSetup[C, E, S]) - extends StashManagement[C, E, S] with JournalInteractions[C, E, S] { + extends StashManagement[C, E, S] + with JournalInteractions[C, E, S] { def createBehavior(): Behavior[InternalProtocol] = { // request a permit, as only once we obtain one we can start replaying requestRecoveryPermit() def stay(receivedPoisonPill: Boolean): Behavior[InternalProtocol] = { - Behaviors.receiveMessage[InternalProtocol] { - case InternalProtocol.RecoveryPermitGranted => - becomeReplaying(receivedPoisonPill) + Behaviors + .receiveMessage[InternalProtocol] { + case InternalProtocol.RecoveryPermitGranted => + becomeReplaying(receivedPoisonPill) - case _ if receivedPoisonPill => - Behaviors.unhandled - - case other => - if (receivedPoisonPill) { - if (setup.settings.logOnStashing) setup.log.debug( - "Discarding message [{}], because actor is to be stopped", other) + case _ if receivedPoisonPill => Behaviors.unhandled - } else { - stashInternal(other) - Behaviors.same - } - }.receiveSignal { - case (_, 
PoisonPill) => stay(receivedPoisonPill = true) - } + case other => + if (receivedPoisonPill) { + if (setup.settings.logOnStashing) + setup.log.debug("Discarding message [{}], because actor is to be stopped", other) + Behaviors.unhandled + } else { + stashInternal(other) + Behaviors.same + } + + } + .receiveSignal { + case (_, PoisonPill) => stay(receivedPoisonPill = true) + } } stay(receivedPoisonPill = false) } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala index b931f3be57..e6bc6fcb87 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/Running.scala @@ -54,11 +54,7 @@ private[akka] object Running { def currentSequenceNumber: Long } - final case class RunningState[State]( - seqNr: Long, - state: State, - receivedPoisonPill: Boolean - ) { + final case class RunningState[State](seqNr: Long, state: State, receivedPoisonPill: Boolean) { def nextSequenceNr(): RunningState[State] = copy(seqNr = seqNr + 1) @@ -81,9 +77,9 @@ private[akka] object Running { // =============================================== /** INTERNAL API */ -@InternalApi private[akka] final class Running[C, E, S]( - override val setup: BehaviorSetup[C, E, S]) - extends JournalInteractions[C, E, S] with StashManagement[C, E, S] { +@InternalApi private[akka] final class Running[C, E, S](override val setup: BehaviorSetup[C, E, S]) + extends JournalInteractions[C, E, S] + with StashManagement[C, E, S] { import InternalProtocol._ import Running.RunningState @@ -91,7 +87,9 @@ private[akka] object Running { private val persistingEventsMdc = MDC.create(setup.persistenceId, MDC.PersistingEvents) private val storingSnapshotMdc = MDC.create(setup.persistenceId, MDC.StoringSnapshot) - final class HandlingCommands(state: RunningState[S]) extends 
AbstractBehavior[InternalProtocol] with WithSeqNrAccessible { + final class HandlingCommands(state: RunningState[S]) + extends AbstractBehavior[InternalProtocol] + with WithSeqNrAccessible { def onMessage(msg: InternalProtocol): Behavior[InternalProtocol] = msg match { case IncomingCommand(c: C @unchecked) => onCommand(state, c) @@ -112,16 +110,15 @@ private[akka] object Running { applyEffects(cmd, state, effect.asInstanceOf[EffectImpl[E, S]]) // TODO can we avoid the cast? } - @tailrec def applyEffects( - msg: Any, - state: RunningState[S], - effect: Effect[E, S], - sideEffects: immutable.Seq[SideEffect[S]] = Nil - ): Behavior[InternalProtocol] = { + @tailrec def applyEffects(msg: Any, + state: RunningState[S], + effect: Effect[E, S], + sideEffects: immutable.Seq[SideEffect[S]] = Nil): Behavior[InternalProtocol] = { if (setup.log.isDebugEnabled && !effect.isInstanceOf[CompositeEffect[_, _]]) - setup.log.debug( - s"Handled command [{}], resulting effect: [{}], side effects: [{}]", - msg.getClass.getName, effect, sideEffects.size) + setup.log.debug(s"Handled command [{}], resulting effect: [{}], side effects: [{}]", + msg.getClass.getName, + effect, + sideEffects.size) effect match { case CompositeEffect(eff, currentSideEffects) => @@ -171,8 +168,8 @@ private[akka] object Running { case _: Unhandled.type => import akka.actor.typed.scaladsl.adapter._ - setup.context.system.toUntyped.eventStream.publish( - UnhandledMessage(msg, setup.context.system.toUntyped.deadLetters, setup.context.self.toUntyped)) + setup.context.system.toUntyped.eventStream + .publish(UnhandledMessage(msg, setup.context.system.toUntyped.deadLetters, setup.context.self.toUntyped)) tryUnstashOne(applySideEffects(sideEffects, state)) case _: Stash.type => @@ -197,23 +194,21 @@ private[akka] object Running { // =============================================== - def persistingEvents( - state: RunningState[S], - numberOfEvents: Int, - shouldSnapshotAfterPersist: Boolean, - sideEffects: 
immutable.Seq[SideEffect[S]] - ): Behavior[InternalProtocol] = { + def persistingEvents(state: RunningState[S], + numberOfEvents: Int, + shouldSnapshotAfterPersist: Boolean, + sideEffects: immutable.Seq[SideEffect[S]]): Behavior[InternalProtocol] = { setup.setMdc(persistingEventsMdc) new PersistingEvents(state, numberOfEvents, shouldSnapshotAfterPersist, sideEffects) } /** INTERNAL API */ - @InternalApi private[akka] class PersistingEvents( - var state: RunningState[S], - numberOfEvents: Int, - shouldSnapshotAfterPersist: Boolean, - var sideEffects: immutable.Seq[SideEffect[S]]) - extends AbstractBehavior[InternalProtocol] with WithSeqNrAccessible { + @InternalApi private[akka] class PersistingEvents(var state: RunningState[S], + numberOfEvents: Int, + shouldSnapshotAfterPersist: Boolean, + var sideEffects: immutable.Seq[SideEffect[S]]) + extends AbstractBehavior[InternalProtocol] + with WithSeqNrAccessible { private var eventCounter = 0 @@ -231,8 +226,8 @@ private[akka] object Running { def onCommand(cmd: IncomingCommand[C]): Behavior[InternalProtocol] = { if (state.receivedPoisonPill) { - if (setup.settings.logOnStashing) setup.log.debug( - "Discarding message [{}], because actor is to be stopped", cmd) + if (setup.settings.logOnStashing) + setup.log.debug("Discarding message [{}], because actor is to be stopped", cmd) Behaviors.unhandled } else { stashInternal(cmd) @@ -240,8 +235,7 @@ private[akka] object Running { } } - final def onJournalResponse( - response: Response): Behavior[InternalProtocol] = { + final def onJournalResponse(response: Response): Behavior[InternalProtocol] = { setup.log.debug("Received Journal response: {}", response) def onWriteResponse(p: PersistentRepr): Behavior[InternalProtocol] = { @@ -301,16 +295,13 @@ private[akka] object Running { // =============================================== - def storingSnapshot( - state: RunningState[S], - sideEffects: immutable.Seq[SideEffect[S]] - ): Behavior[InternalProtocol] = { + def 
storingSnapshot(state: RunningState[S], sideEffects: immutable.Seq[SideEffect[S]]): Behavior[InternalProtocol] = { setup.setMdc(storingSnapshotMdc) def onCommand(cmd: IncomingCommand[C]): Behavior[InternalProtocol] = { if (state.receivedPoisonPill) { - if (setup.settings.logOnStashing) setup.log.debug( - "Discarding message [{}], because actor is to be stopped", cmd) + if (setup.settings.logOnStashing) + setup.log.debug("Discarding message [{}], because actor is to be stopped", cmd) Behaviors.unhandled } else { stashUser(cmd) @@ -332,23 +323,25 @@ private[akka] object Running { case DeleteSnapshotsSuccess(_) => ??? // ignore LoadSnapshot messages - case _ => + case _ => } } - Behaviors.receiveMessage[InternalProtocol] { - case cmd: IncomingCommand[C] @unchecked => - onCommand(cmd) - case SnapshotterResponse(r) => - onSnapshotterResponse(r) - tryUnstashOne(applySideEffects(sideEffects, state)) - case _ => - Behaviors.unhandled - }.receiveSignal { - case (_, PoisonPill) => - // wait for snapshot response before stopping - storingSnapshot(state.copy(receivedPoisonPill = true), sideEffects) - } + Behaviors + .receiveMessage[InternalProtocol] { + case cmd: IncomingCommand[C] @unchecked => + onCommand(cmd) + case SnapshotterResponse(r) => + onSnapshotterResponse(r) + tryUnstashOne(applySideEffects(sideEffects, state)) + case _ => + Behaviors.unhandled + } + .receiveSignal { + case (_, PoisonPill) => + // wait for snapshot response before stopping + storingSnapshot(state.copy(receivedPoisonPill = true), sideEffects) + } } @@ -371,10 +364,9 @@ private[akka] object Running { behavior } - def applySideEffect( - effect: SideEffect[S], - state: RunningState[S], - behavior: Behavior[InternalProtocol]): Behavior[InternalProtocol] = { + def applySideEffect(effect: SideEffect[S], + state: RunningState[S], + behavior: Behavior[InternalProtocol]): Behavior[InternalProtocol] = { effect match { case _: Stop.type @unchecked => Behaviors.stopped @@ -393,4 +385,3 @@ private[akka] object 
Running { } } - diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala index d0a9f5a096..9f5fca48e1 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/internal/StashManagement.scala @@ -40,7 +40,8 @@ private[akka] trait StashManagement[C, E, S] { private def stash(msg: InternalProtocol, buffer: StashBuffer[InternalProtocol]): Unit = { logStashMessage(msg, buffer) - try buffer.stash(msg) catch { + try buffer.stash(msg) + catch { case e: StashOverflowException => setup.settings.stashOverflowStrategy match { case StashOverflowStrategy.Drop => @@ -95,21 +96,24 @@ private[akka] trait StashManagement[C, E, S] { stashState.isUnstashAllInProgress private def logStashMessage(msg: InternalProtocol, buffer: StashBuffer[InternalProtocol]): Unit = { - if (setup.settings.logOnStashing) setup.log.debug( - "Stashing message to {} stash: [{}] ", - if (buffer eq stashState.internalStashBuffer) "internal" else "user", msg) + if (setup.settings.logOnStashing) + setup.log.debug("Stashing message to {} stash: [{}] ", + if (buffer eq stashState.internalStashBuffer) "internal" else "user", + msg) } private def logUnstashMessage(buffer: StashBuffer[InternalProtocol]): Unit = { - if (setup.settings.logOnStashing) setup.log.debug( - "Unstashing message from {} stash: [{}]", - if (buffer eq stashState.internalStashBuffer) "internal" else "user", buffer.head) + if (setup.settings.logOnStashing) + setup.log.debug("Unstashing message from {} stash: [{}]", + if (buffer eq stashState.internalStashBuffer) "internal" else "user", + buffer.head) } private def logUnstashAll(): Unit = { - if (setup.settings.logOnStashing) setup.log.debug( - "Unstashing all [{}] messages from user stash, first is: [{}]", - stashState.userStashBuffer.size, 
stashState.userStashBuffer.head) + if (setup.settings.logOnStashing) + setup.log.debug("Unstashing all [{}] messages from user stash, first is: [{}]", + stashState.userStashBuffer.size, + stashState.userStashBuffer.head) } } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala index 853e7c62c9..20eb5475ef 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandler.scala @@ -62,7 +62,8 @@ final class CommandHandlerBuilder[Command, Event, State]() { * * @return A new, mutable, CommandHandlerBuilderByState */ - def forState[S <: State](stateClass: Class[S], statePredicate: Predicate[S]): CommandHandlerBuilderByState[Command, Event, S, State] = { + def forState[S <: State](stateClass: Class[S], + statePredicate: Predicate[S]): CommandHandlerBuilderByState[Command, Event, S, State] = { val builder = new CommandHandlerBuilderByState[Command, Event, S, State](stateClass, statePredicate) builders = builder.asInstanceOf[CommandHandlerBuilderByState[Command, Event, State, State]] :: builders builder @@ -140,9 +141,10 @@ final class CommandHandlerBuilder[Command, Event, State]() { val combined = builders.reverse match { case head :: Nil => head - case head :: tail => tail.foldLeft(head) { (acc, builder) => - acc.orElse(builder) - } + case head :: tail => + tail.foldLeft(head) { (acc, builder) => + acc.orElse(builder) + } case Nil => throw new IllegalStateException("No matchers defined") } @@ -163,7 +165,8 @@ object CommandHandlerBuilderByState { * @param stateClass The handlers defined by this builder are used when the state is an instance of the `stateClass` * @return A new, mutable, CommandHandlerBuilderByState */ - def builder[Command, Event, S <: State, State](stateClass: Class[S]): 
CommandHandlerBuilderByState[Command, Event, S, State] = + def builder[Command, Event, S <: State, State]( + stateClass: Class[S]): CommandHandlerBuilderByState[Command, Event, S, State] = new CommandHandlerBuilderByState(stateClass, statePredicate = trueStatePredicate) /** @@ -171,32 +174,37 @@ object CommandHandlerBuilderByState { * useful for example when state type is an Optional * @return A new, mutable, CommandHandlerBuilderByState */ - def builder[Command, Event, State](statePredicate: Predicate[State]): CommandHandlerBuilderByState[Command, Event, State, State] = + def builder[Command, Event, State]( + statePredicate: Predicate[State]): CommandHandlerBuilderByState[Command, Event, State, State] = new CommandHandlerBuilderByState(classOf[Any].asInstanceOf[Class[State]], statePredicate) /** * INTERNAL API */ @InternalApi private final case class CommandHandlerCase[Command, Event, State]( - commandPredicate: Command => Boolean, - statePredicate: State => Boolean, - handler: BiFunction[State, Command, Effect[Event, State]]) + commandPredicate: Command => Boolean, + statePredicate: State => Boolean, + handler: BiFunction[State, Command, Effect[Event, State]]) } final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @InternalApi private[akka] ( - private val stateClass: Class[S], private val statePredicate: Predicate[S]) { + private val stateClass: Class[S], + private val statePredicate: Predicate[S]) { import CommandHandlerBuilderByState.CommandHandlerCase private var cases: List[CommandHandlerCase[Command, Event, State]] = Nil private def addCase(predicate: Command => Boolean, handler: BiFunction[S, Command, Effect[Event, State]]): Unit = { - cases = CommandHandlerCase[Command, Event, State]( - commandPredicate = predicate, - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S]) - else statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - 
handler.asInstanceOf[BiFunction[State, Command, Effect[Event, State]]]) :: cases + cases = CommandHandlerCase[Command, Event, State](commandPredicate = predicate, + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else + statePredicate.test(state.asInstanceOf[S]) && stateClass + .isAssignableFrom(state.getClass), + handler.asInstanceOf[BiFunction[State, + Command, + Effect[Event, State]]]) :: cases } /** @@ -206,7 +214,9 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your command handlers. */ - def onCommand(predicate: Predicate[Command], handler: BiFunction[S, Command, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { + def onCommand( + predicate: Predicate[Command], + handler: BiFunction[S, Command, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { addCase(cmd => predicate.test(cmd), handler) this } @@ -221,7 +231,9 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your command handlers. 
*/ - def onCommand(predicate: Predicate[Command], handler: JFunction[Command, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { + def onCommand( + predicate: Predicate[Command], + handler: JFunction[Command, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { addCase(cmd => predicate.test(cmd), new BiFunction[S, Command, Effect[Event, State]] { override def apply(state: S, cmd: Command): Effect[Event, State] = handler(cmd) }) @@ -235,8 +247,11 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your command handlers. */ - def onCommand[C <: Command](commandClass: Class[C], handler: BiFunction[S, C, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { - addCase(cmd => commandClass.isAssignableFrom(cmd.getClass), handler.asInstanceOf[BiFunction[S, Command, Effect[Event, State]]]) + def onCommand[C <: Command]( + commandClass: Class[C], + handler: BiFunction[S, C, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { + addCase(cmd => commandClass.isAssignableFrom(cmd.getClass), + handler.asInstanceOf[BiFunction[S, Command, Effect[Event, State]]]) this } @@ -250,7 +265,9 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your command handlers. 
*/ - def onCommand[C <: Command](commandClass: Class[C], handler: JFunction[C, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { + def onCommand[C <: Command]( + commandClass: Class[C], + handler: JFunction[C, Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { onCommand[C](commandClass, new BiFunction[S, C, Effect[Event, State]] { override def apply(state: S, cmd: C): Effect[Event, State] = handler(cmd) }) @@ -265,7 +282,9 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your command handlers. */ - def onCommand[C <: Command](commandClass: Class[C], handler: Supplier[Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { + def onCommand[C <: Command]( + commandClass: Class[C], + handler: Supplier[Effect[Event, State]]): CommandHandlerBuilderByState[Command, Event, S, State] = { onCommand[C](commandClass, new BiFunction[S, C, Effect[Event, State]] { override def apply(state: S, cmd: C): Effect[Event, State] = handler.get() }) @@ -314,6 +333,7 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int }) build() } + /** * Matches any command. * @@ -342,7 +362,8 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int * Compose this builder with another builder. The handlers in this builder will be tried first followed * by the handlers in `other`. 
*/ - def orElse[S2 <: State](other: CommandHandlerBuilderByState[Command, Event, S2, State]): CommandHandlerBuilderByState[Command, Event, S2, State] = { + def orElse[S2 <: State](other: CommandHandlerBuilderByState[Command, Event, S2, State]) + : CommandHandlerBuilderByState[Command, Event, S2, State] = { val newBuilder = new CommandHandlerBuilderByState[Command, Event, S2, State](other.stateClass, other.statePredicate) // problem with overloaded constructor with `cases` as parameter newBuilder.cases = other.cases ::: cases @@ -371,7 +392,8 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int } effect match { - case OptionVal.None => throw new MatchError(s"No match found for command of type [${command.getClass.getName}]") + case OptionVal.None => + throw new MatchError(s"No match found for command of type [${command.getClass.getName}]") case OptionVal.Some(e) => e.asInstanceOf[EffectImpl[Event, State]] } } @@ -379,4 +401,3 @@ final class CommandHandlerBuilderByState[Command, Event, S <: State, State] @Int } } - diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala index 8cb7f44b96..14f8cb0fdb 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/CommandHandlerWithReply.scala @@ -51,7 +51,8 @@ final class CommandHandlerWithReplyBuilder[Command, Event, State]() { * * @return A new, mutable, CommandHandlerWithReplyBuilderByState */ - def forState(statePredicate: Predicate[State]): CommandHandlerWithReplyBuilderByState[Command, Event, State, State] = { + def forState( + statePredicate: Predicate[State]): CommandHandlerWithReplyBuilderByState[Command, Event, State, State] = { val builder = CommandHandlerWithReplyBuilderByState.builder[Command, Event, 
State](statePredicate) builders = builder :: builders builder @@ -70,7 +71,9 @@ final class CommandHandlerWithReplyBuilder[Command, Event, State]() { * * @return A new, mutable, CommandHandlerWithReplyBuilderByState */ - def forState[S <: State](stateClass: Class[S], statePredicate: Predicate[S]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { + def forState[S <: State]( + stateClass: Class[S], + statePredicate: Predicate[S]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { val builder = new CommandHandlerWithReplyBuilderByState[Command, Event, S, State](stateClass, statePredicate) builders = builder.asInstanceOf[CommandHandlerWithReplyBuilderByState[Command, Event, State, State]] :: builders builder @@ -86,7 +89,8 @@ final class CommandHandlerWithReplyBuilder[Command, Event, State]() { * @param stateClass The handlers defined by this builder are used when the state is an instance of the `stateClass`. * @return A new, mutable, CommandHandlerWithReplyBuilderByState */ - def forStateType[S <: State](stateClass: Class[S]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { + def forStateType[S <: State]( + stateClass: Class[S]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { val builder = CommandHandlerWithReplyBuilderByState.builder[Command, Event, S, State](stateClass) builders = builder.asInstanceOf[CommandHandlerWithReplyBuilderByState[Command, Event, State, State]] :: builders builder @@ -148,9 +152,10 @@ final class CommandHandlerWithReplyBuilder[Command, Event, State]() { val combined = builders.reverse match { case head :: Nil => head - case head :: tail => tail.foldLeft(head) { (acc, builder) => - acc.orElse(builder) - } + case head :: tail => + tail.foldLeft(head) { (acc, builder) => + acc.orElse(builder) + } case Nil => throw new IllegalStateException("No matchers defined") } @@ -171,7 +176,8 @@ object CommandHandlerWithReplyBuilderByState { * @param stateClass The handlers 
defined by this builder are used when the state is an instance of the `stateClass` * @return A new, mutable, CommandHandlerWithReplyBuilderByState */ - def builder[Command, Event, S <: State, State](stateClass: Class[S]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = + def builder[Command, Event, S <: State, State]( + stateClass: Class[S]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = new CommandHandlerWithReplyBuilderByState(stateClass, statePredicate = trueStatePredicate) /** @@ -179,32 +185,39 @@ object CommandHandlerWithReplyBuilderByState { * useful for example when state type is an Optional * @return A new, mutable, CommandHandlerWithReplyBuilderByState */ - def builder[Command, Event, State](statePredicate: Predicate[State]): CommandHandlerWithReplyBuilderByState[Command, Event, State, State] = + def builder[Command, Event, State]( + statePredicate: Predicate[State]): CommandHandlerWithReplyBuilderByState[Command, Event, State, State] = new CommandHandlerWithReplyBuilderByState(classOf[Any].asInstanceOf[Class[State]], statePredicate) /** * INTERNAL API */ @InternalApi private final case class CommandHandlerCase[Command, Event, State]( - commandPredicate: Command => Boolean, - statePredicate: State => Boolean, - handler: BiFunction[State, Command, ReplyEffect[Event, State]]) + commandPredicate: Command => Boolean, + statePredicate: State => Boolean, + handler: BiFunction[State, Command, ReplyEffect[Event, State]]) } final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, State] @InternalApi private[persistence] ( - private val stateClass: Class[S], private val statePredicate: Predicate[S]) { + private val stateClass: Class[S], + private val statePredicate: Predicate[S]) { import CommandHandlerWithReplyBuilderByState.CommandHandlerCase private var cases: List[CommandHandlerCase[Command, Event, State]] = Nil - private def addCase(predicate: Command => Boolean, handler: BiFunction[S, Command, 
ReplyEffect[Event, State]]): Unit = { - cases = CommandHandlerCase[Command, Event, State]( - commandPredicate = predicate, - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S]) - else statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - handler.asInstanceOf[BiFunction[State, Command, ReplyEffect[Event, State]]]) :: cases + private def addCase(predicate: Command => Boolean, + handler: BiFunction[S, Command, ReplyEffect[Event, State]]): Unit = { + cases = CommandHandlerCase[Command, Event, State](commandPredicate = predicate, + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else + statePredicate.test(state.asInstanceOf[S]) && stateClass + .isAssignableFrom(state.getClass), + handler + .asInstanceOf[BiFunction[State, + Command, + ReplyEffect[Event, State]]]) :: cases } /** @@ -214,7 +227,8 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your command handlers. */ - def onCommand(predicate: Predicate[Command], handler: BiFunction[S, Command, ReplyEffect[Event, State]]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { + def onCommand(predicate: Predicate[Command], handler: BiFunction[S, Command, ReplyEffect[Event, State]]) + : CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { addCase(cmd => predicate.test(cmd), handler) this } @@ -229,7 +243,8 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your command handlers. 
*/ - def onCommand(predicate: Predicate[Command], handler: JFunction[Command, ReplyEffect[Event, State]]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { + def onCommand(predicate: Predicate[Command], handler: JFunction[Command, ReplyEffect[Event, State]]) + : CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { addCase(cmd => predicate.test(cmd), new BiFunction[S, Command, ReplyEffect[Event, State]] { override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler(cmd) }) @@ -243,8 +258,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your command handlers. */ - def onCommand[C <: Command](commandClass: Class[C], handler: BiFunction[S, C, ReplyEffect[Event, State]]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { - addCase(cmd => commandClass.isAssignableFrom(cmd.getClass), handler.asInstanceOf[BiFunction[S, Command, ReplyEffect[Event, State]]]) + def onCommand[C <: Command](commandClass: Class[C], handler: BiFunction[S, C, ReplyEffect[Event, State]]) + : CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { + addCase(cmd => commandClass.isAssignableFrom(cmd.getClass), + handler.asInstanceOf[BiFunction[S, Command, ReplyEffect[Event, State]]]) this } @@ -258,7 +275,8 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your command handlers. 
*/ - def onCommand[C <: Command](commandClass: Class[C], handler: JFunction[C, ReplyEffect[Event, State]]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { + def onCommand[C <: Command](commandClass: Class[C], handler: JFunction[C, ReplyEffect[Event, State]]) + : CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { onCommand[C](commandClass, new BiFunction[S, C, ReplyEffect[Event, State]] { override def apply(state: S, cmd: C): ReplyEffect[Event, State] = handler(cmd) }) @@ -273,7 +291,9 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your command handlers. */ - def onCommand[C <: Command](commandClass: Class[C], handler: Supplier[ReplyEffect[Event, State]]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { + def onCommand[C <: Command]( + commandClass: Class[C], + handler: Supplier[ReplyEffect[Event, State]]): CommandHandlerWithReplyBuilderByState[Command, Event, S, State] = { onCommand[C](commandClass, new BiFunction[S, C, ReplyEffect[Event, State]] { override def apply(state: S, cmd: C): ReplyEffect[Event, State] = handler.get() }) @@ -294,7 +314,8 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St * * @return A CommandHandlerWithReply from the appended states. */ - def onAnyCommand(handler: BiFunction[S, Command, ReplyEffect[Event, State]]): CommandHandlerWithReply[Command, Event, State] = { + def onAnyCommand( + handler: BiFunction[S, Command, ReplyEffect[Event, State]]): CommandHandlerWithReply[Command, Event, State] = { addCase(_ => true, handler) build() } @@ -316,12 +337,14 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St * * @return A CommandHandlerWithReply from the appended states. 
*/ - def onAnyCommand(handler: JFunction[Command, ReplyEffect[Event, State]]): CommandHandlerWithReply[Command, Event, State] = { + def onAnyCommand( + handler: JFunction[Command, ReplyEffect[Event, State]]): CommandHandlerWithReply[Command, Event, State] = { addCase(_ => true, new BiFunction[S, Command, ReplyEffect[Event, State]] { override def apply(state: S, cmd: Command): ReplyEffect[Event, State] = handler(cmd) }) build() } + /** * Matches any command. * @@ -350,8 +373,10 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St * Compose this builder with another builder. The handlers in this builder will be tried first followed * by the handlers in `other`. */ - def orElse[S2 <: State](other: CommandHandlerWithReplyBuilderByState[Command, Event, S2, State]): CommandHandlerWithReplyBuilderByState[Command, Event, S2, State] = { - val newBuilder = new CommandHandlerWithReplyBuilderByState[Command, Event, S2, State](other.stateClass, other.statePredicate) + def orElse[S2 <: State](other: CommandHandlerWithReplyBuilderByState[Command, Event, S2, State]) + : CommandHandlerWithReplyBuilderByState[Command, Event, S2, State] = { + val newBuilder = + new CommandHandlerWithReplyBuilderByState[Command, Event, S2, State](other.stateClass, other.statePredicate) // problem with overloaded constructor with `cases` as parameter newBuilder.cases = other.cases ::: cases newBuilder @@ -379,7 +404,8 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St } effect match { - case OptionVal.None => throw new MatchError(s"No match found for command of type [${command.getClass.getName}]") + case OptionVal.None => + throw new MatchError(s"No match found for command of type [${command.getClass.getName}]") case OptionVal.Some(e) => e.asInstanceOf[EffectImpl[Event, State]] } } @@ -387,4 +413,3 @@ final class CommandHandlerWithReplyBuilderByState[Command, Event, S <: State, St } } - diff --git 
a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala index 4ac1d859f1..624a480380 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/Effect.scala @@ -25,6 +25,7 @@ import akka.persistence.typed.internal._ * Not for user extension */ @DoNotInherit sealed class EffectFactories[Event, State] { + /** * Persist a single event */ @@ -93,7 +94,8 @@ import akka.persistence.typed.internal._ * The reply message will be sent also if `withEnforcedReplies` isn't used, but then the compiler will not help * finding mistakes. */ - def reply[ReplyMessage](cmd: ExpectingReply[ReplyMessage], replyWithMessage: ReplyMessage): ReplyEffect[Event, State] = + def reply[ReplyMessage](cmd: ExpectingReply[ReplyMessage], + replyWithMessage: ReplyMessage): ReplyEffect[Event, State] = none().thenReply[ReplyMessage](cmd, new function.Function[State, ReplyMessage] { override def apply(param: State): ReplyMessage = replyWithMessage }) @@ -118,6 +120,7 @@ import akka.persistence.typed.internal._ */ @DoNotInherit abstract class Effect[+Event, State] { self: EffectImpl[Event, State] => + /** * Run the given callback. Callbacks are run sequentially. * @@ -165,7 +168,8 @@ import akka.persistence.typed.internal._ * The reply message will be sent also if `withEnforcedReplies` isn't used, but then the compiler will not help * finding mistakes. */ - def thenReply[ReplyMessage](cmd: ExpectingReply[ReplyMessage], replyWithMessage: function.Function[State, ReplyMessage]): ReplyEffect[Event, State] = + def thenReply[ReplyMessage](cmd: ExpectingReply[ReplyMessage], + replyWithMessage: function.Function[State, ReplyMessage]): ReplyEffect[Event, State] = CompositeEffect(this, SideEffect[State](newState => cmd.replyTo ! 
replyWithMessage(newState))) /** diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala index d36ca52520..6c1359601d 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventHandler.scala @@ -62,7 +62,8 @@ final class EventHandlerBuilder[State >: Null, Event]() { * * @return A new, mutable, EventHandlerBuilderByState */ - def forState[S <: State](stateClass: Class[S], statePredicate: Predicate[S]): EventHandlerBuilderByState[S, State, Event] = { + def forState[S <: State](stateClass: Class[S], + statePredicate: Predicate[S]): EventHandlerBuilderByState[S, State, Event] = { val builder = new EventHandlerBuilderByState[S, State, Event](stateClass, statePredicate) builders = builder.asInstanceOf[EventHandlerBuilderByState[State, State, Event]] :: builders builder @@ -141,9 +142,10 @@ final class EventHandlerBuilder[State >: Null, Event]() { val combined = builders.reverse match { case head :: Nil => head - case head :: tail => tail.foldLeft(head) { (acc, builder) => - acc.orElse(builder) - } + case head :: tail => + tail.foldLeft(head) { (acc, builder) => + acc.orElse(builder) + } case Nil => throw new IllegalStateException("No matchers defined") } @@ -178,13 +180,13 @@ object EventHandlerBuilderByState { /** * INTERNAL API */ - @InternalApi private final case class EventHandlerCase[State, Event]( - statePredicate: State => Boolean, - eventPredicate: Event => Boolean, - handler: BiFunction[State, Event, State]) + @InternalApi private final case class EventHandlerCase[State, Event](statePredicate: State => Boolean, + eventPredicate: Event => Boolean, + handler: BiFunction[State, Event, State]) } -final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private val stateClass: Class[S], private val 
statePredicate: Predicate[S]) { +final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private val stateClass: Class[S], + private val statePredicate: Predicate[S]) { import EventHandlerBuilderByState.EventHandlerCase @@ -192,11 +194,11 @@ final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private private def addCase(eventPredicate: Event => Boolean, handler: BiFunction[State, Event, State]): Unit = { cases = EventHandlerCase[State, Event]( - statePredicate = state => - if (state == null) statePredicate.test(state.asInstanceOf[S]) - else statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), - eventPredicate = eventPredicate, - handler) :: cases + statePredicate = state => + if (state == null) statePredicate.test(state.asInstanceOf[S]) + else statePredicate.test(state.asInstanceOf[S]) && stateClass.isAssignableFrom(state.getClass), + eventPredicate = eventPredicate, + handler) :: cases } /** @@ -206,7 +208,8 @@ final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your event handlers. */ - def onEvent[E <: Event](eventClass: Class[E], handler: BiFunction[S, E, State]): EventHandlerBuilderByState[S, State, Event] = { + def onEvent[E <: Event](eventClass: Class[E], + handler: BiFunction[S, E, State]): EventHandlerBuilderByState[S, State, Event] = { addCase(e => eventClass.isAssignableFrom(e.getClass), handler.asInstanceOf[BiFunction[State, Event, State]]) this } @@ -221,7 +224,8 @@ final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your event handlers. 
*/ - def onEvent[E <: Event](eventClass: Class[E], handler: JFunction[E, State]): EventHandlerBuilderByState[S, State, Event] = { + def onEvent[E <: Event](eventClass: Class[E], + handler: JFunction[E, State]): EventHandlerBuilderByState[S, State, Event] = { onEvent[E](eventClass, new BiFunction[S, E, State] { override def apply(state: S, event: E): State = handler(event) }) @@ -236,7 +240,8 @@ final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private * and no further lookup is done. Therefore you must make sure that their matching conditions don't overlap, * otherwise you risk to 'shadow' part of your event handlers. */ - def onEvent[E <: Event](eventClass: Class[E], handler: Supplier[State]): EventHandlerBuilderByState[S, State, Event] = { + def onEvent[E <: Event](eventClass: Class[E], + handler: Supplier[State]): EventHandlerBuilderByState[S, State, Event] = { val supplierBiFunction = new BiFunction[S, E, State] { def apply(t: S, u: E): State = handler.get() @@ -288,7 +293,8 @@ final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private * Compose this builder with another builder. The handlers in this builder will be tried first followed * by the handlers in `other`. 
*/ - def orElse[S2 <: State](other: EventHandlerBuilderByState[S2, State, Event]): EventHandlerBuilderByState[S2, State, Event] = { + def orElse[S2 <: State]( + other: EventHandlerBuilderByState[S2, State, Event]): EventHandlerBuilderByState[S2, State, Event] = { val newBuilder = new EventHandlerBuilderByState[S2, State, Event](other.stateClass, other.statePredicate) // problem with overloaded constructor with `cases` as parameter newBuilder.cases = other.cases ::: cases @@ -317,7 +323,8 @@ final class EventHandlerBuilderByState[S <: State, State >: Null, Event](private result match { case OptionVal.None => val stateClass = if (state == null) "null" else state.getClass.getName - throw new MatchError(s"No match found for event [${event.getClass}] and state [$stateClass]. Has this event been stored using an EventAdapter?") + throw new MatchError( + s"No match found for event [${event.getClass}] and state [$stateClass]. Has this event been stored using an EventAdapter?") case OptionVal.Some(s) => s } } diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventSourcedBehavior.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventSourcedBehavior.scala index c3500d4ec0..9f730aad22 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventSourcedBehavior.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/javadsl/EventSourcedBehavior.scala @@ -22,7 +22,10 @@ import akka.persistence.typed._ import akka.persistence.typed.internal._ @ApiMayChange -abstract class EventSourcedBehavior[Command, Event, State >: Null] private[akka] (val persistenceId: PersistenceId, onPersistFailure: Optional[BackoffSupervisorStrategy]) extends DeferredBehavior[Command] { +abstract class EventSourcedBehavior[Command, Event, State >: Null] private[akka] ( + val persistenceId: PersistenceId, + onPersistFailure: Optional[BackoffSupervisorStrategy]) + extends DeferredBehavior[Command] { def 
this(persistenceId: PersistenceId) = { this(persistenceId, Optional.empty[BackoffSupervisorStrategy]) @@ -211,8 +214,10 @@ abstract class EventSourcedBehavior[Command, Event, State >: Null] private[akka] * created with `Effects().reply`, `Effects().noReply`, [[Effect.thenReply]], or [[Effect.thenNoReply]]. */ @ApiMayChange -abstract class EventSourcedBehaviorWithEnforcedReplies[Command, Event, State >: Null](persistenceId: PersistenceId, backoffSupervisorStrategy: Optional[BackoffSupervisorStrategy]) - extends EventSourcedBehavior[Command, Event, State](persistenceId, backoffSupervisorStrategy) { +abstract class EventSourcedBehaviorWithEnforcedReplies[Command, Event, State >: Null]( + persistenceId: PersistenceId, + backoffSupervisorStrategy: Optional[BackoffSupervisorStrategy]) + extends EventSourcedBehavior[Command, Event, State](persistenceId, backoffSupervisorStrategy) { def this(persistenceId: PersistenceId) = { this(persistenceId, Optional.empty[BackoffSupervisorStrategy]) diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Effect.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Effect.scala index 1e79cf4cec..db6f8bc30c 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Effect.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/Effect.scala @@ -100,7 +100,8 @@ object Effect { * The reply message will be sent also if `withEnforcedReplies` isn't used, but then the compiler will not help * finding mistakes. 
*/ - def reply[ReplyMessage, Event, State](cmd: ExpectingReply[ReplyMessage])(replyWithMessage: ReplyMessage): ReplyEffect[Event, State] = + def reply[ReplyMessage, Event, State](cmd: ExpectingReply[ReplyMessage])( + replyWithMessage: ReplyMessage): ReplyEffect[Event, State] = none[Event, State].thenReply[ReplyMessage](cmd)(_ => replyWithMessage) /** @@ -164,7 +165,8 @@ trait Effect[+Event, State] { * The reply message will be sent also if `withEnforcedReplies` isn't used, but then the compiler will not help * finding mistakes. */ - def thenReply[ReplyMessage](cmd: ExpectingReply[ReplyMessage])(replyWithMessage: State => ReplyMessage): ReplyEffect[Event, State] = + def thenReply[ReplyMessage](cmd: ExpectingReply[ReplyMessage])( + replyWithMessage: State => ReplyMessage): ReplyEffect[Event, State] = CompositeEffect(this, new ReplyEffectImpl[ReplyMessage, State](cmd.replyTo, replyWithMessage)) /** @@ -184,4 +186,3 @@ trait Effect[+Event, State] { * Not intended for user extension. */ @DoNotInherit trait ReplyEffect[+Event, State] extends Effect[Event, State] - diff --git a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/EventSourcedBehavior.scala b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/EventSourcedBehavior.scala index 5e4980128f..9173d6788a 100644 --- a/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/EventSourcedBehavior.scala +++ b/akka-persistence-typed/src/main/scala/akka/persistence/typed/scaladsl/EventSourcedBehavior.scala @@ -46,10 +46,10 @@ object EventSourcedBehavior { * Create a `Behavior` for a persistent actor. 
*/ def apply[Command, Event, State]( - persistenceId: PersistenceId, - emptyState: State, - commandHandler: (State, Command) => Effect[Event, State], - eventHandler: (State, Event) => State): EventSourcedBehavior[Command, Event, State] = { + persistenceId: PersistenceId, + emptyState: State, + commandHandler: (State, Command) => Effect[Event, State], + eventHandler: (State, Event) => State): EventSourcedBehavior[Command, Event, State] = { val loggerClass = LoggerClass.detectLoggerClassFromStack(classOf[EventSourcedBehavior[_, _, _]]) EventSourcedBehaviorImpl(persistenceId, emptyState, commandHandler, eventHandler, loggerClass) } @@ -60,10 +60,10 @@ object EventSourcedBehavior { * created with [[Effect.reply]], [[Effect.noReply]], [[Effect.thenReply]], or [[Effect.thenNoReply]]. */ def withEnforcedReplies[Command <: ExpectingReply[_], Event, State]( - persistenceId: PersistenceId, - emptyState: State, - commandHandler: (State, Command) => ReplyEffect[Event, State], - eventHandler: (State, Event) => State): EventSourcedBehavior[Command, Event, State] = { + persistenceId: PersistenceId, + emptyState: State, + commandHandler: (State, Command) => ReplyEffect[Event, State], + eventHandler: (State, Event) => State): EventSourcedBehavior[Command, Event, State] = { val loggerClass = LoggerClass.detectLoggerClassFromStack(classOf[EventSourcedBehavior[_, _, _]]) EventSourcedBehaviorImpl(persistenceId, emptyState, commandHandler, eventHandler, loggerClass) } @@ -86,7 +86,8 @@ object EventSourcedBehavior { * * @see [[Effect]] for possible effects of a command. 
*/ - def command[Command, Event, State](commandHandler: Command => Effect[Event, State]): (State, Command) => Effect[Event, State] = + def command[Command, Event, State]( + commandHandler: Command => Effect[Event, State]): (State, Command) => Effect[Event, State] = (_, cmd) => commandHandler(cmd) } @@ -106,9 +107,11 @@ object EventSourcedBehavior { case impl: ActorContextAdapter[_] => extractConcreteBehavior(impl.currentBehavior) match { case w: Running.WithSeqNrAccessible => w.currentSequenceNumber - case s => throw new IllegalStateException(s"Cannot extract the lastSequenceNumber in state ${s.getClass.getName}") + case s => + throw new IllegalStateException(s"Cannot extract the lastSequenceNumber in state ${s.getClass.getName}") } - case c => throw new IllegalStateException(s"Cannot extract the lastSequenceNumber from context ${c.getClass.getName}") + case c => + throw new IllegalStateException(s"Cannot extract the lastSequenceNumber from context ${c.getClass.getName}") } } @@ -127,6 +130,7 @@ object EventSourcedBehavior { * The `callback` function is called to notify that the recovery process has finished. */ def onRecoveryCompleted(callback: State => Unit): EventSourcedBehavior[Command, Event, State] + /** * The `callback` function is called to notify that recovery has failed. 
For setting a supervision * strategy `onPersistFailure` @@ -156,6 +160,7 @@ object EventSourcedBehavior { * `predicate` receives the State, Event and the sequenceNr used for the Event */ def snapshotWhen(predicate: (State, Event, Long) => Boolean): EventSourcedBehavior[Command, Event, State] + /** * Snapshot every N events * @@ -204,4 +209,3 @@ object EventSourcedBehavior { */ def onPersistFailure(backoffStrategy: BackoffSupervisorStrategy): EventSourcedBehavior[Command, Event, State] } - diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala index f9565edc3b..98b35a50d3 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/ManyRecoveriesSpec.scala @@ -22,20 +22,17 @@ object ManyRecoveriesSpec { final case class Evt(s: String) - def persistentBehavior( - name: String, - probe: TestProbe[String], - latch: Option[TestLatch]): EventSourcedBehavior[Cmd, Evt, String] = - EventSourcedBehavior[Cmd, Evt, String]( - persistenceId = PersistenceId(name), - emptyState = "", - commandHandler = CommandHandler.command { - case Cmd(s) => Effect.persist(Evt(s)).thenRun(_ => probe.ref ! s"$name-$s") - }, - eventHandler = { - case (state, _) => latch.foreach(Await.ready(_, 10.seconds)); state - } - ) + def persistentBehavior(name: String, + probe: TestProbe[String], + latch: Option[TestLatch]): EventSourcedBehavior[Cmd, Evt, String] = + EventSourcedBehavior[Cmd, Evt, String](persistenceId = PersistenceId(name), + emptyState = "", + commandHandler = CommandHandler.command { + case Cmd(s) => Effect.persist(Evt(s)).thenRun(_ => probe.ref ! 
s"$name-$s") + }, + eventHandler = { + case (state, _) => latch.foreach(Await.ready(_, 10.seconds)); state + }) def forwardBehavior(sender: TestProbe[String]): Behaviors.Receive[Int] = Behaviors.receiveMessagePartial[Int] { @@ -87,7 +84,7 @@ class ManyRecoveriesSpec extends ScalaTestWithActorTestKit(s""" latch.countDown() forN(100)(_ => probe.receiveMessage()) should - be(forN(100)(i => s"a$i-B")) + be(forN(100)(i => s"a$i-B")) } } } diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala index d6b1acb8b8..2197fa7579 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/RecoveryPermitterSpec.scala @@ -39,27 +39,26 @@ object RecoveryPermitterSpec { case object Recovered extends Event - def persistentBehavior( - name: String, - commandProbe: TestProbe[Any], - eventProbe: TestProbe[Any], - throwOnRecovery: Boolean = false): Behavior[Command] = - EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId(name), - emptyState = EmptyState, - commandHandler = CommandHandler.command { - case StopActor => Effect.stop() - case command => commandProbe.ref ! command; Effect.none - }, - eventHandler = { (state, event) => eventProbe.ref ! event; state } - ).onRecoveryCompleted { _ => - eventProbe.ref ! Recovered - if (throwOnRecovery) throw new TE - } + def persistentBehavior(name: String, + commandProbe: TestProbe[Any], + eventProbe: TestProbe[Any], + throwOnRecovery: Boolean = false): Behavior[Command] = + EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId(name), + emptyState = EmptyState, + commandHandler = CommandHandler.command { + case StopActor => Effect.stop() + case command => commandProbe.ref ! 
command; Effect.none + }, + eventHandler = { (state, event) => + eventProbe.ref ! event; state + }).onRecoveryCompleted { _ => + eventProbe.ref ! Recovered + if (throwOnRecovery) throw new TE + } def forwardingBehavior(target: TestProbe[Any]): Behavior[Any] = - Behaviors.receive[Any] { - (_, any) => target.ref ! any; Behaviors.same + Behaviors.receive[Any] { (_, any) => + target.ref ! any; Behaviors.same } } @@ -186,21 +185,19 @@ class RecoveryPermitterSpec extends ScalaTestWithActorTestKit(s""" val stopProbe = createTestProbe[ActorRef[Command]]() val parent = EventFilter.error(occurrences = 1, start = "Exception during recovery.").intercept { - spawn( - Behaviors.setup[Command](ctx => { - val persistentActor = - ctx.spawnAnonymous(persistentBehavior("p3", p3, p3, throwOnRecovery = true)) - Behaviors.receive[Command] { - case (_, StopActor) => - stopProbe.ref ! persistentActor - ctx.stop(persistentActor) - Behavior.same - case (_, message) => - persistentActor ! message - Behaviors.same - } - }) - ) + spawn(Behaviors.setup[Command](ctx => { + val persistentActor = + ctx.spawnAnonymous(persistentBehavior("p3", p3, p3, throwOnRecovery = true)) + Behaviors.receive[Command] { + case (_, StopActor) => + stopProbe.ref ! persistentActor + ctx.stop(persistentActor) + Behavior.same + case (_, message) => + persistentActor ! 
message + Behaviors.same + } + })) } p3.expectMessage(Recovered) // stop it diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala index 4e9bca4eaa..f8a267a11d 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/internal/StashStateSpec.scala @@ -40,28 +40,29 @@ class StashStateSpec extends ScalaTestWithActorTestKit with WordSpecLike { val settings = dummySettings() Behaviors.setup[InternalProtocol] { _ => val stashState = new StashState(settings) - Behaviors.receiveMessagePartial[InternalProtocol] { - case RecoveryPermitGranted => - stashState.internalStashBuffer.stash(RecoveryPermitGranted) - probe.ref ! stashState.internalStashBuffer.size - Behaviors.same[InternalProtocol] - case _: IncomingCommand[_] => Behaviors.stopped - }.receiveSignal { - case (_, _) => - stashState.clearStashBuffers() - Behaviors.stopped[InternalProtocol] - } + Behaviors + .receiveMessagePartial[InternalProtocol] { + case RecoveryPermitGranted => + stashState.internalStashBuffer.stash(RecoveryPermitGranted) + probe.ref ! 
stashState.internalStashBuffer.size + Behaviors.same[InternalProtocol] + case _: IncomingCommand[_] => Behaviors.stopped + } + .receiveSignal { + case (_, _) => + stashState.clearStashBuffers() + Behaviors.stopped[InternalProtocol] + } } } } private def dummySettings(capacity: Int = 42) = - EventSourcedSettings( - stashCapacity = capacity, - stashOverflowStrategy = StashOverflowStrategy.Fail, - logOnStashing = false, - recoveryEventTimeout = 3.seconds, - journalPluginId = "", - snapshotPluginId = "") + EventSourcedSettings(stashCapacity = capacity, + stashOverflowStrategy = StashOverflowStrategy.Fail, + logOnStashing = false, + recoveryEventTimeout = 3.seconds, + journalPluginId = "", + snapshotPluginId = "") } diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala index 21869b23e5..fd00006004 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorFailureSpec.scala @@ -37,9 +37,10 @@ class ChaosJournal extends InmemJournal { Future.failed(TestException("database says no")) } else if (pid == "reject-first" && reject) { reject = false - Future.successful(messages.map(_ => Try { - throw TestException("I don't like it") - })) + Future.successful(messages.map(_ => + Try { + throw TestException("I don't like it") + })) } else { super.asyncWriteMessages(messages) } @@ -59,8 +60,7 @@ class ChaosJournal extends InmemJournal { object EventSourcedBehaviorFailureSpec { - val conf: Config = ConfigFactory.parseString( - s""" + val conf: Config = ConfigFactory.parseString(s""" akka.loglevel = DEBUG akka.persistence.journal.plugin = "failure-journal" failure-journal = $${akka.persistence.journal.inmem} @@ -70,38 +70,40 @@ object 
EventSourcedBehaviorFailureSpec { """).withFallback(ConfigFactory.defaultReference()).resolve() } -class EventSourcedBehaviorFailureSpec extends ScalaTestWithActorTestKit(EventSourcedBehaviorFailureSpec.conf) with WordSpecLike { +class EventSourcedBehaviorFailureSpec + extends ScalaTestWithActorTestKit(EventSourcedBehaviorFailureSpec.conf) + with WordSpecLike { implicit val testSettings: TestKitSettings = TestKitSettings(system) - def failingPersistentActor(pid: PersistenceId, probe: ActorRef[String]): EventSourcedBehavior[String, String, String] = - EventSourcedBehavior[String, String, String]( - pid, "", - (_, cmd) => { - if (cmd == "wrong") - throw new TestException("wrong command") - probe.tell("persisting") - Effect.persist(cmd) - }, - (state, event) => { - probe.tell(event) - state + event - } - ).onRecoveryCompleted { _ => + def failingPersistentActor(pid: PersistenceId, + probe: ActorRef[String]): EventSourcedBehavior[String, String, String] = + EventSourcedBehavior[String, String, String](pid, + "", + (_, cmd) => { + if (cmd == "wrong") + throw new TestException("wrong command") + probe.tell("persisting") + Effect.persist(cmd) + }, + (state, event) => { + probe.tell(event) + state + event + }) + .onRecoveryCompleted { _ => probe.tell("starting") } .onPostStop(() => probe.tell("stopped")) .onPreRestart(() => probe.tell("restarting")) - .onPersistFailure(SupervisorStrategy.restartWithBackoff(1.milli, 5.millis, 0.1) - .withLoggingEnabled(enabled = false)) + .onPersistFailure( + SupervisorStrategy.restartWithBackoff(1.milli, 5.millis, 0.1).withLoggingEnabled(enabled = false)) "A typed persistent actor (failures)" must { "call onRecoveryFailure when replay fails" in { val probe = TestProbe[String]() val excProbe = TestProbe[Throwable]() - spawn(failingPersistentActor(PersistenceId("fail-recovery"), probe.ref) - .onRecoveryFailure(t => excProbe.ref ! 
t)) + spawn(failingPersistentActor(PersistenceId("fail-recovery"), probe.ref).onRecoveryFailure(t => excProbe.ref ! t)) excProbe.expectMessageType[TestException].message shouldEqual "Nope" probe.expectMessage("restarting") @@ -109,8 +111,8 @@ class EventSourcedBehaviorFailureSpec extends ScalaTestWithActorTestKit(EventSou "handle exceptions in onRecoveryFailure" in { val probe = TestProbe[String]() - val pa = spawn(failingPersistentActor(PersistenceId("fail-recovery-twice"), probe.ref) - .onRecoveryFailure(_ => throw TestException("recovery call back failure"))) + val pa = spawn(failingPersistentActor(PersistenceId("fail-recovery-twice"), probe.ref).onRecoveryFailure(_ => + throw TestException("recovery call back failure"))) pa ! "one" probe.expectMessage("starting") probe.expectMessage("persisting") @@ -155,10 +157,10 @@ class EventSourcedBehaviorFailureSpec extends ScalaTestWithActorTestKit(EventSou "handles rejections" in { val probe = TestProbe[String]() val behav = - Behaviors.supervise( - failingPersistentActor(PersistenceId("reject-first"), probe.ref)).onFailure[EventRejectedException]( - SupervisorStrategy.restartWithBackoff(1.milli, 5.millis, 0.1) - .withLoggingEnabled(enabled = false)) + Behaviors + .supervise(failingPersistentActor(PersistenceId("reject-first"), probe.ref)) + .onFailure[EventRejectedException]( + SupervisorStrategy.restartWithBackoff(1.milli, 5.millis, 0.1).withLoggingEnabled(enabled = false)) val c = spawn(behav) // First time fails, second time should work and call onRecoveryComplete probe.expectMessage("starting") @@ -185,7 +187,8 @@ class EventSourcedBehaviorFailureSpec extends ScalaTestWithActorTestKit(EventSou "restart supervisor strategy if command handler throws" in { val probe = TestProbe[String]() - val behav = Behaviors.supervise(failingPersistentActor(PersistenceId("wrong-command-2"), probe.ref)) + val behav = Behaviors + .supervise(failingPersistentActor(PersistenceId("wrong-command-2"), probe.ref)) 
.onFailure[TestException](SupervisorStrategy.restart) val c = spawn(behav) probe.expectMessage("starting") diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorReplySpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorReplySpec.scala index a8464d6a74..b7dce6c4bd 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorReplySpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorReplySpec.scala @@ -20,8 +20,7 @@ import com.typesafe.config.ConfigFactory import org.scalatest.WordSpecLike object EventSourcedBehaviorReplySpec { - def conf: Config = ConfigFactory.parseString( - s""" + def conf: Config = ConfigFactory.parseString(s""" akka.loglevel = INFO # akka.persistence.typed.log-stashing = on akka.persistence.journal.leveldb.dir = "target/typed-persistence-${UUID.randomUUID().toString}" @@ -44,38 +43,44 @@ object EventSourcedBehaviorReplySpec { def counter(persistenceId: PersistenceId): Behavior[Command[_]] = Behaviors.setup(ctx => counter(ctx, persistenceId)) - def counter( - ctx: ActorContext[Command[_]], - persistenceId: PersistenceId): EventSourcedBehavior[Command[_], Event, State] = { - EventSourcedBehavior.withEnforcedReplies[Command[_], Event, State]( - persistenceId, - emptyState = State(0, Vector.empty), - commandHandler = (state, command) => command match { + def counter(ctx: ActorContext[Command[_]], + persistenceId: PersistenceId): EventSourcedBehavior[Command[_], Event, State] = { + EventSourcedBehavior.withEnforcedReplies[Command[_], Event, State](persistenceId, + emptyState = State(0, Vector.empty), + commandHandler = (state, command) => + command match { - case cmd: IncrementWithConfirmation => - Effect.persist(Incremented(1)) - .thenReply(cmd)(_ => Done) + case cmd: IncrementWithConfirmation => + Effect + .persist(Incremented(1)) + .thenReply(cmd)(_ => 
Done) - case cmd: IncrementReplyLater => - Effect.persist(Incremented(1)) - .thenRun((_: State) => ctx.self ! ReplyNow(cmd.replyTo)) - .thenNoReply() + case cmd: IncrementReplyLater => + Effect + .persist(Incremented(1)) + .thenRun((_: State) => + ctx.self ! ReplyNow(cmd.replyTo)) + .thenNoReply() - case cmd: ReplyNow => - Effect.reply(cmd)(Done) + case cmd: ReplyNow => + Effect.reply(cmd)(Done) - case query: GetValue => - Effect.reply(query)(state) + case query: GetValue => + Effect.reply(query)(state) - }, - eventHandler = (state, evt) => evt match { - case Incremented(delta) => - State(state.value + delta, state.history :+ state.value) - }) + }, + eventHandler = (state, evt) => + evt match { + case Incremented(delta) => + State(state.value + delta, + state.history :+ state.value) + }) } } -class EventSourcedBehaviorReplySpec extends ScalaTestWithActorTestKit(EventSourcedBehaviorReplySpec.conf) with WordSpecLike { +class EventSourcedBehaviorReplySpec + extends ScalaTestWithActorTestKit(EventSourcedBehaviorReplySpec.conf) + with WordSpecLike { import EventSourcedBehaviorReplySpec._ diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala index dffe6459dd..057315fb06 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorSpec.scala @@ -69,8 +69,7 @@ object EventSourcedBehaviorSpec { } // also used from PersistentActorTest - def conf: Config = ConfigFactory.parseString( - s""" + def conf: Config = ConfigFactory.parseString(s""" akka.loglevel = INFO akka.loggers = [akka.testkit.TestEventListener] # akka.persistence.typed.log-stashing = on @@ -94,7 +93,9 @@ object EventSourcedBehaviorSpec { final case object IncrementLater extends Command final case object 
IncrementAfterReceiveTimeout extends Command final case object IncrementTwiceAndThenLog extends Command - final case class IncrementWithConfirmation(override val replyTo: ActorRef[Done]) extends Command with ExpectingReply[Done] + final case class IncrementWithConfirmation(override val replyTo: ActorRef[Done]) + extends Command + with ExpectingReply[Done] final case object DoNothingAndThenLog extends Command final case object EmptyEventsListAndThenLog extends Command final case class GetValue(replyTo: ActorRef[State]) extends Command @@ -117,131 +118,145 @@ object EventSourcedBehaviorSpec { def counter(persistenceId: PersistenceId)(implicit system: ActorSystem[_]): Behavior[Command] = Behaviors.setup(ctx => counter(ctx, persistenceId)) - def counter(persistenceId: PersistenceId, logging: ActorRef[String])(implicit system: ActorSystem[_]): Behavior[Command] = + def counter(persistenceId: PersistenceId, logging: ActorRef[String])( + implicit system: ActorSystem[_]): Behavior[Command] = Behaviors.setup(ctx => counter(ctx, persistenceId, logging)) - def counter(ctx: ActorContext[Command], persistenceId: PersistenceId)(implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = - counter(ctx, persistenceId, loggingActor = TestProbe[String].ref, probe = TestProbe[(State, Event)].ref, TestProbe[Try[Done]].ref) + def counter(ctx: ActorContext[Command], persistenceId: PersistenceId)( + implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = + counter(ctx, + persistenceId, + loggingActor = TestProbe[String].ref, + probe = TestProbe[(State, Event)].ref, + TestProbe[Try[Done]].ref) - def counter(ctx: ActorContext[Command], persistenceId: PersistenceId, logging: ActorRef[String])(implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = + def counter(ctx: ActorContext[Command], persistenceId: PersistenceId, logging: ActorRef[String])( + implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] 
= counter(ctx, persistenceId, loggingActor = logging, probe = TestProbe[(State, Event)].ref, TestProbe[Try[Done]].ref) - def counterWithProbe(ctx: ActorContext[Command], persistenceId: PersistenceId, probe: ActorRef[(State, Event)], snapshotProbe: ActorRef[Try[Done]])(implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = + def counterWithProbe(ctx: ActorContext[Command], + persistenceId: PersistenceId, + probe: ActorRef[(State, Event)], + snapshotProbe: ActorRef[Try[Done]])( + implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = counter(ctx, persistenceId, TestProbe[String].ref, probe, snapshotProbe) - def counterWithProbe(ctx: ActorContext[Command], persistenceId: PersistenceId, probe: ActorRef[(State, Event)])(implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = + def counterWithProbe(ctx: ActorContext[Command], persistenceId: PersistenceId, probe: ActorRef[(State, Event)])( + implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = counter(ctx, persistenceId, TestProbe[String].ref, probe, TestProbe[Try[Done]].ref) - def counterWithSnapshotProbe(ctx: ActorContext[Command], persistenceId: PersistenceId, probe: ActorRef[Try[Done]])(implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = + def counterWithSnapshotProbe(ctx: ActorContext[Command], persistenceId: PersistenceId, probe: ActorRef[Try[Done]])( + implicit system: ActorSystem[_]): EventSourcedBehavior[Command, Event, State] = counter(ctx, persistenceId, TestProbe[String].ref, TestProbe[(State, Event)].ref, snapshotProbe = probe) - def counter( - ctx: ActorContext[Command], - persistenceId: PersistenceId, - loggingActor: ActorRef[String], - probe: ActorRef[(State, Event)], - snapshotProbe: ActorRef[Try[Done]]): EventSourcedBehavior[Command, Event, State] = { - EventSourcedBehavior[Command, Event, State]( - persistenceId, - emptyState = State(0, Vector.empty), - commandHandler = 
(state, cmd) => cmd match { - case Increment => - Effect.persist(Incremented(1)) + def counter(ctx: ActorContext[Command], + persistenceId: PersistenceId, + loggingActor: ActorRef[String], + probe: ActorRef[(State, Event)], + snapshotProbe: ActorRef[Try[Done]]): EventSourcedBehavior[Command, Event, State] = { + EventSourcedBehavior[Command, Event, State](persistenceId, + emptyState = State(0, Vector.empty), + commandHandler = (state, cmd) => + cmd match { + case Increment => + Effect.persist(Incremented(1)) - case IncrementThenLogThenStop => - Effect.persist(Incremented(1)) - .thenRun { (_: State) => - loggingActor ! firstLogging - } - .thenStop + case IncrementThenLogThenStop => + Effect + .persist(Incremented(1)) + .thenRun { (_: State) => + loggingActor ! firstLogging + } + .thenStop - case IncrementTwiceThenLogThenStop => - Effect.persist(Incremented(1), Incremented(2)) - .thenRun { (_: State) => - loggingActor ! firstLogging - } - .thenStop + case IncrementTwiceThenLogThenStop => + Effect + .persist(Incremented(1), Incremented(2)) + .thenRun { (_: State) => + loggingActor ! firstLogging + } + .thenStop - case IncrementWithPersistAll(n) => - Effect.persist((0 until n).map(_ => Incremented(1))) + case IncrementWithPersistAll(n) => + Effect.persist((0 until n).map(_ => Incremented(1))) - case cmd: IncrementWithConfirmation => - Effect.persist(Incremented(1)) - .thenReply(cmd)(_ => Done) + case cmd: IncrementWithConfirmation => + Effect.persist(Incremented(1)).thenReply(cmd)(_ => Done) - case GetValue(replyTo) => - replyTo ! state - Effect.none + case GetValue(replyTo) => + replyTo ! 
state + Effect.none - case IncrementLater => - // purpose is to test signals - val delay = ctx.spawnAnonymous(Behaviors.withTimers[Tick.type] { timers => - timers.startSingleTimer(Tick, Tick, 10.millis) - Behaviors.receive((_, msg) => msg match { - case Tick => Behaviors.stopped - }) - }) - ctx.watchWith(delay, DelayFinished) - Effect.none + case IncrementLater => + // purpose is to test signals + val delay = ctx.spawnAnonymous(Behaviors.withTimers[Tick.type] { + timers => + timers.startSingleTimer(Tick, Tick, 10.millis) + Behaviors.receive((_, msg) => + msg match { + case Tick => Behaviors.stopped + }) + }) + ctx.watchWith(delay, DelayFinished) + Effect.none - case DelayFinished => - Effect.persist(Incremented(10)) + case DelayFinished => + Effect.persist(Incremented(10)) - case IncrementAfterReceiveTimeout => - ctx.setReceiveTimeout(10.millis, Timeout) - Effect.none + case IncrementAfterReceiveTimeout => + ctx.setReceiveTimeout(10.millis, Timeout) + Effect.none - case Timeout => - ctx.cancelReceiveTimeout() - Effect.persist(Incremented(100)) + case Timeout => + ctx.cancelReceiveTimeout() + Effect.persist(Incremented(100)) - case IncrementTwiceAndThenLog => - Effect - .persist(Incremented(1), Incremented(1)) - .thenRun { (_: State) => - loggingActor ! firstLogging - } - .thenRun { _ => - loggingActor ! secondLogging - } + case IncrementTwiceAndThenLog => + Effect + .persist(Incremented(1), Incremented(1)) + .thenRun { (_: State) => + loggingActor ! firstLogging + } + .thenRun { _ => + loggingActor ! secondLogging + } - case EmptyEventsListAndThenLog => - Effect - .persist(List.empty) // send empty list of events - .thenRun { _ => - loggingActor ! firstLogging - } + case EmptyEventsListAndThenLog => + Effect + .persist(List.empty) // send empty list of events + .thenRun { _ => + loggingActor ! firstLogging + } - case DoNothingAndThenLog => - Effect - .none - .thenRun { _ => - loggingActor ! 
firstLogging - } + case DoNothingAndThenLog => + Effect.none.thenRun { _ => + loggingActor ! firstLogging + } - case LogThenStop => - Effect.none[Event, State] - .thenRun { _ => - loggingActor ! firstLogging - } - .thenStop + case LogThenStop => + Effect + .none[Event, State] + .thenRun { _ => + loggingActor ! firstLogging + } + .thenStop - case Fail => - throw new TestException("boom!") + case Fail => + throw new TestException("boom!") - case StopIt => - Effect.none.thenStop() + case StopIt => + Effect.none.thenStop() - }, - eventHandler = (state, evt) => evt match { - case Incremented(delta) => - probe ! ((state, evt)) - State(state.value + delta, state.history :+ state.value) - }).onRecoveryCompleted(_ => ()) - .onSnapshot { - case (_, result) => - snapshotProbe ! result - } + }, + eventHandler = (state, evt) => + evt match { + case Incremented(delta) => + probe ! ((state, evt)) + State(state.value + delta, state.history :+ state.value) + }).onRecoveryCompleted(_ => ()).onSnapshot { + case (_, result) => + snapshotProbe ! 
result + } } } @@ -255,8 +270,8 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh import akka.actor.typed.scaladsl.adapter._ implicit val materializer = ActorMaterializer()(system.toUntyped) - val queries: LeveldbReadJournal = PersistenceQuery(system.toUntyped).readJournalFor[LeveldbReadJournal]( - LeveldbReadJournal.Identifier) + val queries: LeveldbReadJournal = + PersistenceQuery(system.toUntyped).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier) // needed for the untyped event filter implicit val actorSystem = system.toUntyped @@ -395,7 +410,8 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh "work when wrapped in other behavior" in { val probe = TestProbe[State] - val behavior = Behaviors.supervise[Command](counter(nextPid)) + val behavior = Behaviors + .supervise[Command](counter(nextPid)) .onFailure(SupervisorStrategy.restartWithBackoff(1.second, 10.seconds, 0.1)) val c = spawn(behavior) c ! Increment @@ -417,7 +433,9 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh val snapshotProbe = TestProbe[Try[Done]] val alwaysSnapshot: Behavior[Command] = Behaviors.setup { ctx => - counterWithSnapshotProbe(ctx, pid, snapshotProbe.ref).snapshotWhen { (_, _, _) => true } + counterWithSnapshotProbe(ctx, pid, snapshotProbe.ref).snapshotWhen { (_, _, _) => + true + } } val c = spawn(alwaysSnapshot) val watchProbe = watcher(c) @@ -444,8 +462,9 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh val pid = nextPid val snapshotProbe = TestProbe[Try[Done]] val snapshotAtTwo = Behaviors.setup[Command](ctx => - counterWithSnapshotProbe(ctx, pid, snapshotProbe.ref).snapshotWhen { (s, _, _) => s.value == 2 } - ) + counterWithSnapshotProbe(ctx, pid, snapshotProbe.ref).snapshotWhen { (s, _, _) => + s.value == 2 + }) val c: ActorRef[Command] = spawn(snapshotAtTwo) val watchProbe = watcher(c) val replyProbe = TestProbe[State]() @@ 
-481,10 +500,8 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh // no snapshot should have happened val probeC2 = TestProbe[(State, Event)]() val snapshotProbe = TestProbe[Try[Done]]() - val c2 = spawn(Behaviors.setup[Command](ctx => - counterWithProbe(ctx, pid, probeC2.ref, snapshotProbe.ref) - .snapshotEvery(2)) - ) + val c2 = spawn( + Behaviors.setup[Command](ctx => counterWithProbe(ctx, pid, probeC2.ref, snapshotProbe.ref).snapshotEvery(2))) probeC2.expectMessage[(State, Event)]((State(0, Vector()), Incremented(1))) val watchProbeC2 = watcher(c2) c2 ! Increment @@ -493,9 +510,7 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh watchProbeC2.expectMessage("Terminated") val probeC3 = TestProbe[(State, Event)]() - val c3 = spawn(Behaviors.setup[Command](ctx => - counterWithProbe(ctx, pid, probeC3.ref).snapshotEvery(2)) - ) + val c3 = spawn(Behaviors.setup[Command](ctx => counterWithProbe(ctx, pid, probeC3.ref).snapshotEvery(2))) // this time it should have been snapshotted so no events to replay probeC3.expectNoMessage() c3 ! 
GetValue(replyProbe.ref) @@ -505,9 +520,8 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh "snapshot every N sequence nrs when persisting multiple events" in { val pid = nextPid val snapshotProbe = TestProbe[Try[Done]]() - val c = spawn(Behaviors.setup[Command](ctx => - counterWithSnapshotProbe(ctx, pid, snapshotProbe.ref).snapshotEvery(2)) - ) + val c = + spawn(Behaviors.setup[Command](ctx => counterWithSnapshotProbe(ctx, pid, snapshotProbe.ref).snapshotEvery(2))) val watchProbe = watcher(c) val replyProbe = TestProbe[State]() @@ -519,9 +533,7 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh watchProbe.expectMessage("Terminated") val probeC2 = TestProbe[(State, Event)]() - val c2 = spawn(Behaviors.setup[Command](ctx => - counterWithProbe(ctx, pid, probeC2.ref).snapshotEvery(2)) - ) + val c2 = spawn(Behaviors.setup[Command](ctx => counterWithProbe(ctx, pid, probeC2.ref).snapshotEvery(2))) probeC2.expectNoMessage() c2 ! GetValue(replyProbe.ref) replyProbe.expectMessage(State(3, Vector(0, 1, 2))) @@ -541,9 +553,7 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh "tag events" in { val pid = nextPid - val c = spawn(Behaviors.setup[Command](ctx => - counter(ctx, pid).withTagger(_ => Set("tag1", "tag2"))) - ) + val c = spawn(Behaviors.setup[Command](ctx => counter(ctx, pid).withTagger(_ => Set("tag1", "tag2")))) val replyProbe = TestProbe[State]() c ! 
Increment @@ -561,7 +571,7 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh //#install-event-adapter persistentBehavior.eventAdapter(new WrapperEventAdapter[Event]) - //#install-event-adapter + //#install-event-adapter }) val replyProbe = TestProbe[State]() @@ -572,9 +582,7 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh val events = queries.currentEventsByPersistenceId(pid.id).runWith(Sink.seq).futureValue events shouldEqual List(EventEnvelope(Sequence(1), pid.id, 1, Wrapper(Incremented(1)))) - val c2 = spawn(Behaviors.setup[Command](ctx => - counter(ctx, pid).eventAdapter(new WrapperEventAdapter[Event]) - )) + val c2 = spawn(Behaviors.setup[Command](ctx => counter(ctx, pid).eventAdapter(new WrapperEventAdapter[Event]))) c2 ! GetValue(replyProbe.ref) replyProbe.expectMessage(State(1, Vector(0))) @@ -582,9 +590,7 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh "adapter multiple events with persist all" in { val pid = nextPid - val c = spawn(Behaviors.setup[Command](ctx => - counter(ctx, pid).eventAdapter(new WrapperEventAdapter[Event])) - ) + val c = spawn(Behaviors.setup[Command](ctx => counter(ctx, pid).eventAdapter(new WrapperEventAdapter[Event]))) val replyProbe = TestProbe[State]() c ! 
IncrementWithPersistAll(2) @@ -592,14 +598,10 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh replyProbe.expectMessage(State(2, Vector(0, 1))) val events = queries.currentEventsByPersistenceId(pid.id).runWith(Sink.seq).futureValue - events shouldEqual List( - EventEnvelope(Sequence(1), pid.id, 1, Wrapper(Incremented(1))), - EventEnvelope(Sequence(2), pid.id, 2, Wrapper(Incremented(1))) - ) + events shouldEqual List(EventEnvelope(Sequence(1), pid.id, 1, Wrapper(Incremented(1))), + EventEnvelope(Sequence(2), pid.id, 2, Wrapper(Incremented(1)))) - val c2 = spawn(Behaviors.setup[Command](ctx => - counter(ctx, pid).eventAdapter(new WrapperEventAdapter[Event]) - )) + val c2 = spawn(Behaviors.setup[Command](ctx => counter(ctx, pid).eventAdapter(new WrapperEventAdapter[Event]))) c2 ! GetValue(replyProbe.ref) replyProbe.expectMessage(State(2, Vector(0, 1))) } @@ -607,10 +609,7 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh "adapt and tag events" in { val pid = nextPid val c = spawn(Behaviors.setup[Command](ctx => - counter(ctx, pid) - .withTagger(_ => Set("tag99")) - .eventAdapter(new WrapperEventAdapter[Event])) - ) + counter(ctx, pid).withTagger(_ => Set("tag99")).eventAdapter(new WrapperEventAdapter[Event]))) val replyProbe = TestProbe[State]() c ! Increment @@ -620,9 +619,7 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh val events = queries.currentEventsByPersistenceId(pid.id).runWith(Sink.seq).futureValue events shouldEqual List(EventEnvelope(Sequence(1), pid.id, 1, Wrapper(Incremented(1)))) - val c2 = spawn(Behaviors.setup[Command](ctx => - counter(ctx, pid).eventAdapter(new WrapperEventAdapter[Event])) - ) + val c2 = spawn(Behaviors.setup[Command](ctx => counter(ctx, pid).eventAdapter(new WrapperEventAdapter[Event]))) c2 ! 
GetValue(replyProbe.ref) replyProbe.expectMessage(State(1, Vector(0))) @@ -631,11 +628,10 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh } "handle scheduled message arriving before recovery completed " in { - val c = spawn(Behaviors.withTimers[Command] { - timers => - timers.startSingleTimer("tick", Increment, 1.millis) - Thread.sleep(30) // now it's probably already in the mailbox, and will be stashed - counter(nextPid) + val c = spawn(Behaviors.withTimers[Command] { timers => + timers.startSingleTimer("tick", Increment, 1.millis) + Thread.sleep(30) // now it's probably already in the mailbox, and will be stashed + counter(nextPid) }) val probe = TestProbe[State] @@ -647,11 +643,10 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh } "handle scheduled message arriving after recovery completed " in { - val c = spawn(Behaviors.withTimers[Command] { - timers => - // probably arrives after recovery completed - timers.startSingleTimer("tick", Increment, 200.millis) - counter(nextPid) + val c = spawn(Behaviors.withTimers[Command] { timers => + // probably arrives after recovery completed + timers.startSingleTimer("tick", Increment, 200.millis) + counter(nextPid) }) val probe = TestProbe[State] @@ -664,11 +659,11 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh "fail after recovery timeout" in { EventFilter.error(start = "Persistence failure when replaying snapshot", occurrences = 1).intercept { - val c = spawn(Behaviors.setup[Command](ctx => - counter(ctx, nextPid) - .withSnapshotPluginId("slow-snapshot-store") - .withJournalPluginId("short-recovery-timeout")) - ) + val c = spawn( + Behaviors.setup[Command](ctx => + counter(ctx, nextPid) + .withSnapshotPluginId("slow-snapshot-store") + .withJournalPluginId("short-recovery-timeout"))) val probe = TestProbe[State] @@ -701,7 +696,10 @@ class EventSourcedBehaviorSpec extends ScalaTestWithActorTestKit(EventSourcedBeh val 
probe = TestProbe[String]() val w = Behaviors.setup[Any] { (ctx) => ctx.watch(toWatch) - Behaviors.receive[Any] { (_, _) => Behaviors.same } + Behaviors + .receive[Any] { (_, _) => + Behaviors.same + } .receiveSignal { case (_, _: Terminated) => probe.ref ! "Terminated" diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala index 56cbbceb94..a2f883ac30 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedBehaviorStashSpec.scala @@ -27,8 +27,7 @@ import com.typesafe.config.ConfigFactory import org.scalatest.WordSpecLike object EventSourcedBehaviorStashSpec { - def conf: Config = ConfigFactory.parseString( - s""" + def conf: Config = ConfigFactory.parseString(s""" #akka.loglevel = DEBUG akka.loggers = [akka.testkit.TestEventListener] #akka.persistence.typed.log-stashing = on @@ -55,7 +54,8 @@ object EventSourcedBehaviorStashSpec { final case class GetValue(replyTo: ActorRef[State]) extends Command[State] final case class Unhandled(replyTo: ActorRef[NotUsed]) extends Command[NotUsed] final case class Throw(id: String, t: Throwable, override val replyTo: ActorRef[Ack]) extends Command[Ack] - final case class IncrementThenThrow(id: String, t: Throwable, override val replyTo: ActorRef[Ack]) extends Command[Ack] + final case class IncrementThenThrow(id: String, t: Throwable, override val replyTo: ActorRef[Ack]) + extends Command[Ack] final case class Slow(id: String, latch: CountDownLatch, override val replyTo: ActorRef[Ack]) extends Command[Ack] final case class Ack(id: String) @@ -69,49 +69,49 @@ object EventSourcedBehaviorStashSpec { final case class State(value: Int, active: Boolean) def counter(persistenceId: PersistenceId): Behavior[Command[_]] = 
- Behaviors.supervise[Command[_]] { - Behaviors.setup(_ => eventSourcedCounter(persistenceId)) - }.onFailure(SupervisorStrategy.restart.withLoggingEnabled(enabled = false)) + Behaviors + .supervise[Command[_]] { + Behaviors.setup(_ => eventSourcedCounter(persistenceId)) + } + .onFailure(SupervisorStrategy.restart.withLoggingEnabled(enabled = false)) - def eventSourcedCounter( - persistenceId: PersistenceId): EventSourcedBehavior[Command[_], Event, State] = { - EventSourcedBehavior.withEnforcedReplies[Command[_], Event, State]( - persistenceId, - emptyState = State(0, active = true), - commandHandler = (state, command) => { - if (state.active) active(state, command) - else inactive(state, command) - }, - eventHandler = (state, evt) => evt match { - case Incremented(delta) => - if (!state.active) throw new IllegalStateException - State(state.value + delta, active = true) - case ValueUpdated(value) => - State(value, active = state.active) - case Activated => - if (state.active) throw new IllegalStateException - state.copy(active = true) - case Deactivated => - if (!state.active) throw new IllegalStateException - state.copy(active = false) - }) - .onPersistFailure(SupervisorStrategy.restartWithBackoff(1.second, maxBackoff = 2.seconds, 0.0) + def eventSourcedCounter(persistenceId: PersistenceId): EventSourcedBehavior[Command[_], Event, State] = { + EventSourcedBehavior + .withEnforcedReplies[Command[_], Event, State](persistenceId, + emptyState = State(0, active = true), + commandHandler = (state, command) => { + if (state.active) active(state, command) + else inactive(state, command) + }, + eventHandler = (state, evt) => + evt match { + case Incremented(delta) => + if (!state.active) throw new IllegalStateException + State(state.value + delta, active = true) + case ValueUpdated(value) => + State(value, active = state.active) + case Activated => + if (state.active) throw new IllegalStateException + state.copy(active = true) + case Deactivated => + if (!state.active) 
throw new IllegalStateException + state.copy(active = false) + }) + .onPersistFailure(SupervisorStrategy + .restartWithBackoff(1.second, maxBackoff = 2.seconds, 0.0) .withLoggingEnabled(enabled = false)) } private def active(state: State, command: Command[_]): ReplyEffect[Event, State] = { command match { case cmd: Increment => - Effect.persist(Incremented(1)) - .thenReply(cmd)(_ => Ack(cmd.id)) + Effect.persist(Incremented(1)).thenReply(cmd)(_ => Ack(cmd.id)) case cmd @ UpdateValue(_, value, _) => - Effect.persist(ValueUpdated(value)) - .thenReply(cmd)(_ => Ack(cmd.id)) + Effect.persist(ValueUpdated(value)).thenReply(cmd)(_ => Ack(cmd.id)) case query: GetValue => Effect.reply(query)(state) case cmd: Deactivate => - Effect.persist(Deactivated) - .thenReply(cmd)(_ => Ack(cmd.id)) + Effect.persist(Deactivated).thenReply(cmd)(_ => Ack(cmd.id)) case cmd: Activate => // already active Effect.reply(cmd)(Ack(cmd.id)) @@ -121,9 +121,7 @@ object EventSourcedBehaviorStashSpec { replyTo ! Ack(id) throw t case cmd: IncrementThenThrow => - Effect.persist(Incremented(1)) - .thenRun((_: State) => throw cmd.t) - .thenNoReply() + Effect.persist(Incremented(1)).thenRun((_: State) => throw cmd.t).thenNoReply() case cmd: Slow => cmd.latch.await(30, TimeUnit.SECONDS) Effect.reply(cmd)(Ack(cmd.id)) @@ -135,17 +133,14 @@ object EventSourcedBehaviorStashSpec { case _: Increment => Effect.stash() case cmd @ UpdateValue(_, value, _) => - Effect.persist(ValueUpdated(value)) - .thenReply(cmd)(_ => Ack(cmd.id)) + Effect.persist(ValueUpdated(value)).thenReply(cmd)(_ => Ack(cmd.id)) case query: GetValue => Effect.reply(query)(state) case cmd: Deactivate => // already inactive Effect.reply(cmd)(Ack(cmd.id)) case cmd: Activate => - Effect.persist(Activated) - .thenUnstashAll() - .thenReply(cmd)(_ => Ack(cmd.id)) + Effect.persist(Activated).thenUnstashAll().thenReply(cmd)(_ => Ack(cmd.id)) case _: Unhandled => Effect.unhandled.thenNoReply() case Throw(id, t, replyTo) => @@ -159,7 +154,9 @@ object 
EventSourcedBehaviorStashSpec { } } -class EventSourcedBehaviorStashSpec extends ScalaTestWithActorTestKit(EventSourcedBehaviorStashSpec.conf) with WordSpecLike { +class EventSourcedBehaviorStashSpec + extends ScalaTestWithActorTestKit(EventSourcedBehaviorStashSpec.conf) + with WordSpecLike { import EventSourcedBehaviorStashSpec._ @@ -491,44 +488,41 @@ class EventSourcedBehaviorStashSpec extends ScalaTestWithActorTestKit(EventSourc "discard when stash has reached limit with default dropped setting" in { val probe = TestProbe[AnyRef]() system.toUntyped.eventStream.subscribe(probe.ref.toUntyped, classOf[Dropped]) - val behavior = EventSourcedBehavior[String, String, Boolean]( - PersistenceId("stash-is-full-drop"), - emptyState = false, - commandHandler = { (state, command) => - state match { - case false => - command match { - case "ping" => - probe.ref ! "pong" - Effect.none - case "start-stashing" => - Effect.persist("start-stashing") - case msg => - probe.ref ! msg - Effect.none - } + val behavior = EventSourcedBehavior[String, String, Boolean](PersistenceId("stash-is-full-drop"), + emptyState = false, + commandHandler = { (state, command) => + state match { + case false => + command match { + case "ping" => + probe.ref ! "pong" + Effect.none + case "start-stashing" => + Effect.persist("start-stashing") + case msg => + probe.ref ! msg + Effect.none + } - case true => - command match { - case "unstash" => - Effect.persist("unstash") - .thenUnstashAll() - // FIXME this is run before unstash, so not sequentially as the docs say - // https://github.com/akka/akka/issues/26489 - .thenRun(_ => - probe.ref ! 
"done-unstashing" - ) - case _ => - Effect.stash() - } - } - }, - { - case (_, "start-stashing") => true - case (_, "unstash") => false - case (_, _) => throw new IllegalArgumentException() - } - ) + case true => + command match { + case "unstash" => + Effect + .persist("unstash") + .thenUnstashAll() + // FIXME this is run before unstash, so not sequentially as the docs say + // https://github.com/akka/akka/issues/26489 + .thenRun(_ => + probe.ref ! "done-unstashing") + case _ => + Effect.stash() + } + } + }, { + case (_, "start-stashing") => true + case (_, "unstash") => false + case (_, _) => throw new IllegalArgumentException() + }) val c = spawn(behavior) @@ -559,24 +553,20 @@ class EventSourcedBehaviorStashSpec extends ScalaTestWithActorTestKit(EventSourc "fail when stash has reached limit if configured to fail" in { // persistence settings is system wide, so we need to have a custom testkit/actorsystem here - val failStashTestKit = ActorTestKit( - "EventSourcedBehaviorStashSpec-stash-overflow-fail", - ConfigFactory.parseString("akka.persistence.typed.stash-overflow-strategy=fail").withFallback(EventSourcedBehaviorStashSpec.conf) - ) + val failStashTestKit = ActorTestKit("EventSourcedBehaviorStashSpec-stash-overflow-fail", + ConfigFactory + .parseString("akka.persistence.typed.stash-overflow-strategy=fail") + .withFallback(EventSourcedBehaviorStashSpec.conf)) try { val probe = failStashTestKit.createTestProbe[AnyRef]() - val behavior = EventSourcedBehavior[String, String, String]( - PersistenceId("stash-is-full-fail"), - "", - commandHandler = { + val behavior = + EventSourcedBehavior[String, String, String](PersistenceId("stash-is-full-fail"), "", commandHandler = { case (_, "ping") => probe.ref ! 
"pong" Effect.none case (_, _) => Effect.stash() - }, - (state, _) => state - ) + }, (state, _) => state) val c = failStashTestKit.spawn(behavior) diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala index 2595a76ae1..07056a7eb8 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/EventSourcedSequenceNumberSpec.scala @@ -13,34 +13,25 @@ import org.scalatest.WordSpecLike object EventSourcedSequenceNumberSpec { - private val conf = ConfigFactory.parseString( - s""" + private val conf = ConfigFactory.parseString(s""" akka.persistence.journal.plugin = "akka.persistence.journal.inmem" """) } -class EventSourcedSequenceNumberSpec extends ScalaTestWithActorTestKit(EventSourcedSequenceNumberSpec.conf) with WordSpecLike { +class EventSourcedSequenceNumberSpec + extends ScalaTestWithActorTestKit(EventSourcedSequenceNumberSpec.conf) + with WordSpecLike { private def behavior(pid: PersistenceId, probe: ActorRef[String]): Behavior[String] = Behaviors.setup(ctx => - EventSourcedBehavior[String, String, String]( - pid, - "", - { (_, command) => - probe ! (EventSourcedBehavior.lastSequenceNumber(ctx) + " onCommand") - Effect.persist(command).thenRun(_ => - probe ! (EventSourcedBehavior.lastSequenceNumber(ctx) + " thenRun") - ) - }, - { (state, evt) => - probe ! (EventSourcedBehavior.lastSequenceNumber(ctx) + " eventHandler") - state + evt - } - ).onRecoveryCompleted(_ => - probe ! (EventSourcedBehavior.lastSequenceNumber(ctx) + " onRecoveryComplete") - ) - ) + EventSourcedBehavior[String, String, String](pid, "", { (_, command) => + probe ! (EventSourcedBehavior.lastSequenceNumber(ctx) + " onCommand") + Effect.persist(command).thenRun(_ => probe ! 
(EventSourcedBehavior.lastSequenceNumber(ctx) + " thenRun")) + }, { (state, evt) => + probe ! (EventSourcedBehavior.lastSequenceNumber(ctx) + " eventHandler") + state + evt + }).onRecoveryCompleted(_ => probe ! (EventSourcedBehavior.lastSequenceNumber(ctx) + " onRecoveryComplete"))) "The sequence number" must { diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala index 41fec801b5..2114c22ce8 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/NullEmptyStateSpec.scala @@ -14,8 +14,7 @@ import org.scalatest.WordSpecLike object NullEmptyStateSpec { - private val conf = ConfigFactory.parseString( - s""" + private val conf = ConfigFactory.parseString(s""" akka.persistence.journal.plugin = "akka.persistence.journal.inmem" """) } @@ -25,22 +24,20 @@ class NullEmptyStateSpec extends ScalaTestWithActorTestKit(NullEmptyStateSpec.co implicit val testSettings = TestKitSettings(system) def primitiveState(persistenceId: PersistenceId, probe: ActorRef[String]): Behavior[String] = - EventSourcedBehavior[String, String, String]( - persistenceId, - emptyState = null, - commandHandler = (_, command) => { - if (command == "stop") - Effect.stop() - else - Effect.persist(command) - }, - eventHandler = (state, event) => { - probe.tell("eventHandler:" + state + ":" + event) - if (state == null) event else state + event - } - ).onRecoveryCompleted { s => - probe.tell("onRecoveryCompleted:" + s) - } + EventSourcedBehavior[String, String, String](persistenceId, + emptyState = null, + commandHandler = (_, command) => { + if (command == "stop") + Effect.stop() + else + Effect.persist(command) + }, + eventHandler = (state, event) => { + probe.tell("eventHandler:" + state + ":" + event) + if (state == null) event 
else state + event + }).onRecoveryCompleted { s => + probe.tell("onRecoveryCompleted:" + s) + } "A typed persistent actor with primitive state" must { "persist events and update state" in { diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala index 886ca5d3f0..d2be023b06 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/OptionalSnapshotStoreSpec.scala @@ -24,19 +24,15 @@ object OptionalSnapshotStoreSpec { case class Event(id: Long = System.currentTimeMillis()) - def persistentBehavior( - probe: TestProbe[State], - name: String = UUID.randomUUID().toString) = - EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId(name), - emptyState = State(), - commandHandler = CommandHandler.command { - _ => Effect.persist(Event()).thenRun(probe.ref ! _) - }, - eventHandler = { - case (_, _) => State() - } - ).snapshotWhen { case _ => true } + def persistentBehavior(probe: TestProbe[State], name: String = UUID.randomUUID().toString) = + EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId(name), + emptyState = State(), + commandHandler = CommandHandler.command { _ => + Effect.persist(Event()).thenRun(probe.ref ! 
_) + }, + eventHandler = { + case (_, _) => State() + }).snapshotWhen { case _ => true } def persistentBehaviorWithSnapshotPlugin(probe: TestProbe[State]) = persistentBehavior(probe).withSnapshotPluginId("akka.persistence.snapshot-store.local") diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala index 8691d0020d..63bf098a58 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PerformanceSpec.scala @@ -68,36 +68,40 @@ object PerformanceSpec { } def behavior(name: String, probe: TestProbe[Reply])(other: (Command, Parameters) => Effect[String, String]) = { - Behaviors.supervise({ - val parameters = Parameters() - EventSourcedBehavior[Command, String, String]( - persistenceId = PersistenceId(name), - "", - commandHandler = CommandHandler.command { - case StopMeasure => Effect.none.thenRun(_ => probe.ref ! StopMeasure) - case FailAt(sequence) => Effect.none.thenRun(_ => parameters.failAt = sequence) - case command => other(command, parameters) - }, - eventHandler = { - case (state, _) => state - } - ).onRecoveryCompleted { _ => + Behaviors + .supervise({ + val parameters = Parameters() + EventSourcedBehavior[Command, String, String](persistenceId = PersistenceId(name), + "", + commandHandler = CommandHandler.command { + case StopMeasure => + Effect.none.thenRun(_ => probe.ref ! 
StopMeasure) + case FailAt(sequence) => + Effect.none.thenRun(_ => parameters.failAt = sequence) + case command => other(command, parameters) + }, + eventHandler = { + case (state, _) => state + }).onRecoveryCompleted { _ => if (parameters.every(1000)) print("r") } - }).onFailure(SupervisorStrategy.restart) + }) + .onFailure(SupervisorStrategy.restart) } def eventSourcedTestPersistenceBehavior(name: String, probe: TestProbe[Reply]) = behavior(name, probe) { case (CommandWithEvent(evt), parameters) => - Effect.persist(evt).thenRun(_ => { - parameters.persistCalls += 1 - if (parameters.every(1000)) print(".") - if (parameters.shouldFail) { - probe.ref ! ExpectedFail - throw TestException("boom") - } - }) + Effect + .persist(evt) + .thenRun(_ => { + parameters.persistCalls += 1 + if (parameters.every(1000)) print(".") + if (parameters.shouldFail) { + probe.ref ! ExpectedFail + throw TestException("boom") + } + }) case _ => Effect.none } } @@ -118,9 +122,11 @@ class PerformanceSpec extends ScalaTestWithActorTestKit(ConfigFactory.parseStrin val loadCycles = system.settings.config.getInt("akka.persistence.performance.cycles.load") - def stressPersistentActor(persistentActor: ActorRef[Command], probe: TestProbe[Reply], - failAt: Option[Long], description: String): Unit = { - failAt foreach { persistentActor ! FailAt(_) } + def stressPersistentActor(persistentActor: ActorRef[Command], + probe: TestProbe[Reply], + failAt: Option[Long], + description: String): Unit = { + failAt.foreach { persistentActor ! 
FailAt(_) } val m = new Measure(loadCycles) m.startMeasure() val parameters = Parameters(0, failAt = failAt.getOrElse(-1)) diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala index ff6d8b51a7..7d04002dd3 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PersistentActorCompileOnlyTest.scala @@ -29,22 +29,17 @@ object PersistentActorCompileOnlyTest { case class ExampleState(events: List[String] = Nil) - EventSourcedBehavior[MyCommand, MyEvent, ExampleState]( - persistenceId = PersistenceId("sample-id-1"), - - emptyState = ExampleState(Nil), - - commandHandler = CommandHandler.command { - case Cmd(data, sender) => - Effect.persist(Evt(data)) - .thenRun { _ => - sender ! Ack - } - }, - - eventHandler = { - case (state, Evt(data)) => state.copy(data :: state.events) - }) + EventSourcedBehavior[MyCommand, MyEvent, ExampleState](persistenceId = PersistenceId("sample-id-1"), + emptyState = ExampleState(Nil), + commandHandler = CommandHandler.command { + case Cmd(data, sender) => + Effect.persist(Evt(data)).thenRun { _ => + sender ! Ack + } + }, + eventHandler = { + case (state, Evt(data)) => state.copy(data :: state.events) + }) } object RecoveryComplete { @@ -71,38 +66,40 @@ object PersistentActorCompileOnlyTest { val response: Future[RecoveryComplete.Response] = sideEffectProcessor.ask(Request(correlationId, data, _)) - response.map(response => AcknowledgeSideEffect(response.correlationId)) - .foreach(sender ! _) + response.map(response => AcknowledgeSideEffect(response.correlationId)).foreach(sender ! 
_) } - val behavior: Behavior[Command] = Behaviors.setup(ctx => - EventSourcedBehavior[Command, Event, EventsInFlight]( - persistenceId = PersistenceId("recovery-complete-id"), - - emptyState = EventsInFlight(0, Map.empty), - - commandHandler = (state, cmd) => cmd match { - case DoSideEffect(data) => - Effect.persist(IntentRecorded(state.nextCorrelationId, data)).thenRun { _ => - performSideEffect(ctx.self, state.nextCorrelationId, data) - } - case AcknowledgeSideEffect(correlationId) => - Effect.persist(SideEffectAcknowledged(correlationId)) - }, - - eventHandler = (state, evt) => evt match { - case IntentRecorded(correlationId, data) => - EventsInFlight( - nextCorrelationId = correlationId + 1, - dataByCorrelationId = state.dataByCorrelationId + (correlationId -> data)) - case SideEffectAcknowledged(correlationId) => - state.copy(dataByCorrelationId = state.dataByCorrelationId - correlationId) - }).onRecoveryCompleted(state => + val behavior: Behavior[Command] = Behaviors.setup( + ctx => + EventSourcedBehavior[Command, Event, EventsInFlight](persistenceId = PersistenceId("recovery-complete-id"), + emptyState = EventsInFlight(0, Map.empty), + commandHandler = (state, cmd) => + cmd match { + case DoSideEffect(data) => + Effect + .persist( + IntentRecorded(state.nextCorrelationId, data)) + .thenRun { _ => + performSideEffect(ctx.self, + state.nextCorrelationId, + data) + } + case AcknowledgeSideEffect(correlationId) => + Effect.persist(SideEffectAcknowledged(correlationId)) + }, + eventHandler = (state, evt) => + evt match { + case IntentRecorded(correlationId, data) => + EventsInFlight( + nextCorrelationId = correlationId + 1, + dataByCorrelationId = state.dataByCorrelationId + (correlationId -> data)) + case SideEffectAcknowledged(correlationId) => + state.copy( + dataByCorrelationId = state.dataByCorrelationId - correlationId) + }).onRecoveryCompleted(state => state.dataByCorrelationId.foreach { case (correlationId, data) => performSideEffect(ctx.self, 
correlationId, data) - } - ) - ) + })) } @@ -123,18 +120,20 @@ object PersistentActorCompileOnlyTest { emptyState = Happy, commandHandler = { (state, command) => state match { - case Happy => command match { - case Greet(whom) => - println(s"Super happy to meet you $whom!") - Effect.none - case MoodSwing => Effect.persist(MoodChanged(Sad)) - } - case Sad => command match { - case Greet(whom) => - println(s"hi $whom") - Effect.none - case MoodSwing => Effect.persist(MoodChanged(Happy)) - } + case Happy => + command match { + case Greet(whom) => + println(s"Super happy to meet you $whom!") + Effect.none + case MoodSwing => Effect.persist(MoodChanged(Sad)) + } + case Sad => + command match { + case Greet(whom) => + println(s"hi $whom") + Effect.none + case MoodSwing => Effect.persist(MoodChanged(Happy)) + } } }, eventHandler = { @@ -160,17 +159,20 @@ object PersistentActorCompileOnlyTest { case class State(tasksInFlight: List[Task]) - EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId("asdf"), - emptyState = State(Nil), - commandHandler = CommandHandler.command { - case RegisterTask(task) => Effect.persist(TaskRegistered(task)) - case TaskDone(task) => Effect.persist(TaskRemoved(task)) - }, - eventHandler = (state, evt) => evt match { - case TaskRegistered(task) => State(task :: state.tasksInFlight) - case TaskRemoved(task) => State(state.tasksInFlight.filter(_ != task)) - }).snapshotWhen { (state, e, seqNr) => state.tasksInFlight.isEmpty } + EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("asdf"), + emptyState = State(Nil), + commandHandler = CommandHandler.command { + case RegisterTask(task) => Effect.persist(TaskRegistered(task)) + case TaskDone(task) => Effect.persist(TaskRemoved(task)) + }, + eventHandler = (state, evt) => + evt match { + case TaskRegistered(task) => State(task :: state.tasksInFlight) + case TaskRemoved(task) => + State(state.tasksInFlight.filter(_ != task)) + }).snapshotWhen { (state, e, seqNr) 
=> + state.tasksInFlight.isEmpty + } } object SpawnChild { @@ -187,25 +189,26 @@ object PersistentActorCompileOnlyTest { def worker(task: Task): Behavior[Nothing] = ??? - val behavior: Behavior[Command] = Behaviors.setup(ctx => - EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId("asdf"), - emptyState = State(Nil), - commandHandler = (_, cmd) => cmd match { - case RegisterTask(task) => - Effect.persist(TaskRegistered(task)) - .thenRun { _ => - val child = ctx.spawn[Nothing](worker(task), task) - // This assumes *any* termination of the child may trigger a `TaskDone`: - ctx.watchWith(child, TaskDone(task)) - } - case TaskDone(task) => Effect.persist(TaskRemoved(task)) - }, - eventHandler = (state, evt) => evt match { - case TaskRegistered(task) => State(task :: state.tasksInFlight) - case TaskRemoved(task) => State(state.tasksInFlight.filter(_ != task)) - }) - ) + val behavior: Behavior[Command] = Behaviors.setup( + ctx => + EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("asdf"), + emptyState = State(Nil), + commandHandler = (_, cmd) => + cmd match { + case RegisterTask(task) => + Effect.persist(TaskRegistered(task)).thenRun { _ => + val child = ctx.spawn[Nothing](worker(task), task) + // This assumes *any* termination of the child may trigger a `TaskDone`: + ctx.watchWith(child, TaskDone(task)) + } + case TaskDone(task) => Effect.persist(TaskRemoved(task)) + }, + eventHandler = (state, evt) => + evt match { + case TaskRegistered(task) => State(task :: state.tasksInFlight) + case TaskRemoved(task) => + State(state.tasksInFlight.filter(_ != task)) + })) } @@ -233,8 +236,8 @@ object PersistentActorCompileOnlyTest { case class ItemRemoved(id: Id) extends Event /* - * The metadata registry - */ + * The metadata registry + */ case class GetMetaData(id: Id, sender: ActorRef[MetaData]) case class MetaData(id: Id, name: String, price: Int) val metadataRegistry: ActorRef[GetMetaData] = ??? 
@@ -247,47 +250,43 @@ object PersistentActorCompileOnlyTest { val adapt = ctx.messageAdapter((m: MetaData) => GotMetaData(m)) def addItem(id: Id, self: ActorRef[Command]) = - Effect - .persist[Event, List[Id]](ItemAdded(id)) - .thenRun(_ => metadataRegistry ! GetMetaData(id, adapt)) + Effect.persist[Event, List[Id]](ItemAdded(id)).thenRun(_ => metadataRegistry ! GetMetaData(id, adapt)) - EventSourcedBehavior[Command, Event, List[Id]]( - persistenceId = PersistenceId("basket-1"), - emptyState = Nil, - commandHandler = { (state, cmd) => - if (isFullyHydrated(basket, state)) - cmd match { - case AddItem(id) => addItem(id, ctx.self) - case RemoveItem(id) => Effect.persist(ItemRemoved(id)) - case GotMetaData(data) => - basket = basket.updatedWith(data) - Effect.none - case GetTotalPrice(sender) => - sender ! basket.items.map(_.price).sum - Effect.none - } - else - cmd match { - case AddItem(id) => addItem(id, ctx.self) - case RemoveItem(id) => Effect.persist(ItemRemoved(id)) - case GotMetaData(data) => - basket = basket.updatedWith(data) - if (isFullyHydrated(basket, state)) { - stash.foreach(ctx.self ! _) - stash = Nil - } - Effect.none - case cmd: GetTotalPrice => - stash :+= cmd - Effect.none - } - }, - eventHandler = (state, evt) => evt match { - case ItemAdded(id) => id +: state - case ItemRemoved(id) => state.filter(_ != id) - }).onRecoveryCompleted(state => - state.foreach(id => metadataRegistry ! GetMetaData(id, adapt)) - ) + EventSourcedBehavior[Command, Event, List[Id]](persistenceId = PersistenceId("basket-1"), + emptyState = Nil, + commandHandler = { (state, cmd) => + if (isFullyHydrated(basket, state)) + cmd match { + case AddItem(id) => addItem(id, ctx.self) + case RemoveItem(id) => Effect.persist(ItemRemoved(id)) + case GotMetaData(data) => + basket = basket.updatedWith(data) + Effect.none + case GetTotalPrice(sender) => + sender ! 
basket.items.map(_.price).sum + Effect.none + } else + cmd match { + case AddItem(id) => addItem(id, ctx.self) + case RemoveItem(id) => Effect.persist(ItemRemoved(id)) + case GotMetaData(data) => + basket = basket.updatedWith(data) + if (isFullyHydrated(basket, state)) { + stash.foreach(ctx.self ! _) + stash = Nil + } + Effect.none + case cmd: GetTotalPrice => + stash :+= cmd + Effect.none + } + }, + eventHandler = (state, evt) => + evt match { + case ItemAdded(id) => id +: state + case ItemRemoved(id) => state.filter(_ != id) + }).onRecoveryCompleted(state => + state.foreach(id => metadataRegistry ! GetMetaData(id, adapt))) } } @@ -315,7 +314,8 @@ object PersistentActorCompileOnlyTest { // Example factoring out a chained effect rather than using `andThen` val commonChainedEffects = SideEffect[Mood](_ => println("Command processed")) // Then in a command handler: - Effect.persist(Remembered("Yep")) // persist event + Effect + .persist(Remembered("Yep")) // persist event .andThen(commonChainedEffects) // add on common chained effect //#commonChainedEffects @@ -328,13 +328,13 @@ object PersistentActorCompileOnlyTest { changeMoodIfNeeded(state, Happy) .thenRun { _ => sender ! Ack - }.andThen(commonChainedEffects) + } + .andThen(commonChainedEffects) case Remember(memory) => // A more elaborate example to show we still have full control over the effects // if needed (e.g. 
when some logic is factored out but you want to add more effects) val commonEffects: Effect[Event, Mood] = changeMoodIfNeeded(state, Happy) - Effect.persist(commonEffects.events :+ Remembered(memory)) - .andThen(commonChainedEffects) + Effect.persist(commonEffects.events :+ Remembered(memory)).andThen(commonChainedEffects) } } @@ -343,11 +343,10 @@ object PersistentActorCompileOnlyTest { case (state, Remembered(_)) => state } - EventSourcedBehavior[Command, Event, Mood]( - persistenceId = PersistenceId("myPersistenceId"), - emptyState = Sad, - commandHandler, - eventHandler) + EventSourcedBehavior[Command, Event, Mood](persistenceId = PersistenceId("myPersistenceId"), + emptyState = Sad, + commandHandler, + eventHandler) } @@ -362,20 +361,17 @@ object PersistentActorCompileOnlyTest { private val commandHandler: CommandHandler[Command, Event, State] = CommandHandler.command { case Enough => - Effect.persist(Done) - .thenRun((_: State) => println("yay")) - .thenStop + Effect.persist(Done).thenRun((_: State) => println("yay")).thenStop } private val eventHandler: (State, Event) => State = { case (state, Done) => state } - EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId("myPersistenceId"), - emptyState = new State, - commandHandler, - eventHandler) + EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("myPersistenceId"), + emptyState = new State, + commandHandler, + eventHandler) } object AndThenPatternMatch { @@ -383,20 +379,18 @@ object PersistentActorCompileOnlyTest { class First extends State class Second extends State - EventSourcedBehavior[String, String, State]( - persistenceId = PersistenceId("myPersistenceId"), - emptyState = new First, - commandHandler = CommandHandler.command { - cmd => - Effect.persist(cmd).thenRun { - case _: First => println("first") - case _: Second => println("second") - } - }, - eventHandler = { - case (_: First, _) => new Second - case (state, _) => state - }) + 
EventSourcedBehavior[String, String, State](persistenceId = PersistenceId("myPersistenceId"), + emptyState = new First, + commandHandler = CommandHandler.command { cmd => + Effect.persist(cmd).thenRun { + case _: First => println("first") + case _: Second => println("second") + } + }, + eventHandler = { + case (_: First, _) => new Second + case (state, _) => state + }) } diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala index ba916b64c8..63f97b2595 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/PrimitiveStateSpec.scala @@ -14,8 +14,7 @@ import org.scalatest.WordSpecLike object PrimitiveStateSpec { - private val conf = ConfigFactory.parseString( - s""" + private val conf = ConfigFactory.parseString(s""" akka.persistence.journal.plugin = "akka.persistence.journal.inmem" """) } @@ -25,22 +24,20 @@ class PrimitiveStateSpec extends ScalaTestWithActorTestKit(PrimitiveStateSpec.co implicit val testSettings = TestKitSettings(system) def primitiveState(persistenceId: PersistenceId, probe: ActorRef[String]): Behavior[Int] = - EventSourcedBehavior[Int, Int, Int]( - persistenceId, - emptyState = 0, - commandHandler = (_, command) => { - if (command < 0) - Effect.stop() - else - Effect.persist(command) - }, - eventHandler = (state, event) => { - probe.tell("eventHandler:" + state + ":" + event) - state + event - } - ).onRecoveryCompleted { n => - probe.tell("onRecoveryCompleted:" + n) - } + EventSourcedBehavior[Int, Int, Int](persistenceId, + emptyState = 0, + commandHandler = (_, command) => { + if (command < 0) + Effect.stop() + else + Effect.persist(command) + }, + eventHandler = (state, event) => { + probe.tell("eventHandler:" + state + ":" + event) + state + event + 
}).onRecoveryCompleted { n => + probe.tell("onRecoveryCompleted:" + n) + } "A typed persistent actor with primitive state" must { "persist events and update state" in { diff --git a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala index da8beb3c63..40d88d9040 100644 --- a/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala +++ b/akka-persistence-typed/src/test/scala/akka/persistence/typed/scaladsl/SnapshotMutableStateSpec.scala @@ -54,8 +54,7 @@ object SnapshotMutableStateSpec { def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria) = ??? } - def conf: Config = ConfigFactory.parseString( - s""" + def conf: Config = ConfigFactory.parseString(s""" akka.loglevel = INFO akka.persistence.journal.leveldb.dir = "target/typed-persistence-${UUID.randomUUID().toString}" akka.persistence.journal.plugin = "akka.persistence.journal.leveldb" @@ -73,29 +72,29 @@ object SnapshotMutableStateSpec { final class MutableState(var value: Int) - def counter( - persistenceId: PersistenceId, - probe: ActorRef[String]): EventSourcedBehavior[Command, Event, MutableState] = { - EventSourcedBehavior[Command, Event, MutableState]( - persistenceId, - emptyState = new MutableState(0), - commandHandler = (state, cmd) => cmd match { - case Increment => - Effect.persist(Incremented) + def counter(persistenceId: PersistenceId, + probe: ActorRef[String]): EventSourcedBehavior[Command, Event, MutableState] = { + EventSourcedBehavior[Command, Event, MutableState](persistenceId, + emptyState = new MutableState(0), + commandHandler = (state, cmd) => + cmd match { + case Increment => + Effect.persist(Incremented) - case GetValue(replyTo) => - replyTo ! state.value - Effect.none - }, - eventHandler = (state, evt) => evt match { - case Incremented => - state.value += 1 - probe ! 
s"incremented-${state.value}" - state - }).onSnapshot { - case (meta, Success(_)) => probe ! s"snapshot-success-${meta.sequenceNr}" - case (meta, Failure(_)) => probe ! s"snapshot-failure-${meta.sequenceNr}" - } + case GetValue(replyTo) => + replyTo ! state.value + Effect.none + }, + eventHandler = (state, evt) => + evt match { + case Incremented => + state.value += 1 + probe ! s"incremented-${state.value}" + state + }).onSnapshot { + case (meta, Success(_)) => probe ! s"snapshot-success-${meta.sequenceNr}" + case (meta, Failure(_)) => probe ! s"snapshot-failure-${meta.sequenceNr}" + } } } @@ -113,7 +112,9 @@ class SnapshotMutableStateSpec extends ScalaTestWithActorTestKit(SnapshotMutable val pid = nextPid() val probe = TestProbe[String]() def snapshotState3: Behavior[Command] = - counter(pid, probe.ref).snapshotWhen { (state, _, _) => state.value == 3 } + counter(pid, probe.ref).snapshotWhen { (state, _, _) => + state.value == 3 + } val c = spawn(snapshotState3) (1 to 5).foreach { n => diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithCommandHandlersInState.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithCommandHandlersInState.scala index 71898351a9..19d1a7c56a 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithCommandHandlersInState.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithCommandHandlersInState.scala @@ -25,15 +25,14 @@ object AccountExampleWithCommandHandlersInState { // Command sealed trait AccountCommand[Reply] extends ExpectingReply[Reply] final case class CreateAccount()(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] + extends AccountCommand[OperationResult] final case class Deposit(amount: BigDecimal)(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] + extends 
AccountCommand[OperationResult] final case class Withdraw(amount: BigDecimal)(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] - final case class GetBalance()(override val replyTo: ActorRef[CurrentBalance]) - extends AccountCommand[CurrentBalance] + extends AccountCommand[OperationResult] + final case class GetBalance()(override val replyTo: ActorRef[CurrentBalance]) extends AccountCommand[CurrentBalance] final case class CloseAccount()(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] + extends AccountCommand[OperationResult] // Reply sealed trait AccountCommandReply @@ -63,8 +62,7 @@ object AccountExampleWithCommandHandlersInState { override def applyCommand(cmd: AccountCommand[_]): ReplyEffect = cmd match { case c: CreateAccount => - Effect.persist(AccountCreated) - .thenReply(c)(_ => Confirmed) + Effect.persist(AccountCreated).thenReply(c)(_ => Confirmed) case _ => // CreateAccount before handling any other commands Effect.unhandled.thenNoReply() @@ -82,13 +80,11 @@ object AccountExampleWithCommandHandlersInState { override def applyCommand(cmd: AccountCommand[_]): ReplyEffect = cmd match { case c @ Deposit(amount) => - Effect.persist(Deposited(amount)) - .thenReply(c)(_ => Confirmed) + Effect.persist(Deposited(amount)).thenReply(c)(_ => Confirmed) case c @ Withdraw(amount) => if (canWithdraw(amount)) { - Effect.persist(Withdrawn(amount)) - .thenReply(c)(_ => Confirmed) + Effect.persist(Withdrawn(amount)).thenReply(c)(_ => Confirmed) } else { Effect.reply(c)(Rejected(s"Insufficient balance $balance to be able to withdraw $amount")) @@ -99,8 +95,7 @@ object AccountExampleWithCommandHandlersInState { case c: CloseAccount => if (balance == Zero) - Effect.persist(AccountClosed) - .thenReply(c)(_ => Confirmed) + Effect.persist(AccountClosed).thenReply(c)(_ => Confirmed) else Effect.reply(c)(Rejected("Can't close account with non-zero balance")) @@ -144,12 +139,10 @@ object 
AccountExampleWithCommandHandlersInState { PersistenceId(s"Account|$accountNumber"), EmptyAccount, (state, cmd) => state.applyCommand(cmd), - (state, event) => state.applyEvent(event) - ) + (state, event) => state.applyEvent(event)) } } //##account-entity } - diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithEventHandlersInState.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithEventHandlersInState.scala index 314c38b841..a8d0f73220 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithEventHandlersInState.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithEventHandlersInState.scala @@ -29,17 +29,16 @@ object AccountExampleWithEventHandlersInState { sealed trait AccountCommand[Reply] extends ExpectingReply[Reply] //#reply-command final case class CreateAccount()(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] + extends AccountCommand[OperationResult] final case class Deposit(amount: BigDecimal)(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] + extends AccountCommand[OperationResult] //#reply-command final case class Withdraw(amount: BigDecimal)(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] + extends AccountCommand[OperationResult] //#reply-command - final case class GetBalance()(override val replyTo: ActorRef[CurrentBalance]) - extends AccountCommand[CurrentBalance] + final case class GetBalance()(override val replyTo: ActorRef[CurrentBalance]) extends AccountCommand[CurrentBalance] final case class CloseAccount()(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] + extends AccountCommand[OperationResult] // Reply //#reply-command @@ -96,24 +95,23 @@ object AccountExampleWithEventHandlersInState { //#withEnforcedReplies def 
behavior(accountNumber: String): Behavior[AccountCommand[AccountCommandReply]] = { - EventSourcedBehavior.withEnforcedReplies( - PersistenceId(s"Account|$accountNumber"), - EmptyAccount, - commandHandler, - eventHandler - ) + EventSourcedBehavior.withEnforcedReplies(PersistenceId(s"Account|$accountNumber"), + EmptyAccount, + commandHandler, + eventHandler) } //#withEnforcedReplies - private val commandHandler: (Account, AccountCommand[_]) => ReplyEffect[AccountEvent, Account] = { - (state, cmd) => - state match { - case EmptyAccount => cmd match { + private val commandHandler: (Account, AccountCommand[_]) => ReplyEffect[AccountEvent, Account] = { (state, cmd) => + state match { + case EmptyAccount => + cmd match { case c: CreateAccount => createAccount(c) case _ => Effect.unhandled.thenNoReply() // CreateAccount before handling any other commands } - case acc @ OpenedAccount(_) => cmd match { + case acc @ OpenedAccount(_) => + cmd match { case c: Deposit => deposit(c) case c: Withdraw => withdraw(acc, c) case c: GetBalance => getBalance(acc, c) @@ -121,39 +119,36 @@ object AccountExampleWithEventHandlersInState { case c: CreateAccount => Effect.reply(c)(Rejected("Account is already created")) } - case ClosedAccount => - cmd match { - case c @ (_: Deposit | _: Withdraw) => - Effect.reply(c)(Rejected("Account is closed")) - case c: GetBalance => - Effect.reply(c)(CurrentBalance(Zero)) - case c: CloseAccount => - Effect.reply(c)(Rejected("Account is already closed")) - case c: CreateAccount => - Effect.reply(c)(Rejected("Account is already created")) - } - } + case ClosedAccount => + cmd match { + case c @ (_: Deposit | _: Withdraw) => + Effect.reply(c)(Rejected("Account is closed")) + case c: GetBalance => + Effect.reply(c)(CurrentBalance(Zero)) + case c: CloseAccount => + Effect.reply(c)(Rejected("Account is already closed")) + case c: CreateAccount => + Effect.reply(c)(Rejected("Account is already created")) + } + } } - private val eventHandler: (Account, 
AccountEvent) => Account = { - (state, event) => state.applyEvent(event) + private val eventHandler: (Account, AccountEvent) => Account = { (state, event) => + state.applyEvent(event) } private def createAccount(cmd: CreateAccount): ReplyEffect[AccountEvent, Account] = { - Effect.persist(AccountCreated) - .thenReply(cmd)(_ => Confirmed) + Effect.persist(AccountCreated).thenReply(cmd)(_ => Confirmed) } private def deposit(cmd: Deposit): ReplyEffect[AccountEvent, Account] = { - Effect.persist(Deposited(cmd.amount)) - .thenReply(cmd)(_ => Confirmed) + Effect.persist(Deposited(cmd.amount)).thenReply(cmd)(_ => Confirmed) } //#reply private def withdraw(acc: OpenedAccount, cmd: Withdraw): ReplyEffect[AccountEvent, Account] = { if (acc.canWithdraw(cmd.amount)) { - Effect.persist(Withdrawn(cmd.amount)) - .thenReply(cmd)(_ => Confirmed) + Effect.persist(Withdrawn(cmd.amount)).thenReply(cmd)(_ => Confirmed) } else { Effect.reply(cmd)(Rejected(s"Insufficient balance ${acc.balance} to be able to withdraw ${cmd.amount}")) @@ -167,8 +162,7 @@ object AccountExampleWithEventHandlersInState { private def closeAccount(acc: OpenedAccount, cmd: CloseAccount): ReplyEffect[AccountEvent, Account] = { if (acc.balance == Zero) - Effect.persist(AccountClosed) - .thenReply(cmd)(_ => Confirmed) + Effect.persist(AccountClosed).thenReply(cmd)(_ => Confirmed) else Effect.reply(cmd)(Rejected("Can't close account with non-zero balance")) } @@ -177,4 +171,3 @@ object AccountExampleWithEventHandlersInState { //#account-entity } - diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithOptionState.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithOptionState.scala index 6a4381ee89..c5d5f1e12b 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithOptionState.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/AccountExampleWithOptionState.scala @@ 
-25,15 +25,14 @@ object AccountExampleWithOptionState { // Command sealed trait AccountCommand[Reply] extends ExpectingReply[Reply] final case class CreateAccount()(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] + extends AccountCommand[OperationResult] final case class Deposit(amount: BigDecimal)(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] + extends AccountCommand[OperationResult] final case class Withdraw(amount: BigDecimal)(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] - final case class GetBalance()(override val replyTo: ActorRef[CurrentBalance]) - extends AccountCommand[CurrentBalance] + extends AccountCommand[OperationResult] + final case class GetBalance()(override val replyTo: ActorRef[CurrentBalance]) extends AccountCommand[CurrentBalance] final case class CloseAccount()(override val replyTo: ActorRef[OperationResult]) - extends AccountCommand[OperationResult] + extends AccountCommand[OperationResult] // Reply sealed trait AccountCommandReply @@ -65,13 +64,11 @@ object AccountExampleWithOptionState { override def applyCommand(cmd: AccountCommand[_]): ReplyEffect = cmd match { case c @ Deposit(amount) => - Effect.persist(Deposited(amount)) - .thenReply(c)(_ => Confirmed) + Effect.persist(Deposited(amount)).thenReply(c)(_ => Confirmed) case c @ Withdraw(amount) => if (canWithdraw(amount)) { - Effect.persist(Withdrawn(amount)) - .thenReply(c)(_ => Confirmed) + Effect.persist(Withdrawn(amount)).thenReply(c)(_ => Confirmed) } else { Effect.reply(c)(Rejected(s"Insufficient balance $balance to be able to withdraw $amount")) @@ -82,8 +79,7 @@ object AccountExampleWithOptionState { case c: CloseAccount => if (balance == Zero) - Effect.persist(AccountClosed) - .thenReply(c)(_ => Confirmed) + Effect.persist(AccountClosed).thenReply(c)(_ => Confirmed) else Effect.reply(c)(Rejected("Can't close account with non-zero balance")) @@ 
-126,22 +122,22 @@ object AccountExampleWithOptionState { EventSourcedBehavior.withEnforcedReplies[AccountCommand[AccountCommandReply], AccountEvent, Option[Account]]( PersistenceId(s"Account|$accountNumber"), None, - (state, cmd) => state match { - case None => onFirstCommand(cmd) - case Some(account) => account.applyCommand(cmd) - }, - (state, event) => state match { - case None => Some(onFirstEvent(event)) - case Some(account) => Some(account.applyEvent(event)) - } - ) + (state, cmd) => + state match { + case None => onFirstCommand(cmd) + case Some(account) => account.applyCommand(cmd) + }, + (state, event) => + state match { + case None => Some(onFirstEvent(event)) + case Some(account) => Some(account.applyEvent(event)) + }) } def onFirstCommand(cmd: AccountCommand[_]): ReplyEffect = { cmd match { case c: CreateAccount => - Effect.persist(AccountCreated) - .thenReply(c)(_ => Confirmed) + Effect.persist(AccountCreated).thenReply(c)(_ => Confirmed) case _ => // CreateAccount before handling any other commands Effect.unhandled.thenNoReply() @@ -159,4 +155,3 @@ object AccountExampleWithOptionState { //#account-entity } - diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala index 5800f529ab..70b5f2274e 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BasicPersistentBehaviorCompileOnly.scala @@ -32,32 +32,29 @@ object BasicPersistentBehaviorCompileOnly { //#command-handler import akka.persistence.typed.scaladsl.Effect - val commandHandler: (State, Command) => Effect[Event, State] = { - (state, command) => - command match { - case Add(data) => Effect.persist(Added(data)) - case Clear => Effect.persist(Cleared) - } + val commandHandler: (State, Command) => 
Effect[Event, State] = { (state, command) => + command match { + case Add(data) => Effect.persist(Added(data)) + case Clear => Effect.persist(Cleared) + } } //#command-handler //#event-handler - val eventHandler: (State, Event) => State = { - (state, event) => - event match { - case Added(data) => state.copy((data :: state.history).take(5)) - case Cleared => State(Nil) - } + val eventHandler: (State, Event) => State = { (state, event) => + event match { + case Added(data) => state.copy((data :: state.history).take(5)) + case Cleared => State(Nil) + } } //#event-handler //#behavior def behavior(id: String): EventSourcedBehavior[Command, Event, State] = - EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId(id), - emptyState = State(Nil), - commandHandler = commandHandler, - eventHandler = eventHandler) + EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId(id), + emptyState = State(Nil), + commandHandler = commandHandler, + eventHandler = eventHandler) //#behavior } @@ -68,80 +65,68 @@ object BasicPersistentBehaviorCompileOnly { final case class State() val behavior: Behavior[Command] = - EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId("abc"), - emptyState = State(), - commandHandler = - (state, cmd) => - throw new RuntimeException("TODO: process the command & return an Effect"), - eventHandler = - (state, evt) => - throw new RuntimeException("TODO: process the event return the next state") - ) + EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"), + emptyState = State(), + commandHandler = (state, cmd) => + throw new RuntimeException( + "TODO: process the command & return an Effect"), + eventHandler = (state, evt) => + throw new RuntimeException( + "TODO: process the event return the next state")) //#structure //#recovery val recoveryBehavior: Behavior[Command] = - EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId("abc"), - emptyState = 
State(), - commandHandler = - (state, cmd) => - throw new RuntimeException("TODO: process the command & return an Effect"), - eventHandler = - (state, evt) => - throw new RuntimeException("TODO: process the event return the next state") - ).onRecoveryCompleted { state => + EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"), + emptyState = State(), + commandHandler = (state, cmd) => + throw new RuntimeException( + "TODO: process the command & return an Effect"), + eventHandler = (state, evt) => + throw new RuntimeException( + "TODO: process the event return the next state")) + .onRecoveryCompleted { state => throw new RuntimeException("TODO: add some end-of-recovery side-effect here") } //#recovery //#tagging val taggingBehavior: Behavior[Command] = - EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId("abc"), - emptyState = State(), - commandHandler = - (state, cmd) => - throw new RuntimeException("TODO: process the command & return an Effect"), - eventHandler = - (state, evt) => - throw new RuntimeException("TODO: process the event return the next state") - ).withTagger(_ => Set("tag1", "tag2")) + EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"), + emptyState = State(), + commandHandler = (state, cmd) => + throw new RuntimeException( + "TODO: process the command & return an Effect"), + eventHandler = (state, evt) => + throw new RuntimeException( + "TODO: process the event return the next state")).withTagger(_ => + Set("tag1", "tag2")) //#tagging //#wrapPersistentBehavior - val samplePersistentBehavior = EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId("abc"), - emptyState = State(), - commandHandler = - (state, cmd) => - throw new RuntimeException("TODO: process the command & return an Effect"), - eventHandler = - (state, evt) => - throw new RuntimeException("TODO: process the event return the next state") - ).onRecoveryCompleted { state => + val 
samplePersistentBehavior = EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"), + emptyState = State(), + commandHandler = (state, cmd) => + throw new RuntimeException( + "TODO: process the command & return an Effect"), + eventHandler = (state, evt) => + throw new RuntimeException( + "TODO: process the event return the next state")) + .onRecoveryCompleted { state => throw new RuntimeException("TODO: add some end-of-recovery side-effect here") } - val debugAlwaysSnapshot: Behavior[Command] = Behaviors.setup { - context => - samplePersistentBehavior.snapshotWhen((state, _, _) => { - context.log.info( - "Snapshot actor {} => state: {}", - context.self.path.name, state) - true - }) + val debugAlwaysSnapshot: Behavior[Command] = Behaviors.setup { context => + samplePersistentBehavior.snapshotWhen((state, _, _) => { + context.log.info("Snapshot actor {} => state: {}", context.self.path.name, state) + true + }) } //#wrapPersistentBehavior //#supervision val supervisedBehavior = samplePersistentBehavior.onPersistFailure( - SupervisorStrategy.restartWithBackoff( - minBackoff = 10.seconds, - maxBackoff = 60.seconds, - randomFactor = 0.1 - )) + SupervisorStrategy.restartWithBackoff(minBackoff = 10.seconds, maxBackoff = 60.seconds, randomFactor = 0.1)) //#supervision // #actor-context @@ -150,47 +135,43 @@ object BasicPersistentBehaviorCompileOnly { val behaviorWithContext: Behavior[String] = Behaviors.setup { context => - EventSourcedBehavior[String, String, State]( - persistenceId = PersistenceId("myPersistenceId"), - emptyState = new State, - commandHandler = CommandHandler.command { - cmd => - context.log.info("Got command {}", cmd) - Effect.persist(cmd).thenRun { state => - context.log.info("event persisted, new state {}", state) - } - }, - eventHandler = { - case (state, _) => state - }) + EventSourcedBehavior[String, String, State](persistenceId = PersistenceId("myPersistenceId"), + emptyState = new State, + commandHandler = 
CommandHandler.command { cmd => + context.log.info("Got command {}", cmd) + Effect.persist(cmd).thenRun { state => + context.log.info("event persisted, new state {}", state) + } + }, + eventHandler = { + case (state, _) => state + }) } // #actor-context //#snapshottingEveryN - val snapshottingEveryN = EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId("abc"), - emptyState = State(), - commandHandler = - (state, cmd) => - throw new RuntimeException("TODO: process the command & return an Effect"), - eventHandler = - (state, evt) => - throw new RuntimeException("TODO: process the event return the next state") - ).snapshotEvery(100) + val snapshottingEveryN = EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"), + emptyState = State(), + commandHandler = (state, cmd) => + throw new RuntimeException( + "TODO: process the command & return an Effect"), + eventHandler = (state, evt) => + throw new RuntimeException( + "TODO: process the event return the next state")) + .snapshotEvery(100) //#snapshottingEveryN final case class BookingCompleted(orderNr: String) extends Event //#snapshottingPredicate - val snapshottingPredicate = EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId("abc"), - emptyState = State(), - commandHandler = - (state, cmd) => - throw new RuntimeException("TODO: process the command & return an Effect"), - eventHandler = - (state, evt) => - throw new RuntimeException("TODO: process the event return the next state") - ).snapshotWhen { + val snapshottingPredicate = EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"), + emptyState = State(), + commandHandler = (state, cmd) => + throw new RuntimeException( + "TODO: process the command & return an Effect"), + eventHandler = (state, evt) => + throw new RuntimeException( + "TODO: process the event return the next state")) + .snapshotWhen { case (state, BookingCompleted(_), sequenceNumber) => true case 
(state, event, sequenceNumber) => false } @@ -199,16 +180,15 @@ object BasicPersistentBehaviorCompileOnly { //#snapshotSelection import akka.persistence.SnapshotSelectionCriteria - val snapshotSelection = EventSourcedBehavior[Command, Event, State]( - persistenceId = PersistenceId("abc"), - emptyState = State(), - commandHandler = - (state, cmd) => - throw new RuntimeException("TODO: process the command & return an Effect"), - eventHandler = - (state, evt) => - throw new RuntimeException("TODO: process the event return the next state") - ).withSnapshotSelectionCriteria(SnapshotSelectionCriteria.None) + val snapshotSelection = EventSourcedBehavior[Command, Event, State](persistenceId = PersistenceId("abc"), + emptyState = State(), + commandHandler = (state, cmd) => + throw new RuntimeException( + "TODO: process the command & return an Effect"), + eventHandler = (state, evt) => + throw new RuntimeException( + "TODO: process the event return the next state")) + .withSnapshotSelectionCriteria(SnapshotSelectionCriteria.None) //#snapshotSelection } diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostExample.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostExample.scala index 7c6c9137a2..8d2bb43e45 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostExample.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/BlogPostExample.scala @@ -15,13 +15,9 @@ object BlogPostExample { //#event sealed trait BlogEvent - final case class PostAdded( - postId: String, - content: PostContent) extends BlogEvent + final case class PostAdded(postId: String, content: PostContent) extends BlogEvent - final case class BodyChanged( - postId: String, - newBody: String) extends BlogEvent + final case class BodyChanged(postId: String, newBody: String) extends BlogEvent final case class Published(postId: String) extends BlogEvent //#event @@ -56,33 +52,35 @@ object 
BlogPostExample { //#behavior def behavior(entityId: String): Behavior[BlogCommand] = - EventSourcedBehavior[BlogCommand, BlogEvent, BlogState]( - persistenceId = PersistenceId(s"Blog-$entityId"), - emptyState = BlankState, - commandHandler, - eventHandler) + EventSourcedBehavior[BlogCommand, BlogEvent, BlogState](persistenceId = PersistenceId(s"Blog-$entityId"), + emptyState = BlankState, + commandHandler, + eventHandler) //#behavior //#command-handler private val commandHandler: (BlogState, BlogCommand) => Effect[BlogEvent, BlogState] = { (state, command) => state match { - case BlankState => command match { - case cmd: AddPost => addPost(cmd) - case _ => Effect.unhandled - } + case BlankState => + command match { + case cmd: AddPost => addPost(cmd) + case _ => Effect.unhandled + } - case draftState: DraftState => command match { - case cmd: ChangeBody => changeBody(draftState, cmd) - case Publish(replyTo) => publish(draftState, replyTo) - case GetPost(replyTo) => getPost(draftState, replyTo) - case _: AddPost => Effect.unhandled - } + case draftState: DraftState => + command match { + case cmd: ChangeBody => changeBody(draftState, cmd) + case Publish(replyTo) => publish(draftState, replyTo) + case GetPost(replyTo) => getPost(draftState, replyTo) + case _: AddPost => Effect.unhandled + } - case publishedState: PublishedState => command match { - case GetPost(replyTo) => getPost(publishedState, replyTo) - case _ => Effect.unhandled - } + case publishedState: PublishedState => + command match { + case GetPost(replyTo) => getPost(publishedState, replyTo) + case _ => Effect.unhandled + } } } @@ -125,22 +123,24 @@ object BlogPostExample { private val eventHandler: (BlogState, BlogEvent) => BlogState = { (state, event) => state match { - case BlankState => event match { - case PostAdded(_, content) => - DraftState(content) - case _ => throw new IllegalStateException(s"unexpected event [$event] in state [$state]") - } + case BlankState => + event match { + case 
PostAdded(_, content) => + DraftState(content) + case _ => throw new IllegalStateException(s"unexpected event [$event] in state [$state]") + } - case draftState: DraftState => event match { + case draftState: DraftState => + event match { - case BodyChanged(_, newBody) => - draftState.withBody(newBody) + case BodyChanged(_, newBody) => + draftState.withBody(newBody) - case Published(_) => - PublishedState(draftState.content) + case Published(_) => + PublishedState(draftState.content) - case _ => throw new IllegalStateException(s"unexpected event [$event] in state [$state]") - } + case _ => throw new IllegalStateException(s"unexpected event [$event] in state [$state]") + } case _: PublishedState => // no more changes after published @@ -150,4 +150,3 @@ object BlogPostExample { //#event-handler } - diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/MovieWatchList.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/MovieWatchList.scala index b66d624733..76068e7fd3 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/MovieWatchList.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/MovieWatchList.scala @@ -30,26 +30,23 @@ object MovieWatchList { } } - private val commandHandler: CommandHandler[Command, Event, MovieList] = { - (state, cmd) => - cmd match { - case AddMovie(movieId) => - Effect.persist(MovieAdded(movieId)) - case RemoveMovie(movieId) => - Effect.persist(MovieRemoved(movieId)) - case GetMovieList(replyTo) => - replyTo ! state - Effect.none - } + private val commandHandler: CommandHandler[Command, Event, MovieList] = { (state, cmd) => + cmd match { + case AddMovie(movieId) => + Effect.persist(MovieAdded(movieId)) + case RemoveMovie(movieId) => + Effect.persist(MovieRemoved(movieId)) + case GetMovieList(replyTo) => + replyTo ! 
state + Effect.none + } } def behavior(userId: String): Behavior[Command] = { - EventSourcedBehavior[Command, Event, MovieList]( - persistenceId = PersistenceId(s"movies-$userId"), - emptyState = MovieList(Set.empty), - commandHandler, - eventHandler = (state, event) => state.applyEvent(event) - ) + EventSourcedBehavior[Command, Event, MovieList](persistenceId = PersistenceId(s"movies-$userId"), + emptyState = MovieList(Set.empty), + commandHandler, + eventHandler = (state, event) => state.applyEvent(event)) } } diff --git a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala index 9958f3584a..ecb6237ad5 100644 --- a/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala +++ b/akka-persistence-typed/src/test/scala/docs/akka/persistence/typed/StashingExample.scala @@ -30,12 +30,11 @@ object StashingExample { final case class State(taskIdInProgress: Option[String]) def apply(persistenceId: PersistenceId): Behavior[Command] = - EventSourcedBehavior[Command, Event, State]( - persistenceId = persistenceId, - emptyState = State(None), - commandHandler = (state, command) => onCommand(state, command), - eventHandler = (state, event) => applyEvent(state, event) - ).onPersistFailure(SupervisorStrategy.restartWithBackoff(1.second, 30.seconds, 0.2)) + EventSourcedBehavior[Command, Event, State](persistenceId = persistenceId, + emptyState = State(None), + commandHandler = (state, command) => onCommand(state, command), + eventHandler = (state, event) => applyEvent(state, event)) + .onPersistFailure(SupervisorStrategy.restartWithBackoff(1.second, 30.seconds, 0.2)) private def onCommand(state: State, command: Command): Effect[Event, State] = { state.taskIdInProgress match { @@ -65,8 +64,7 @@ object StashingExample { case EndTask(taskId) => if (inProgress == taskId) - Effect.persist(TaskCompleted(taskId)) - .thenUnstashAll() 
// continue with next task + Effect.persist(TaskCompleted(taskId)).thenUnstashAll() // continue with next task else // other task in progress, wait with new task until later Effect.stash() diff --git a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala index ba98fddc40..1aaeeb7671 100644 --- a/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/AtLeastOnceDelivery.scala @@ -24,8 +24,9 @@ object AtLeastOnceDelivery { * with [[AtLeastOnceDeliveryLike#setDeliverySnapshot]]. */ @SerialVersionUID(1L) - case class AtLeastOnceDeliverySnapshot(currentDeliveryId: Long, unconfirmedDeliveries: immutable.Seq[UnconfirmedDelivery]) - extends Message { + case class AtLeastOnceDeliverySnapshot(currentDeliveryId: Long, + unconfirmedDeliveries: immutable.Seq[UnconfirmedDelivery]) + extends Message { /** * Java API @@ -42,6 +43,7 @@ object AtLeastOnceDelivery { */ @SerialVersionUID(1L) case class UnconfirmedWarning(unconfirmedDeliveries: immutable.Seq[UnconfirmedDelivery]) { + /** * Java API */ @@ -56,6 +58,7 @@ object AtLeastOnceDelivery { * and [[AtLeastOnceDeliverySnapshot]]. 
*/ case class UnconfirmedDelivery(deliveryId: Long, destination: ActorPath, message: Any) { + /** * Java API */ @@ -262,7 +265,9 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { s"Too many unconfirmed messages, maximum allowed is [$maxUnconfirmedMessages]") val deliveryId = nextDeliverySequenceNr() - val now = if (recoveryRunning) { System.nanoTime() - redeliverInterval.toNanos } else System.nanoTime() + val now = if (recoveryRunning) { + System.nanoTime() - redeliverInterval.toNanos + } else System.nanoTime() val d = Delivery(destination, deliveryIdToMessage(deliveryId), now, attempt = 0) if (recoveryRunning) @@ -277,9 +282,10 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { @InternalApi private[akka] final def internalDeliver(destination: ActorSelection)(deliveryIdToMessage: Long => Any): Unit = { val isWildcardSelection = destination.pathString.contains("*") - require(!isWildcardSelection, "Delivering to wildcard actor selections is not supported by AtLeastOnceDelivery. " + - "Introduce an mediator Actor which this AtLeastOnceDelivery Actor will deliver the messages to," + - "and will handle the logic of fan-out and collecting individual confirmations, until it can signal confirmation back to this Actor.") + require(!isWildcardSelection, + "Delivering to wildcard actor selections is not supported by AtLeastOnceDelivery. 
" + + "Introduce an mediator Actor which this AtLeastOnceDelivery Actor will deliver the messages to," + + "and will handle the logic of fan-out and collecting individual confirmations, until it can signal confirmation back to this Actor.") internalDeliver(ActorPath.fromString(destination.toSerializationFormat))(deliveryIdToMessage) } @@ -308,8 +314,7 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { val deadline = now - redeliverInterval.toNanos var warnings = Vector.empty[UnconfirmedDelivery] - unconfirmed - .iterator + unconfirmed.iterator .filter { case (_, delivery) => delivery.timestamp <= deadline } .take(redeliveryBurstLimit) .foreach { @@ -344,7 +349,9 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { def getDeliverySnapshot: AtLeastOnceDeliverySnapshot = AtLeastOnceDeliverySnapshot( deliverySequenceNr, - unconfirmed.iterator.map { case (deliveryId, d) => UnconfirmedDelivery(deliveryId, d.destination, d.message) }.to(immutable.IndexedSeq)) + unconfirmed.iterator + .map { case (deliveryId, d) => UnconfirmedDelivery(deliveryId, d.destination, d.message) } + .to(immutable.IndexedSeq)) /** * If snapshot from [[#getDeliverySnapshot]] was saved it will be received during recovery @@ -403,7 +410,10 @@ trait AtLeastOnceDeliveryLike extends Eventsourced { * @see [[AtLeastOnceDeliveryLike]] */ @deprecated("Use AbstractPersistentActorWithAtLeastOnceDelivery instead.", since = "2.5.0") -abstract class UntypedPersistentActorWithAtLeastOnceDelivery extends UntypedPersistentActor with AtLeastOnceDeliveryLike { +abstract class UntypedPersistentActorWithAtLeastOnceDelivery + extends UntypedPersistentActor + with AtLeastOnceDeliveryLike { + /** * Java API: Send the message created by the `deliveryIdToMessage` function to * the `destination` actor. 
It will retry sending the message until @@ -461,7 +471,10 @@ abstract class UntypedPersistentActorWithAtLeastOnceDelivery extends UntypedPers * @see [[AtLeastOnceDelivery]] * @see [[AtLeastOnceDeliveryLike]] */ -abstract class AbstractPersistentActorWithAtLeastOnceDelivery extends AbstractPersistentActor with AtLeastOnceDeliveryLike { +abstract class AbstractPersistentActorWithAtLeastOnceDelivery + extends AbstractPersistentActor + with AtLeastOnceDeliveryLike { + /** * Java API: Send the message created by the `deliveryIdToMessage` function to * the `destination` actor. It will retry sending the message until diff --git a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala index 9324fadcbd..5f5d502b6d 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Eventsourced.scala @@ -32,7 +32,9 @@ private[persistence] object Eventsourced { } /** INTERNAL API: forces actor to stash incoming commands until all these invocations are handled */ - private[akka] final case class StashingHandlerInvocation(evt: Any, handler: Any => Unit) extends PendingHandlerInvocation + private[akka] final case class StashingHandlerInvocation(evt: Any, handler: Any => Unit) + extends PendingHandlerInvocation + /** INTERNAL API: does not force the actor to stash commands; Originates from either `persistAsync` or `defer` calls */ private[akka] final case class AsyncHandlerInvocation(evt: Any, handler: Any => Unit) extends PendingHandlerInvocation @@ -45,8 +47,11 @@ private[persistence] object Eventsourced { * * Scala API and implementation details of [[PersistentActor]] and [[AbstractPersistentActor]]. 
*/ -private[persistence] trait Eventsourced extends Snapshotter with PersistenceStash - with PersistenceIdentity with PersistenceRecovery { +private[persistence] trait Eventsourced + extends Snapshotter + with PersistenceStash + with PersistenceIdentity + with PersistenceRecovery { import Eventsourced._ import JournalProtocol._ import SnapshotProtocol.{ LoadSnapshotFailed, LoadSnapshotResult } @@ -148,11 +153,18 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas protected def onRecoveryFailure(cause: Throwable, event: Option[Any]): Unit = event match { case Some(evt) => - log.error(cause, "Exception in receiveRecover when replaying event type [{}] with sequence number [{}] for " + - "persistenceId [{}].", evt.getClass.getName, lastSequenceNr, persistenceId) + log.error(cause, + "Exception in receiveRecover when replaying event type [{}] with sequence number [{}] for " + + "persistenceId [{}].", + evt.getClass.getName, + lastSequenceNr, + persistenceId) case None => - log.error(cause, "Persistence failure when replaying events for persistenceId [{}]. " + - "Last known sequence number [{}]", persistenceId, lastSequenceNr) + log.error(cause, + "Persistence failure when replaying events for persistenceId [{}]. 
" + + "Last known sequence number [{}]", + persistenceId, + lastSequenceNr) } /** @@ -169,8 +181,11 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas * @param event the event that was to be persisted */ protected def onPersistFailure(cause: Throwable, event: Any, seqNr: Long): Unit = { - log.error(cause, "Failed to persist event type [{}] with sequence number [{}] for persistenceId [{}].", - event.getClass.getName, seqNr, persistenceId) + log.error(cause, + "Failed to persist event type [{}] with sequence number [{}] for persistenceId [{}].", + event.getClass.getName, + seqNr, + persistenceId) } /** @@ -182,14 +197,17 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas * @param event the event that was to be persisted */ protected def onPersistRejected(cause: Throwable, event: Any, seqNr: Long): Unit = { - log.error( - cause, - "Rejected to persist event type [{}] with sequence number [{}] for persistenceId [{}] due to [{}].", - event.getClass.getName, seqNr, persistenceId, cause.getMessage) + log.error(cause, + "Rejected to persist event type [{}] with sequence number [{}] for persistenceId [{}] due to [{}].", + event.getClass.getName, + seqNr, + persistenceId, + cause.getMessage) } private def stashInternally(currMsg: Any): Unit = - try internalStash.stash() catch { + try internalStash.stash() + catch { case e: StashOverflowException => internalStashOverflowStrategy match { case DiscardToDeadLetterStrategy => @@ -278,15 +296,26 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas message match { case RecoveryCompleted => // mute case SaveSnapshotFailure(m, e) => - log.warning("Failed to saveSnapshot given metadata [{}] due to: [{}: {}]", m, e.getClass.getCanonicalName, e.getMessage) + log.warning("Failed to saveSnapshot given metadata [{}] due to: [{}: {}]", + m, + e.getClass.getCanonicalName, + e.getMessage) case DeleteSnapshotFailure(m, e) => - log.warning("Failed 
to deleteSnapshot given metadata [{}] due to: [{}: {}]", m, e.getClass.getCanonicalName, e.getMessage) + log.warning("Failed to deleteSnapshot given metadata [{}] due to: [{}: {}]", + m, + e.getClass.getCanonicalName, + e.getMessage) case DeleteSnapshotsFailure(c, e) => - log.warning("Failed to deleteSnapshots given criteria [{}] due to: [{}: {}]", c, e.getClass.getCanonicalName, e.getMessage) + log.warning("Failed to deleteSnapshots given criteria [{}] due to: [{}: {}]", + c, + e.getClass.getCanonicalName, + e.getMessage) case DeleteMessagesFailure(e, toSequenceNr) => - log.warning( - "Failed to deleteMessages toSequenceNr [{}] for persistenceId [{}] due to [{}: {}].", - toSequenceNr, persistenceId, e.getClass.getCanonicalName, e.getMessage) + log.warning("Failed to deleteMessages toSequenceNr [{}] for persistenceId [{}] due to [{}: {}].", + toSequenceNr, + persistenceId, + e.getClass.getCanonicalName, + e.getMessage) case m => super.unhandled(m) } } @@ -343,11 +372,17 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas */ @InternalApi final private[akka] def internalPersist[A](event: A)(handler: A => Unit): Unit = { - if (recoveryRunning) throw new IllegalStateException("Cannot persist during replay. Events can be persisted when receiving RecoveryCompleted or later.") + if (recoveryRunning) + throw new IllegalStateException( + "Cannot persist during replay. 
Events can be persisted when receiving RecoveryCompleted or later.") pendingStashingPersistInvocations += 1 - pendingInvocations addLast StashingHandlerInvocation(event, handler.asInstanceOf[Any => Unit]) - eventBatch ::= AtomicWrite(PersistentRepr(event, persistenceId = persistenceId, - sequenceNr = nextSequenceNr(), writerUuid = writerUuid, sender = sender())) + pendingInvocations.addLast(StashingHandlerInvocation(event, handler.asInstanceOf[Any => Unit])) + eventBatch ::= AtomicWrite( + PersistentRepr(event, + persistenceId = persistenceId, + sequenceNr = nextSequenceNr(), + writerUuid = writerUuid, + sender = sender())) } /** @@ -355,14 +390,21 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas */ @InternalApi final private[akka] def internalPersistAll[A](events: immutable.Seq[A])(handler: A => Unit): Unit = { - if (recoveryRunning) throw new IllegalStateException("Cannot persist during replay. Events can be persisted when receiving RecoveryCompleted or later.") + if (recoveryRunning) + throw new IllegalStateException( + "Cannot persist during replay. 
Events can be persisted when receiving RecoveryCompleted or later.") if (events.nonEmpty) { events.foreach { event => pendingStashingPersistInvocations += 1 - pendingInvocations addLast StashingHandlerInvocation(event, handler.asInstanceOf[Any => Unit]) + pendingInvocations.addLast(StashingHandlerInvocation(event, handler.asInstanceOf[Any => Unit])) } - eventBatch ::= AtomicWrite(events.map(PersistentRepr.apply(_, persistenceId = persistenceId, - sequenceNr = nextSequenceNr(), writerUuid = writerUuid, sender = sender()))) + eventBatch ::= AtomicWrite( + events.map( + PersistentRepr.apply(_, + persistenceId = persistenceId, + sequenceNr = nextSequenceNr(), + writerUuid = writerUuid, + sender = sender()))) } } @@ -371,10 +413,16 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas */ @InternalApi final private[akka] def internalPersistAsync[A](event: A)(handler: A => Unit): Unit = { - if (recoveryRunning) throw new IllegalStateException("Cannot persist during replay. Events can be persisted when receiving RecoveryCompleted or later.") - pendingInvocations addLast AsyncHandlerInvocation(event, handler.asInstanceOf[Any => Unit]) - eventBatch ::= AtomicWrite(PersistentRepr(event, persistenceId = persistenceId, - sequenceNr = nextSequenceNr(), writerUuid = writerUuid, sender = sender())) + if (recoveryRunning) + throw new IllegalStateException( + "Cannot persist during replay. 
Events can be persisted when receiving RecoveryCompleted or later.") + pendingInvocations.addLast(AsyncHandlerInvocation(event, handler.asInstanceOf[Any => Unit])) + eventBatch ::= AtomicWrite( + PersistentRepr(event, + persistenceId = persistenceId, + sequenceNr = nextSequenceNr(), + writerUuid = writerUuid, + sender = sender())) } /** @@ -382,13 +430,20 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas */ @InternalApi final private[akka] def internalPersistAllAsync[A](events: immutable.Seq[A])(handler: A => Unit): Unit = { - if (recoveryRunning) throw new IllegalStateException("Cannot persist during replay. Events can be persisted when receiving RecoveryCompleted or later.") + if (recoveryRunning) + throw new IllegalStateException( + "Cannot persist during replay. Events can be persisted when receiving RecoveryCompleted or later.") if (events.nonEmpty) { events.foreach { event => - pendingInvocations addLast AsyncHandlerInvocation(event, handler.asInstanceOf[Any => Unit]) + pendingInvocations.addLast(AsyncHandlerInvocation(event, handler.asInstanceOf[Any => Unit])) } - eventBatch ::= AtomicWrite(events.map(PersistentRepr(_, persistenceId = persistenceId, - sequenceNr = nextSequenceNr(), writerUuid = writerUuid, sender = sender()))) + eventBatch ::= AtomicWrite( + events.map( + PersistentRepr(_, + persistenceId = persistenceId, + sequenceNr = nextSequenceNr(), + writerUuid = writerUuid, + sender = sender()))) } } @@ -397,11 +452,13 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas */ @InternalApi final private[akka] def internalDeferAsync[A](event: A)(handler: A => Unit): Unit = { - if (recoveryRunning) throw new IllegalStateException("Cannot defer during replay. Events can be deferred when receiving RecoveryCompleted or later.") + if (recoveryRunning) + throw new IllegalStateException( + "Cannot defer during replay. 
Events can be deferred when receiving RecoveryCompleted or later.") if (pendingInvocations.isEmpty) { handler(event) } else { - pendingInvocations addLast AsyncHandlerInvocation(event, handler.asInstanceOf[Any => Unit]) + pendingInvocations.addLast(AsyncHandlerInvocation(event, handler.asInstanceOf[Any => Unit])) eventBatch = NonPersistentRepr(event, sender()) :: eventBatch } } @@ -411,12 +468,14 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas */ @InternalApi final private[akka] def internalDefer[A](event: A)(handler: A => Unit): Unit = { - if (recoveryRunning) throw new IllegalStateException("Cannot defer during replay. Events can be deferred when receiving RecoveryCompleted or later.") + if (recoveryRunning) + throw new IllegalStateException( + "Cannot defer during replay. Events can be deferred when receiving RecoveryCompleted or later.") if (pendingInvocations.isEmpty) { handler(event) } else { pendingStashingPersistInvocations += 1 - pendingInvocations addLast StashingHandlerInvocation(event, handler.asInstanceOf[Any => Unit]) + pendingInvocations.addLast(StashingHandlerInvocation(event, handler.asInstanceOf[Any => Unit])) eventBatch = NonPersistentRepr(event, sender()) :: eventBatch } } @@ -438,8 +497,10 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas if (toSequenceNr == Long.MaxValue || toSequenceNr <= lastSequenceNr) journal ! DeleteMessagesTo(persistenceId, toSequenceNr, self) else - self ! DeleteMessagesFailure(new RuntimeException( - s"toSequenceNr [$toSequenceNr] must be less than or equal to lastSequenceNr [$lastSequenceNr]"), toSequenceNr) + self ! 
DeleteMessagesFailure( + new RuntimeException( + s"toSequenceNr [$toSequenceNr] must be less than or equal to lastSequenceNr [$lastSequenceNr]"), + toSequenceNr) } /** @@ -452,10 +513,9 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas * Or delete all by using `Long.MaxValue` as the `toSequenceNr` * {{{ m.copy(sequenceNr = Long.MaxValue) }}} */ - @InternalApi private[akka] def internalDeleteMessagesBeforeSnapshot( - e: SaveSnapshotSuccess, - keepNrOfBatches: Int, - snapshotAfter: Int): Unit = { + @InternalApi private[akka] def internalDeleteMessagesBeforeSnapshot(e: SaveSnapshotSuccess, + keepNrOfBatches: Int, + snapshotAfter: Int): Unit = { /* Delete old events but keep the latest around 1. It's not safe to delete all events immediately because snapshots are typically stored with a weaker consistency level. A replay might "see" the deleted events before it sees the stored @@ -539,7 +599,8 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas } private val recoveryBehavior: Receive = { - val _receiveRecover = try receiveRecover catch { + val _receiveRecover = try receiveRecover + catch { case NonFatal(e) => try onRecoveryFailure(e, Some(e)) finally context.stop(self) @@ -562,42 +623,43 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas override def recoveryRunning: Boolean = true - override def stateReceive(receive: Receive, message: Any) = try message match { - case LoadSnapshotResult(sso, toSnr) => - timeoutCancellable.cancel() - sso.foreach { - case SelectedSnapshot(metadata, snapshot) => - val offer = SnapshotOffer(metadata, snapshot) - if (recoveryBehavior.isDefinedAt(offer)) { - setLastSequenceNr(metadata.sequenceNr) - // Since we are recovering we can ignore the receive behavior from the stack - Eventsourced.super.aroundReceive(recoveryBehavior, offer) - } else { - unhandled(offer) - } - } - changeState(recovering(recoveryBehavior, timeout)) - journal ! 
ReplayMessages(lastSequenceNr + 1L, toSnr, replayMax, persistenceId, self) + override def stateReceive(receive: Receive, message: Any) = + try message match { + case LoadSnapshotResult(sso, toSnr) => + timeoutCancellable.cancel() + sso.foreach { + case SelectedSnapshot(metadata, snapshot) => + val offer = SnapshotOffer(metadata, snapshot) + if (recoveryBehavior.isDefinedAt(offer)) { + setLastSequenceNr(metadata.sequenceNr) + // Since we are recovering we can ignore the receive behavior from the stack + Eventsourced.super.aroundReceive(recoveryBehavior, offer) + } else { + unhandled(offer) + } + } + changeState(recovering(recoveryBehavior, timeout)) + journal ! ReplayMessages(lastSequenceNr + 1L, toSnr, replayMax, persistenceId, self) - case LoadSnapshotFailed(cause) => - timeoutCancellable.cancel() - try onRecoveryFailure(cause, event = None) finally context.stop(self) - returnRecoveryPermit() + case LoadSnapshotFailed(cause) => + timeoutCancellable.cancel() + try onRecoveryFailure(cause, event = None) + finally context.stop(self) + returnRecoveryPermit() - case RecoveryTick(true) => - try onRecoveryFailure( - new RecoveryTimedOut(s"Recovery timed out, didn't get snapshot within $timeout"), - event = None) - finally context.stop(self) - returnRecoveryPermit() + case RecoveryTick(true) => + try onRecoveryFailure(new RecoveryTimedOut(s"Recovery timed out, didn't get snapshot within $timeout"), + event = None) + finally context.stop(self) + returnRecoveryPermit() - case other => - stashInternally(other) - } catch { - case NonFatal(e) => - returnRecoveryPermit() - throw e - } + case other => + stashInternally(other) + } catch { + case NonFatal(e) => + returnRecoveryPermit() + throw e + } private def returnRecoveryPermit(): Unit = extension.recoveryPermitter.tell(RecoveryPermitter.ReturnRecoveryPermit, self) @@ -629,50 +691,54 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas override def recoveryRunning: Boolean = _recoveryRunning - 
override def stateReceive(receive: Receive, message: Any) = try message match { - case ReplayedMessage(p) => - try { - eventSeenInInterval = true - updateLastSequenceNr(p) - Eventsourced.super.aroundReceive(recoveryBehavior, p) - } catch { - case NonFatal(t) => - timeoutCancellable.cancel() - try onRecoveryFailure(t, Some(p.payload)) finally context.stop(self) - returnRecoveryPermit() - } - case RecoverySuccess(highestSeqNr) => - timeoutCancellable.cancel() - onReplaySuccess() // callback for subclass implementation - sequenceNr = highestSeqNr - setLastSequenceNr(highestSeqNr) - _recoveryRunning = false - try Eventsourced.super.aroundReceive(recoveryBehavior, RecoveryCompleted) - finally transitToProcessingState() // in finally in case exception and resume strategy - // if exception from RecoveryCompleted the permit is returned in below catch - returnRecoveryPermit() - case ReplayMessagesFailure(cause) => - timeoutCancellable.cancel() - try onRecoveryFailure(cause, event = None) finally context.stop(self) - returnRecoveryPermit() - case RecoveryTick(false) if !eventSeenInInterval => - timeoutCancellable.cancel() - try onRecoveryFailure( - new RecoveryTimedOut(s"Recovery timed out, didn't get event within $timeout, highest sequence number seen $lastSequenceNr"), - event = None) - finally context.stop(self) - returnRecoveryPermit() - case RecoveryTick(false) => - eventSeenInInterval = false - case RecoveryTick(true) => - // snapshot tick, ignore - case other => - stashInternally(other) - } catch { - case NonFatal(e) => - returnRecoveryPermit() - throw e - } + override def stateReceive(receive: Receive, message: Any) = + try message match { + case ReplayedMessage(p) => + try { + eventSeenInInterval = true + updateLastSequenceNr(p) + Eventsourced.super.aroundReceive(recoveryBehavior, p) + } catch { + case NonFatal(t) => + timeoutCancellable.cancel() + try onRecoveryFailure(t, Some(p.payload)) + finally context.stop(self) + returnRecoveryPermit() + } + case 
RecoverySuccess(highestSeqNr) => + timeoutCancellable.cancel() + onReplaySuccess() // callback for subclass implementation + sequenceNr = highestSeqNr + setLastSequenceNr(highestSeqNr) + _recoveryRunning = false + try Eventsourced.super.aroundReceive(recoveryBehavior, RecoveryCompleted) + finally transitToProcessingState() // in finally in case exception and resume strategy + // if exception from RecoveryCompleted the permit is returned in below catch + returnRecoveryPermit() + case ReplayMessagesFailure(cause) => + timeoutCancellable.cancel() + try onRecoveryFailure(cause, event = None) + finally context.stop(self) + returnRecoveryPermit() + case RecoveryTick(false) if !eventSeenInInterval => + timeoutCancellable.cancel() + try onRecoveryFailure( + new RecoveryTimedOut( + s"Recovery timed out, didn't get event within $timeout, highest sequence number seen $lastSequenceNr"), + event = None) + finally context.stop(self) + returnRecoveryPermit() + case RecoveryTick(false) => + eventSeenInInterval = false + case RecoveryTick(true) => + // snapshot tick, ignore + case other => + stashInternally(other) + } catch { + case NonFatal(e) => + returnRecoveryPermit() + throw e + } private def returnRecoveryPermit(): Unit = extension.recoveryPermitter.tell(RecoveryPermitter.ReturnRecoveryPermit, self) @@ -734,7 +800,8 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas // while message is in flight, in that case the handler has already been discarded if (id == instanceId) { onWriteMessageComplete(err = false) - try onPersistFailure(cause, p.payload, p.sequenceNr) finally context.stop(self) + try onPersistFailure(cause, p.payload, p.sequenceNr) + finally context.stop(self) } case LoopMessageSuccess(l, id) => // instanceId mismatch can happen for persistAsync and defer in case of actor restart @@ -772,12 +839,13 @@ private[persistence] trait Eventsourced extends Snapshotter with PersistenceStas override def stateReceive(receive: Receive, message: 
Any) = if (common.isDefinedAt(message)) common(message) - else try { - Eventsourced.super.aroundReceive(receive, message) - aroundReceiveComplete(err = false) - } catch { - case NonFatal(e) => aroundReceiveComplete(err = true); throw e - } + else + try { + Eventsourced.super.aroundReceive(receive, message) + aroundReceiveComplete(err = false) + } catch { + case NonFatal(e) => aroundReceiveComplete(err = true); throw e + } private def aroundReceiveComplete(err: Boolean): Unit = { if (eventBatch.nonEmpty) flushBatch() diff --git a/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala b/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala index 6633e8fdd1..21a01b9317 100644 --- a/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala +++ b/akka-persistence/src/main/scala/akka/persistence/JournalProtocol.scala @@ -17,8 +17,10 @@ private[persistence] object JournalProtocol { /** Marker trait shared by internal journal messages. */ sealed trait Message extends Protocol.Message + /** Internal journal command. */ sealed trait Request extends Message + /** Internal journal acknowledgement. */ sealed trait Response extends Message @@ -27,7 +29,7 @@ private[persistence] object JournalProtocol { * (inclusive). `Long.MaxValue` may be used as `toSequenceNr` to delete all persistent messages. */ final case class DeleteMessagesTo(persistenceId: String, toSequenceNr: Long, persistentActor: ActorRef) - extends Request + extends Request /** * Request to write messages. @@ -35,15 +37,17 @@ private[persistence] object JournalProtocol { * @param messages messages to be written. * @param persistentActor write requestor. 
*/ - final case class WriteMessages(messages: immutable.Seq[PersistentEnvelope], persistentActor: ActorRef, actorInstanceId: Int) - extends Request with NoSerializationVerificationNeeded + final case class WriteMessages(messages: immutable.Seq[PersistentEnvelope], + persistentActor: ActorRef, + actorInstanceId: Int) + extends Request + with NoSerializationVerificationNeeded /** * Reply message to a successful [[WriteMessages]] request. This reply is sent to the requestor * before all subsequent [[WriteMessageSuccess]] replies. */ - case object WriteMessagesSuccessful - extends Response + case object WriteMessagesSuccessful extends Response /** * Reply message to a failed [[WriteMessages]] request. This reply is sent to the requestor @@ -51,8 +55,7 @@ private[persistence] object JournalProtocol { * * @param cause failure cause. */ - final case class WriteMessagesFailed(cause: Throwable) - extends Response + final case class WriteMessagesFailed(cause: Throwable) extends Response /** * Reply message to a successful [[WriteMessages]] request. For each contained [[PersistentRepr]] message @@ -60,8 +63,7 @@ private[persistence] object JournalProtocol { * * @param persistent successfully written message. */ - final case class WriteMessageSuccess(persistent: PersistentRepr, actorInstanceId: Int) - extends Response + final case class WriteMessageSuccess(persistent: PersistentRepr, actorInstanceId: Int) extends Response /** * Reply message to a rejected [[WriteMessages]] request. The write of this message was rejected before @@ -72,7 +74,8 @@ private[persistence] object JournalProtocol { * @param cause failure cause. */ final case class WriteMessageRejected(message: PersistentRepr, cause: Throwable, actorInstanceId: Int) - extends Response with NoSerializationVerificationNeeded + extends Response + with NoSerializationVerificationNeeded /** * Reply message to a failed [[WriteMessages]] request. 
For each contained [[PersistentRepr]] message @@ -82,7 +85,8 @@ private[persistence] object JournalProtocol { * @param cause failure cause. */ final case class WriteMessageFailure(message: PersistentRepr, cause: Throwable, actorInstanceId: Int) - extends Response with NoSerializationVerificationNeeded + extends Response + with NoSerializationVerificationNeeded /** * Reply message to a [[WriteMessages]] with a non-persistent message. @@ -90,7 +94,8 @@ private[persistence] object JournalProtocol { * @param message looped message. */ final case class LoopMessageSuccess(message: Any, actorInstanceId: Int) - extends Response with NoSerializationVerificationNeeded + extends Response + with NoSerializationVerificationNeeded /** * Request to replay messages to `persistentActor`. @@ -101,8 +106,12 @@ private[persistence] object JournalProtocol { * @param persistenceId requesting persistent actor id. * @param persistentActor requesting persistent actor. */ - final case class ReplayMessages(fromSequenceNr: Long, toSequenceNr: Long, max: Long, - persistenceId: String, persistentActor: ActorRef) extends Request + final case class ReplayMessages(fromSequenceNr: Long, + toSequenceNr: Long, + max: Long, + persistenceId: String, + persistentActor: ActorRef) + extends Request /** * Reply message to a [[ReplayMessages]] request. A separate reply is sent to the requestor for each @@ -111,7 +120,9 @@ private[persistence] object JournalProtocol { * @param persistent replayed message. */ final case class ReplayedMessage(persistent: PersistentRepr) - extends Response with DeadLetterSuppression with NoSerializationVerificationNeeded + extends Response + with DeadLetterSuppression + with NoSerializationVerificationNeeded /** * Reply message to a successful [[ReplayMessages]] request. This reply is sent to the requestor @@ -122,14 +133,12 @@ private[persistence] object JournalProtocol { * * @param highestSequenceNr highest stored sequence number. 
*/ - case class RecoverySuccess(highestSequenceNr: Long) - extends Response with DeadLetterSuppression + case class RecoverySuccess(highestSequenceNr: Long) extends Response with DeadLetterSuppression /** * Reply message to a failed [[ReplayMessages]] request. This reply is sent to the requestor * if a replay could not be successfully completed. */ - final case class ReplayMessagesFailure(cause: Throwable) - extends Response with DeadLetterSuppression + final case class ReplayMessagesFailure(cause: Throwable) extends Response with DeadLetterSuppression } diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala index 25c81f31df..74c6bbff70 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Persistence.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Persistence.scala @@ -116,6 +116,7 @@ trait PersistenceRecovery { } trait PersistenceStash extends Stash with StashFactory { + /** * The returned [[StashOverflowStrategy]] object determines how to handle the message failed to stash * when the internal Stash capacity exceeded. @@ -125,6 +126,7 @@ trait PersistenceStash extends Stash with StashFactory { } trait RuntimePluginConfig { + /** * Additional configuration of the journal plugin servicing this persistent actor. * When empty, the whole configuration of the journal plugin will be taken from the [[Config]] loaded into the @@ -152,6 +154,7 @@ trait RuntimePluginConfig { * Persistence extension provider. */ object Persistence extends ExtensionId[Persistence] with ExtensionIdProvider { + /** Java API. */ override def get(system: ActorSystem): Persistence = super.get(system) @@ -161,7 +164,7 @@ object Persistence extends ExtensionId[Persistence] with ExtensionIdProvider { /** INTERNAL API. 
*/ private[persistence] case class PluginHolder(actor: ActorRef, adapters: EventAdapters, config: Config) - extends Extension + extends Extension /** Config path to fall-back to if a setting is not defined in a specific plugin's config section */ val JournalFallbackConfigPath = "akka.persistence.journal-plugin-fallback" @@ -205,7 +208,8 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { val configPath = config.getString("snapshot-store.plugin") if (isEmpty(configPath)) { - log.warning("No default snapshot store configured! " + + log.warning( + "No default snapshot store configured! " + "To configure a default snapshot-store plugin set the `akka.persistence.snapshot-store.plugin` key. " + "For details see 'reference.conf'") NoSnapshotStorePluginId @@ -214,9 +218,11 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { // Lazy, so user is not forced to configure defaults when she is not using them. lazy val defaultInternalStashOverflowStrategy: StashOverflowStrategy = - system.dynamicAccess.createInstanceFor[StashOverflowStrategyConfigurator](config.getString( - "internal-stash-overflow-strategy"), EmptyImmutableSeq) - .map(_.create(system.settings.config)).get + system.dynamicAccess + .createInstanceFor[StashOverflowStrategyConfigurator](config.getString("internal-stash-overflow-strategy"), + EmptyImmutableSeq) + .map(_.create(system.settings.config)) + .get val settings = new PersistenceSettings(config) @@ -226,18 +232,22 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { /** Discovered persistence journal and snapshot store plugins. 
*/ private val pluginExtensionId = new AtomicReference[Map[String, ExtensionId[PluginHolder]]](Map.empty) - config.getStringList("journal.auto-start-journals").forEach(new Consumer[String] { - override def accept(id: String): Unit = { - log.info(s"Auto-starting journal plugin `$id`") - journalFor(id) - } - }) - config.getStringList("snapshot-store.auto-start-snapshot-stores").forEach(new Consumer[String] { - override def accept(id: String): Unit = { - log.info(s"Auto-starting snapshot store `$id`") - snapshotStoreFor(id) - } - }) + config + .getStringList("journal.auto-start-journals") + .forEach(new Consumer[String] { + override def accept(id: String): Unit = { + log.info(s"Auto-starting journal plugin `$id`") + journalFor(id) + } + }) + config + .getStringList("snapshot-store.auto-start-snapshot-stores") + .forEach(new Consumer[String] { + override def accept(id: String): Unit = { + log.info(s"Auto-starting snapshot store `$id`") + snapshotStoreFor(id) + } + }) /** * Returns an [[akka.persistence.journal.EventAdapters]] object which serves as a per-journal collection of bound event adapters. @@ -265,7 +275,7 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { * Looks up [[akka.persistence.journal.EventAdapters]] by journal plugin's ActorRef. */ private[akka] final def adaptersFor(journalPluginActor: ActorRef): EventAdapters = { - pluginExtensionId.get().values collectFirst { + pluginExtensionId.get().values.collectFirst { case ext if ext(system).actor == journalPluginActor => ext(system).adapters } match { case Some(adapters) => adapters @@ -279,7 +289,8 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { * When empty, looks in `akka.persistence.journal.plugin` to find configuration entry path. * When configured, uses `journalPluginId` as absolute path to the journal configuration entry. 
*/ - private[akka] final def journalConfigFor(journalPluginId: String, journalPluginConfig: Config = ConfigFactory.empty): Config = { + private[akka] final def journalConfigFor(journalPluginId: String, + journalPluginConfig: Config = ConfigFactory.empty): Config = { val configPath = if (isEmpty(journalPluginId)) defaultJournalPluginId else journalPluginId pluginHolderFor(configPath, JournalFallbackConfigPath, journalPluginConfig).config } @@ -303,7 +314,8 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { * When configured, uses `journalPluginId` as absolute path to the journal configuration entry. * Configuration entry must contain few required fields, such as `class`. See `src/main/resources/reference.conf`. */ - private[akka] final def journalFor(journalPluginId: String, journalPluginConfig: Config = ConfigFactory.empty): ActorRef = { + private[akka] final def journalFor(journalPluginId: String, + journalPluginConfig: Config = ConfigFactory.empty): ActorRef = { val configPath = if (isEmpty(journalPluginId)) defaultJournalPluginId else journalPluginId pluginHolderFor(configPath, JournalFallbackConfigPath, journalPluginConfig).actor } @@ -316,12 +328,15 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { * When configured, uses `snapshotPluginId` as absolute path to the snapshot store configuration entry. * Configuration entry must contain few required fields, such as `class`. See `src/main/resources/reference.conf`. 
*/ - private[akka] final def snapshotStoreFor(snapshotPluginId: String, snapshotPluginConfig: Config = ConfigFactory.empty): ActorRef = { + private[akka] final def snapshotStoreFor(snapshotPluginId: String, + snapshotPluginConfig: Config = ConfigFactory.empty): ActorRef = { val configPath = if (isEmpty(snapshotPluginId)) defaultSnapshotPluginId else snapshotPluginId pluginHolderFor(configPath, SnapshotStoreFallbackConfigPath, snapshotPluginConfig).actor } - @tailrec private def pluginHolderFor(configPath: String, fallbackPath: String, additionalConfig: Config): PluginHolder = { + @tailrec private def pluginHolderFor(configPath: String, + fallbackPath: String, + additionalConfig: Config): PluginHolder = { val extensionIdMap = pluginExtensionId.get extensionIdMap.get(configPath) match { case Some(extensionId) => @@ -336,8 +351,10 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { private def createPlugin(configPath: String, pluginConfig: Config): ActorRef = { val pluginActorName = configPath val pluginClassName = pluginConfig.getString("class") match { - case "" => throw new IllegalArgumentException("Plugin class name must be defined in config property " + - s"[$configPath.class]") + case "" => + throw new IllegalArgumentException( + "Plugin class name must be defined in config property " + + s"[$configPath.class]") case className => className } log.debug(s"Create plugin: $pluginActorName $pluginClassName") @@ -369,16 +386,15 @@ class Persistence(val system: ExtendedActorSystem) extends Extension { private def id(ref: ActorRef) = ref.path.toStringWithoutAddress - private class PluginHolderExtensionId(configPath: String, fallbackPath: String, additionalConfig: Config) extends ExtensionId[PluginHolder] { + private class PluginHolderExtensionId(configPath: String, fallbackPath: String, additionalConfig: Config) + extends ExtensionId[PluginHolder] { def this(configPath: String, fallbackPath: String) = this(configPath, fallbackPath, 
ConfigFactory.empty) override def createExtension(system: ExtendedActorSystem): PluginHolder = { val mergedConfig = additionalConfig.withFallback(system.settings.config) - require( - !isEmpty(configPath) && mergedConfig.hasPath(configPath), - s"'reference.conf' is missing persistence plugin config path: '$configPath'") - val config: Config = mergedConfig.getConfig(configPath) - .withFallback(mergedConfig.getConfig(fallbackPath)) + require(!isEmpty(configPath) && mergedConfig.hasPath(configPath), + s"'reference.conf' is missing persistence plugin config path: '$configPath'") + val config: Config = mergedConfig.getConfig(configPath).withFallback(mergedConfig.getConfig(fallbackPath)) val plugin: ActorRef = createPlugin(configPath, config) val adapters: EventAdapters = createAdapters(configPath, mergedConfig) diff --git a/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala b/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala index c4788928aa..46edd3203a 100644 --- a/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala +++ b/akka-persistence/src/main/scala/akka/persistence/PersistencePlugin.scala @@ -21,9 +21,9 @@ import scala.util.Failure */ @InternalApi private[akka] object PersistencePlugin { - final private[persistence] case class PluginHolder[ScalaDsl, JavaDsl]( - scaladslPlugin: ScalaDsl, javadslPlugin: JavaDsl) - extends Extension + final private[persistence] case class PluginHolder[ScalaDsl, JavaDsl](scaladslPlugin: ScalaDsl, + javadslPlugin: JavaDsl) + extends Extension } /** @@ -39,7 +39,8 @@ private[akka] trait PluginProvider[T, ScalaDsl, JavaDsl] { * INTERNAL API */ @InternalApi -private[akka] abstract class PersistencePlugin[ScalaDsl, JavaDsl, T: ClassTag](system: ExtendedActorSystem)(implicit ev: PluginProvider[T, ScalaDsl, JavaDsl]) { +private[akka] abstract class PersistencePlugin[ScalaDsl, JavaDsl, T: ClassTag](system: ExtendedActorSystem)( + implicit ev: PluginProvider[T, ScalaDsl, 
JavaDsl]) { private val plugins = new AtomicReference[Map[String, ExtensionId[PluginHolder[ScalaDsl, JavaDsl]]]](Map.empty) private val log = Logging(system, getClass) @@ -55,10 +56,7 @@ private[akka] abstract class PersistencePlugin[ScalaDsl, JavaDsl, T: ClassTag](s val extensionId = new ExtensionId[PluginHolder[ScalaDsl, JavaDsl]] { override def createExtension(system: ExtendedActorSystem): PluginHolder[ScalaDsl, JavaDsl] = { val provider = createPlugin(configPath, readJournalPluginConfig) - PluginHolder( - ev.scalaDsl(provider), - ev.javaDsl(provider) - ) + PluginHolder(ev.scalaDsl(provider), ev.javaDsl(provider)) } } plugins.compareAndSet(extensionIdMap, extensionIdMap.updated(configPath, extensionId)) @@ -68,9 +66,8 @@ private[akka] abstract class PersistencePlugin[ScalaDsl, JavaDsl, T: ClassTag](s private def createPlugin(configPath: String, readJournalPluginConfig: Config): T = { val mergedConfig = readJournalPluginConfig.withFallback(system.settings.config) - require( - !isEmpty(configPath) && mergedConfig.hasPath(configPath), - s"'reference.conf' is missing persistence plugin config path: '$configPath'") + require(!isEmpty(configPath) && mergedConfig.hasPath(configPath), + s"'reference.conf' is missing persistence plugin config path: '$configPath'") val pluginConfig = mergedConfig.getConfig(configPath) val pluginClassName = pluginConfig.getString("class") log.debug(s"Create plugin: $configPath $pluginClassName") @@ -79,19 +76,23 @@ private[akka] abstract class PersistencePlugin[ScalaDsl, JavaDsl, T: ClassTag](s def instantiate(args: collection.immutable.Seq[(Class[_], AnyRef)]) = system.dynamicAccess.createInstanceFor[T](pluginClass, args) - instantiate((classOf[ExtendedActorSystem], system) :: (classOf[Config], pluginConfig) :: + instantiate( + (classOf[ExtendedActorSystem], system) :: (classOf[Config], pluginConfig) :: (classOf[String], configPath) :: Nil) .recoverWith { - case x: NoSuchMethodException => instantiate( - (classOf[ExtendedActorSystem], 
system) :: (classOf[Config], pluginConfig) :: Nil) + case x: NoSuchMethodException => + instantiate((classOf[ExtendedActorSystem], system) :: (classOf[Config], pluginConfig) :: Nil) } .recoverWith { case x: NoSuchMethodException => instantiate((classOf[ExtendedActorSystem], system) :: Nil) } .recoverWith { case x: NoSuchMethodException => instantiate(Nil) } .recoverWith { - case ex: Exception => Failure.apply( - new IllegalArgumentException("Unable to create read journal plugin instance for path " + - s"[$configPath], class [$pluginClassName]!", ex)) - }.get + case ex: Exception => + Failure.apply( + new IllegalArgumentException("Unable to create read journal plugin instance for path " + + s"[$configPath], class [$pluginClassName]!", + ex)) + } + .get } /** Check for default or missing identity. */ diff --git a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala index 7c7a677edc..d8f8f4eaf2 100644 --- a/akka-persistence/src/main/scala/akka/persistence/Persistent.scala +++ b/akka-persistence/src/main/scala/akka/persistence/Persistent.scala @@ -40,19 +40,20 @@ final case class AtomicWrite(payload: immutable.Seq[PersistentRepr]) extends Per // only check that all persistenceIds are equal when there's more than one in the Seq if (payload match { - case l: List[PersistentRepr] => l.tail.nonEmpty // avoids calling .size - case v: Vector[PersistentRepr] => v.size > 1 - case _ => true // some other collection type, let's just check - }) payload.foreach { pr => + case l: List[PersistentRepr] => l.tail.nonEmpty // avoids calling .size + case v: Vector[PersistentRepr] => v.size > 1 + case _ => true // some other collection type, let's just check + }) payload.foreach { pr => if (pr.persistenceId != payload.head.persistenceId) throw new IllegalArgumentException( "AtomicWrite must contain messages for the same persistenceId, " + - s"yet different persistenceIds found: 
${payload.map(_.persistenceId).toSet}") + s"yet different persistenceIds found: ${payload.map(_.persistenceId).toSet}") _highestSequenceNr = pr.sequenceNr } def persistenceId = payload.head.persistenceId - def lowestSequenceNr = payload.head.sequenceNr // this assumes they're gapless; they should be (it is only our code creating AWs) + def lowestSequenceNr = + payload.head.sequenceNr // this assumes they're gapless; they should be (it is only our code creating AWs) def highestSequenceNr = _highestSequenceNr override def sender: ActorRef = ActorRef.noSender @@ -119,31 +120,31 @@ trait PersistentRepr extends Message { /** * Creates a new copy of this [[PersistentRepr]]. */ - def update( - sequenceNr: Long = sequenceNr, - persistenceId: String = persistenceId, - deleted: Boolean = deleted, - sender: ActorRef = sender, - writerUuid: String = writerUuid): PersistentRepr + def update(sequenceNr: Long = sequenceNr, + persistenceId: String = persistenceId, + deleted: Boolean = deleted, + sender: ActorRef = sender, + writerUuid: String = writerUuid): PersistentRepr } object PersistentRepr { + /** Plugin API: value of an undefined persistenceId or manifest. */ val Undefined = "" + /** Plugin API: value of an undefined / identity event adapter. */ val UndefinedId = 0 /** * Plugin API. 
*/ - def apply( - payload: Any, - sequenceNr: Long = 0L, - persistenceId: String = PersistentRepr.Undefined, - manifest: String = PersistentRepr.Undefined, - deleted: Boolean = false, - sender: ActorRef = null, - writerUuid: String = PersistentRepr.Undefined): PersistentRepr = + def apply(payload: Any, + sequenceNr: Long = 0L, + persistenceId: String = PersistentRepr.Undefined, + manifest: String = PersistentRepr.Undefined, + deleted: Boolean = false, + sender: ActorRef = null, + writerUuid: String = PersistentRepr.Undefined): PersistentRepr = PersistentImpl(payload, sequenceNr, persistenceId, manifest, deleted, sender, writerUuid) /** @@ -161,14 +162,15 @@ object PersistentRepr { /** * INTERNAL API. */ -private[persistence] final case class PersistentImpl( - override val payload: Any, - override val sequenceNr: Long, - override val persistenceId: String, - override val manifest: String, - override val deleted: Boolean, - override val sender: ActorRef, - override val writerUuid: String) extends PersistentRepr with NoSerializationVerificationNeeded { +private[persistence] final case class PersistentImpl(override val payload: Any, + override val sequenceNr: Long, + override val persistenceId: String, + override val manifest: String, + override val deleted: Boolean, + override val sender: ActorRef, + override val writerUuid: String) + extends PersistentRepr + with NoSerializationVerificationNeeded { def withPayload(payload: Any): PersistentRepr = copy(payload = payload) @@ -178,12 +180,10 @@ private[persistence] final case class PersistentImpl( else copy(manifest = manifest) def update(sequenceNr: Long, persistenceId: String, deleted: Boolean, sender: ActorRef, writerUuid: String) = - copy( - sequenceNr = sequenceNr, - persistenceId = persistenceId, - deleted = deleted, - sender = sender, - writerUuid = writerUuid) + copy(sequenceNr = sequenceNr, + persistenceId = persistenceId, + deleted = deleted, + sender = sender, + writerUuid = writerUuid) } - diff --git 
a/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala b/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala index c8e424e155..e8a980a4ad 100644 --- a/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala +++ b/akka-persistence/src/main/scala/akka/persistence/PersistentActor.scala @@ -16,11 +16,13 @@ import scala.util.control.NoStackTrace import akka.annotation.InternalApi abstract class RecoveryCompleted + /** * Sent to a [[PersistentActor]] when the journal replay has been finished. */ @SerialVersionUID(1L) case object RecoveryCompleted extends RecoveryCompleted { + /** * Java API: get the singleton instance */ @@ -56,10 +58,9 @@ final case class DeleteMessagesFailure(cause: Throwable, toSequenceNr: Long) * @param replayMax maximum number of messages to replay. Default is no limit. */ @SerialVersionUID(1L) -final case class Recovery( - fromSnapshot: SnapshotSelectionCriteria = SnapshotSelectionCriteria.Latest, - toSequenceNr: Long = Long.MaxValue, - replayMax: Long = Long.MaxValue) +final case class Recovery(fromSnapshot: SnapshotSelectionCriteria = SnapshotSelectionCriteria.Latest, + toSequenceNr: Long = Long.MaxValue, + replayMax: Long = Long.MaxValue) object Recovery { @@ -122,6 +123,7 @@ sealed trait StashOverflowStrategy * Discard the message to [[akka.actor.DeadLetter]]. */ case object DiscardToDeadLetterStrategy extends StashOverflowStrategy { + /** * Java API: get the singleton instance */ @@ -135,6 +137,7 @@ case object DiscardToDeadLetterStrategy extends StashOverflowStrategy { * to replay. */ case object ThrowOverflowExceptionStrategy extends StashOverflowStrategy { + /** * Java API: get the singleton instance */ @@ -458,6 +461,7 @@ abstract class UntypedPersistentActor extends UntypedActor with Eventsourced wit * Java API: an persistent actor - can be used to implement command or event sourcing. 
*/ abstract class AbstractPersistentActor extends AbstractActor with AbstractPersistentActorLike { + /** * Recovery handler that receives persisted events during recovery. If a state snapshot * has been captured and saved, this handler will receive a [[SnapshotOffer]] message diff --git a/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala b/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala index 5cb66b5ace..a1c6173807 100644 --- a/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala +++ b/akka-persistence/src/main/scala/akka/persistence/RecoveryPermitter.scala @@ -70,9 +70,9 @@ import akka.actor.Terminated recoveryPermitGranted(ref) } if (pending.isEmpty && maxPendingStats > 0) { - log.debug( - "Drained pending recovery permit requests, max in progress was [{}], still [{}] in progress", - usedPermits + maxPendingStats, usedPermits) + log.debug("Drained pending recovery permit requests, max in progress was [{}], still [{}] in progress", + usedPermits + maxPendingStats, + usedPermits) maxPendingStats = 0 } } diff --git a/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala b/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala index 0bf5109466..17ae2c10ab 100644 --- a/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala +++ b/akka-persistence/src/main/scala/akka/persistence/SnapshotProtocol.scala @@ -31,8 +31,7 @@ object SnapshotMetadata { * @param metadata snapshot metadata. */ @SerialVersionUID(1L) -final case class SaveSnapshotSuccess(metadata: SnapshotMetadata) - extends SnapshotProtocol.Response +final case class SaveSnapshotSuccess(metadata: SnapshotMetadata) extends SnapshotProtocol.Response /** * Sent to a [[PersistentActor]] after successful deletion of a snapshot. @@ -40,8 +39,7 @@ final case class SaveSnapshotSuccess(metadata: SnapshotMetadata) * @param metadata snapshot metadata. 
*/ @SerialVersionUID(1L) -final case class DeleteSnapshotSuccess(metadata: SnapshotMetadata) - extends SnapshotProtocol.Response +final case class DeleteSnapshotSuccess(metadata: SnapshotMetadata) extends SnapshotProtocol.Response /** * Sent to a [[PersistentActor]] after successful deletion of specified range of snapshots. @@ -49,8 +47,7 @@ final case class DeleteSnapshotSuccess(metadata: SnapshotMetadata) * @param criteria snapshot selection criteria. */ @SerialVersionUID(1L) -final case class DeleteSnapshotsSuccess(criteria: SnapshotSelectionCriteria) - extends SnapshotProtocol.Response +final case class DeleteSnapshotsSuccess(criteria: SnapshotSelectionCriteria) extends SnapshotProtocol.Response /** * Sent to a [[PersistentActor]] after failed saving of a snapshot. @@ -59,8 +56,7 @@ final case class DeleteSnapshotsSuccess(criteria: SnapshotSelectionCriteria) * @param cause failure cause. */ @SerialVersionUID(1L) -final case class SaveSnapshotFailure(metadata: SnapshotMetadata, cause: Throwable) - extends SnapshotProtocol.Response +final case class SaveSnapshotFailure(metadata: SnapshotMetadata, cause: Throwable) extends SnapshotProtocol.Response /** * Sent to a [[PersistentActor]] after failed deletion of a snapshot. @@ -69,8 +65,7 @@ final case class SaveSnapshotFailure(metadata: SnapshotMetadata, cause: Throwabl * @param cause failure cause. */ @SerialVersionUID(1L) -final case class DeleteSnapshotFailure(metadata: SnapshotMetadata, cause: Throwable) - extends SnapshotProtocol.Response +final case class DeleteSnapshotFailure(metadata: SnapshotMetadata, cause: Throwable) extends SnapshotProtocol.Response /** * Sent to a [[PersistentActor]] after failed deletion of a range of snapshots. 
@@ -80,7 +75,7 @@ final case class DeleteSnapshotFailure(metadata: SnapshotMetadata, cause: Throwa */ @SerialVersionUID(1L) final case class DeleteSnapshotsFailure(criteria: SnapshotSelectionCriteria, cause: Throwable) - extends SnapshotProtocol.Response + extends SnapshotProtocol.Response /** * Offers a [[PersistentActor]] a previously saved `snapshot` during recovery. This offer is received @@ -104,11 +99,10 @@ final case class SnapshotOffer(metadata: SnapshotMetadata, snapshot: Any) * @see [[Recovery]] */ @SerialVersionUID(1L) -final case class SnapshotSelectionCriteria( - maxSequenceNr: Long = Long.MaxValue, - maxTimestamp: Long = Long.MaxValue, - minSequenceNr: Long = 0L, - minTimestamp: Long = 0L) { +final case class SnapshotSelectionCriteria(maxSequenceNr: Long = Long.MaxValue, + maxTimestamp: Long = Long.MaxValue, + minSequenceNr: Long = 0L, + minTimestamp: Long = 0L) { /** * INTERNAL API. @@ -121,10 +115,11 @@ final case class SnapshotSelectionCriteria( */ private[persistence] def matches(metadata: SnapshotMetadata): Boolean = metadata.sequenceNr <= maxSequenceNr && metadata.timestamp <= maxTimestamp && - metadata.sequenceNr >= minSequenceNr && metadata.timestamp >= minTimestamp + metadata.sequenceNr >= minSequenceNr && metadata.timestamp >= minTimestamp } object SnapshotSelectionCriteria { + /** * The latest saved snapshot. */ @@ -144,8 +139,7 @@ object SnapshotSelectionCriteria { /** * Java API. */ - def create(maxSequenceNr: Long, maxTimestamp: Long, - minSequenceNr: Long, minTimestamp: Long) = + def create(maxSequenceNr: Long, maxTimestamp: Long, minSequenceNr: Long, minTimestamp: Long) = SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, minSequenceNr, minTimestamp) /** @@ -168,6 +162,7 @@ object SnapshotSelectionCriteria { final case class SelectedSnapshot(metadata: SnapshotMetadata, snapshot: Any) object SelectedSnapshot { + /** * Java API, Plugin API. 
*/ @@ -184,8 +179,10 @@ private[persistence] object SnapshotProtocol { /** Marker trait shared by internal snapshot messages. */ sealed trait Message extends Protocol.Message + /** Internal snapshot command. */ sealed trait Request extends Message + /** Internal snapshot acknowledgement. */ sealed trait Response extends Message @@ -197,15 +194,14 @@ private[persistence] object SnapshotProtocol { * @param toSequenceNr upper sequence number bound (inclusive) for recovery. */ final case class LoadSnapshot(persistenceId: String, criteria: SnapshotSelectionCriteria, toSequenceNr: Long) - extends Request + extends Request /** * Response message to a [[LoadSnapshot]] message. * * @param snapshot loaded snapshot, if any. */ - final case class LoadSnapshotResult(snapshot: Option[SelectedSnapshot], toSequenceNr: Long) - extends Response + final case class LoadSnapshotResult(snapshot: Option[SelectedSnapshot], toSequenceNr: Long) extends Response /** * Reply message to a failed [[LoadSnapshot]] request. @@ -219,16 +215,14 @@ private[persistence] object SnapshotProtocol { * @param metadata snapshot metadata. * @param snapshot snapshot. */ - final case class SaveSnapshot(metadata: SnapshotMetadata, snapshot: Any) - extends Request + final case class SaveSnapshot(metadata: SnapshotMetadata, snapshot: Any) extends Request /** * Instructs snapshot store to delete a snapshot. * * @param metadata snapshot metadata. */ - final case class DeleteSnapshot(metadata: SnapshotMetadata) - extends Request + final case class DeleteSnapshot(metadata: SnapshotMetadata) extends Request /** * Instructs snapshot store to delete all snapshots that match `criteria`. @@ -236,6 +230,5 @@ private[persistence] object SnapshotProtocol { * @param persistenceId persistent actor id. * @param criteria criteria for selecting snapshots to be deleted. 
*/ - final case class DeleteSnapshots(persistenceId: String, criteria: SnapshotSelectionCriteria) - extends Request + final case class DeleteSnapshots(persistenceId: String, criteria: SnapshotSelectionCriteria) extends Request } diff --git a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala index 1b9e295ad6..2e539b6392 100644 --- a/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala +++ b/akka-persistence/src/main/scala/akka/persistence/fsm/PersistentFSMBase.scala @@ -5,7 +5,7 @@ package akka.persistence.fsm import akka.actor._ -import akka.japi.pf.{ UnitPFBuilder, UnitMatch, FSMTransitionHandlerBuilder } +import akka.japi.pf.{ FSMTransitionHandlerBuilder, UnitMatch, UnitPFBuilder } import language.implicitConversions import scala.collection.mutable @@ -126,7 +126,6 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging * DSL * **************************************** */ - /** * Insert a new StateFunction at the end of the processing chain for the * given state. If the stateTimeout parameter is set, entering this state @@ -173,7 +172,9 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging * * @return descriptor for staying in current state */ - final def stay(): State = goto(currentState.stateName).withNotification(false) // cannot directly use currentState because of the timeout field + final def stay(): State = + goto(currentState.stateName) + .withNotification(false) // cannot directly use currentState because of the timeout field /** * Produce change descriptor to stop this FSM actor with reason "Normal". 
@@ -192,7 +193,7 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging final class TransformHelper(func: StateFunction) { def using(andThen: PartialFunction[State, State]): StateFunction = - func andThen (andThen orElse { case x => x }) + func.andThen(andThen.orElse { case x => x }) } final def transform(func: StateFunction): TransformHelper = new TransformHelper(func) @@ -299,7 +300,7 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging * The current state may be queried using ``stateName``. */ final def whenUnhandled(stateFunction: StateFunction): Unit = - handleEvent = stateFunction orElse handleEventDefault + handleEvent = stateFunction.orElse(handleEventDefault) /** * Verify existence of initial state and setup timers. Used in [[akka.persistence.fsm.PersistentFSM]] @@ -361,7 +362,7 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging * Timer handling */ private val timers = mutable.Map[String, Timer]() - private val timerGen = Iterator from 0 + private val timerGen = Iterator.from(0) /* * State definitions @@ -371,8 +372,8 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging private def register(name: S, function: StateFunction, timeout: Timeout): Unit = { if (stateFunctions contains name) { - stateFunctions(name) = stateFunctions(name) orElse function - stateTimeouts(name) = timeout orElse stateTimeouts(name) + stateFunctions(name) = stateFunctions(name).orElse(function) + stateTimeouts(name) = timeout.orElse(stateTimeouts(name)) } else { stateFunctions(name) = function stateTimeouts(name) = timeout @@ -455,7 +456,7 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging private[akka] def processEvent(event: Event, source: AnyRef): Unit = { val stateFunc = stateFunctions(currentState.stateName) - val nextState = if (stateFunc isDefinedAt event) { + val nextState = if (stateFunc.isDefinedAt(event)) { stateFunc(event) 
} else { // handleEventDefault ensures that this is always defined @@ -468,7 +469,9 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging nextState.stopReason match { case None => makeTransition(nextState) case _ => - nextState.replies.reverse foreach { r => sender() ! r } + nextState.replies.reverse.foreach { r => + sender() ! r + } terminate(nextState) context.stop(self) } @@ -476,9 +479,11 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging private[akka] def makeTransition(nextState: State): Unit = { if (!stateFunctions.contains(nextState.stateName)) { - terminate(stay withStopReason Failure("Next state %s does not exist".format(nextState.stateName))) + terminate(stay.withStopReason(Failure("Next state %s does not exist".format(nextState.stateName)))) } else { - nextState.replies.reverse foreach { r => sender() ! r } + nextState.replies.reverse.foreach { r => + sender() ! r + } if (currentState.stateName != nextState.stateName || nextState.notifies) { this.nextState = nextState handleTransition(currentState.stateName, nextState.stateName) @@ -516,7 +521,7 @@ trait PersistentFSMBase[S, D, E] extends Actor with Listeners with ActorLogging * setting this instance’s state to terminated does no harm during restart * since the new instance will initialize fresh using startWith() */ - terminate(stay withStopReason Shutdown) + terminate(stay.withStopReason(Shutdown)) super.postStop() } @@ -604,7 +609,8 @@ trait LoggingPersistentFSM[S, D, E] extends PersistentFSMBase[S, D, E] { this: A * The log entries are lost when this actor is restarted. 
*/ protected def getLog: IndexedSeq[LogEntry[S, D]] = { - val log = events zip states filter (_._1 ne null) map (x => LogEntry(x._2.asInstanceOf[S], x._1.stateData, x._1.event)) + val log = + events.zip(states).filter(_._1 ne null).map(x => LogEntry(x._2.asInstanceOf[S], x._1.stateData, x._1.event)) if (full) { IndexedSeq() ++ log.drop(pos) ++ log.take(pos) } else { @@ -619,6 +625,7 @@ trait LoggingPersistentFSM[S, D, E] extends PersistentFSMBase[S, D, E] { this: A * */ object AbstractPersistentFSMBase { + /** * A partial function value which does not match anything and can be used to * “reset” `whenUnhandled` and `onTermination` handlers. @@ -699,10 +706,9 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D * @param stateTimeout default state timeout for this state * @param stateFunctionBuilder partial function builder describing response to input */ - final def when( - stateName: S, - stateTimeout: FiniteDuration, - stateFunctionBuilder: FSMStateFunctionBuilder[S, D, E]): Unit = + final def when(stateName: S, + stateTimeout: FiniteDuration, + stateFunctionBuilder: FSMStateFunctionBuilder[S, D, E]): Unit = when(stateName, stateTimeout)(stateFunctionBuilder.build()) /** @@ -775,7 +781,10 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEvent[ET, DT <: D](eventType: Class[ET], dataType: Class[DT], predicate: TypedPredicate2[ET, DT], apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D, E] = + final def matchEvent[ET, DT <: D](eventType: Class[ET], + dataType: Class[DT], + predicate: TypedPredicate2[ET, DT], + apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D, E] = new FSMStateFunctionBuilder[S, D, E]().event(eventType, dataType, predicate, apply) /** @@ -788,7 +797,9 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends 
PersistentFSMBase[S, D * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEvent[ET, DT <: D](eventType: Class[ET], dataType: Class[DT], apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D, E] = + final def matchEvent[ET, DT <: D](eventType: Class[ET], + dataType: Class[DT], + apply: Apply2[ET, DT, State]): FSMStateFunctionBuilder[S, D, E] = new FSMStateFunctionBuilder[S, D, E]().event(eventType, dataType, apply) /** @@ -801,7 +812,9 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEvent[ET](eventType: Class[ET], predicate: TypedPredicate2[ET, D], apply: Apply2[ET, D, State]): FSMStateFunctionBuilder[S, D, E] = + final def matchEvent[ET](eventType: Class[ET], + predicate: TypedPredicate2[ET, D], + apply: Apply2[ET, D, State]): FSMStateFunctionBuilder[S, D, E] = new FSMStateFunctionBuilder[S, D, E]().event(eventType, predicate, apply) /** @@ -825,7 +838,8 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEvent(predicate: TypedPredicate2[AnyRef, D], apply: Apply2[AnyRef, D, State]): FSMStateFunctionBuilder[S, D, E] = + final def matchEvent(predicate: TypedPredicate2[AnyRef, D], + apply: Apply2[AnyRef, D, State]): FSMStateFunctionBuilder[S, D, E] = new FSMStateFunctionBuilder[S, D, E]().event(predicate, apply) /** @@ -839,7 +853,9 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEvent[DT <: 
D](eventMatches: JList[AnyRef], dataType: Class[DT], apply: Apply2[AnyRef, DT, State]): FSMStateFunctionBuilder[S, D, E] = + final def matchEvent[DT <: D](eventMatches: JList[AnyRef], + dataType: Class[DT], + apply: Apply2[AnyRef, DT, State]): FSMStateFunctionBuilder[S, D, E] = new FSMStateFunctionBuilder[S, D, E]().event(eventMatches, dataType, apply) /** @@ -865,7 +881,9 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D * @param apply an action to apply to the event and state data if there is a match * @return the builder with the case statement added */ - final def matchEventEquals[Ev, DT <: D](event: Ev, dataType: Class[DT], apply: Apply2[Ev, DT, State]): FSMStateFunctionBuilder[S, D, E] = + final def matchEventEquals[Ev, DT <: D](event: Ev, + dataType: Class[DT], + apply: Apply2[Ev, DT, State]): FSMStateFunctionBuilder[S, D, E] = new FSMStateFunctionBuilder[S, D, E]().eventEquals(event, dataType, apply) /** @@ -951,7 +969,9 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D * @param predicate a predicate that will be evaluated on the reason if the type matches * @return the builder with the case statement added */ - final def matchStop[RT <: Reason](reasonType: Class[RT], predicate: TypedPredicate[RT], apply: UnitApply3[RT, S, D]): FSMStopBuilder[S, D] = + final def matchStop[RT <: Reason](reasonType: Class[RT], + predicate: TypedPredicate[RT], + apply: UnitApply3[RT, S, D]): FSMStopBuilder[S, D] = new FSMStopBuilder[S, D]().stop(reasonType, predicate, apply) /** @@ -972,7 +992,9 @@ abstract class AbstractPersistentFSMBase[S, D, E] extends PersistentFSMBase[S, D * @param apply an action to apply to the argument if the type and predicate matches * @return a builder with the case statement added */ - final def matchData[DT <: D](dataType: Class[DT], predicate: TypedPredicate[DT], apply: UnitApply[DT]): UnitPFBuilder[D] = + final def matchData[DT <: D](dataType: Class[DT], + predicate: 
TypedPredicate[DT], + apply: UnitApply[DT]): UnitPFBuilder[D] = UnitMatch.`match`(dataType, predicate, apply) /** diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala index a9877eeb93..e7c66d6058 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala @@ -44,8 +44,8 @@ trait AsyncRecovery { * * @see [[AsyncWriteJournal]] */ - def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, - max: Long)(recoveryCallback: PersistentRepr => Unit): Future[Unit] + def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( + recoveryCallback: PersistentRepr => Unit): Future[Unit] /** * Plugin API: asynchronously reads the highest stored sequence number for the diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala index afbda0933d..98b5494985 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala @@ -40,8 +40,9 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { case "repair-by-discard-old" => ReplayFilter.RepairByDiscardOld case "fail" => ReplayFilter.Fail case "warn" => ReplayFilter.Warn - case other => throw new IllegalArgumentException( - s"invalid replay-filter.mode [$other], supported values [off, repair-by-discard-old, fail, warn]") + case other => + throw new IllegalArgumentException( + s"invalid replay-filter.mode [$other], supported values [off, repair-by-discard-old, fail, warn]") } private def isReplayFilterEnabled: Boolean = replayFilterMode != ReplayFilter.Disabled private val 
replayFilterWindowSize: Int = config.getInt("replay-filter.window-size") @@ -73,7 +74,8 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { Future.successful(messages.collect { case a: AtomicWrite => f }) }).map { results => if (results.nonEmpty && results.size != atomicWriteCount) - throw new IllegalStateException("asyncWriteMessages returned invalid number of results. " + + throw new IllegalStateException( + "asyncWriteMessages returned invalid number of results. " + s"Expected [${prepared.get.size}], but got [${results.size}]") results } @@ -96,7 +98,10 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { } case Failure(e) => a.payload.foreach { p => - resequencer ! Desequenced(WriteMessageRejected(p, e, actorInstanceId), n, persistentActor, p.sender) + resequencer ! Desequenced(WriteMessageRejected(p, e, actorInstanceId), + n, + persistentActor, + p.sender) n += 1 } } @@ -123,8 +128,13 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { case r @ ReplayMessages(fromSequenceNr, toSequenceNr, max, persistenceId, persistentActor) => val replyTo = - if (isReplayFilterEnabled) context.actorOf(ReplayFilter.props(persistentActor, replayFilterMode, - replayFilterWindowSize, replayFilterMaxOldWriters, replayDebugEnabled)) + if (isReplayFilterEnabled) + context.actorOf( + ReplayFilter.props(persistentActor, + replayFilterMode, + replayFilterWindowSize, + replayFilterMaxOldWriters, + replayDebugEnabled)) else persistentActor val readHighestSequenceNrFrom = math.max(0L, fromSequenceNr - 1) @@ -133,7 +143,8 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { * being called before a call to asyncReplayMessages even tho it currently always is. The Cassandra * plugin does rely on this so if you change this change the Cassandra plugin. 
*/ - breaker.withCircuitBreaker(asyncReadHighestSequenceNr(persistenceId, readHighestSequenceNrFrom)) + breaker + .withCircuitBreaker(asyncReadHighestSequenceNr(persistenceId, readHighestSequenceNrFrom)) .flatMap { highSeqNr => val toSeqNr = math.min(toSequenceNr, highSeqNr) if (toSeqNr <= 0L || fromSequenceNr > toSeqNr) @@ -149,22 +160,31 @@ trait AsyncWriteJournal extends Actor with WriteJournalBase with AsyncRecovery { } }.map(_ => highSeqNr) } - }.map { - highSeqNr => RecoverySuccess(highSeqNr) - }.recover { + } + .map { highSeqNr => + RecoverySuccess(highSeqNr) + } + .recover { case e => ReplayMessagesFailure(e) - }.pipeTo(replyTo).foreach { - _ => if (publish) context.system.eventStream.publish(r) + } + .pipeTo(replyTo) + .foreach { _ => + if (publish) context.system.eventStream.publish(r) } case d @ DeleteMessagesTo(persistenceId, toSequenceNr, persistentActor) => - breaker.withCircuitBreaker(asyncDeleteMessagesTo(persistenceId, toSequenceNr)) map { - _ => DeleteMessagesSuccess(toSequenceNr) - } recover { - case e => DeleteMessagesFailure(e, toSequenceNr) - } pipeTo persistentActor onComplete { - _ => if (publish) context.system.eventStream.publish(d) - } + breaker + .withCircuitBreaker(asyncDeleteMessagesTo(persistenceId, toSequenceNr)) + .map { _ => + DeleteMessagesSuccess(toSequenceNr) + } + .recover { + case e => DeleteMessagesFailure(e, toSequenceNr) + } + .pipeTo(persistentActor) + .onComplete { _ => + if (publish) context.system.eventStream.publish(d) + } } } @@ -267,7 +287,7 @@ private[persistence] object AsyncWriteJournal { val successUnit: Success[Unit] = Success(()) final case class Desequenced(msg: Any, snr: Long, target: ActorRef, sender: ActorRef) - extends NoSerializationVerificationNeeded + extends NoSerializationVerificationNeeded class Resequencer extends Actor { import scala.collection.mutable.Map diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala 
b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala index 08c7233378..c296aed312 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteProxy.scala @@ -29,8 +29,10 @@ private[persistence] trait AsyncWriteProxy extends AsyncWriteJournal with Stash private var isInitTimedOut = false protected var store: Option[ActorRef] = None private val storeNotInitialized = - Future.failed(new TimeoutException("Store not initialized. " + - "Use `SharedLeveldbJournal.setStore(sharedStore, system)`")) + Future.failed( + new TimeoutException( + "Store not initialized. " + + "Use `SharedLeveldbJournal.setStore(sharedStore, system)`")) override protected[akka] def aroundPreStart(): Unit = { context.system.scheduler.scheduleOnce(timeout.duration, self, InitTimeout) @@ -40,17 +42,18 @@ private[persistence] trait AsyncWriteProxy extends AsyncWriteJournal with Stash override protected[akka] def aroundReceive(receive: Receive, msg: Any): Unit = if (isInitialized) { if (msg != InitTimeout) super.aroundReceive(receive, msg) - } else msg match { - case SetStore(ref) => - store = Some(ref) - unstashAll() - isInitialized = true - case InitTimeout => - isInitTimedOut = true - unstashAll() // will trigger appropriate failures - case _ if isInitTimedOut => super.aroundReceive(receive, msg) - case _ => stash() - } + } else + msg match { + case SetStore(ref) => + store = Some(ref) + unstashAll() + isInitialized = true + case InitTimeout => + isInitTimedOut = true + unstashAll() // will trigger appropriate failures + case _ if isInitTimedOut => super.aroundReceive(receive, msg) + case _ => stash() + } implicit def timeout: Timeout @@ -66,11 +69,14 @@ private[persistence] trait AsyncWriteProxy extends AsyncWriteJournal with Stash case None => storeNotInitialized } - def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: 
Long)(replayCallback: PersistentRepr => Unit): Future[Unit] = + def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( + replayCallback: PersistentRepr => Unit): Future[Unit] = store match { case Some(s) => val replayCompletionPromise = Promise[Unit]() - val mediator = context.actorOf(Props(classOf[ReplayMediator], replayCallback, replayCompletionPromise, timeout.duration).withDeploy(Deploy.local)) + val mediator = context.actorOf( + Props(classOf[ReplayMediator], replayCallback, replayCompletionPromise, timeout.duration) + .withDeploy(Deploy.local)) s.tell(ReplayMessages(persistenceId, fromSequenceNr, toSequenceNr, max), mediator) replayCompletionPromise.future case None => storeNotInitialized @@ -122,7 +128,10 @@ private[persistence] object AsyncWriteTarget { @SerialVersionUID(1L) class AsyncReplayTimeoutException(msg: String) extends AkkaException(msg) -private class ReplayMediator(replayCallback: PersistentRepr => Unit, replayCompletionPromise: Promise[Unit], replayTimeout: Duration) extends Actor { +private class ReplayMediator(replayCallback: PersistentRepr => Unit, + replayCompletionPromise: Promise[Unit], + replayTimeout: Duration) + extends Actor { import AsyncWriteTarget._ context.setReceiveTimeout(replayTimeout) @@ -136,7 +145,8 @@ private class ReplayMediator(replayCallback: PersistentRepr => Unit, replayCompl replayCompletionPromise.failure(cause) context.stop(self) case ReceiveTimeout => - replayCompletionPromise.failure(new AsyncReplayTimeoutException(s"replay timed out after ${replayTimeout.toSeconds} seconds inactivity")) + replayCompletionPromise.failure( + new AsyncReplayTimeoutException(s"replay timed out after ${replayTimeout.toSeconds} seconds inactivity")) context.stop(self) } } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala index e8fc152733..396655c33b 100644 --- 
a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapter.scala @@ -88,6 +88,7 @@ sealed abstract class EventSeq { def events: immutable.Seq[Any] } object EventSeq { + /** Java API */ final def empty: EventSeq = EmptyEventSeq diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala index 5cb9516249..3edeef810f 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/EventAdapters.scala @@ -20,10 +20,9 @@ import scala.util.Try /** * `EventAdapters` serves as a per-journal collection of bound event adapters. */ -class EventAdapters( - map: ConcurrentHashMap[Class[_], EventAdapter], - bindings: immutable.Seq[(Class[_], EventAdapter)], - log: LoggingAdapter) { +class EventAdapters(map: ConcurrentHashMap[Class[_], EventAdapter], + bindings: immutable.Seq[(Class[_], EventAdapter)], + log: LoggingAdapter) { /** * Finds the "most specific" matching adapter for the given class (i.e. 
it may return an adapter that can work on a @@ -34,8 +33,8 @@ class EventAdapters( def get(clazz: Class[_]): EventAdapter = { map.get(clazz) match { case null => // bindings are ordered from most specific to least specific - val value = bindings filter { - _._1 isAssignableFrom clazz + val value = bindings.filter { + _._1.isAssignableFrom(clazz) } match { case (_, bestMatch) +: _ => bestMatch case _ => IdentityEventAdapter @@ -71,18 +70,17 @@ private[akka] object EventAdapters { apply(system, adapters, adapterBindings) } - private def apply( - system: ExtendedActorSystem, - adapters: Map[Name, FQN], - adapterBindings: Map[FQN, BoundAdapters]): EventAdapters = { + private def apply(system: ExtendedActorSystem, + adapters: Map[Name, FQN], + adapterBindings: Map[FQN, BoundAdapters]): EventAdapters = { val adapterNames = adapters.keys.toSet for { (fqn, boundToAdapters) <- adapterBindings boundAdapter <- boundToAdapters - } require( - adapterNames(boundAdapter.toString), - s"$fqn was bound to undefined event-adapter: $boundAdapter (bindings: ${boundToAdapters.mkString("[", ", ", "]")}, known adapters: ${adapters.keys.mkString})") + } require(adapterNames(boundAdapter.toString), + s"$fqn was bound to undefined event-adapter: $boundAdapter (bindings: ${boundToAdapters + .mkString("[", ", ", "]")}, known adapters: ${adapters.keys.mkString})") // A Map of handler from alias to implementation (i.e. class implementing akka.serialization.Serializer) // For example this defines a handler named 'country': `"country" -> com.example.comain.CountryTagsAdapter` @@ -92,24 +90,29 @@ private[akka] object EventAdapters { // It is primarily ordered by the most specific classes first, and secondly in the configured order. 
val bindings: immutable.Seq[ClassHandler] = { val bs = for ((k: FQN, as: BoundAdapters) <- adapterBindings) - yield if (as.size == 1) (system.dynamicAccess.getClassFor[Any](k).get, handlers(as.head)) - else (system.dynamicAccess.getClassFor[Any](k).get, NoopWriteEventAdapter(CombinedReadEventAdapter(as.map(handlers)))) + yield + if (as.size == 1) (system.dynamicAccess.getClassFor[Any](k).get, handlers(as.head)) + else + (system.dynamicAccess.getClassFor[Any](k).get, + NoopWriteEventAdapter(CombinedReadEventAdapter(as.map(handlers)))) sort(bs) } - val backing = bindings.foldLeft(new ConcurrentHashMap[Class[_], EventAdapter]) { case (map, (c, s)) => map.put(c, s); map } + val backing = bindings.foldLeft(new ConcurrentHashMap[Class[_], EventAdapter]) { + case (map, (c, s)) => map.put(c, s); map + } new EventAdapters(backing, bindings, system.log) } def instantiateAdapter(adapterFQN: String, system: ExtendedActorSystem): Try[EventAdapter] = { val clazz = system.dynamicAccess.getClassFor[Any](adapterFQN).get - if (classOf[EventAdapter] isAssignableFrom clazz) + if (classOf[EventAdapter].isAssignableFrom(clazz)) instantiate[EventAdapter](adapterFQN, system) - else if (classOf[WriteEventAdapter] isAssignableFrom clazz) + else if (classOf[WriteEventAdapter].isAssignableFrom(clazz)) instantiate[WriteEventAdapter](adapterFQN, system).map(NoopReadEventAdapter) - else if (classOf[ReadEventAdapter] isAssignableFrom clazz) + else if (classOf[ReadEventAdapter].isAssignableFrom(clazz)) instantiate[ReadEventAdapter](adapterFQN, system).map(NoopWriteEventAdapter) else throw new IllegalArgumentException(s"Configured $adapterFQN does not implement any EventAdapter interface!") @@ -117,7 +120,8 @@ private[akka] object EventAdapters { /** INTERNAL API */ private[akka] case class CombinedReadEventAdapter(adapters: immutable.Seq[EventAdapter]) extends EventAdapter { - private def onlyReadSideException = new IllegalStateException("CombinedReadEventAdapter must not be used when writing 
(creating manifests) events!") + private def onlyReadSideException = + new IllegalStateException("CombinedReadEventAdapter must not be used when writing (creating manifests) events!") override def manifest(event: Any): String = throw onlyReadSideException override def toJournal(event: Any): Any = throw onlyReadSideException @@ -133,7 +137,7 @@ private[akka] object EventAdapters { * loading is performed by the system’s [[akka.actor.DynamicAccess]]. */ private def instantiate[T: ClassTag](fqn: FQN, system: ExtendedActorSystem): Try[T] = - system.dynamicAccess.createInstanceFor[T](fqn, List(classOf[ExtendedActorSystem] -> system)) recoverWith { + system.dynamicAccess.createInstanceFor[T](fqn, List(classOf[ExtendedActorSystem] -> system)).recoverWith { case _: NoSuchMethodException => system.dynamicAccess.createInstanceFor[T](fqn, Nil) } @@ -143,26 +147,27 @@ private[akka] object EventAdapters { */ private def sort[T](in: Iterable[(Class[_], T)]): immutable.Seq[(Class[_], T)] = in.foldLeft(new ArrayBuffer[(Class[_], T)](in.size)) { (buf, ca) => - buf.indexWhere(_._1 isAssignableFrom ca._1) match { - case -1 => buf append ca - case x => buf insert (x, ca) + buf.indexWhere(_._1.isAssignableFrom(ca._1)) match { + case -1 => buf.append(ca) + case x => buf.insert(x, ca) + } + buf } - buf - }.to(immutable.Seq) + .to(immutable.Seq) private final def configToMap(config: Config, path: String): Map[String, String] = { import scala.collection.JavaConverters._ if (config.hasPath(path)) { - config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) => k -> v.toString } + config.getConfig(path).root.unwrapped.asScala.toMap.map { case (k, v) => k -> v.toString } } else Map.empty } private final def configToListMap(config: Config, path: String): Map[String, immutable.Seq[String]] = { import scala.collection.JavaConverters._ if (config.hasPath(path)) { - config.getConfig(path).root.unwrapped.asScala.toMap map { + config.getConfig(path).root.unwrapped.asScala.toMap.map { case 
(k, v: util.ArrayList[_]) if v.isInstanceOf[util.ArrayList[_]] => k -> v.asScala.map(_.toString).toList - case (k, v) => k -> List(v.toString) + case (k, v) => k -> List(v.toString) } } else Map.empty } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/PersistencePluginProxy.scala b/akka-persistence/src/main/scala/akka/persistence/journal/PersistencePluginProxy.scala index 48b7f2f063..7f775169b9 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/PersistencePluginProxy.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/PersistencePluginProxy.scala @@ -8,7 +8,17 @@ import java.net.URISyntaxException import java.util.concurrent.TimeoutException import akka.actor._ -import akka.persistence.{ AtomicWrite, DeleteMessagesFailure, DeleteSnapshotFailure, DeleteSnapshotsFailure, JournalProtocol, NonPersistentRepr, Persistence, SaveSnapshotFailure, SnapshotProtocol } +import akka.persistence.{ + AtomicWrite, + DeleteMessagesFailure, + DeleteSnapshotFailure, + DeleteSnapshotsFailure, + JournalProtocol, + NonPersistentRepr, + Persistence, + SaveSnapshotFailure, + SnapshotProtocol +} import akka.util.Helpers.Requiring import com.typesafe.config.Config @@ -51,8 +61,11 @@ class PersistencePluginProxyExtensionImpl(system: ActorSystem) extends Extension PersistencePluginProxy.start(system) } -object PersistencePluginProxyExtension extends ExtensionId[PersistencePluginProxyExtensionImpl] with ExtensionIdProvider { - override def createExtension(system: ExtendedActorSystem): PersistencePluginProxyExtensionImpl = new PersistencePluginProxyExtensionImpl(system) +object PersistencePluginProxyExtension + extends ExtensionId[PersistencePluginProxyExtensionImpl] + with ExtensionIdProvider { + override def createExtension(system: ExtendedActorSystem): PersistencePluginProxyExtensionImpl = + new PersistencePluginProxyExtensionImpl(system) override def lookup(): ExtensionId[_ <: Extension] = PersistencePluginProxyExtension override def 
get(system: ActorSystem): PersistencePluginProxyExtensionImpl = super.get(system) } @@ -96,7 +109,8 @@ final class PersistencePluginProxy(config: Config) extends Actor with Stash with log.info("Setting target {} address to {}", pluginType.qualifier, targetAddress) PersistencePluginProxy.setTargetLocation(context.system, AddressFromURIString(targetAddress)) } catch { - case _: URISyntaxException => log.warning("Invalid URL provided for target {} address: {}", pluginType.qualifier, targetAddress) + case _: URISyntaxException => + log.warning("Invalid URL provided for target {} address: {}", pluginType.qualifier, targetAddress) } } @@ -107,8 +121,10 @@ final class PersistencePluginProxy(config: Config) extends Actor with Stash with private val selfAddress: Address = context.system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress - private def timeoutException() = new TimeoutException(s"Target ${pluginType.qualifier} not initialized. " + - s"Use `PersistencePluginProxy.setTargetLocation` or set `target-${pluginType.qualifier}-address`") + private def timeoutException() = + new TimeoutException( + s"Target ${pluginType.qualifier} not initialized. 
" + + s"Use `PersistencePluginProxy.setTargetLocation` or set `target-${pluginType.qualifier}-address`") def receive = init @@ -117,7 +133,10 @@ final class PersistencePluginProxy(config: Config) extends Actor with Stash with context.setReceiveTimeout(1.second) // for retries context.become(identifying(address)) case InitTimeout => - log.info("Initialization timed-out (after {}), Use `PersistencePluginProxy.setTargetLocation` or set `target-{}-address`", initTimeout, pluginType.qualifier) + log.info( + "Initialization timed-out (after {}), Use `PersistencePluginProxy.setTargetLocation` or set `target-{}-address`", + initTimeout, + pluginType.qualifier) context.become(initTimedOut) unstashAll() // will trigger appropriate failures case Terminated(_) => @@ -137,18 +156,19 @@ final class PersistencePluginProxy(config: Config) extends Actor with Stash with sel ! Identify(targetPluginId) } - def identifying(address: Address): Receive = ({ - case ActorIdentity(`targetPluginId`, Some(target)) => - log.info("Found target {} at [{}]", pluginType.qualifier, address) - context.setReceiveTimeout(Duration.Undefined) - context.watch(target) - unstashAll() - context.become(active(target, address == selfAddress)) - case _: ActorIdentity => // will retry after ReceiveTimeout - case Terminated(_) => - case ReceiveTimeout => - sendIdentify(address) - }: Receive).orElse(init) + def identifying(address: Address): Receive = + ({ + case ActorIdentity(`targetPluginId`, Some(target)) => + log.info("Found target {} at [{}]", pluginType.qualifier, address) + context.setReceiveTimeout(Duration.Undefined) + context.watch(target) + unstashAll() + context.become(active(target, address == selfAddress)) + case _: ActorIdentity => // will retry after ReceiveTimeout + case Terminated(_) => + case ReceiveTimeout => + sendIdentify(address) + }: Receive).orElse(init) def active(targetJournal: ActorRef, targetAtThisNode: Boolean): Receive = { case TargetLocation(address) => @@ -160,44 +180,45 @@ final 
class PersistencePluginProxy(config: Config) extends Actor with Stash with case Terminated(_) => case InitTimeout => case msg => - targetJournal forward msg + targetJournal.forward(msg) } def initTimedOut: Receive = { - case req: JournalProtocol.Request => req match { // exhaustive match - case WriteMessages(messages, persistentActor, actorInstanceId) => - persistentActor ! WriteMessagesFailed(timeoutException) - messages.foreach { - case a: AtomicWrite => - a.payload.foreach { p => - persistentActor ! WriteMessageFailure(p, timeoutException, actorInstanceId) - } - case r: NonPersistentRepr => - persistentActor ! LoopMessageSuccess(r.payload, actorInstanceId) - } - case ReplayMessages(fromSequenceNr, toSequenceNr, max, persistenceId, persistentActor) => - persistentActor ! ReplayMessagesFailure(timeoutException) - case DeleteMessagesTo(persistenceId, toSequenceNr, persistentActor) => - persistentActor ! DeleteMessagesFailure(timeoutException, toSequenceNr) - } + case req: JournalProtocol.Request => + req match { // exhaustive match + case WriteMessages(messages, persistentActor, actorInstanceId) => + persistentActor ! WriteMessagesFailed(timeoutException) + messages.foreach { + case a: AtomicWrite => + a.payload.foreach { p => + persistentActor ! WriteMessageFailure(p, timeoutException, actorInstanceId) + } + case r: NonPersistentRepr => + persistentActor ! LoopMessageSuccess(r.payload, actorInstanceId) + } + case ReplayMessages(fromSequenceNr, toSequenceNr, max, persistenceId, persistentActor) => + persistentActor ! ReplayMessagesFailure(timeoutException) + case DeleteMessagesTo(persistenceId, toSequenceNr, persistentActor) => + persistentActor ! DeleteMessagesFailure(timeoutException, toSequenceNr) + } - case req: SnapshotProtocol.Request => req match { // exhaustive match - case LoadSnapshot(persistenceId, criteria, toSequenceNr) => - sender() ! LoadSnapshotFailed(timeoutException) - case SaveSnapshot(metadata, snapshot) => - sender() ! 
SaveSnapshotFailure(metadata, timeoutException) - case DeleteSnapshot(metadata) => - sender() ! DeleteSnapshotFailure(metadata, timeoutException) - case DeleteSnapshots(persistenceId, criteria) => - sender() ! DeleteSnapshotsFailure(criteria, timeoutException) - } + case req: SnapshotProtocol.Request => + req match { // exhaustive match + case LoadSnapshot(persistenceId, criteria, toSequenceNr) => + sender() ! LoadSnapshotFailed(timeoutException) + case SaveSnapshot(metadata, snapshot) => + sender() ! SaveSnapshotFailure(metadata, timeoutException) + case DeleteSnapshot(metadata) => + sender() ! DeleteSnapshotFailure(metadata, timeoutException) + case DeleteSnapshots(persistenceId, criteria) => + sender() ! DeleteSnapshotsFailure(criteria, timeoutException) + } case TargetLocation(address) => becomeIdentifying(address) case Terminated(_) => - case other => val e = timeoutException() log.error(e, "Failed PersistencePluginProxy request: {}", e.getMessage) diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala index a5702673de..a95920cd4f 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/ReplayFilter.scala @@ -19,12 +19,11 @@ import scala.collection.mutable.LinkedHashSet * sequenceNr in the replayed events to find events emitted by overlapping writers. 
*/ private[akka] object ReplayFilter { - def props( - persistentActor: ActorRef, - mode: Mode, - windowSize: Int, - maxOldWriters: Int, - debugEnabled: Boolean): Props = { + def props(persistentActor: ActorRef, + mode: Mode, + windowSize: Int, + maxOldWriters: Int, + debugEnabled: Boolean): Props = { require(windowSize > 0, "windowSize must be > 0") require(maxOldWriters > 0, "maxOldWriters must be > 0") require(mode != Disabled, "mode must not be Disabled") @@ -32,11 +31,8 @@ private[akka] object ReplayFilter { } // for binary compatibility - def props( - persistentActor: ActorRef, - mode: Mode, - windowSize: Int, - maxOldWriters: Int): Props = props(persistentActor, mode, windowSize, maxOldWriters, debugEnabled = false) + def props(persistentActor: ActorRef, mode: Mode, windowSize: Int, maxOldWriters: Int): Props = + props(persistentActor, mode, windowSize, maxOldWriters, debugEnabled = false) sealed trait Mode case object Fail extends Mode @@ -48,15 +44,19 @@ private[akka] object ReplayFilter { /** * INTERNAL API */ -private[akka] class ReplayFilter(persistentActor: ActorRef, mode: ReplayFilter.Mode, - windowSize: Int, maxOldWriters: Int, debugEnabled: Boolean) - extends Actor with ActorLogging { +private[akka] class ReplayFilter(persistentActor: ActorRef, + mode: ReplayFilter.Mode, + windowSize: Int, + maxOldWriters: Int, + debugEnabled: Boolean) + extends Actor + with ActorLogging { import JournalProtocol._ - import ReplayFilter.{ Warn, Fail, RepairByDiscardOld, Disabled } + import ReplayFilter.{ Disabled, Fail, RepairByDiscardOld, Warn } // for binary compatibility - def this(persistentActor: ActorRef, mode: ReplayFilter.Mode, - windowSize: Int, maxOldWriters: Int) = this(persistentActor, mode, windowSize, maxOldWriters, debugEnabled = false) + def this(persistentActor: ActorRef, mode: ReplayFilter.Mode, windowSize: Int, maxOldWriters: Int) = + this(persistentActor, mode, windowSize, maxOldWriters, debugEnabled = false) val buffer = new 
LinkedList[ReplayedMessage]() val oldWriters = LinkedHashSet.empty[String] @@ -151,8 +151,7 @@ private[akka] class ReplayFilter(persistentActor: ActorRef, mode: ReplayFilter.M def sendBuffered(): Unit = { val iter = buffer.iterator() - while (iter.hasNext()) - persistentActor.tell(iter.next(), Actor.noSender) + while (iter.hasNext()) persistentActor.tell(iter.next(), Actor.noSender) buffer.clear() } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/WriteJournalBase.scala b/akka-persistence/src/main/scala/akka/persistence/journal/WriteJournalBase.scala index 09d13c8340..de91e008cc 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/WriteJournalBase.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/WriteJournalBase.scala @@ -24,7 +24,7 @@ private[akka] trait WriteJournalBase { /** INTERNAL API */ private[akka] final def adaptFromJournal(repr: PersistentRepr): immutable.Seq[PersistentRepr] = - eventAdapters.get(repr.payload.getClass).fromJournal(repr.payload, repr.manifest).events map { adaptedPayload => + eventAdapters.get(repr.payload.getClass).fromJournal(repr.payload, repr.manifest).events.map { adaptedPayload => repr.withPayload(adaptedPayload) } diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala index eaa49f7167..1da9c78ac3 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/inmem/InmemJournal.scala @@ -28,7 +28,7 @@ private[persistence] class InmemJournal extends AsyncWriteJournal with InmemMess } override def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( - recoveryCallback: PersistentRepr => Unit): Future[Unit] = { + recoveryCallback: PersistentRepr => Unit): Future[Unit] = { val highest = highestSequenceNr(persistenceId) if 
(highest != 0L && max != 0L) read(persistenceId, fromSequenceNr, math.min(toSequenceNr, highest), max).foreach(recoveryCallback) @@ -53,10 +53,11 @@ private[persistence] trait InmemMessages { // persistenceId -> persistent message var messages = Map.empty[String, Vector[PersistentRepr]] - def add(p: PersistentRepr): Unit = messages = messages + (messages.get(p.persistenceId) match { - case Some(ms) => p.persistenceId -> (ms :+ p) - case None => p.persistenceId -> Vector(p) - }) + def add(p: PersistentRepr): Unit = + messages = messages + (messages.get(p.persistenceId) match { + case Some(ms) => p.persistenceId -> (ms :+ p) + case None => p.persistenceId -> Vector(p) + }) def update(pid: String, snr: Long)(f: PersistentRepr => PersistentRepr): Unit = messages = messages.get(pid) match { case Some(ms) => messages + (pid -> ms.map(sp => if (sp.sequenceNr == snr) f(sp) else sp)) @@ -68,10 +69,11 @@ private[persistence] trait InmemMessages { case None => messages } - def read(pid: String, fromSnr: Long, toSnr: Long, max: Long): immutable.Seq[PersistentRepr] = messages.get(pid) match { - case Some(ms) => ms.filter(m => m.sequenceNr >= fromSnr && m.sequenceNr <= toSnr).take(safeLongToInt(max)) - case None => Nil - } + def read(pid: String, fromSnr: Long, toSnr: Long, max: Long): immutable.Seq[PersistentRepr] = + messages.get(pid) match { + case Some(ms) => ms.filter(m => m.sequenceNr >= fromSnr && m.sequenceNr <= toSnr).take(safeLongToInt(max)) + case None => Nil + } def highestSequenceNr(pid: String): Long = { val snro = for { @@ -84,4 +86,3 @@ private[persistence] trait InmemMessages { private def safeLongToInt(l: Long): Int = if (Int.MaxValue < l) Int.MaxValue else l.toInt } - diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala index c56f3140f0..7166212cb8 100644 --- 
a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncRecovery.scala @@ -17,7 +17,8 @@ import akka.persistence.PersistentRepr abstract class AsyncRecovery extends SAsyncReplay with AsyncRecoveryPlugin { this: Actor => import context.dispatcher - final def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: (PersistentRepr) => Unit) = + final def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( + replayCallback: (PersistentRepr) => Unit) = doAsyncReplayMessages(persistenceId, fromSequenceNr, toSequenceNr, max, new Consumer[PersistentRepr] { def accept(p: PersistentRepr) = replayCallback(p) }).map(Unit.unbox) diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala index 285bde2d86..d1098eb45f 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/japi/AsyncWriteJournal.scala @@ -22,10 +22,12 @@ abstract class AsyncWriteJournal extends AsyncRecovery with SAsyncWriteJournal w final def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = doAsyncWriteMessages(messages.asJava).map { results => - results.asScala.iterator.map { r => - if (r.isPresent) Failure(r.get) - else successUnit - }.to(immutable.IndexedSeq) + results.asScala.iterator + .map { r => + if (r.isPresent) Failure(r.get) + else successUnit + } + .to(immutable.IndexedSeq) } final def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long) = diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala 
b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala index 6258496457..79a2ac01fa 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbCompaction.scala @@ -86,7 +86,8 @@ private[persistence] trait CompactionSegmentManagement { private def isCompactionRequired(persistenceId: String, toSeqNr: Long): Boolean = compactionSegment(persistenceId, toSeqNr) > latestCompactionSegment(persistenceId) - private def latestCompactionSegment(persistenceId: String): Long = latestCompactionSegments.getOrElse(persistenceId, 0L) + private def latestCompactionSegment(persistenceId: String): Long = + latestCompactionSegments.getOrElse(persistenceId, 0L) private def compactionInterval(persistenceId: String): Long = compactionIntervals.getOrElse(persistenceId, compactionIntervals.getOrElse(Wildcard, 0L)) diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala index db0bda80ed..540153c36b 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbIdMapping.scala @@ -50,10 +50,12 @@ private[persistence] trait LeveldbIdMapping extends Actor { this: LeveldbStore = } private def readIdMap(pathMap: Map[String, Int], iter: DBIterator): Map[String, Int] = { - if (!iter.hasNext) pathMap else { + if (!iter.hasNext) pathMap + else { val nextEntry = iter.next() val nextKey = keyFromBytes(nextEntry.getKey) - if (!isMappingKey(nextKey)) pathMap else { + if (!isMappingKey(nextKey)) pathMap + else { val nextVal = new String(nextEntry.getValue, UTF_8) readIdMap(pathMap + (nextVal -> nextKey.mappingId), iter) } diff --git 
a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala index f0315cddde..6ba94ac119 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbJournal.scala @@ -30,7 +30,7 @@ private[persistence] class LeveldbJournal(cfg: Config) extends AsyncWriteJournal if (cfg ne LeveldbStore.emptyConfig) cfg else context.system.settings.config.getConfig("akka.persistence.journal.leveldb") - override def receivePluginInternal: Receive = receiveCompactionInternal orElse { + override def receivePluginInternal: Receive = receiveCompactionInternal.orElse { case r @ ReplayTaggedMessages(fromSequenceNr, toSequenceNr, max, tag, replyTo) => import context.dispatcher val readHighestSequenceNrFrom = math.max(0L, fromSequenceNr - 1) @@ -47,11 +47,14 @@ private[persistence] class LeveldbJournal(cfg: Config) extends AsyncWriteJournal } }.map(_ => highSeqNr) } - }.map { - highSeqNr => RecoverySuccess(highSeqNr) - }.recover { + } + .map { highSeqNr => + RecoverySuccess(highSeqNr) + } + .recover { case e => ReplayMessagesFailure(e) - }.pipeTo(replyTo) + } + .pipeTo(replyTo) case SubscribePersistenceId(persistenceId: String) => addPersistenceIdSubscriber(sender(), persistenceId) @@ -105,10 +108,15 @@ private[persistence] object LeveldbJournal { * `fromSequenceNr` is exclusive * `toSequenceNr` is inclusive */ - final case class ReplayTaggedMessages(fromSequenceNr: Long, toSequenceNr: Long, max: Long, - tag: String, replyTo: ActorRef) extends SubscriptionCommand + final case class ReplayTaggedMessages(fromSequenceNr: Long, + toSequenceNr: Long, + max: Long, + tag: String, + replyTo: ActorRef) + extends SubscriptionCommand final case class ReplayedTaggedMessage(persistent: PersistentRepr, tag: String, offset: Long) - extends DeadLetterSuppression with 
NoSerializationVerificationNeeded + extends DeadLetterSuppression + with NoSerializationVerificationNeeded } /** @@ -117,8 +125,8 @@ private[persistence] object LeveldbJournal { * Journal backed by a [[SharedLeveldbStore]]. For testing only. */ private[persistence] class SharedLeveldbJournal extends AsyncWriteProxy { - val timeout: Timeout = context.system.settings.config.getMillisDuration( - "akka.persistence.journal.leveldb-shared.timeout") + val timeout: Timeout = + context.system.settings.config.getMillisDuration("akka.persistence.journal.leveldb-shared.timeout") override def receivePluginInternal: Receive = { case cmd: LeveldbJournal.SubscriptionCommand => @@ -127,13 +135,15 @@ private[persistence] class SharedLeveldbJournal extends AsyncWriteProxy { case Some(s) => s.forward(cmd) case None => log.error("Failed {} request. " + - "Store not initialized. Use `SharedLeveldbJournal.setStore(sharedStore, system)`", cmd) + "Store not initialized. Use `SharedLeveldbJournal.setStore(sharedStore, system)`", + cmd) } } } object SharedLeveldbJournal { + /** * Sets the shared LevelDB `store` for the given actor `system`. * diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala index 623c483c9e..6d742f337e 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbKey.scala @@ -9,10 +9,7 @@ import java.nio.ByteBuffer /** * LevelDB key. 
*/ -private[leveldb] final case class Key( - persistenceId: Int, - sequenceNr: Long, - mappingId: Int) +private[leveldb] final case class Key(persistenceId: Int, sequenceNr: Long, mappingId: Int) private[leveldb] object Key { def keyToBytes(key: Key): Array[Byte] = { @@ -41,4 +38,3 @@ private[leveldb] object Key { def deletionKey(persistenceId: Int, sequenceNr: Long): Key = Key(persistenceId, sequenceNr, 1) def isDeletionKey(key: Key): Boolean = key.mappingId == 1 } - diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala index 207c561370..6e6924db2d 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbRecovery.scala @@ -26,12 +26,14 @@ private[persistence] trait LeveldbRecovery extends AsyncRecovery { this: Leveldb Future(readHighestSequenceNr(nid))(replayDispatcher) } - def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr => Unit): Future[Unit] = { + def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( + replayCallback: PersistentRepr => Unit): Future[Unit] = { val nid = numericId(persistenceId) Future(replayMessages(nid, fromSequenceNr: Long, toSequenceNr, max: Long)(replayCallback))(replayDispatcher) } - def replayMessages(persistenceId: Int, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr => Unit): Unit = { + def replayMessages(persistenceId: Int, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( + replayCallback: PersistentRepr => Unit): Unit = { @scala.annotation.tailrec def go(iter: DBIterator, key: Key, ctr: Long, replayCallback: PersistentRepr => Unit): Unit = { if (iter.hasNext) { @@ -59,7 +61,8 @@ private[persistence] trait 
LeveldbRecovery extends AsyncRecovery { this: Leveldb if (iter.hasNext) { val nextEntry = iter.peekNext() val nextKey = keyFromBytes(nextEntry.getKey) - if (key.persistenceId == nextKey.persistenceId && key.sequenceNr == nextKey.sequenceNr && isDeletionKey(nextKey)) { + if (key.persistenceId == nextKey.persistenceId && key.sequenceNr == nextKey.sequenceNr && isDeletionKey( + nextKey)) { iter.next() true } else false @@ -73,13 +76,15 @@ private[persistence] trait LeveldbRecovery extends AsyncRecovery { this: Leveldb } } - def asyncReplayTaggedMessages(tag: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: ReplayedTaggedMessage => Unit): Future[Unit] = { + def asyncReplayTaggedMessages(tag: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( + replayCallback: ReplayedTaggedMessage => Unit): Future[Unit] = { val tagNid = tagNumericId(tag) - Future(replayTaggedMessages(tag, tagNid, fromSequenceNr: Long, toSequenceNr, max: Long)(replayCallback))(replayDispatcher) + Future(replayTaggedMessages(tag, tagNid, fromSequenceNr: Long, toSequenceNr, max: Long)(replayCallback))( + replayDispatcher) } def replayTaggedMessages(tag: String, tagNid: Int, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( - replayCallback: ReplayedTaggedMessage => Unit): Unit = { + replayCallback: ReplayedTaggedMessage => Unit): Unit = { @scala.annotation.tailrec def go(iter: DBIterator, key: Key, ctr: Long, replayCallback: ReplayedTaggedMessage => Unit): Unit = { diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala index 88c137c2d3..86761a2607 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/LeveldbStore.scala @@ -32,7 +32,12 @@ private[persistence] object LeveldbStore { /** * INTERNAL API. 
*/ -private[persistence] trait LeveldbStore extends Actor with WriteJournalBase with LeveldbIdMapping with LeveldbRecovery with LeveldbCompaction { +private[persistence] trait LeveldbStore + extends Actor + with WriteJournalBase + with LeveldbIdMapping + with LeveldbRecovery + with LeveldbCompaction { def prepareConfig: Config @@ -44,10 +49,13 @@ private[persistence] trait LeveldbStore extends Actor with WriteJournalBase with val leveldbWriteOptions = new WriteOptions().sync(config.getBoolean("fsync")).snapshot(false) val leveldbDir = new File(config.getString("dir")) var leveldb: DB = _ - override val compactionIntervals: Map[String, Long] = LeveldbStore.toCompactionIntervalMap(config.getObject("compaction-intervals")) + override val compactionIntervals: Map[String, Long] = + LeveldbStore.toCompactionIntervalMap(config.getObject("compaction-intervals")) - private val persistenceIdSubscribers = new mutable.HashMap[String, mutable.Set[ActorRef]] with mutable.MultiMap[String, ActorRef] - private val tagSubscribers = new mutable.HashMap[String, mutable.Set[ActorRef]] with mutable.MultiMap[String, ActorRef] + private val persistenceIdSubscribers = new mutable.HashMap[String, mutable.Set[ActorRef]] + with mutable.MultiMap[String, ActorRef] + private val tagSubscribers = new mutable.HashMap[String, mutable.Set[ActorRef]] + with mutable.MultiMap[String, ActorRef] private var allPersistenceIdsSubscribers = Set.empty[ActorRef] private var tagSequenceNr = Map.empty[String, Long] @@ -66,26 +74,27 @@ private[persistence] trait LeveldbStore extends Actor with WriteJournalBase with var allTags = Set.empty[String] val result = Future.fromTry(Try { - withBatch(batch => messages.map { a => - Try { - a.payload.foreach { p => - val (p2, tags) = p.payload match { - case Tagged(payload, tags) => - (p.withPayload(payload), tags) - case _ => (p, Set.empty[String]) - } - if (tags.nonEmpty && hasTagSubscribers) - allTags = allTags union tags + withBatch(batch => + messages.map { + a => + 
Try { + a.payload.foreach { p => + val (p2, tags) = p.payload match { + case Tagged(payload, tags) => + (p.withPayload(payload), tags) + case _ => (p, Set.empty[String]) + } + if (tags.nonEmpty && hasTagSubscribers) + allTags = allTags.union(tags) - require( - !p2.persistenceId.startsWith(tagPersistenceIdPrefix), - s"persistenceId [${p.persistenceId}] must not start with $tagPersistenceIdPrefix") - addToMessageBatch(p2, tags, batch) - } - if (hasPersistenceIdSubscribers) - persistenceIds += a.persistenceId - } - }) + require(!p2.persistenceId.startsWith(tagPersistenceIdPrefix), + s"persistenceId [${p.persistenceId}] must not start with $tagPersistenceIdPrefix") + addToMessageBatch(p2, tags, batch) + } + if (hasPersistenceIdSubscribers) + persistenceIds += a.persistenceId + } + }) }) if (hasPersistenceIdSubscribers) { @@ -182,7 +191,9 @@ private[persistence] trait LeveldbStore extends Actor with WriteJournalBase with tagPersistenceIdPrefix + tag override def preStart(): Unit = { - leveldb = leveldbFactory.open(leveldbDir, if (nativeLeveldb) leveldbOptions else leveldbOptions.compressionType(CompressionType.NONE)) + leveldb = leveldbFactory.open( + leveldbDir, + if (nativeLeveldb) leveldbOptions else leveldbOptions.compressionType(CompressionType.NONE)) super.preStart() } @@ -198,10 +209,14 @@ private[persistence] trait LeveldbStore extends Actor with WriteJournalBase with protected def removeSubscriber(subscriber: ActorRef): Unit = { val keys = persistenceIdSubscribers.collect { case (k, s) if s.contains(subscriber) => k } - keys.foreach { key => persistenceIdSubscribers.removeBinding(key, subscriber) } + keys.foreach { key => + persistenceIdSubscribers.removeBinding(key, subscriber) + } val tagKeys = tagSubscribers.collect { case (k, s) if s.contains(subscriber) => k } - tagKeys.foreach { key => tagSubscribers.removeBinding(key, subscriber) } + tagKeys.foreach { key => + tagSubscribers.removeBinding(key, subscriber) + } allPersistenceIdsSubscribers -= subscriber } 
@@ -238,4 +253,3 @@ private[persistence] trait LeveldbStore extends Actor with WriteJournalBase with } } - diff --git a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/SharedLeveldbStore.scala b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/SharedLeveldbStore.scala index 7ee8c8d6bc..fc5e6a89f6 100644 --- a/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/SharedLeveldbStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/journal/leveldb/SharedLeveldbStore.scala @@ -29,7 +29,7 @@ class SharedLeveldbStore(cfg: Config) extends LeveldbStore { if (cfg ne LeveldbStore.emptyConfig) cfg.getConfig("store") else context.system.settings.config.getConfig("akka.persistence.journal.leveldb-shared.store") - def receive = receiveCompactionInternal orElse { + def receive = receiveCompactionInternal.orElse { case WriteMessages(messages) => // TODO it would be nice to DRY this with AsyncWriteJournal, but this is using // AsyncWriteProxy message protocol @@ -38,13 +38,15 @@ class SharedLeveldbStore(cfg: Config) extends LeveldbStore { val writeResult = (prepared match { case Success(prep) => // in case the asyncWriteMessages throws - try asyncWriteMessages(prep) catch { case NonFatal(e) => Future.failed(e) } + try asyncWriteMessages(prep) + catch { case NonFatal(e) => Future.failed(e) } case f @ Failure(_) => // exception from preparePersistentBatch => rejected Future.successful(messages.collect { case a: AtomicWrite => f }) }).map { results => if (results.nonEmpty && results.size != atomicWriteCount) - throw new IllegalStateException("asyncWriteMessages returned invalid number of results. " + + throw new IllegalStateException( + "asyncWriteMessages returned invalid number of results. 
" + s"Expected [${prepared.get.size}], but got [${results.size}]") results } @@ -59,20 +61,24 @@ class SharedLeveldbStore(cfg: Config) extends LeveldbStore { // AsyncWriteProxy message protocol val replyTo = sender() val readHighestSequenceNrFrom = math.max(0L, fromSequenceNr - 1) - asyncReadHighestSequenceNr(persistenceId, readHighestSequenceNrFrom).flatMap { highSeqNr => - if (highSeqNr == 0L || max == 0L) - Future.successful(highSeqNr) - else { - val toSeqNr = math.min(toSequenceNr, highSeqNr) - asyncReplayMessages(persistenceId, fromSequenceNr, toSeqNr, max) { p => - if (!p.deleted) // old records from 2.3 may still have the deleted flag - adaptFromJournal(p).foreach(replyTo ! _) - }.map(_ => highSeqNr) + asyncReadHighestSequenceNr(persistenceId, readHighestSequenceNrFrom) + .flatMap { highSeqNr => + if (highSeqNr == 0L || max == 0L) + Future.successful(highSeqNr) + else { + val toSeqNr = math.min(toSequenceNr, highSeqNr) + asyncReplayMessages(persistenceId, fromSequenceNr, toSeqNr, max) { p => + if (!p.deleted) // old records from 2.3 may still have the deleted flag + adaptFromJournal(p).foreach(replyTo ! 
_) + }.map(_ => highSeqNr) + } } - }.map { - highSeqNr => ReplaySuccess(highSeqNr) - }.recover { - case e => ReplayFailure(e) - }.pipeTo(replyTo) + .map { highSeqNr => + ReplaySuccess(highSeqNr) + } + .recover { + case e => ReplayFailure(e) + } + .pipeTo(replyTo) } } diff --git a/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala b/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala index 08925aa4a3..93ad137c01 100644 --- a/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala +++ b/akka-persistence/src/main/scala/akka/persistence/serialization/MessageSerializer.scala @@ -60,15 +60,17 @@ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer */ def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): Message = manifest match { case None => persistent(mf.PersistentMessage.parseFrom(bytes)) - case Some(c) => c match { - case PersistentImplClass => persistent(mf.PersistentMessage.parseFrom(bytes)) - case PersistentReprClass => persistent(mf.PersistentMessage.parseFrom(bytes)) - case AtomicWriteClass => atomicWrite(mf.AtomicWrite.parseFrom(bytes)) - case AtLeastOnceDeliverySnapshotClass => atLeastOnceDeliverySnapshot(mf.AtLeastOnceDeliverySnapshot.parseFrom(bytes)) - case PersistentStateChangeEventClass => stateChange(mf.PersistentStateChangeEvent.parseFrom(bytes)) - case PersistentFSMSnapshotClass => persistentFSMSnapshot(mf.PersistentFSMSnapshot.parseFrom(bytes)) - case _ => throw new NotSerializableException(s"Can't deserialize object of type ${c}") - } + case Some(c) => + c match { + case PersistentImplClass => persistent(mf.PersistentMessage.parseFrom(bytes)) + case PersistentReprClass => persistent(mf.PersistentMessage.parseFrom(bytes)) + case AtomicWriteClass => atomicWrite(mf.AtomicWrite.parseFrom(bytes)) + case AtLeastOnceDeliverySnapshotClass => + atLeastOnceDeliverySnapshot(mf.AtLeastOnceDeliverySnapshot.parseFrom(bytes)) + 
case PersistentStateChangeEventClass => stateChange(mf.PersistentStateChangeEvent.parseFrom(bytes)) + case PersistentFSMSnapshotClass => persistentFSMSnapshot(mf.PersistentFSMSnapshot.parseFrom(bytes)) + case _ => throw new NotSerializableException(s"Can't deserialize object of type ${c}") + } } // @@ -80,10 +82,10 @@ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer builder.setCurrentDeliveryId(snap.currentDeliveryId) snap.unconfirmedDeliveries.foreach { unconfirmed => val unconfirmedBuilder = - mf.AtLeastOnceDeliverySnapshot.UnconfirmedDelivery.newBuilder. - setDeliveryId(unconfirmed.deliveryId). - setDestination(unconfirmed.destination.toString). - setPayload(persistentPayloadBuilder(unconfirmed.message.asInstanceOf[AnyRef])) + mf.AtLeastOnceDeliverySnapshot.UnconfirmedDelivery.newBuilder + .setDeliveryId(unconfirmed.deliveryId) + .setDestination(unconfirmed.destination.toString) + .setPayload(persistentPayloadBuilder(unconfirmed.message.asInstanceOf[AnyRef])) builder.addUnconfirmedDeliveries(unconfirmedBuilder) } builder @@ -97,7 +99,8 @@ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer } } - private[persistence] def persistentFSMSnapshotBuilder(persistentFSMSnapshot: PersistentFSMSnapshot[Any]): mf.PersistentFSMSnapshot.Builder = { + private[persistence] def persistentFSMSnapshotBuilder( + persistentFSMSnapshot: PersistentFSMSnapshot[Any]): mf.PersistentFSMSnapshot.Builder = { val builder = mf.PersistentFSMSnapshot.newBuilder .setStateIdentifier(persistentFSMSnapshot.stateIdentifier) .setData(persistentPayloadBuilder(persistentFSMSnapshot.data.asInstanceOf[AnyRef])) @@ -107,33 +110,35 @@ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer } } - def atLeastOnceDeliverySnapshot(atLeastOnceDeliverySnapshot: mf.AtLeastOnceDeliverySnapshot): AtLeastOnceDeliverySnapshot = { + def atLeastOnceDeliverySnapshot( + atLeastOnceDeliverySnapshot: mf.AtLeastOnceDeliverySnapshot): 
AtLeastOnceDeliverySnapshot = { import scala.collection.JavaConverters._ val unconfirmedDeliveries = new VectorBuilder[UnconfirmedDelivery]() - atLeastOnceDeliverySnapshot.getUnconfirmedDeliveriesList().iterator().asScala foreach { next => - unconfirmedDeliveries += UnconfirmedDelivery(next.getDeliveryId, ActorPath.fromString(next.getDestination), - payload(next.getPayload)) + atLeastOnceDeliverySnapshot.getUnconfirmedDeliveriesList().iterator().asScala.foreach { next => + unconfirmedDeliveries += UnconfirmedDelivery(next.getDeliveryId, + ActorPath.fromString(next.getDestination), + payload(next.getPayload)) } - AtLeastOnceDeliverySnapshot( - atLeastOnceDeliverySnapshot.getCurrentDeliveryId, - unconfirmedDeliveries.result()) + AtLeastOnceDeliverySnapshot(atLeastOnceDeliverySnapshot.getCurrentDeliveryId, unconfirmedDeliveries.result()) } def stateChange(persistentStateChange: mf.PersistentStateChangeEvent): StateChangeEvent = { - StateChangeEvent( - persistentStateChange.getStateIdentifier, - // timeout field is deprecated, left for backward compatibility. timeoutNanos is used instead. - if (persistentStateChange.hasTimeoutNanos) Some(Duration.fromNanos(persistentStateChange.getTimeoutNanos)) - else if (persistentStateChange.hasTimeout) Some(Duration(persistentStateChange.getTimeout).asInstanceOf[duration.FiniteDuration]) - else None) + StateChangeEvent(persistentStateChange.getStateIdentifier, + // timeout field is deprecated, left for backward compatibility. timeoutNanos is used instead. 
+ if (persistentStateChange.hasTimeoutNanos) + Some(Duration.fromNanos(persistentStateChange.getTimeoutNanos)) + else if (persistentStateChange.hasTimeout) + Some(Duration(persistentStateChange.getTimeout).asInstanceOf[duration.FiniteDuration]) + else None) } def persistentFSMSnapshot(persistentFSMSnapshot: mf.PersistentFSMSnapshot): PersistentFSMSnapshot[Any] = { - PersistentFSMSnapshot( - persistentFSMSnapshot.getStateIdentifier, - payload(persistentFSMSnapshot.getData), - if (persistentFSMSnapshot.hasTimeoutNanos) Some(Duration.fromNanos(persistentFSMSnapshot.getTimeoutNanos)) else None) + PersistentFSMSnapshot(persistentFSMSnapshot.getStateIdentifier, + payload(persistentFSMSnapshot.getData), + if (persistentFSMSnapshot.hasTimeoutNanos) + Some(Duration.fromNanos(persistentFSMSnapshot.getTimeoutNanos)) + else None) } private def atomicWriteBuilder(a: AtomicWrite) = { @@ -184,14 +189,14 @@ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer // private def persistent(persistentMessage: mf.PersistentMessage): PersistentRepr = { - PersistentRepr( - payload(persistentMessage.getPayload), - persistentMessage.getSequenceNr, - if (persistentMessage.hasPersistenceId) persistentMessage.getPersistenceId else Undefined, - if (persistentMessage.hasManifest) persistentMessage.getManifest else Undefined, - if (persistentMessage.hasDeleted) persistentMessage.getDeleted else false, - if (persistentMessage.hasSender) system.provider.resolveActorRef(persistentMessage.getSender) else Actor.noSender, - if (persistentMessage.hasWriterUuid) persistentMessage.getWriterUuid else Undefined) + PersistentRepr(payload(persistentMessage.getPayload), + persistentMessage.getSequenceNr, + if (persistentMessage.hasPersistenceId) persistentMessage.getPersistenceId else Undefined, + if (persistentMessage.hasManifest) persistentMessage.getManifest else Undefined, + if (persistentMessage.hasDeleted) persistentMessage.getDeleted else false, + if 
(persistentMessage.hasSender) system.provider.resolveActorRef(persistentMessage.getSender) + else Actor.noSender, + if (persistentMessage.hasWriterUuid) persistentMessage.getWriterUuid else Undefined) } private def atomicWrite(atomicWrite: mf.AtomicWrite): AtomicWrite = { @@ -200,13 +205,12 @@ class MessageSerializer(val system: ExtendedActorSystem) extends BaseSerializer } private def payload(persistentPayload: mf.PersistentPayload): Any = { - val manifest = if (persistentPayload.hasPayloadManifest) - persistentPayload.getPayloadManifest.toStringUtf8 else "" + val manifest = + if (persistentPayload.hasPayloadManifest) + persistentPayload.getPayloadManifest.toStringUtf8 + else "" - serialization.deserialize( - persistentPayload.getPayload.toByteArray, - persistentPayload.getSerializerId, - manifest).get + serialization.deserialize(persistentPayload.getPayload.toByteArray, persistentPayload.getSerializerId, manifest).get } } diff --git a/akka-persistence/src/main/scala/akka/persistence/serialization/package.scala b/akka-persistence/src/main/scala/akka/persistence/serialization/package.scala index 058881674e..fd808c86bf 100644 --- a/akka-persistence/src/main/scala/akka/persistence/serialization/package.scala +++ b/akka-persistence/src/main/scala/akka/persistence/serialization/package.scala @@ -7,6 +7,7 @@ package akka.persistence import java.io.{ ByteArrayOutputStream, InputStream } package object serialization { + /** * Converts an input stream to a byte array. 
*/ @@ -18,7 +19,9 @@ package object serialization { @scala.annotation.tailrec def copy(): Array[Byte] = { val n = inputStream.read(buf, 0, len) - if (n != -1) { out.write(buf, 0, n); copy() } else out.toByteArray + if (n != -1) { + out.write(buf, 0, n); copy() + } else out.toByteArray } copy() diff --git a/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala b/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala index 880a077af1..59d51d4f52 100644 --- a/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/snapshot/SnapshotStore.scala @@ -36,23 +36,31 @@ trait SnapshotStore extends Actor with ActorLogging { if (criteria == SnapshotSelectionCriteria.None) { senderPersistentActor() ! LoadSnapshotResult(snapshot = None, toSequenceNr) } else { - breaker.withCircuitBreaker(loadAsync(persistenceId, criteria.limit(toSequenceNr))) map { - sso => LoadSnapshotResult(sso, toSequenceNr) - } recover { - case e => LoadSnapshotFailed(e) - } pipeTo senderPersistentActor() + breaker + .withCircuitBreaker(loadAsync(persistenceId, criteria.limit(toSequenceNr))) + .map { sso => + LoadSnapshotResult(sso, toSequenceNr) + } + .recover { + case e => LoadSnapshotFailed(e) + } + .pipeTo(senderPersistentActor()) } case SaveSnapshot(metadata, snapshot) => val md = metadata.copy(timestamp = System.currentTimeMillis) - breaker.withCircuitBreaker(saveAsync(md, snapshot)) map { - _ => SaveSnapshotSuccess(md) - } recover { - case e => SaveSnapshotFailure(metadata, e) - } to (self, senderPersistentActor()) + breaker + .withCircuitBreaker(saveAsync(md, snapshot)) + .map { _ => + SaveSnapshotSuccess(md) + } + .recover { + case e => SaveSnapshotFailure(metadata, e) + } to (self, senderPersistentActor()) case evt: SaveSnapshotSuccess => - try tryReceivePluginInternal(evt) finally senderPersistentActor ! 
evt // sender is persistentActor + try tryReceivePluginInternal(evt) + finally senderPersistentActor ! evt // sender is persistentActor case evt @ SaveSnapshotFailure(metadata, _) => try { tryReceivePluginInternal(evt) @@ -60,32 +68,46 @@ trait SnapshotStore extends Actor with ActorLogging { } finally senderPersistentActor() ! evt // sender is persistentActor case d @ DeleteSnapshot(metadata) => - breaker.withCircuitBreaker(deleteAsync(metadata)).map { - case _ => DeleteSnapshotSuccess(metadata) - }.recover { - case e => DeleteSnapshotFailure(metadata, e) - }.pipeTo(self)(senderPersistentActor()).onComplete { - case _ => if (publish) context.system.eventStream.publish(d) - } + breaker + .withCircuitBreaker(deleteAsync(metadata)) + .map { + case _ => DeleteSnapshotSuccess(metadata) + } + .recover { + case e => DeleteSnapshotFailure(metadata, e) + } + .pipeTo(self)(senderPersistentActor()) + .onComplete { + case _ => if (publish) context.system.eventStream.publish(d) + } case evt: DeleteSnapshotSuccess => - try tryReceivePluginInternal(evt) finally senderPersistentActor() ! evt + try tryReceivePluginInternal(evt) + finally senderPersistentActor() ! evt case evt: DeleteSnapshotFailure => - try tryReceivePluginInternal(evt) finally senderPersistentActor() ! evt + try tryReceivePluginInternal(evt) + finally senderPersistentActor() ! 
evt case d @ DeleteSnapshots(persistenceId, criteria) => - breaker.withCircuitBreaker(deleteAsync(persistenceId, criteria)).map { - case _ => DeleteSnapshotsSuccess(criteria) - }.recover { - case e => DeleteSnapshotsFailure(criteria, e) - }.pipeTo(self)(senderPersistentActor()).onComplete { - case _ => if (publish) context.system.eventStream.publish(d) - } + breaker + .withCircuitBreaker(deleteAsync(persistenceId, criteria)) + .map { + case _ => DeleteSnapshotsSuccess(criteria) + } + .recover { + case e => DeleteSnapshotsFailure(criteria, e) + } + .pipeTo(self)(senderPersistentActor()) + .onComplete { + case _ => if (publish) context.system.eventStream.publish(d) + } case evt: DeleteSnapshotsFailure => - try tryReceivePluginInternal(evt) finally senderPersistentActor() ! evt // sender is persistentActor + try tryReceivePluginInternal(evt) + finally senderPersistentActor() ! evt // sender is persistentActor case evt: DeleteSnapshotsSuccess => - try tryReceivePluginInternal(evt) finally senderPersistentActor() ! evt + try tryReceivePluginInternal(evt) + finally senderPersistentActor() ! 
evt } /** Documents intent that the sender() is expected to be the PersistentActor */ diff --git a/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala b/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala index 99dbaf1e60..15326f92ba 100644 --- a/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/snapshot/japi/SnapshotStore.scala @@ -16,7 +16,8 @@ import scala.concurrent.Future abstract class SnapshotStore extends SSnapshotStore with SnapshotStorePlugin { import context.dispatcher - override final def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = + override final def loadAsync(persistenceId: String, + criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = doLoadAsync(persistenceId, criteria).map(option) override final def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] = diff --git a/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala b/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala index 02ce066c4d..1b6c157b64 100644 --- a/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala +++ b/akka-persistence/src/main/scala/akka/persistence/snapshot/local/LocalSnapshotStore.scala @@ -31,8 +31,7 @@ private[persistence] class LocalSnapshotStore(config: Config) extends SnapshotSt private val persistenceIdStartIdx = 9 // Persistence ID starts after the "snapshot-" substring import akka.util.Helpers._ - private val maxLoadAttempts = config.getInt("max-load-attempts") - .requiring(_ > 1, "max-load-attempts must be >= 1") + private val maxLoadAttempts = config.getInt("max-load-attempts").requiring(_ > 1, "max-load-attempts must be >= 1") private val streamDispatcher = 
context.system.dispatchers.lookup(config.getString("stream-dispatcher")) private val dir = new File(config.getString("dir")) @@ -40,7 +39,8 @@ private[persistence] class LocalSnapshotStore(config: Config) extends SnapshotSt private val serializationExtension = SerializationExtension(context.system) private var saving = immutable.Set.empty[SnapshotMetadata] // saving in progress - override def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = { + override def loadAsync(persistenceId: String, + criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = { // // Heuristics: // @@ -75,9 +75,11 @@ private[persistence] class LocalSnapshotStore(config: Config) extends SnapshotSt override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = { val metadatas = snapshotMetadatas(persistenceId, criteria) - Future.sequence { - metadatas.map(deleteAsync) - }(scala.collection.immutable.IndexedSeq, streamDispatcher).map(_ => ())(streamDispatcher) + Future + .sequence { + metadatas.map(deleteAsync) + }(scala.collection.immutable.IndexedSeq, streamDispatcher) + .map(_ => ())(streamDispatcher) } override def receivePluginInternal: Receive = { @@ -92,21 +94,22 @@ private[persistence] class LocalSnapshotStore(config: Config) extends SnapshotSt } @scala.annotation.tailrec - private def load(metadata: immutable.Seq[SnapshotMetadata]): Try[Option[SelectedSnapshot]] = metadata.lastOption match { - case None => Success(None) // no snapshots stored - case Some(md) => - Try(withInputStream(md)(deserialize)) match { - case Success(s) => - Success(Some(SelectedSnapshot(md, s.data))) - case Failure(e) => - val remaining = metadata.init - log.error(e, s"Error loading snapshot [{}], remaining attempts: [{}]", md, remaining.size) - if (remaining.isEmpty) - Failure(e) // all attempts failed - else - load(remaining) // try older snapshot - } - } + private def load(metadata: 
immutable.Seq[SnapshotMetadata]): Try[Option[SelectedSnapshot]] = + metadata.lastOption match { + case None => Success(None) // no snapshots stored + case Some(md) => + Try(withInputStream(md)(deserialize)) match { + case Success(s) => + Success(Some(SelectedSnapshot(md, s.data))) + case Failure(e) => + val remaining = metadata.init + log.error(e, s"Error loading snapshot [{}], remaining attempts: [{}]", md, remaining.size) + if (remaining.isEmpty) + Failure(e) // all attempts failed + else + load(remaining) // try older snapshot + } + } protected def save(metadata: SnapshotMetadata, snapshot: Any): Unit = { val tmpFile = withOutputStream(metadata)(serialize(_, Snapshot(snapshot))) @@ -130,21 +133,32 @@ private[persistence] class LocalSnapshotStore(config: Config) extends SnapshotSt withStream(new BufferedInputStream(Files.newInputStream(snapshotFileForWrite(metadata).toPath())), p) private def withStream[A <: Closeable, B](stream: A, p: A => B): B = - try { p(stream) } finally { stream.close() } + try { + p(stream) + } finally { + stream.close() + } /** Only by persistenceId and sequenceNr, timestamp is informational - accommodates for 2.13.x series files */ protected def snapshotFileForWrite(metadata: SnapshotMetadata, extension: String = ""): File = - new File(snapshotDir, s"snapshot-${URLEncoder.encode(metadata.persistenceId, UTF_8)}-${metadata.sequenceNr}-${metadata.timestamp}${extension}") + new File( + snapshotDir, + s"snapshot-${URLEncoder.encode(metadata.persistenceId, UTF_8)}-${metadata.sequenceNr}-${metadata.timestamp}${extension}") - private def snapshotMetadatas(persistenceId: String, criteria: SnapshotSelectionCriteria): immutable.Seq[SnapshotMetadata] = { + private def snapshotMetadatas(persistenceId: String, + criteria: SnapshotSelectionCriteria): immutable.Seq[SnapshotMetadata] = { val files = snapshotDir.listFiles(new SnapshotFilenameFilter(persistenceId)) if (files eq null) Nil // if the dir was removed else { - files.map(_.getName).flatMap { 
filename => - extractMetadata(filename).map { - case (pid, snr, tms) => SnapshotMetadata(URLDecoder.decode(pid, UTF_8), snr, tms) + files + .map(_.getName) + .flatMap { filename => + extractMetadata(filename).map { + case (pid, snr, tms) => SnapshotMetadata(URLDecoder.decode(pid, UTF_8), snr, tms) + } } - }.filter(md => criteria.matches(md) && !saving.contains(md)).toVector + .filter(md => criteria.matches(md) && !saving.contains(md)) + .toVector } } @@ -169,14 +183,14 @@ private[persistence] class LocalSnapshotStore(config: Config) extends SnapshotSt def accept(dir: File, name: String): Boolean = { val persistenceIdEndIdx = name.lastIndexOf('-', name.lastIndexOf('-') - 1) persistenceIdStartIdx + encodedPersistenceId.length == persistenceIdEndIdx && - name.startsWith(encodedPersistenceId, persistenceIdStartIdx) + name.startsWith(encodedPersistenceId, persistenceIdStartIdx) } } private final class SnapshotSeqNrFilenameFilter(md: SnapshotMetadata) extends FilenameFilter { private final def matches(pid: String, snr: String, tms: String): Boolean = { pid.equals(URLEncoder.encode(md.persistenceId)) && - Try(snr.toLong == md.sequenceNr && (md.timestamp == 0L || tms.toLong == md.timestamp)).getOrElse(false) + Try(snr.toLong == md.sequenceNr && (md.timestamp == 0L || tms.toLong == md.timestamp)).getOrElse(false) } def accept(dir: File, name: String): Boolean = diff --git a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryCrashSpec.scala b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryCrashSpec.scala index d62b85710f..4a0abaa8f7 100644 --- a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryCrashSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryCrashSpec.scala @@ -5,8 +5,8 @@ package akka.persistence import akka.actor._ -import akka.actor.SupervisorStrategy.{ Stop, Escalate } -import akka.testkit.{ AkkaSpec, TestProbe, ImplicitSender } +import akka.actor.SupervisorStrategy.{ 
Escalate, Stop } +import akka.testkit.{ AkkaSpec, ImplicitSender, TestProbe } import scala.concurrent.duration._ import scala.util.control.NoStackTrace @@ -23,7 +23,7 @@ object AtLeastOnceDeliveryCrashSpec { val crashingActor = context.actorOf(Props(new CrashingActor(testProbe)), "CrashingActor") - def receive: Receive = { case msg => crashingActor forward msg } + def receive: Receive = { case msg => crashingActor.forward(msg) } } object CrashingActor { @@ -32,8 +32,7 @@ object AtLeastOnceDeliveryCrashSpec { case class SendingMessage(deliveryId: Long, recovering: Boolean) } - class CrashingActor(testProbe: ActorRef) extends PersistentActor - with AtLeastOnceDelivery with ActorLogging { + class CrashingActor(testProbe: ActorRef) extends PersistentActor with AtLeastOnceDelivery with ActorLogging { import CrashingActor._ override def persistenceId = self.path.name @@ -47,23 +46,30 @@ object AtLeastOnceDeliveryCrashSpec { } override def receiveCommand: Receive = { - case Message => persist(Message)(_ => send()) - case CrashMessage => persist(CrashMessage) { evt => } + case Message => persist(Message)(_ => send()) + case CrashMessage => + persist(CrashMessage) { evt => + } } def send() = { - deliver(testProbe.path) { id => SendingMessage(id, false) } + deliver(testProbe.path) { id => + SendingMessage(id, false) + } } } } -class AtLeastOnceDeliveryCrashSpec extends AkkaSpec(PersistenceSpec.config("inmem", "AtLeastOnceDeliveryCrashSpec", serialization = "off")) with ImplicitSender { +class AtLeastOnceDeliveryCrashSpec + extends AkkaSpec(PersistenceSpec.config("inmem", "AtLeastOnceDeliveryCrashSpec", serialization = "off")) + with ImplicitSender { import AtLeastOnceDeliveryCrashSpec._ "At least once delivery" should { "not send when actor crashes" in { val testProbe = TestProbe() - def createCrashActorUnderSupervisor() = system.actorOf(Props(new StoppingStrategySupervisor(testProbe.ref)), "supervisor") + def createCrashActorUnderSupervisor() = + system.actorOf(Props(new 
StoppingStrategySupervisor(testProbe.ref)), "supervisor") val superVisor = createCrashActorUnderSupervisor() superVisor ! CrashingActor.Message testProbe.expectMsgType[CrashingActor.SendingMessage] diff --git a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryFailureSpec.scala index 459bea4c95..1300d62c2d 100644 --- a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryFailureSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliveryFailureSpec.scala @@ -14,8 +14,7 @@ import akka.actor._ import akka.testkit._ object AtLeastOnceDeliveryFailureSpec { - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka.persistence.sender.chaos.live-processing-failure-rate = 0.3 akka.persistence.sender.chaos.replay-processing-failure-rate = 0.1 akka.persistence.destination.chaos.confirm-failure-rate = 0.3 @@ -63,7 +62,11 @@ object AtLeastOnceDeliveryFailureSpec { random.nextDouble() < rate } - class ChaosSender(destination: ActorRef, val probe: ActorRef) extends PersistentActor with ChaosSupport with ActorLogging with AtLeastOnceDelivery { + class ChaosSender(destination: ActorRef, val probe: ActorRef) + extends PersistentActor + with ChaosSupport + with ActorLogging + with AtLeastOnceDelivery { val config = context.system.settings.config.getConfig("akka.persistence.sender.chaos") val liveProcessingFailureRate = config.getDouble("live-processing-failure-rate") val replayProcessingFailureRate = config.getDouble("replay-processing-failure-rate") @@ -153,18 +156,21 @@ object AtLeastOnceDeliveryFailureSpec { context.watch(context.actorOf(Props(classOf[ChaosSender], destination, probe), "sender")) def receive = { - case Start => 1 to numMessages foreach (snd ! _) - case Ack(i) => acks += i + case Start => (1 to numMessages).foreach(snd ! 
_) + case Ack(i) => acks += i case Terminated(_) => // snd will be stopped if recovery or persist fails log.debug(s"sender stopped, starting it again") snd = createSender() - 1 to numMessages foreach (i => if (!acks(i)) snd ! i) + (1 to numMessages).foreach(i => if (!acks(i)) snd ! i) } } } -class AtLeastOnceDeliveryFailureSpec extends AkkaSpec(AtLeastOnceDeliveryFailureSpec.config) with Cleanup with ImplicitSender { +class AtLeastOnceDeliveryFailureSpec + extends AkkaSpec(AtLeastOnceDeliveryFailureSpec.config) + with Cleanup + with ImplicitSender { import AtLeastOnceDeliveryFailureSpec._ muteDeadLetters(classOf[AnyRef])(system) diff --git a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala index 4ca19fa577..503c875847 100644 --- a/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/AtLeastOnceDeliverySpec.scala @@ -29,24 +29,35 @@ object AtLeastOnceDeliverySpec { case object SaveSnap case class Snap(deliverySnapshot: AtLeastOnceDeliverySnapshot) // typically includes some user data as well - def senderProps(testActor: ActorRef, name: String, - redeliverInterval: FiniteDuration, warnAfterNumberOfUnconfirmedAttempts: Int, + def senderProps(testActor: ActorRef, + name: String, + redeliverInterval: FiniteDuration, + warnAfterNumberOfUnconfirmedAttempts: Int, redeliveryBurstLimit: Int, - destinations: Map[String, ActorPath], - async: Boolean, actorSelectionDelivery: Boolean = false): Props = - Props(new Sender(testActor, name, redeliverInterval, warnAfterNumberOfUnconfirmedAttempts, - redeliveryBurstLimit, destinations, async, actorSelectionDelivery)) + destinations: Map[String, ActorPath], + async: Boolean, + actorSelectionDelivery: Boolean = false): Props = + Props( + new Sender(testActor, + name, + redeliverInterval, + warnAfterNumberOfUnconfirmedAttempts, + 
redeliveryBurstLimit, + destinations, + async, + actorSelectionDelivery)) - class Sender( - testActor: ActorRef, - name: String, - override val redeliverInterval: FiniteDuration, - override val warnAfterNumberOfUnconfirmedAttempts: Int, - override val redeliveryBurstLimit: Int, - destinations: Map[String, ActorPath], - async: Boolean, - actorSelectionDelivery: Boolean) - extends PersistentActor with AtLeastOnceDelivery with ActorLogging { + class Sender(testActor: ActorRef, + name: String, + override val redeliverInterval: FiniteDuration, + override val warnAfterNumberOfUnconfirmedAttempts: Int, + override val redeliveryBurstLimit: Int, + destinations: Map[String, ActorPath], + async: Boolean, + actorSelectionDelivery: Boolean) + extends PersistentActor + with AtLeastOnceDelivery + with ActorLogging { override def persistenceId: String = name @@ -77,8 +88,7 @@ object AtLeastOnceDeliverySpec { persistAsync(AcceptedReq(payload, destination)) { evt => updateState(evt) sender() ! ReqAck - } - else + } else persist(AcceptedReq(payload, destination)) { evt => updateState(evt) sender() ! 
ReqAck @@ -89,9 +99,12 @@ object AtLeastOnceDeliverySpec { log.debug("Sender got ack {}", id) if (confirmDelivery(id)) if (async) - persistAsync(ReqDone(id)) { evt => updateState(evt) } - else - persist(ReqDone(id)) { evt => updateState(evt) } + persistAsync(ReqDone(id)) { evt => + updateState(evt) + } else + persist(ReqDone(id)) { evt => + updateState(evt) + } case Boom => log.debug("Boom!") @@ -150,7 +163,7 @@ object AtLeastOnceDeliverySpec { count += 1 if (count % dropMod != 0) { log.debug("Pass msg {} count {}", msg, count) - target forward msg + target.forward(msg) } else { log.debug("Drop msg {} count {}", msg, count) } @@ -177,7 +190,6 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c "AtLeastOnceDelivery" must { List(true, false).foreach { deliverUsingActorSelection => - s"deliver messages in order when nothing is lost (using actorSelection: $deliverUsingActorSelection)" taggedAs (TimingTest) in { val probe = TestProbe() val probeA = TestProbe() @@ -194,7 +206,15 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c val probeA = TestProbe() val dst = system.actorOf(destinationProps(probeA.ref)) val destinations = Map("A" -> system.actorOf(unreliableProps(3, dst)).path) - val snd = system.actorOf(senderProps(probe.ref, name, 2.seconds, 5, 1000, destinations, async = false, actorSelectionDelivery = deliverUsingActorSelection), name) + val snd = system.actorOf(senderProps(probe.ref, + name, + 2.seconds, + 5, + 1000, + destinations, + async = false, + actorSelectionDelivery = deliverUsingActorSelection), + name) snd.tell(Req("a-1"), probe.ref) probe.expectMsg(ReqAck) probeA.expectMsg(Action(1, "a-1")) @@ -284,9 +304,8 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c // and then re-delivered probeA.expectMsg(Action(2, "a-2")) // re-delivered // a-4 was re-delivered but lost - probeA.expectMsgAllOf( - Action(5, "a-5"), // re-delivered - Action(4, "a-4")) // 
re-delivered, 3rd time + probeA.expectMsgAllOf(Action(5, "a-5"), // re-delivered + Action(4, "a-4")) // re-delivered, 3rd time probeA.expectNoMsg(1.second) } @@ -341,9 +360,11 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c probe.expectMsg(ReqAck) probe.expectMsg(ReqAck) probe.expectMsg(ReqAck) - val unconfirmed = probe.receiveWhile(5.seconds) { - case UnconfirmedWarning(unconfirmed) => unconfirmed - }.flatten + val unconfirmed = probe + .receiveWhile(5.seconds) { + case UnconfirmedWarning(unconfirmed) => unconfirmed + } + .flatten unconfirmed.map(_.destination).toSet should ===(Set(probeA.ref.path, probeB.ref.path)) unconfirmed.map(_.message).toSet should be(Set(Action(1, "a-1"), Action(2, "b-1"), Action(3, "b-2"))) system.stop(snd) @@ -357,10 +378,9 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c val dstA = system.actorOf(destinationProps(probeA.ref), "destination-a") val dstB = system.actorOf(destinationProps(probeB.ref), "destination-b") val dstC = system.actorOf(destinationProps(probeC.ref), "destination-c") - val destinations = Map( - "A" -> system.actorOf(unreliableProps(2, dstA), "unreliable-a").path, - "B" -> system.actorOf(unreliableProps(5, dstB), "unreliable-b").path, - "C" -> system.actorOf(unreliableProps(3, dstC), "unreliable-c").path) + val destinations = Map("A" -> system.actorOf(unreliableProps(2, dstA), "unreliable-a").path, + "B" -> system.actorOf(unreliableProps(5, dstB), "unreliable-b").path, + "C" -> system.actorOf(unreliableProps(3, dstC), "unreliable-c").path) val snd = system.actorOf(senderProps(probe.ref, name, 1000.millis, 5, 1000, destinations, async = true), name) val N = 100 for (n <- 1 to N) { @@ -373,9 +393,12 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c snd.tell(Req("c-" + n), probe.ref) } val deliverWithin = 20.seconds - probeA.receiveN(N, deliverWithin).map { case a: Action => a.payload }.toSet should ===((1 to N).map(n 
=> "a-" + n).toSet) - probeB.receiveN(N, deliverWithin).map { case a: Action => a.payload }.toSet should ===((1 to N).map(n => "b-" + n).toSet) - probeC.receiveN(N, deliverWithin).map { case a: Action => a.payload }.toSet should ===((1 to N).map(n => "c-" + n).toSet) + probeA.receiveN(N, deliverWithin).map { case a: Action => a.payload }.toSet should ===( + (1 to N).map(n => "a-" + n).toSet) + probeB.receiveN(N, deliverWithin).map { case a: Action => a.payload }.toSet should ===( + (1 to N).map(n => "b-" + n).toSet) + probeC.receiveN(N, deliverWithin).map { case a: Action => a.payload }.toSet should ===( + (1 to N).map(n => "c-" + n).toSet) } "limit the number of messages redelivered at once" taggedAs (TimingTest) in { @@ -408,7 +431,8 @@ abstract class AtLeastOnceDeliverySpec(config: Config) extends PersistenceSpec(c } } -class LeveldbAtLeastOnceDeliverySpec extends AtLeastOnceDeliverySpec( - PersistenceSpec.config("leveldb", "AtLeastOnceDeliverySpec")) +class LeveldbAtLeastOnceDeliverySpec + extends AtLeastOnceDeliverySpec(PersistenceSpec.config("leveldb", "AtLeastOnceDeliverySpec")) -class InmemAtLeastOnceDeliverySpec extends AtLeastOnceDeliverySpec(PersistenceSpec.config("inmem", "AtLeastOnceDeliverySpec")) +class InmemAtLeastOnceDeliverySpec + extends AtLeastOnceDeliverySpec(PersistenceSpec.config("inmem", "AtLeastOnceDeliverySpec")) diff --git a/akka-persistence/src/test/scala/akka/persistence/AtomicWriteSpec.scala b/akka-persistence/src/test/scala/akka/persistence/AtomicWriteSpec.scala index d8635f3af6..e376e1c5cd 100644 --- a/akka-persistence/src/test/scala/akka/persistence/AtomicWriteSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/AtomicWriteSpec.scala @@ -12,28 +12,28 @@ class AtomicWriteSpec extends WordSpec with Matchers { "only contain messages for the same persistence id" in { AtomicWrite( PersistentRepr("", 1, "p1") :: - PersistentRepr("", 2, "p1") :: Nil).persistenceId should ===("p1") + PersistentRepr("", 2, "p1") :: 
Nil).persistenceId should ===("p1") intercept[IllegalArgumentException] { AtomicWrite( PersistentRepr("", 1, "p1") :: - PersistentRepr("", 2, "p1") :: - PersistentRepr("", 3, "p2") :: Nil) + PersistentRepr("", 2, "p1") :: + PersistentRepr("", 3, "p2") :: Nil) } } "have highestSequenceNr" in { AtomicWrite( PersistentRepr("", 1, "p1") :: - PersistentRepr("", 2, "p1") :: - PersistentRepr("", 3, "p1") :: Nil).highestSequenceNr should ===(3) + PersistentRepr("", 2, "p1") :: + PersistentRepr("", 3, "p1") :: Nil).highestSequenceNr should ===(3) } "have lowestSequenceNr" in { AtomicWrite( PersistentRepr("", 2, "p1") :: - PersistentRepr("", 3, "p1") :: - PersistentRepr("", 4, "p1") :: Nil).lowestSequenceNr should ===(2) + PersistentRepr("", 3, "p1") :: + PersistentRepr("", 4, "p1") :: Nil).lowestSequenceNr should ===(2) } } diff --git a/akka-persistence/src/test/scala/akka/persistence/EndToEndEventAdapterSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EndToEndEventAdapterSpec.scala index 8132e16173..b8b3033b0c 100644 --- a/akka-persistence/src/test/scala/akka/persistence/EndToEndEventAdapterSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/EndToEndEventAdapterSpec.scala @@ -8,11 +8,11 @@ import java.io.File import akka.actor._ import akka.persistence.EndToEndEventAdapterSpec.NewA -import akka.persistence.journal.{ EventSeq, EventAdapter } -import akka.testkit.{ TestProbe, EventFilter } +import akka.persistence.journal.{ EventAdapter, EventSeq } +import akka.testkit.{ EventFilter, TestProbe } import com.typesafe.config.{ Config, ConfigFactory } import org.apache.commons.io.FileUtils -import org.scalatest.{ WordSpecLike, Matchers, BeforeAndAfterAll } +import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } import scala.concurrent.Await import scala.concurrent.duration._ @@ -34,7 +34,7 @@ object EndToEndEventAdapterSpec { event match { case m: AppModel => JSON(m.payload) } override def fromJournal(event: Any, manifest: String): EventSeq 
= event match { case m: JSON if m.payload.toString.startsWith("a") => EventSeq.single(A(m.payload)) - case _ => EventSeq.empty + case _ => EventSeq.empty } } class NewAEndToEndAdapter(system: ExtendedActorSystem) extends EventAdapter { @@ -44,7 +44,7 @@ object EndToEndEventAdapterSpec { event match { case m: AppModel => JSON(m.payload) } override def fromJournal(event: Any, manifest: String): EventSeq = event match { case m: JSON if m.payload.toString.startsWith("a") => EventSeq.single(NewA(m.payload)) - case _ => EventSeq.empty + case _ => EventSeq.empty } } class BEndToEndAdapter(system: ExtendedActorSystem) extends EventAdapter { @@ -54,7 +54,7 @@ object EndToEndEventAdapterSpec { event match { case m: AppModel => JSON(m.payload) } override def fromJournal(event: Any, manifest: String): EventSeq = event match { case m: JSON if m.payload.toString.startsWith("b") => EventSeq.single(B(m.payload)) - case _ => EventSeq.empty + case _ => EventSeq.empty } } class NewBEndToEndAdapter(system: ExtendedActorSystem) extends EventAdapter { @@ -64,12 +64,13 @@ object EndToEndEventAdapterSpec { event match { case m: AppModel => JSON(m.payload) } override def fromJournal(event: Any, manifest: String): EventSeq = event match { case m: JSON if m.payload.toString.startsWith("b") => EventSeq.single(NewB(m.payload)) - case _ => EventSeq.empty + case _ => EventSeq.empty } } class EndToEndAdapterActor(name: String, override val journalPluginId: String, probe: Option[ActorRef]) - extends NamedPersistentActor(name) with PersistentActor { + extends NamedPersistentActor(name) + with PersistentActor { var state: List[Any] = Nil @@ -94,11 +95,12 @@ object EndToEndEventAdapterSpec { } abstract class EndToEndEventAdapterSpec(journalName: String, journalConfig: Config) - extends WordSpecLike with Matchers with BeforeAndAfterAll { + extends WordSpecLike + with Matchers + with BeforeAndAfterAll { import EndToEndEventAdapterSpec._ - val storageLocations = 
List("akka.persistence.journal.leveldb.dir") - .map(s => new File(journalConfig.getString(s))) + val storageLocations = List("akka.persistence.journal.leveldb.dir").map(s => new File(journalConfig.getString(s))) override protected def beforeAll(): Unit = { storageLocations.foreach(FileUtils.deleteDirectory) @@ -110,8 +112,7 @@ abstract class EndToEndEventAdapterSpec(journalName: String, journalConfig: Conf val noAdaptersConfig = ConfigFactory.parseString("") - val adaptersConfig = ConfigFactory.parseString( - s""" + val adaptersConfig = ConfigFactory.parseString(s""" |akka.persistence.journal { | $journalName { | event-adapters { @@ -131,8 +132,7 @@ abstract class EndToEndEventAdapterSpec(journalName: String, journalConfig: Conf |akka.loggers = ["akka.testkit.TestEventListener"] """.stripMargin) - val newAdaptersConfig = ConfigFactory.parseString( - s""" + val newAdaptersConfig = ConfigFactory.parseString(s""" |akka.persistence.journal { | $journalName { | event-adapters { @@ -155,34 +155,35 @@ abstract class EndToEndEventAdapterSpec(journalName: String, journalConfig: Conf system.actorOf(Props(classOf[EndToEndAdapterActor], name, "akka.persistence.journal." 
+ journalName, probe)) def withActorSystem[T](name: String, config: Config)(block: ActorSystem => T): T = { - val system = ActorSystem(name, journalConfig withFallback config) - try block(system) finally Await.ready(system.terminate(), 3.seconds) + val system = ActorSystem(name, journalConfig.withFallback(config)) + try block(system) + finally Await.ready(system.terminate(), 3.seconds) } "EventAdapters in end-to-end scenarios" must { "use the same adapter when reading as was used when writing to the journal" in - withActorSystem("SimpleSystem", adaptersConfig) { implicit system => - val p = TestProbe() - implicit val ref = p.ref + withActorSystem("SimpleSystem", adaptersConfig) { implicit system => + val p = TestProbe() + implicit val ref = p.ref - val p1 = persister("p1") - val a = A("a1") - val b = B("b1") - p1 ! a - p1 ! b - p.expectMsg(a) - p.expectMsg(b) + val p1 = persister("p1") + val a = A("a1") + val b = B("b1") + p1 ! a + p1 ! b + p.expectMsg(a) + p.expectMsg(b) - p.watch(p1) - p1 ! PoisonPill - p.expectTerminated(p1) + p.watch(p1) + p1 ! PoisonPill + p.expectTerminated(p1) - val p11 = persister("p1") - p11 ! GetState - p.expectMsg(A("a1")) - p.expectMsg(B("b1")) - } + val p11 = persister("p1") + p11 ! 
GetState + p.expectMsg(A("a1")) + p.expectMsg(B("b1")) + } "allow using an adapter, when write was performed without an adapter" in { val persistentName = "p2" @@ -226,10 +227,11 @@ abstract class EndToEndEventAdapterSpec(journalName: String, journalConfig: Conf val journalPath = s"akka.persistence.journal.$journalName" val missingAdapterConfig = adaptersConfig .withoutPath(s"$journalPath.event-adapters.a") - .withoutPath(s"""$journalPath.event-adapter-bindings."${classOf[EndToEndEventAdapterSpec].getCanonicalName}$$A"""") + .withoutPath( + s"""$journalPath.event-adapter-bindings."${classOf[EndToEndEventAdapterSpec].getCanonicalName}$$A"""") withActorSystem("MissingAdapterSystem", journalConfig.withFallback(missingAdapterConfig)) { implicit system2 => - EventFilter[ActorInitializationException](occurrences = 1, pattern = ".*undefined event-adapter.*") intercept { + EventFilter[ActorInitializationException](occurrences = 1, pattern = ".*undefined event-adapter.*").intercept { intercept[IllegalArgumentException] { Persistence(system2).adaptersFor(s"akka.persistence.journal.$journalName").get(classOf[String]) }.getMessage should include("was bound to undefined event-adapter: a (bindings: [a, b], known adapters: b)") @@ -240,4 +242,5 @@ abstract class EndToEndEventAdapterSpec(journalName: String, journalConfig: Conf } // needs persistence between actor systems, thus not running with the inmem journal -class LeveldbEndToEndEventAdapterSpec extends EndToEndEventAdapterSpec("leveldb", PersistenceSpec.config("leveldb", "LeveldbEndToEndEventAdapterSpec")) +class LeveldbEndToEndEventAdapterSpec + extends EndToEndEventAdapterSpec("leveldb", PersistenceSpec.config("leveldb", "LeveldbEndToEndEventAdapterSpec")) diff --git a/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala index d11e123f02..5b982a029a 100644 --- 
a/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/EventAdapterSpec.scala @@ -7,7 +7,7 @@ package akka.persistence import akka.actor._ import akka.event.Logging import akka.persistence.EventAdapterSpec.{ Tagged, UserDataChanged } -import akka.persistence.journal.{ SingleEventSeq, EventSeq, EventAdapter } +import akka.persistence.journal.{ EventAdapter, EventSeq, SingleEventSeq } import akka.testkit.ImplicitSender import com.typesafe.config.{ Config, ConfigFactory } @@ -71,7 +71,8 @@ object EventAdapterSpec { } class PersistAllIncomingActor(name: String, override val journalPluginId: String) - extends NamedPersistentActor(name) with PersistentActor { + extends NamedPersistentActor(name) + with PersistentActor { var state: List[Any] = Nil @@ -95,13 +96,15 @@ object EventAdapterSpec { } abstract class EventAdapterSpec(journalName: String, journalConfig: Config, adapterConfig: Config) - extends PersistenceSpec(journalConfig.withFallback(adapterConfig)) with ImplicitSender { + extends PersistenceSpec(journalConfig.withFallback(adapterConfig)) + with ImplicitSender { import EventAdapterSpec._ def this(journalName: String) { - this("inmem", PersistenceSpec.config("inmem", "InmemPersistentTaggingSpec"), ConfigFactory.parseString( - s""" + this("inmem", + PersistenceSpec.config("inmem", "InmemPersistentTaggingSpec"), + ConfigFactory.parseString(s""" |akka.persistence.journal { | | common-event-adapters { diff --git a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala index 5e509bbf7d..e7e8f03b31 100644 --- a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorDeleteFailureSpec.scala @@ -44,10 +44,15 @@ object EventSourcedActorDeleteFailureSpec { } -class 
EventSourcedActorDeleteFailureSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "SnapshotFailureRobustnessSpec", extraConfig = Some( - """ +class EventSourcedActorDeleteFailureSpec + extends PersistenceSpec( + PersistenceSpec.config("inmem", + "SnapshotFailureRobustnessSpec", + extraConfig = Some( + """ akka.persistence.journal.inmem.class = "akka.persistence.EventSourcedActorDeleteFailureSpec$DeleteFailingInmemJournal" - """))) with ImplicitSender { + """))) + with ImplicitSender { import EventSourcedActorDeleteFailureSpec._ system.eventStream.publish(TestEvent.Mute(EventFilter[akka.pattern.AskTimeoutException]())) @@ -72,4 +77,3 @@ class EventSourcedActorDeleteFailureSpec extends PersistenceSpec(PersistenceSpec } } - diff --git a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala index bc99635fbe..3f06922c4a 100644 --- a/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/EventSourcedActorFailureSpec.scala @@ -35,7 +35,7 @@ object EventSourcedActorFailureSpec { } override def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( - recoveryCallback: PersistentRepr => Unit): Future[Unit] = { + recoveryCallback: PersistentRepr => Unit): Future[Unit] = { val highest = highestSequenceNr(persistenceId) val readFromStore = read(persistenceId, fromSequenceNr, toSequenceNr, max) if (readFromStore.isEmpty) @@ -72,7 +72,7 @@ object EventSourcedActorFailureSpec { } class OnRecoveryFailurePersistentActor(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case c @ Cmd(txt) => persist(Evt(txt))(updateState) } @@ -105,7 +105,7 @@ object EventSourcedActorFailureSpec { class FailingRecovery(name: 
String, recoveryFailureProbe: Option[ActorRef]) extends ExamplePersistentActor(name) { def this(name: String) = this(name, None) - override val receiveCommand: Receive = commonBehavior orElse { + override val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => persist(Evt(s"${data}"))(updateState) } @@ -119,7 +119,7 @@ object EventSourcedActorFailureSpec { } class ThrowingActor1(name: String) extends ExamplePersistentActor(name) { - override val receiveCommand: Receive = commonBehavior orElse { + override val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => persist(Evt(s"${data}"))(updateState) if (data == "err") @@ -128,7 +128,7 @@ object EventSourcedActorFailureSpec { } class ThrowingActor2(name: String) extends ExamplePersistentActor(name) { - override val receiveCommand: Receive = commonBehavior orElse { + override val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => persist(Evt(s"${data}")) { evt => if (data == "err") @@ -140,16 +140,20 @@ object EventSourcedActorFailureSpec { } } -class EventSourcedActorFailureSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "SnapshotFailureRobustnessSpec", extraConfig = Some( - """ +class EventSourcedActorFailureSpec + extends PersistenceSpec( + PersistenceSpec.config("inmem", + "SnapshotFailureRobustnessSpec", + extraConfig = Some( + """ akka.persistence.journal.inmem.class = "akka.persistence.EventSourcedActorFailureSpec$FailingInmemJournal" - """))) with ImplicitSender { + """))) + with ImplicitSender { import EventSourcedActorFailureSpec._ import PersistentActorSpec._ - system.eventStream.publish(TestEvent.Mute( - EventFilter[akka.pattern.AskTimeoutException]())) + system.eventStream.publish(TestEvent.Mute(EventFilter[akka.pattern.AskTimeoutException]())) def prepareFailingRecovery(): Unit = { val persistentActor = namedPersistentActor[FailingRecovery] @@ -306,7 +310,7 @@ class EventSourcedActorFailureSpec extends PersistenceSpec(PersistenceSpec.confi 
expectMsg(List("a-1", "a-2", "c-1", "c-2")) // Create yet another one with same persistenceId, b-1 and b-2 discarded during replay - EventFilter.warning(start = "Invalid replayed event", occurrences = 2) intercept { + EventFilter.warning(start = "Invalid replayed event", occurrences = 2).intercept { val p3 = namedPersistentActor[Behavior1PersistentActor] p3 ! GetState expectMsg(List("a-1", "a-2", "c-1", "c-2")) @@ -315,4 +319,3 @@ class EventSourcedActorFailureSpec extends PersistenceSpec(PersistenceSpec.confi } } - diff --git a/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala b/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala index c569384554..4c3c51b86f 100644 --- a/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/LoadPluginSpec.scala @@ -20,11 +20,16 @@ object LoadJournalSpec { } } -class LoadJournalSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "LoadJournalSpec", extraConfig = Some( - """ +class LoadJournalSpec + extends PersistenceSpec( + PersistenceSpec.config("inmem", + "LoadJournalSpec", + extraConfig = Some( + """ akka.persistence.journal.inmem.class = "akka.persistence.LoadJournalSpec$JournalWithConfig" akka.persistence.journal.inmem.extra-property = 17 - """))) with ImplicitSender { + """))) + with ImplicitSender { import LoadJournalSpec._ "A journal with config parameter" must { @@ -35,4 +40,3 @@ class LoadJournalSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "L } } } - diff --git a/akka-persistence/src/test/scala/akka/persistence/ManyRecoveriesSpec.scala b/akka-persistence/src/test/scala/akka/persistence/ManyRecoveriesSpec.scala index ec7f7a6dec..f98c3cf485 100644 --- a/akka-persistence/src/test/scala/akka/persistence/ManyRecoveriesSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/ManyRecoveriesSpec.scala @@ -27,9 +27,10 @@ object ManyRecoveriesSpec { latch.foreach(Await.ready(_, 10.seconds)) 
} override def receiveCommand: Receive = { - case Cmd(s) => persist(Evt(s)) { _ => - sender() ! s"$persistenceId-$s-${lastSequenceNr}" - } + case Cmd(s) => + persist(Evt(s)) { _ => + sender() ! s"$persistenceId-$s-${lastSequenceNr}" + } case "stop" => context.stop(self) } @@ -37,8 +38,7 @@ object ManyRecoveriesSpec { } -class ManyRecoveriesSpec extends PersistenceSpec(ConfigFactory.parseString( - s""" +class ManyRecoveriesSpec extends PersistenceSpec(ConfigFactory.parseString(s""" akka.actor.default-dispatcher { type = Dispatcher executor = "thread-pool-executor" @@ -77,4 +77,3 @@ class ManyRecoveriesSpec extends PersistenceSpec(ConfigFactory.parseString( } } - diff --git a/akka-persistence/src/test/scala/akka/persistence/OptimizedRecoverySpec.scala b/akka-persistence/src/test/scala/akka/persistence/OptimizedRecoverySpec.scala index 52193b617a..48802fbf6d 100644 --- a/akka-persistence/src/test/scala/akka/persistence/OptimizedRecoverySpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/OptimizedRecoverySpec.scala @@ -21,7 +21,8 @@ object OptimizedRecoverySpec { } } - class TestPersistentActor(name: String, override val recovery: Recovery, probe: ActorRef) extends NamedPersistentActor(name) { + class TestPersistentActor(name: String, override val recovery: Recovery, probe: ActorRef) + extends NamedPersistentActor(name) { import TestPersistentActor._ override def persistenceId: String = name @@ -32,10 +33,11 @@ object OptimizedRecoverySpec { case TakeSnapshot => saveSnapshot(state) case s: SaveSnapshotSuccess => probe ! s case GetState => probe ! state - case Save(s) => persist(Saved(s, lastSequenceNr + 1)) { evt => - state += evt.s - probe ! evt - } + case Save(s) => + persist(Saved(s, lastSequenceNr + 1)) { evt => + state += evt.s + probe ! 
evt + } } def receiveRecover = { @@ -56,9 +58,9 @@ object OptimizedRecoverySpec { } -class OptimizedRecoverySpec extends PersistenceSpec(PersistenceSpec.config( - "inmem", - "OptimizedRecoverySpec")) with ImplicitSender { +class OptimizedRecoverySpec + extends PersistenceSpec(PersistenceSpec.config("inmem", "OptimizedRecoverySpec")) + with ImplicitSender { import OptimizedRecoverySpec.TestPersistentActor import OptimizedRecoverySpec.TestPersistentActor._ diff --git a/akka-persistence/src/test/scala/akka/persistence/OptionalSnapshotStoreSpec.scala b/akka-persistence/src/test/scala/akka/persistence/OptionalSnapshotStoreSpec.scala index 3d9e3272ce..08b02303b1 100644 --- a/akka-persistence/src/test/scala/akka/persistence/OptionalSnapshotStoreSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/OptionalSnapshotStoreSpec.scala @@ -31,8 +31,7 @@ object OptionalSnapshotStoreSpec { } } -class OptionalSnapshotStoreSpec extends PersistenceSpec(ConfigFactory.parseString( - s""" +class OptionalSnapshotStoreSpec extends PersistenceSpec(ConfigFactory.parseString(s""" akka.persistence.publish-plugin-commands = on akka.persistence.journal.plugin = "akka.persistence.journal.inmem" akka.persistence.journal.leveldb.dir = "target/journal-${classOf[OptionalSnapshotStoreSpec].getName}" @@ -67,4 +66,3 @@ class OptionalSnapshotStoreSpec extends PersistenceSpec(ConfigFactory.parseStrin } } } - diff --git a/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala index c77f26ff10..54cf4819f5 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PerformanceSpec.scala @@ -54,21 +54,23 @@ object PerformanceSpec { class CommandsourcedTestPersistentActor(name: String) extends PerformanceTestPersistentActor(name) { - override val receiveCommand: Receive = controlBehavior orElse { - case cmd => persistAsync(cmd) { _ 
=> - if (lastSequenceNr % 1000 == 0) print(".") - if (lastSequenceNr == failAt) throw new TestException("boom") - } + override val receiveCommand: Receive = controlBehavior.orElse { + case cmd => + persistAsync(cmd) { _ => + if (lastSequenceNr % 1000 == 0) print(".") + if (lastSequenceNr == failAt) throw new TestException("boom") + } } } class EventsourcedTestPersistentActor(name: String) extends PerformanceTestPersistentActor(name) { - override val receiveCommand: Receive = controlBehavior orElse { - case cmd => persist(cmd) { _ => - if (lastSequenceNr % 1000 == 0) print(".") - if (lastSequenceNr == failAt) throw new TestException("boom") - } + override val receiveCommand: Receive = controlBehavior.orElse { + case cmd => + persist(cmd) { _ => + if (lastSequenceNr % 1000 == 0) print(".") + if (lastSequenceNr == failAt) throw new TestException("boom") + } } } @@ -83,7 +85,7 @@ object PerformanceSpec { if (lastSequenceNr == failAt) throw new TestException("boom") } - val receiveCommand: Receive = controlBehavior orElse { + val receiveCommand: Receive = controlBehavior.orElse { case cmd => counter += 1 if (counter % 10 == 0) persist(cmd)(handler) @@ -97,12 +99,12 @@ object PerformanceSpec { case m => if (lastSequenceNr % 1000 == 0) print("."); m } - val receiveCommand: Receive = printProgress andThen (controlBehavior orElse { + val receiveCommand: Receive = printProgress.andThen(controlBehavior.orElse { case "a" => persist("a")(_ => context.become(processC)) case "b" => persist("b")(_ => ()) }) - val processC: Receive = printProgress andThen { + val processC: Receive = printProgress.andThen { case "c" => persist("c")(_ => context.unbecome()) unstashAll() @@ -111,16 +113,23 @@ object PerformanceSpec { } } -class PerformanceSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", "PerformanceSpec", serialization = "off").withFallback(ConfigFactory.parseString(PerformanceSpec.config))) with ImplicitSender { +class PerformanceSpec + extends PersistenceSpec( + 
PersistenceSpec + .config("leveldb", "PerformanceSpec", serialization = "off") + .withFallback(ConfigFactory.parseString(PerformanceSpec.config))) + with ImplicitSender { import PerformanceSpec._ val loadCycles = system.settings.config.getInt("akka.persistence.performance.cycles.load") def stressPersistentActor(persistentActor: ActorRef, failAt: Option[Long], description: String): Unit = { - failAt foreach { persistentActor ! FailAt(_) } + failAt.foreach { persistentActor ! FailAt(_) } val m = new Measure(loadCycles) m.startMeasure() - 1 to loadCycles foreach { i => persistentActor ! s"msg${i}" } + (1 to loadCycles).foreach { i => + persistentActor ! s"msg${i}" + } persistentActor ! StopMeasure expectMsg(100.seconds, StopMeasure) println(f"\nthroughput = ${m.stopMeasure()}%.2f $description per second") @@ -145,8 +154,8 @@ class PerformanceSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", val persistentActor = namedPersistentActor[StashingEventsourcedTestPersistentActor] val m = new Measure(loadCycles) m.startMeasure() - val cmds = 1 to (loadCycles / 3) flatMap (_ => List("a", "b", "c")) - cmds foreach (persistentActor ! _) + val cmds = (1 to (loadCycles / 3)).flatMap(_ => List("a", "b", "c")) + cmds.foreach(persistentActor ! _) persistentActor ! 
StopMeasure expectMsg(100.seconds, StopMeasure) println(f"\nthroughput = ${m.stopMeasure()}%.2f persistent events per second") diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala index 7ba73183f1..ab15b0f9f8 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistenceSpec.scala @@ -21,8 +21,11 @@ import org.scalatest.BeforeAndAfterEach import akka.actor.Props import akka.testkit.AkkaSpec -abstract class PersistenceSpec(config: Config) extends AkkaSpec(config) with BeforeAndAfterEach with Cleanup - with PersistenceMatchers { this: AkkaSpec => +abstract class PersistenceSpec(config: Config) + extends AkkaSpec(config) + with BeforeAndAfterEach + with Cleanup + with PersistenceMatchers { this: AkkaSpec => private var _name: String = _ lazy val extension = Persistence(system) @@ -57,9 +60,10 @@ abstract class PersistenceSpec(config: Config) extends AkkaSpec(config) with Bef object PersistenceSpec { def config(plugin: String, test: String, serialization: String = "on", extraConfig: Option[String] = None) = - extraConfig.map(ConfigFactory.parseString(_)).getOrElse(ConfigFactory.empty()).withFallback( - ConfigFactory.parseString( - s""" + extraConfig + .map(ConfigFactory.parseString(_)) + .getOrElse(ConfigFactory.empty()) + .withFallback(ConfigFactory.parseString(s""" akka.actor.serialize-creators = ${serialization} akka.actor.serialize-messages = ${serialization} akka.actor.warn-about-java-serializer-usage = off @@ -73,10 +77,10 @@ object PersistenceSpec { } trait Cleanup { this: AkkaSpec => - val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) + val storageLocations = + 
List("akka.persistence.journal.leveldb.dir", + "akka.persistence.journal.leveldb-shared.store.dir", + "akka.persistence.snapshot-store.local.dir").map(s => new File(system.settings.config.getString(s))) override protected def atStartup(): Unit = { storageLocations.foreach(FileUtils.deleteDirectory) @@ -101,6 +105,7 @@ case object GetState /** Additional ScalaTest matchers useful in persistence tests */ trait PersistenceMatchers { + /** Use this matcher to verify in-order execution of independent "streams" of events */ final class IndependentlyOrdered(prefixes: immutable.Seq[String]) extends Matcher[immutable.Seq[Any]] { override def apply(_left: immutable.Seq[Any]) = { @@ -111,10 +116,10 @@ trait PersistenceMatchers { nrs = seq.map(_.replaceFirst(prefixes(pos), "").toInt) sortedNrs = nrs.sorted if nrs != sortedNrs - } yield MatchResult( - false, - s"""Messages sequence with prefix ${prefixes(pos)} was not sorted! Was: $seq"""", - s"""Messages sequence with prefix ${prefixes(pos)} was sorted! Was: $seq"""") + } yield + MatchResult(false, + s"""Messages sequence with prefix ${prefixes(pos)} was not sorted! Was: $seq"""", + s"""Messages sequence with prefix ${prefixes(pos)} was sorted! 
Was: $seq"""") if (results.forall(_.matches)) MatchResult(true, "", "") else results.find(r => !r.matches).get diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala index 6a437d0b31..94164e486b 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorBoundedStashingSpec.scala @@ -36,7 +36,7 @@ object PersistentActorBoundedStashingSpec { def receiveRecover = updateState - override def receiveCommand: Receive = commonBehavior orElse { + override def receiveCommand: Receive = commonBehavior.orElse { case Cmd(x: Any) => persist(Evt(x))(updateState) } } @@ -52,15 +52,21 @@ object PersistentActorBoundedStashingSpec { val throwConfig = String.format(templateConfig, "akka.persistence.ThrowExceptionConfigurator") val discardConfig = String.format(templateConfig, "akka.persistence.DiscardConfigurator") - val replyToConfig = String.format(templateConfig, "akka.persistence.PersistentActorBoundedStashingSpec$ReplyToWithRejectConfigurator") + val replyToConfig = + String.format(templateConfig, "akka.persistence.PersistentActorBoundedStashingSpec$ReplyToWithRejectConfigurator") } class SteppingInMemPersistentActorBoundedStashingSpec(strategyConfig: String) - extends PersistenceSpec(SteppingInmemJournal.config("persistence-bounded-stash").withFallback(PersistenceSpec - .config("stepping-inmem", "SteppingInMemPersistentActorBoundedStashingSpec", extraConfig = Some(strategyConfig)))) - with BeforeAndAfterEach - with ImplicitSender { + extends PersistenceSpec( + SteppingInmemJournal + .config("persistence-bounded-stash") + .withFallback( + PersistenceSpec.config("stepping-inmem", + "SteppingInMemPersistentActorBoundedStashingSpec", + extraConfig = Some(strategyConfig)))) + with BeforeAndAfterEach + with ImplicitSender { override 
def atStartup: Unit = { system.eventStream.publish(Mute(EventFilter.warning(pattern = ".*received dead letter from.*Cmd.*"))) @@ -78,7 +84,7 @@ class SteppingInMemPersistentActorBoundedStashingSpec(strategyConfig: String) } class ThrowExceptionStrategyPersistentActorBoundedStashingSpec - extends SteppingInMemPersistentActorBoundedStashingSpec(PersistentActorBoundedStashingSpec.throwConfig) { + extends SteppingInMemPersistentActorBoundedStashingSpec(PersistentActorBoundedStashingSpec.throwConfig) { "Stashing with ThrowOverflowExceptionStrategy in a persistence actor " should { "throws stash overflow exception" in { val persistentActor = namedPersistentActor[StashOverflowStrategyFromConfigPersistentActor] @@ -92,11 +98,11 @@ class ThrowExceptionStrategyPersistentActorBoundedStashingSpec persistentActor ! Cmd("a") //internal stash overflow - 1 to (capacity + 1) foreach (persistentActor ! Cmd(_)) + (1 to (capacity + 1)).foreach(persistentActor ! Cmd(_)) //after PA stopped, all stashed messages forward to deadletters //the message triggering the overflow is lost, so we get one less message than we sent - 1 to capacity foreach (i => expectMsg(DeadLetter(Cmd(i), testActor, persistentActor))) + (1 to capacity).foreach(i => expectMsg(DeadLetter(Cmd(i), testActor, persistentActor))) // send another message to the now dead actor and make sure that it goes to dead letters persistentActor ! 
Cmd(capacity + 2) @@ -106,7 +112,7 @@ class ThrowExceptionStrategyPersistentActorBoundedStashingSpec } class DiscardStrategyPersistentActorBoundedStashingSpec - extends SteppingInMemPersistentActorBoundedStashingSpec(PersistentActorBoundedStashingSpec.discardConfig) { + extends SteppingInMemPersistentActorBoundedStashingSpec(PersistentActorBoundedStashingSpec.discardConfig) { "Stashing with DiscardToDeadLetterStrategy in a persistence actor " should { "discard to deadletter" in { val persistentActor = namedPersistentActor[StashOverflowStrategyFromConfigPersistentActor] @@ -120,11 +126,11 @@ class DiscardStrategyPersistentActorBoundedStashingSpec persistentActor ! Cmd("a") //internal stash overflow after 10 - 1 to (2 * capacity) foreach (persistentActor ! Cmd(_)) + (1 to (2 * capacity)).foreach(persistentActor ! Cmd(_)) //so, 11 to 20 discard to deadletter - ((1 + capacity) to (2 * capacity)).foreach (i => expectMsg(DeadLetter(Cmd(i), testActor, persistentActor))) + ((1 + capacity) to (2 * capacity)).foreach(i => expectMsg(DeadLetter(Cmd(i), testActor, persistentActor))) //allow "a" and 1 to 10 write complete - 1 to (1 + capacity) foreach (i => SteppingInmemJournal.step(journal)) + (1 to (1 + capacity)).foreach(i => SteppingInmemJournal.step(journal)) persistentActor ! GetState @@ -134,7 +140,7 @@ class DiscardStrategyPersistentActorBoundedStashingSpec } class ReplyToStrategyPersistentActorBoundedStashingSpec - extends SteppingInMemPersistentActorBoundedStashingSpec(PersistentActorBoundedStashingSpec.replyToConfig) { + extends SteppingInMemPersistentActorBoundedStashingSpec(PersistentActorBoundedStashingSpec.replyToConfig) { "Stashing with DiscardToDeadLetterStrategy in a persistence actor" should { "reply to request with custom message" in { val persistentActor = namedPersistentActor[StashOverflowStrategyFromConfigPersistentActor] @@ -148,11 +154,11 @@ class ReplyToStrategyPersistentActorBoundedStashingSpec persistentActor ! 
Cmd("a") //internal stash overflow after 10 - 1 to (2 * capacity) foreach (persistentActor ! Cmd(_)) + (1 to (2 * capacity)).foreach(persistentActor ! Cmd(_)) //so, 11 to 20 reply to with "Reject" String - ((1 + capacity) to (2 * capacity)).foreach (i => expectMsg("RejectToStash")) + ((1 + capacity) to (2 * capacity)).foreach(i => expectMsg("RejectToStash")) //allow "a" and 1 to 10 write complete - 1 to (1 + capacity) foreach (i => SteppingInmemJournal.step(journal)) + (1 to (1 + capacity)).foreach(i => SteppingInmemJournal.step(journal)) persistentActor ! GetState diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorJournalProtocolSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorJournalProtocolSpec.scala index d9eb025bba..27fb2333ba 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorJournalProtocolSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorJournalProtocolSpec.scala @@ -46,7 +46,7 @@ akka.persistence.snapshot-store.plugin = "akka.persistence.no-snapshot-store" def receiveRecover = { case x => monitor ! 
x } - def receiveCommand = behavior orElse { + def receiveCommand = behavior.orElse { case m: Multi => m.cmd.foreach(behavior) } @@ -91,7 +91,7 @@ class JournalProbe(implicit private val system: ExtendedActorSystem) extends Ext class JournalPuppet extends Actor { val ref = JournalPuppet(context.system).ref def receive = { - case x => ref forward x + case x => ref.forward(x) } } @@ -125,8 +125,7 @@ class PersistentActorJournalProtocolSpec extends AkkaSpec(config) with ImplicitS journal.send(w.persistentActor, WriteMessagesSuccessful) w.messages.foreach { case AtomicWrite(msgs) => - msgs.foreach(msg => - w.persistentActor.tell(WriteMessageSuccess(msg, w.actorInstanceId), msg.sender)) + msgs.foreach(msg => w.persistentActor.tell(WriteMessageSuccess(msg, w.actorInstanceId), msg.sender)) case NonPersistentRepr(msg, sender) => w.persistentActor.tell(msg, sender) } } @@ -221,7 +220,7 @@ class PersistentActorJournalProtocolSpec extends AkkaSpec(config) with ImplicitS val w0 = expectWrite(subject, Msgs("a" +: commands(20, 30): _*)) journal.expectNoMsg(300.millis) confirm(w0) - (1 to 11) foreach (x => expectMsg(Done(-1, x))) + (1 to 11).foreach(x => expectMsg(Done(-1, x))) val w1 = expectWrite(subject, msgs(0, 20): _*) journal.expectNoMsg(300.millis) confirm(w1) @@ -239,7 +238,7 @@ class PersistentActorJournalProtocolSpec extends AkkaSpec(config) with ImplicitS subject ! PersistAsync(1, "a-1") val w1 = expectWrite(subject, Msgs("a-1")) subject ! PersistAsync(2, "a-2") - EventFilter[Exception](message = "K-BOOM!", occurrences = 1) intercept { + EventFilter[Exception](message = "K-BOOM!", occurrences = 1).intercept { subject ! 
Fail(new Exception("K-BOOM!")) expectMsg(PreRestart("test-6")) expectMsg(PostRestart("test-6")) diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorRecoveryTimeoutSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorRecoveryTimeoutSpec.scala index a1c9f96e6e..9ead77376e 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorRecoveryTimeoutSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorRecoveryTimeoutSpec.scala @@ -16,19 +16,21 @@ object PersistentActorRecoveryTimeoutSpec { val journalId = "persistent-actor-recovery-timeout-spec" def config = - SteppingInmemJournal.config(PersistentActorRecoveryTimeoutSpec.journalId).withFallback( - ConfigFactory.parseString( - """ + SteppingInmemJournal + .config(PersistentActorRecoveryTimeoutSpec.journalId) + .withFallback(ConfigFactory.parseString(""" |akka.persistence.journal.stepping-inmem.recovery-event-timeout=1s - """.stripMargin)).withFallback(PersistenceSpec.config("stepping-inmem", "PersistentActorRecoveryTimeoutSpec")) + """.stripMargin)) + .withFallback(PersistenceSpec.config("stepping-inmem", "PersistentActorRecoveryTimeoutSpec")) class TestActor(probe: ActorRef) extends NamedPersistentActor("recovery-timeout-actor") { override def receiveRecover: Receive = Actor.emptyBehavior override def receiveCommand: Receive = { - case x => persist(x) { _ => - sender() ! x - } + case x => + persist(x) { _ => + sender() ! 
x + } } override protected def onRecoveryFailure(cause: Throwable, event: Option[Any]): Unit = { @@ -36,7 +38,9 @@ object PersistentActorRecoveryTimeoutSpec { } } - class TestReceiveTimeoutActor(receiveTimeout: FiniteDuration, probe: ActorRef) extends NamedPersistentActor("recovery-timeout-actor-2") with ActorLogging { + class TestReceiveTimeoutActor(receiveTimeout: FiniteDuration, probe: ActorRef) + extends NamedPersistentActor("recovery-timeout-actor-2") + with ActorLogging { override def preStart(): Unit = { context.setReceiveTimeout(receiveTimeout) @@ -48,9 +52,10 @@ object PersistentActorRecoveryTimeoutSpec { } override def receiveCommand: Receive = { - case x => persist(x) { _ => - sender() ! x - } + case x => + persist(x) { _ => + sender() ! x + } } override protected def onRecoveryFailure(cause: Throwable, event: Option[Any]): Unit = { @@ -61,7 +66,9 @@ object PersistentActorRecoveryTimeoutSpec { } -class PersistentActorRecoveryTimeoutSpec extends AkkaSpec(PersistentActorRecoveryTimeoutSpec.config) with ImplicitSender { +class PersistentActorRecoveryTimeoutSpec + extends AkkaSpec(PersistentActorRecoveryTimeoutSpec.config) + with ImplicitSender { import PersistentActorRecoveryTimeoutSpec.journalId @@ -105,7 +112,8 @@ class PersistentActorRecoveryTimeoutSpec extends AkkaSpec(PersistentActorRecover val timeout = 42.days val probe = TestProbe() - val persisting = system.actorOf(Props(classOf[PersistentActorRecoveryTimeoutSpec.TestReceiveTimeoutActor], timeout, probe.ref)) + val persisting = + system.actorOf(Props(classOf[PersistentActorRecoveryTimeoutSpec.TestReceiveTimeoutActor], timeout, probe.ref)) awaitAssert(SteppingInmemJournal.getRef(journalId), 3.seconds) val journal = SteppingInmemJournal.getRef(journalId) @@ -123,7 +131,8 @@ class PersistentActorRecoveryTimeoutSpec extends AkkaSpec(PersistentActorRecover // now replay, but don't give the journal any tokens to replay events // so that we cause the timeout to trigger - val replaying = 
system.actorOf(Props(classOf[PersistentActorRecoveryTimeoutSpec.TestReceiveTimeoutActor], timeout, probe.ref)) + val replaying = + system.actorOf(Props(classOf[PersistentActorRecoveryTimeoutSpec.TestReceiveTimeoutActor], timeout, probe.ref)) // initial read highest SteppingInmemJournal.step(journal) diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala index 5b51bf473f..7ec8376c0b 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorSpec.scala @@ -40,7 +40,9 @@ object PersistentActorSpec { case "boom" => throw new TestException("boom") case GetState => sender() ! events.reverse case Delete(toSequenceNr) => - persist(Some(sender())) { s => askedForDelete = s } + persist(Some(sender())) { s => + askedForDelete = s + } deleteMessages(toSequenceNr) } @@ -72,14 +74,16 @@ object PersistentActorSpec { } class Behavior1PersistentActor(name: String) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => persistAll(Seq(Evt(s"${data}-1"), Evt(s"${data}-2")))(updateState) case d: DeleteMessagesSuccess => - val replyTo = askedForDelete.getOrElse(throw new RuntimeException("Received DeleteMessagesSuccess without anyone asking for delete!")) + val replyTo = askedForDelete.getOrElse( + throw new RuntimeException("Received DeleteMessagesSuccess without anyone asking for delete!")) replyTo ! d case d: DeleteMessagesFailure => - val replyTo = askedForDelete.getOrElse(throw new RuntimeException("Received DeleteMessagesFailure without anyone asking for delete!")) + val replyTo = askedForDelete.getOrElse( + throw new RuntimeException("Received DeleteMessagesFailure without anyone asking for delete!")) replyTo ! 
d } @@ -96,33 +100,39 @@ object PersistentActorSpec { } } class Behavior1PersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends Behavior1PersistentActor(name) with LevelDbRuntimePluginConfig + extends Behavior1PersistentActor(name) + with LevelDbRuntimePluginConfig class Behavior1PersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends Behavior1PersistentActor(name) with InmemRuntimePluginConfig + extends Behavior1PersistentActor(name) + with InmemRuntimePluginConfig class Behavior2PersistentActor(name: String) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => persistAll(Seq(Evt(s"${data}-1"), Evt(s"${data}-2")))(updateState) persistAll(Seq(Evt(s"${data}-3"), Evt(s"${data}-4")))(updateState) } } class Behavior2PersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends Behavior2PersistentActor(name) with LevelDbRuntimePluginConfig + extends Behavior2PersistentActor(name) + with LevelDbRuntimePluginConfig class Behavior2PersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends Behavior2PersistentActor(name) with InmemRuntimePluginConfig + extends Behavior2PersistentActor(name) + with InmemRuntimePluginConfig class Behavior3PersistentActor(name: String) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => persistAll(Seq(Evt(s"${data}-11"), Evt(s"${data}-12")))(updateState) updateState(Evt(s"${data}-10")) } } class Behavior3PersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends Behavior3PersistentActor(name) with LevelDbRuntimePluginConfig + extends Behavior3PersistentActor(name) + with LevelDbRuntimePluginConfig class 
Behavior3PersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends Behavior3PersistentActor(name) with InmemRuntimePluginConfig + extends Behavior3PersistentActor(name) + with InmemRuntimePluginConfig class ChangeBehaviorInLastEventHandlerPersistentActor(name: String) extends ExamplePersistentActor(name) { val newBehavior: Receive = { @@ -134,7 +144,7 @@ object PersistentActorSpec { } } - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => persist(Evt(s"${data}-0")) { event => updateState(event) @@ -142,10 +152,14 @@ object PersistentActorSpec { } } } - class ChangeBehaviorInLastEventHandlerPersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends ChangeBehaviorInLastEventHandlerPersistentActor(name) with LevelDbRuntimePluginConfig - class ChangeBehaviorInLastEventHandlerPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends ChangeBehaviorInLastEventHandlerPersistentActor(name) with InmemRuntimePluginConfig + class ChangeBehaviorInLastEventHandlerPersistentActorWithLevelDbRuntimePluginConfig(name: String, + val providedConfig: Config) + extends ChangeBehaviorInLastEventHandlerPersistentActor(name) + with LevelDbRuntimePluginConfig + class ChangeBehaviorInLastEventHandlerPersistentActorWithInmemRuntimePluginConfig(name: String, + val providedConfig: Config) + extends ChangeBehaviorInLastEventHandlerPersistentActor(name) + with InmemRuntimePluginConfig class ChangeBehaviorInFirstEventHandlerPersistentActor(name: String) extends ExamplePersistentActor(name) { val newBehavior: Receive = { @@ -157,7 +171,7 @@ object PersistentActorSpec { persist(Evt(s"${data}-22"))(updateState) } - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => persist(Evt(s"${data}-0")) { event => updateState(event) @@ -165,10 
+179,14 @@ object PersistentActorSpec { } } } - class ChangeBehaviorInFirstEventHandlerPersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends ChangeBehaviorInFirstEventHandlerPersistentActor(name) with LevelDbRuntimePluginConfig - class ChangeBehaviorInFirstEventHandlerPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends ChangeBehaviorInFirstEventHandlerPersistentActor(name) with InmemRuntimePluginConfig + class ChangeBehaviorInFirstEventHandlerPersistentActorWithLevelDbRuntimePluginConfig(name: String, + val providedConfig: Config) + extends ChangeBehaviorInFirstEventHandlerPersistentActor(name) + with LevelDbRuntimePluginConfig + class ChangeBehaviorInFirstEventHandlerPersistentActorWithInmemRuntimePluginConfig(name: String, + val providedConfig: Config) + extends ChangeBehaviorInFirstEventHandlerPersistentActor(name) + with InmemRuntimePluginConfig class ChangeBehaviorInCommandHandlerFirstPersistentActor(name: String) extends ExamplePersistentActor(name) { val newBehavior: Receive = { @@ -178,16 +196,20 @@ object PersistentActorSpec { updateState(Evt(s"${data}-30")) } - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => context.become(newBehavior) persist(Evt(s"${data}-0"))(updateState) } } - class ChangeBehaviorInCommandHandlerFirstPersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends ChangeBehaviorInCommandHandlerFirstPersistentActor(name) with LevelDbRuntimePluginConfig - class ChangeBehaviorInCommandHandlerFirstPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends ChangeBehaviorInCommandHandlerFirstPersistentActor(name) with InmemRuntimePluginConfig + class ChangeBehaviorInCommandHandlerFirstPersistentActorWithLevelDbRuntimePluginConfig(name: String, + val providedConfig: Config) + extends 
ChangeBehaviorInCommandHandlerFirstPersistentActor(name) + with LevelDbRuntimePluginConfig + class ChangeBehaviorInCommandHandlerFirstPersistentActorWithInmemRuntimePluginConfig(name: String, + val providedConfig: Config) + extends ChangeBehaviorInCommandHandlerFirstPersistentActor(name) + with InmemRuntimePluginConfig class ChangeBehaviorInCommandHandlerLastPersistentActor(name: String) extends ExamplePersistentActor(name) { val newBehavior: Receive = { @@ -197,19 +219,23 @@ object PersistentActorSpec { context.unbecome() } - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => persist(Evt(s"${data}-0"))(updateState) context.become(newBehavior) } } - class ChangeBehaviorInCommandHandlerLastPersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends ChangeBehaviorInCommandHandlerLastPersistentActor(name) with LevelDbRuntimePluginConfig - class ChangeBehaviorInCommandHandlerLastPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends ChangeBehaviorInCommandHandlerLastPersistentActor(name) with InmemRuntimePluginConfig + class ChangeBehaviorInCommandHandlerLastPersistentActorWithLevelDbRuntimePluginConfig(name: String, + val providedConfig: Config) + extends ChangeBehaviorInCommandHandlerLastPersistentActor(name) + with LevelDbRuntimePluginConfig + class ChangeBehaviorInCommandHandlerLastPersistentActorWithInmemRuntimePluginConfig(name: String, + val providedConfig: Config) + extends ChangeBehaviorInCommandHandlerLastPersistentActor(name) + with InmemRuntimePluginConfig class SnapshottingPersistentActor(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { - override def receiveRecover = super.receiveRecover orElse { + override def receiveRecover = super.receiveRecover.orElse { case SnapshotOffer(_, events: List[_]) => probe ! 
"offered" this.events = events @@ -219,18 +245,25 @@ object PersistentActorSpec { persistAll(Seq(Evt(s"${cmd.data}-41"), Evt(s"${cmd.data}-42")))(updateState) } - def receiveCommand: Receive = commonBehavior orElse { + def receiveCommand: Receive = commonBehavior.orElse { case c: Cmd => handleCmd(c) case SaveSnapshotSuccess(_) => probe ! "saved" case "snap" => saveSnapshot(events) } } - class SnapshottingPersistentActorWithLevelDbRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends SnapshottingPersistentActor(name, probe) with LevelDbRuntimePluginConfig - class SnapshottingPersistentActorWithInmemRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends SnapshottingPersistentActor(name, probe) with InmemRuntimePluginConfig + class SnapshottingPersistentActorWithLevelDbRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends SnapshottingPersistentActor(name, probe) + with LevelDbRuntimePluginConfig + class SnapshottingPersistentActorWithInmemRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends SnapshottingPersistentActor(name, probe) + with InmemRuntimePluginConfig - class SnapshottingBecomingPersistentActor(name: String, probe: ActorRef) extends SnapshottingPersistentActor(name, probe) { + class SnapshottingBecomingPersistentActor(name: String, probe: ActorRef) + extends SnapshottingPersistentActor(name, probe) { val becomingRecover: Receive = { case msg: SnapshotOffer => context.become(becomingCommand) @@ -242,14 +275,20 @@ object PersistentActorSpec { override def receiveRecover = becomingRecover.orElse(super.receiveRecover) - val becomingCommand: Receive = receiveCommand orElse { + val becomingCommand: Receive = receiveCommand.orElse { case "It's changing me" => probe ! 
"I am becoming" } } - class SnapshottingBecomingPersistentActorWithLevelDbRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends SnapshottingBecomingPersistentActor(name, probe) with LevelDbRuntimePluginConfig - class SnapshottingBecomingPersistentActorWithInmemRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends SnapshottingBecomingPersistentActor(name, probe) with InmemRuntimePluginConfig + class SnapshottingBecomingPersistentActorWithLevelDbRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends SnapshottingBecomingPersistentActor(name, probe) + with LevelDbRuntimePluginConfig + class SnapshottingBecomingPersistentActorWithInmemRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends SnapshottingBecomingPersistentActor(name, probe) + with InmemRuntimePluginConfig class ReplyInEventHandlerPersistentActor(name: String) extends ExamplePersistentActor(name) { val receiveCommand: Receive = { @@ -257,14 +296,16 @@ object PersistentActorSpec { } } class ReplyInEventHandlerPersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends ReplyInEventHandlerPersistentActor(name) with LevelDbRuntimePluginConfig + extends ReplyInEventHandlerPersistentActor(name) + with LevelDbRuntimePluginConfig class ReplyInEventHandlerPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends ReplyInEventHandlerPersistentActor(name) with InmemRuntimePluginConfig + extends ReplyInEventHandlerPersistentActor(name) + with InmemRuntimePluginConfig class AsyncPersistPersistentActor(name: String) extends ExamplePersistentActor(name) { var counter = 0 - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => sender() ! 
data persistAsync(Evt(s"$data-${incCounter()}")) { evt => @@ -285,18 +326,20 @@ object PersistentActorSpec { } class AsyncPersistPersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistPersistentActor(name) with LevelDbRuntimePluginConfig + extends AsyncPersistPersistentActor(name) + with LevelDbRuntimePluginConfig class AsyncPersistPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistPersistentActor(name) with InmemRuntimePluginConfig + extends AsyncPersistPersistentActor(name) + with InmemRuntimePluginConfig class AsyncPersistThreeTimesPersistentActor(name: String) extends ExamplePersistentActor(name) { var counter = 0 - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => sender() ! data - 1 to 3 foreach { i => + (1 to 3).foreach { i => persistAsync(Evt(s"$data-${incCounter()}")) { evt => sender() ! ("a" + evt.data.toString.drop(1)) // c-1 => a-1, as in "ack" } @@ -309,16 +352,18 @@ object PersistentActorSpec { } } class AsyncPersistThreeTimesPersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistThreeTimesPersistentActor(name) with LevelDbRuntimePluginConfig + extends AsyncPersistThreeTimesPersistentActor(name) + with LevelDbRuntimePluginConfig class AsyncPersistThreeTimesPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistThreeTimesPersistentActor(name) with InmemRuntimePluginConfig + extends AsyncPersistThreeTimesPersistentActor(name) + with InmemRuntimePluginConfig class AsyncPersistSameEventTwicePersistentActor(name: String) extends ExamplePersistentActor(name) { // atomic because used from inside the *async* callbacks val sendMsgCounter = new AtomicInteger() - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse 
{ case Cmd(data) => sender() ! data val event = Evt(data) @@ -328,17 +373,22 @@ object PersistentActorSpec { Thread.sleep(300) sender() ! s"${evt.data}-a-${sendMsgCounter.incrementAndGet()}" } - persistAsync(event) { evt => sender() ! s"${evt.data}-b-${sendMsgCounter.incrementAndGet()}" } + persistAsync(event) { evt => + sender() ! s"${evt.data}-b-${sendMsgCounter.incrementAndGet()}" + } } } - class AsyncPersistSameEventTwicePersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistSameEventTwicePersistentActor(name) with LevelDbRuntimePluginConfig + class AsyncPersistSameEventTwicePersistentActorWithLevelDbRuntimePluginConfig(name: String, + val providedConfig: Config) + extends AsyncPersistSameEventTwicePersistentActor(name) + with LevelDbRuntimePluginConfig class AsyncPersistSameEventTwicePersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistSameEventTwicePersistentActor(name) with InmemRuntimePluginConfig + extends AsyncPersistSameEventTwicePersistentActor(name) + with InmemRuntimePluginConfig class PersistAllNilPersistentActor(name: String) extends ExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data: String) if data contains "defer" => deferAsync("before-nil")(sender() ! _) persistAll(Nil)(_ => sender() ! 
"Nil") @@ -353,15 +403,17 @@ object PersistentActorSpec { } } class PersistAllNilPersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends PersistAllNilPersistentActor(name) with LevelDbRuntimePluginConfig + extends PersistAllNilPersistentActor(name) + with LevelDbRuntimePluginConfig class PersistAllNilPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends PersistAllNilPersistentActor(name) with InmemRuntimePluginConfig + extends PersistAllNilPersistentActor(name) + with InmemRuntimePluginConfig class AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActor(name: String) extends ExamplePersistentActor(name) { var counter = 0 - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => sender() ! data @@ -384,16 +436,21 @@ object PersistentActorSpec { counter } } - class AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActor(name) with LevelDbRuntimePluginConfig - class AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActor(name) with InmemRuntimePluginConfig + class AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActorWithLevelDbRuntimePluginConfig( + name: String, + val providedConfig: Config) + extends AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActor(name) + with LevelDbRuntimePluginConfig + class AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActorWithInmemRuntimePluginConfig(name: String, + val providedConfig: Config) + extends AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActor(name) + with InmemRuntimePluginConfig class AsyncPersistAndPersistMixedSyncAsyncPersistentActor(name: String) extends ExamplePersistentActor(name) { var 
sendMsgCounter = 0 - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => sender() ! data @@ -411,15 +468,19 @@ object PersistentActorSpec { sendMsgCounter } } - class AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistAndPersistMixedSyncAsyncPersistentActor(name) with LevelDbRuntimePluginConfig - class AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistAndPersistMixedSyncAsyncPersistentActor(name) with InmemRuntimePluginConfig + class AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithLevelDbRuntimePluginConfig(name: String, + val providedConfig: Config) + extends AsyncPersistAndPersistMixedSyncAsyncPersistentActor(name) + with LevelDbRuntimePluginConfig + class AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithInmemRuntimePluginConfig(name: String, + val providedConfig: Config) + extends AsyncPersistAndPersistMixedSyncAsyncPersistentActor(name) + with InmemRuntimePluginConfig class AsyncPersistHandlerCorrelationCheck(name: String) extends ExamplePersistentActor(name) { var counter = 0 - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => persistAsync(Evt(data)) { evt => if (data != evt.data) @@ -435,9 +496,11 @@ object PersistentActorSpec { } } class AsyncPersistHandlerCorrelationCheckWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistHandlerCorrelationCheck(name) with LevelDbRuntimePluginConfig + extends AsyncPersistHandlerCorrelationCheck(name) + with LevelDbRuntimePluginConfig class AsyncPersistHandlerCorrelationCheckWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends AsyncPersistHandlerCorrelationCheck(name) with InmemRuntimePluginConfig + 
extends AsyncPersistHandlerCorrelationCheck(name) + with InmemRuntimePluginConfig class AnyValEventPersistentActor(name: String) extends ExamplePersistentActor(name) { val receiveCommand: Receive = { @@ -445,11 +508,14 @@ object PersistentActorSpec { } } class AnyValEventPersistentActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends AnyValEventPersistentActor(name) with LevelDbRuntimePluginConfig + extends AnyValEventPersistentActor(name) + with LevelDbRuntimePluginConfig class AnyValEventPersistentActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends AnyValEventPersistentActor(name) with InmemRuntimePluginConfig + extends AnyValEventPersistentActor(name) + with InmemRuntimePluginConfig - class HandleRecoveryFinishedEventPersistentActor(name: String, probe: ActorRef) extends SnapshottingPersistentActor(name, probe) { + class HandleRecoveryFinishedEventPersistentActor(name: String, probe: ActorRef) + extends SnapshottingPersistentActor(name, probe) { val sendingRecover: Receive = { case msg: SnapshotOffer => // sending ourself a normal message tests @@ -464,15 +530,21 @@ object PersistentActorSpec { override def receiveRecover = sendingRecover.orElse(super.receiveRecover) - override def receiveCommand: Receive = super.receiveCommand orElse { + override def receiveCommand: Receive = super.receiveCommand.orElse { case s: String => probe ! 
s } } - class HandleRecoveryFinishedEventPersistentActorWithLevelDbRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends HandleRecoveryFinishedEventPersistentActor(name, probe) with LevelDbRuntimePluginConfig - class HandleRecoveryFinishedEventPersistentActorWithInmemRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends HandleRecoveryFinishedEventPersistentActor(name, probe) with InmemRuntimePluginConfig + class HandleRecoveryFinishedEventPersistentActorWithLevelDbRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends HandleRecoveryFinishedEventPersistentActor(name, probe) + with LevelDbRuntimePluginConfig + class HandleRecoveryFinishedEventPersistentActorWithInmemRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends HandleRecoveryFinishedEventPersistentActor(name, probe) + with InmemRuntimePluginConfig trait DeferActor extends PersistentActor { def doDefer[A](event: A)(handler: A => Unit): Unit @@ -497,13 +569,17 @@ object PersistentActorSpec { class DeferringAsyncWithPersistActor(name: String) extends DeferringWithPersistActor(name) with DeferAsync class DeferringSyncWithPersistActor(name: String) extends DeferringWithPersistActor(name) with DeferSync class DeferringAsyncWithPersistActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringAsyncWithPersistActor(name) with LevelDbRuntimePluginConfig + extends DeferringAsyncWithPersistActor(name) + with LevelDbRuntimePluginConfig class DeferringSyncWithPersistActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringSyncWithPersistActor(name) with LevelDbRuntimePluginConfig + extends DeferringSyncWithPersistActor(name) + with LevelDbRuntimePluginConfig class DeferringAsyncWithPersistActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends 
DeferringAsyncWithPersistActor(name) with InmemRuntimePluginConfig + extends DeferringAsyncWithPersistActor(name) + with InmemRuntimePluginConfig class DeferringSyncWithPersistActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringSyncWithPersistActor(name) with InmemRuntimePluginConfig + extends DeferringSyncWithPersistActor(name) + with InmemRuntimePluginConfig abstract class DeferringWithAsyncPersistActor(name: String) extends ExamplePersistentActor(name) with DeferActor { val receiveCommand: Receive = { @@ -517,15 +593,21 @@ object PersistentActorSpec { class DeferringAsyncWithAsyncPersistActor(name: String) extends DeferringWithAsyncPersistActor(name) with DeferAsync class DeferringSyncWithAsyncPersistActor(name: String) extends DeferringWithAsyncPersistActor(name) with DeferSync class DeferringAsyncWithAsyncPersistActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringAsyncWithAsyncPersistActor(name) with LevelDbRuntimePluginConfig + extends DeferringAsyncWithAsyncPersistActor(name) + with LevelDbRuntimePluginConfig class DeferringSyncWithAsyncPersistActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringSyncWithAsyncPersistActor(name) with LevelDbRuntimePluginConfig + extends DeferringSyncWithAsyncPersistActor(name) + with LevelDbRuntimePluginConfig class DeferringAsyncWithAsyncPersistActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringAsyncWithAsyncPersistActor(name) with InmemRuntimePluginConfig + extends DeferringAsyncWithAsyncPersistActor(name) + with InmemRuntimePluginConfig class DeferringSyncWithAsyncPersistActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringSyncWithAsyncPersistActor(name) with InmemRuntimePluginConfig + extends DeferringSyncWithAsyncPersistActor(name) + with InmemRuntimePluginConfig - abstract class 
DeferringMixedCallsPPADDPADPersistActor(name: String) extends ExamplePersistentActor(name) with DeferActor { + abstract class DeferringMixedCallsPPADDPADPersistActor(name: String) + extends ExamplePersistentActor(name) + with DeferActor { val receiveCommand: Receive = { case Cmd(data) => persist(s"p-$data-1") { sender() ! _ } @@ -536,18 +618,32 @@ object PersistentActorSpec { doDefer(s"d-$data-6") { sender() ! _ } } } - class DeferringAsyncMixedCallsPPADDPADPersistActor(name: String) extends DeferringMixedCallsPPADDPADPersistActor(name) with DeferAsync - class DeferringSyncMixedCallsPPADDPADPersistActor(name: String) extends DeferringMixedCallsPPADDPADPersistActor(name) with DeferSync - class DeferringAsyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringAsyncMixedCallsPPADDPADPersistActor(name) with LevelDbRuntimePluginConfig - class DeferringSyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringSyncMixedCallsPPADDPADPersistActor(name) with LevelDbRuntimePluginConfig - class DeferringAsyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringAsyncMixedCallsPPADDPADPersistActor(name) with InmemRuntimePluginConfig - class DeferringSyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringSyncMixedCallsPPADDPADPersistActor(name) with InmemRuntimePluginConfig + class DeferringAsyncMixedCallsPPADDPADPersistActor(name: String) + extends DeferringMixedCallsPPADDPADPersistActor(name) + with DeferAsync + class DeferringSyncMixedCallsPPADDPADPersistActor(name: String) + extends DeferringMixedCallsPPADDPADPersistActor(name) + with DeferSync + class DeferringAsyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig(name: String, + val providedConfig: Config) + extends 
DeferringAsyncMixedCallsPPADDPADPersistActor(name) + with LevelDbRuntimePluginConfig + class DeferringSyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig(name: String, + val providedConfig: Config) + extends DeferringSyncMixedCallsPPADDPADPersistActor(name) + with LevelDbRuntimePluginConfig + class DeferringAsyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig(name: String, + val providedConfig: Config) + extends DeferringAsyncMixedCallsPPADDPADPersistActor(name) + with InmemRuntimePluginConfig + class DeferringSyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig(name: String, + val providedConfig: Config) + extends DeferringSyncMixedCallsPPADDPADPersistActor(name) + with InmemRuntimePluginConfig - abstract class DeferringWithNoPersistCallsPersistActor(name: String) extends ExamplePersistentActor(name) with DeferActor { + abstract class DeferringWithNoPersistCallsPersistActor(name: String) + extends ExamplePersistentActor(name) + with DeferActor { val receiveCommand: Receive = { case Cmd(_) => doDefer("d-1") { sender() ! _ } @@ -555,22 +651,35 @@ object PersistentActorSpec { doDefer("d-3") { sender() ! 
_ } } } - class DeferringAsyncWithNoPersistCallsPersistActor(name: String) extends DeferringWithNoPersistCallsPersistActor(name) with DeferAsync - class DeferringSyncWithNoPersistCallsPersistActor(name: String) extends DeferringWithNoPersistCallsPersistActor(name) with DeferSync - class DeferringAsyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringAsyncWithNoPersistCallsPersistActor(name) with LevelDbRuntimePluginConfig - class DeferringSyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringSyncWithNoPersistCallsPersistActor(name) with LevelDbRuntimePluginConfig - class DeferringAsyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringAsyncWithNoPersistCallsPersistActor(name) with InmemRuntimePluginConfig - class DeferringSyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringSyncWithNoPersistCallsPersistActor(name) with InmemRuntimePluginConfig + class DeferringAsyncWithNoPersistCallsPersistActor(name: String) + extends DeferringWithNoPersistCallsPersistActor(name) + with DeferAsync + class DeferringSyncWithNoPersistCallsPersistActor(name: String) + extends DeferringWithNoPersistCallsPersistActor(name) + with DeferSync + class DeferringAsyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig(name: String, + val providedConfig: Config) + extends DeferringAsyncWithNoPersistCallsPersistActor(name) + with LevelDbRuntimePluginConfig + class DeferringSyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig(name: String, + val providedConfig: Config) + extends DeferringSyncWithNoPersistCallsPersistActor(name) + with LevelDbRuntimePluginConfig + class DeferringAsyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig(name: String, + val providedConfig: Config) + extends 
DeferringAsyncWithNoPersistCallsPersistActor(name) + with InmemRuntimePluginConfig + class DeferringSyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig(name: String, + val providedConfig: Config) + extends DeferringSyncWithNoPersistCallsPersistActor(name) + with InmemRuntimePluginConfig abstract class DeferringActor(name: String) extends ExamplePersistentActor(name) with DeferActor { val receiveCommand: Receive = { case Cmd(data) => sender() ! data - persist(()) { _ => } // skip calling defer immediately because of empty pending invocations + persist(()) { _ => + } // skip calling defer immediately because of empty pending invocations doDefer(Evt(s"$data-defer")) { evt => sender() ! evt.data } @@ -579,13 +688,17 @@ object PersistentActorSpec { class DeferringAsyncActor(name: String) extends DeferringActor(name) with DeferAsync class DeferringSyncActor(name: String) extends DeferringActor(name) with DeferSync class DeferringAsyncActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringAsyncActor(name) with LevelDbRuntimePluginConfig + extends DeferringAsyncActor(name) + with LevelDbRuntimePluginConfig class DeferringSyncActorWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringSyncActor(name) with LevelDbRuntimePluginConfig + extends DeferringSyncActor(name) + with LevelDbRuntimePluginConfig class DeferringAsyncActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringAsyncActor(name) with InmemRuntimePluginConfig + extends DeferringAsyncActor(name) + with InmemRuntimePluginConfig class DeferringSyncActorWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends DeferringSyncActor(name) with InmemRuntimePluginConfig + extends DeferringSyncActor(name) + with InmemRuntimePluginConfig class StressOrdering(name: String) extends ExamplePersistentActor(name) { val receiveCommand: Receive = { @@ -601,9 +714,11 @@ object 
PersistentActorSpec { } } class StressOrderingWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends StressOrdering(name) with LevelDbRuntimePluginConfig + extends StressOrdering(name) + with LevelDbRuntimePluginConfig class StressOrderingWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends StressOrdering(name) with InmemRuntimePluginConfig + extends StressOrdering(name) + with InmemRuntimePluginConfig class RecoverMessageCausedRestart(name: String) extends NamedPersistentActor(name) { var master: ActorRef = _ @@ -618,7 +733,7 @@ object PersistentActorSpec { if (master ne null) { master ! "failed with " + reason.getClass.getSimpleName + " while processing " + message.getOrElse("") } - context stop self + context.stop(self) } override def receiveRecover = { @@ -627,9 +742,11 @@ object PersistentActorSpec { } class RecoverMessageCausedRestartWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends RecoverMessageCausedRestart(name) with LevelDbRuntimePluginConfig + extends RecoverMessageCausedRestart(name) + with LevelDbRuntimePluginConfig class RecoverMessageCausedRestartWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends RecoverMessageCausedRestart(name) with InmemRuntimePluginConfig + extends RecoverMessageCausedRestart(name) + with InmemRuntimePluginConfig class MultipleAndNestedPersists(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { val receiveCommand: Receive = { @@ -637,18 +754,26 @@ object PersistentActorSpec { probe ! s persist(s + "-outer-1") { outer => probe ! outer - persist(s + "-inner-1") { inner => probe ! inner } + persist(s + "-inner-1") { inner => + probe ! inner + } } persist(s + "-outer-2") { outer => probe ! outer - persist(s + "-inner-2") { inner => probe ! inner } + persist(s + "-inner-2") { inner => + probe ! 
inner + } } } } - class MultipleAndNestedPersistsWithLevelDbRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends MultipleAndNestedPersists(name, probe) with LevelDbRuntimePluginConfig + class MultipleAndNestedPersistsWithLevelDbRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends MultipleAndNestedPersists(name, probe) + with LevelDbRuntimePluginConfig class MultipleAndNestedPersistsWithInmemRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends MultipleAndNestedPersists(name, probe) with InmemRuntimePluginConfig + extends MultipleAndNestedPersists(name, probe) + with InmemRuntimePluginConfig class MultipleAndNestedPersistAsyncs(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { val receiveCommand: Receive = { @@ -656,18 +781,28 @@ object PersistentActorSpec { probe ! s persistAsync(s + "-outer-1") { outer => probe ! outer - persistAsync(s + "-inner-1") { inner => probe ! inner } + persistAsync(s + "-inner-1") { inner => + probe ! inner + } } persistAsync(s + "-outer-2") { outer => probe ! outer - persistAsync(s + "-inner-2") { inner => probe ! inner } + persistAsync(s + "-inner-2") { inner => + probe ! 
inner + } } } } - class MultipleAndNestedPersistAsyncsWithLevelDbRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends MultipleAndNestedPersistAsyncs(name, probe) with LevelDbRuntimePluginConfig - class MultipleAndNestedPersistAsyncsWithInmemRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends MultipleAndNestedPersistAsyncs(name, probe) with InmemRuntimePluginConfig + class MultipleAndNestedPersistAsyncsWithLevelDbRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends MultipleAndNestedPersistAsyncs(name, probe) + with LevelDbRuntimePluginConfig + class MultipleAndNestedPersistAsyncsWithInmemRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends MultipleAndNestedPersistAsyncs(name, probe) + with InmemRuntimePluginConfig class DeeplyNestedPersistAsyncs(name: String, maxDepth: Int, probe: ActorRef) extends ExamplePersistentActor(name) { var currentDepths = Map.empty[String, Int].withDefaultValue(1) @@ -690,10 +825,18 @@ object PersistentActorSpec { persistAsync(s + "-" + 1)(weMustGoDeeper) } } - class DeeplyNestedPersistAsyncsWithLevelDbRuntimePluginConfig(name: String, maxDepth: Int, probe: ActorRef, val providedConfig: Config) - extends DeeplyNestedPersistAsyncs(name, maxDepth, probe) with LevelDbRuntimePluginConfig - class DeeplyNestedPersistAsyncsWithInmemRuntimePluginConfig(name: String, maxDepth: Int, probe: ActorRef, val providedConfig: Config) - extends DeeplyNestedPersistAsyncs(name, maxDepth, probe) with InmemRuntimePluginConfig + class DeeplyNestedPersistAsyncsWithLevelDbRuntimePluginConfig(name: String, + maxDepth: Int, + probe: ActorRef, + val providedConfig: Config) + extends DeeplyNestedPersistAsyncs(name, maxDepth, probe) + with LevelDbRuntimePluginConfig + class DeeplyNestedPersistAsyncsWithInmemRuntimePluginConfig(name: String, + maxDepth: Int, + probe: ActorRef, + val providedConfig: Config) + extends 
DeeplyNestedPersistAsyncs(name, maxDepth, probe) + with InmemRuntimePluginConfig class NestedPersistNormalAndAsyncs(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { val receiveCommand: Receive = { @@ -713,10 +856,16 @@ object PersistentActorSpec { } } } - class NestedPersistNormalAndAsyncsWithLevelDbRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends NestedPersistNormalAndAsyncs(name, probe) with LevelDbRuntimePluginConfig - class NestedPersistNormalAndAsyncsWithInmemRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends NestedPersistNormalAndAsyncs(name, probe) with InmemRuntimePluginConfig + class NestedPersistNormalAndAsyncsWithLevelDbRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends NestedPersistNormalAndAsyncs(name, probe) + with LevelDbRuntimePluginConfig + class NestedPersistNormalAndAsyncsWithInmemRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends NestedPersistNormalAndAsyncs(name, probe) + with InmemRuntimePluginConfig class NestedPersistAsyncsAndNormal(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { val receiveCommand: Receive = { @@ -736,10 +885,16 @@ object PersistentActorSpec { } } } - class NestedPersistAsyncsAndNormalWithLevelDbRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends NestedPersistAsyncsAndNormal(name, probe) with LevelDbRuntimePluginConfig - class NestedPersistAsyncsAndNormalWithInmemRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends NestedPersistAsyncsAndNormal(name, probe) with InmemRuntimePluginConfig + class NestedPersistAsyncsAndNormalWithLevelDbRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends NestedPersistAsyncsAndNormal(name, probe) + with LevelDbRuntimePluginConfig + class 
NestedPersistAsyncsAndNormalWithInmemRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends NestedPersistAsyncsAndNormal(name, probe) + with InmemRuntimePluginConfig class NestedPersistInAsyncEnforcesStashing(name: String, probe: ActorRef) extends ExamplePersistentActor(name) { val receiveCommand: Receive = { @@ -756,10 +911,16 @@ object PersistentActorSpec { } } } - class NestedPersistInAsyncEnforcesStashingWithLevelDbRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends NestedPersistInAsyncEnforcesStashing(name, probe) with LevelDbRuntimePluginConfig - class NestedPersistInAsyncEnforcesStashingWithInmemRuntimePluginConfig(name: String, probe: ActorRef, val providedConfig: Config) - extends NestedPersistInAsyncEnforcesStashing(name, probe) with InmemRuntimePluginConfig + class NestedPersistInAsyncEnforcesStashingWithLevelDbRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends NestedPersistInAsyncEnforcesStashing(name, probe) + with LevelDbRuntimePluginConfig + class NestedPersistInAsyncEnforcesStashingWithInmemRuntimePluginConfig(name: String, + probe: ActorRef, + val providedConfig: Config) + extends NestedPersistInAsyncEnforcesStashing(name, probe) + with InmemRuntimePluginConfig class DeeplyNestedPersists(name: String, maxDepth: Int, probe: ActorRef) extends ExamplePersistentActor(name) { var currentDepths = Map.empty[String, Int].withDefaultValue(1) @@ -782,18 +943,30 @@ object PersistentActorSpec { persist(s + "-" + 1)(weMustGoDeeper) } } - class DeeplyNestedPersistsWithLevelDbRuntimePluginConfig(name: String, maxDepth: Int, probe: ActorRef, val providedConfig: Config) - extends DeeplyNestedPersists(name, maxDepth, probe) with LevelDbRuntimePluginConfig - class DeeplyNestedPersistsWithInmemRuntimePluginConfig(name: String, maxDepth: Int, probe: ActorRef, val providedConfig: Config) - extends DeeplyNestedPersists(name, maxDepth, probe) with 
InmemRuntimePluginConfig + class DeeplyNestedPersistsWithLevelDbRuntimePluginConfig(name: String, + maxDepth: Int, + probe: ActorRef, + val providedConfig: Config) + extends DeeplyNestedPersists(name, maxDepth, probe) + with LevelDbRuntimePluginConfig + class DeeplyNestedPersistsWithInmemRuntimePluginConfig(name: String, + maxDepth: Int, + probe: ActorRef, + val providedConfig: Config) + extends DeeplyNestedPersists(name, maxDepth, probe) + with InmemRuntimePluginConfig - class StackableTestPersistentActor(val probe: ActorRef) extends StackableTestPersistentActor.BaseActor with PersistentActor with StackableTestPersistentActor.MixinActor { + class StackableTestPersistentActor(val probe: ActorRef) + extends StackableTestPersistentActor.BaseActor + with PersistentActor + with StackableTestPersistentActor.MixinActor { override def persistenceId: String = "StackableTestPersistentActor" def receiveCommand = { - case "restart" => throw new Exception("triggering restart") with NoStackTrace { - override def toString = "Boom!" - } + case "restart" => + throw new Exception("triggering restart") with NoStackTrace { + override def toString = "Boom!" 
+ } } def receiveRecover = { @@ -822,9 +995,11 @@ object PersistentActorSpec { } class StackableTestPersistentActorWithLevelDbRuntimePluginConfig(probe: ActorRef, val providedConfig: Config) - extends StackableTestPersistentActor(probe) with LevelDbRuntimePluginConfig + extends StackableTestPersistentActor(probe) + with LevelDbRuntimePluginConfig class StackableTestPersistentActorWithInmemRuntimePluginConfig(probe: ActorRef, val providedConfig: Config) - extends StackableTestPersistentActor(probe) with InmemRuntimePluginConfig + extends StackableTestPersistentActor(probe) + with InmemRuntimePluginConfig object StackableTestPersistentActor { @@ -903,14 +1078,16 @@ object PersistentActorSpec { override def onRecoveryFailure(cause: scala.Throwable, event: Option[Any]): Unit = () - def receiveCommand = commonBehavior orElse { + def receiveCommand = commonBehavior.orElse { case Cmd(d) => persist(Evt(d))(updateState) } } class PersistInRecoveryWithLevelDbRuntimePluginConfig(name: String, val providedConfig: Config) - extends PersistInRecovery(name) with LevelDbRuntimePluginConfig + extends PersistInRecovery(name) + with LevelDbRuntimePluginConfig class PersistInRecoveryWithInmemRuntimePluginConfig(name: String, val providedConfig: Config) - extends PersistInRecovery(name) with InmemRuntimePluginConfig + extends PersistInRecovery(name) + with InmemRuntimePluginConfig class ExceptionActor(name: String) extends ExamplePersistentActor(name) { override def receiveCommand = commonBehavior @@ -937,17 +1114,23 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi protected def behavior3PersistentActor: ActorRef = namedPersistentActor[Behavior3PersistentActor] - protected def changeBehaviorInFirstEventHandlerPersistentActor: ActorRef = namedPersistentActor[ChangeBehaviorInFirstEventHandlerPersistentActor] + protected def changeBehaviorInFirstEventHandlerPersistentActor: ActorRef = + 
namedPersistentActor[ChangeBehaviorInFirstEventHandlerPersistentActor] - protected def changeBehaviorInLastEventHandlerPersistentActor: ActorRef = namedPersistentActor[ChangeBehaviorInLastEventHandlerPersistentActor] + protected def changeBehaviorInLastEventHandlerPersistentActor: ActorRef = + namedPersistentActor[ChangeBehaviorInLastEventHandlerPersistentActor] - protected def changeBehaviorInCommandHandlerFirstPersistentActor: ActorRef = namedPersistentActor[ChangeBehaviorInCommandHandlerFirstPersistentActor] + protected def changeBehaviorInCommandHandlerFirstPersistentActor: ActorRef = + namedPersistentActor[ChangeBehaviorInCommandHandlerFirstPersistentActor] - protected def changeBehaviorInCommandHandlerLastPersistentActor: ActorRef = namedPersistentActor[ChangeBehaviorInCommandHandlerLastPersistentActor] + protected def changeBehaviorInCommandHandlerLastPersistentActor: ActorRef = + namedPersistentActor[ChangeBehaviorInCommandHandlerLastPersistentActor] - protected def snapshottingPersistentActor: ActorRef = system.actorOf(Props(classOf[SnapshottingPersistentActor], name, testActor)) + protected def snapshottingPersistentActor: ActorRef = + system.actorOf(Props(classOf[SnapshottingPersistentActor], name, testActor)) - protected def snapshottingBecomingPersistentActor: ActorRef = system.actorOf(Props(classOf[SnapshottingBecomingPersistentActor], name, testActor)) + protected def snapshottingBecomingPersistentActor: ActorRef = + system.actorOf(Props(classOf[SnapshottingBecomingPersistentActor], name, testActor)) protected def replyInEventHandlerPersistentActor: ActorRef = namedPersistentActor[ReplyInEventHandlerPersistentActor] @@ -955,45 +1138,61 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi protected def asyncPersistPersistentActor: ActorRef = namedPersistentActor[AsyncPersistPersistentActor] - protected def asyncPersistThreeTimesPersistentActor: ActorRef = namedPersistentActor[AsyncPersistThreeTimesPersistentActor] + 
protected def asyncPersistThreeTimesPersistentActor: ActorRef = + namedPersistentActor[AsyncPersistThreeTimesPersistentActor] - protected def asyncPersistSameEventTwicePersistentActor: ActorRef = namedPersistentActor[AsyncPersistSameEventTwicePersistentActor] + protected def asyncPersistSameEventTwicePersistentActor: ActorRef = + namedPersistentActor[AsyncPersistSameEventTwicePersistentActor] protected def persistAllNilPersistentActor: ActorRef = namedPersistentActor[PersistAllNilPersistentActor] - protected def asyncPersistAndPersistMixedSyncAsyncSyncPersistentActor: ActorRef = namedPersistentActor[AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActor] + protected def asyncPersistAndPersistMixedSyncAsyncSyncPersistentActor: ActorRef = + namedPersistentActor[AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActor] - protected def asyncPersistAndPersistMixedSyncAsyncPersistentActor: ActorRef = namedPersistentActor[AsyncPersistAndPersistMixedSyncAsyncPersistentActor] + protected def asyncPersistAndPersistMixedSyncAsyncPersistentActor: ActorRef = + namedPersistentActor[AsyncPersistAndPersistMixedSyncAsyncPersistentActor] - protected def asyncPersistHandlerCorrelationCheck: ActorRef = namedPersistentActor[AsyncPersistHandlerCorrelationCheck] + protected def asyncPersistHandlerCorrelationCheck: ActorRef = + namedPersistentActor[AsyncPersistHandlerCorrelationCheck] protected def deferringWithPersistActor: ActorRef = namedPersistentActor[DeferringWithPersistActor] protected def deferringWithAsyncPersistActor: ActorRef = namedPersistentActor[DeferringWithAsyncPersistActor] - protected def deferringMixedCallsPPADDPADPersistActor: ActorRef = namedPersistentActor[DeferringMixedCallsPPADDPADPersistActor] + protected def deferringMixedCallsPPADDPADPersistActor: ActorRef = + namedPersistentActor[DeferringMixedCallsPPADDPADPersistActor] - protected def deferringWithNoPersistCallsPersistActor: ActorRef = namedPersistentActor[DeferringWithNoPersistCallsPersistActor] + protected 
def deferringWithNoPersistCallsPersistActor: ActorRef = + namedPersistentActor[DeferringWithNoPersistCallsPersistActor] - protected def handleRecoveryFinishedEventPersistentActor: ActorRef = system.actorOf(Props(classOf[HandleRecoveryFinishedEventPersistentActor], name, testActor)) + protected def handleRecoveryFinishedEventPersistentActor: ActorRef = + system.actorOf(Props(classOf[HandleRecoveryFinishedEventPersistentActor], name, testActor)) protected def stressOrdering: ActorRef = namedPersistentActor[StressOrdering] - protected def stackableTestPersistentActor: ActorRef = system.actorOf(Props(classOf[StackableTestPersistentActor], testActor)) + protected def stackableTestPersistentActor: ActorRef = + system.actorOf(Props(classOf[StackableTestPersistentActor], testActor)) - protected def multipleAndNestedPersists: ActorRef = system.actorOf(Props(classOf[MultipleAndNestedPersists], name, testActor)) + protected def multipleAndNestedPersists: ActorRef = + system.actorOf(Props(classOf[MultipleAndNestedPersists], name, testActor)) - protected def multipleAndNestedPersistAsyncs: ActorRef = system.actorOf(Props(classOf[MultipleAndNestedPersistAsyncs], name, testActor)) + protected def multipleAndNestedPersistAsyncs: ActorRef = + system.actorOf(Props(classOf[MultipleAndNestedPersistAsyncs], name, testActor)) - protected def deeplyNestedPersists(nestedPersists: Int): ActorRef = system.actorOf(Props(classOf[DeeplyNestedPersists], name, nestedPersists, testActor)) + protected def deeplyNestedPersists(nestedPersists: Int): ActorRef = + system.actorOf(Props(classOf[DeeplyNestedPersists], name, nestedPersists, testActor)) - protected def deeplyNestedPersistAsyncs(nestedPersistAsyncs: Int): ActorRef = system.actorOf(Props(classOf[DeeplyNestedPersistAsyncs], name, nestedPersistAsyncs, testActor)) + protected def deeplyNestedPersistAsyncs(nestedPersistAsyncs: Int): ActorRef = + system.actorOf(Props(classOf[DeeplyNestedPersistAsyncs], name, nestedPersistAsyncs, testActor)) - 
protected def nestedPersistNormalAndAsyncs: ActorRef = system.actorOf(Props(classOf[NestedPersistNormalAndAsyncs], name, testActor)) + protected def nestedPersistNormalAndAsyncs: ActorRef = + system.actorOf(Props(classOf[NestedPersistNormalAndAsyncs], name, testActor)) - protected def nestedPersistAsyncsAndNormal: ActorRef = system.actorOf(Props(classOf[NestedPersistAsyncsAndNormal], name, testActor)) + protected def nestedPersistAsyncsAndNormal: ActorRef = + system.actorOf(Props(classOf[NestedPersistAsyncsAndNormal], name, testActor)) - protected def nestedPersistInAsyncEnforcesStashing: ActorRef = system.actorOf(Props(classOf[NestedPersistInAsyncEnforcesStashing], name, testActor)) + protected def nestedPersistInAsyncEnforcesStashing: ActorRef = + system.actorOf(Props(classOf[NestedPersistInAsyncEnforcesStashing], name, testActor)) protected def persistInRecovery: ActorRef = namedPersistentActor[PersistInRecovery] @@ -1003,17 +1202,22 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi protected def deferringSyncWithPersistActor: ActorRef = namedPersistentActor[DeferringSyncWithPersistActor] - protected def deferringAsyncWithAsyncPersistActor: ActorRef = namedPersistentActor[DeferringAsyncWithAsyncPersistActor] + protected def deferringAsyncWithAsyncPersistActor: ActorRef = + namedPersistentActor[DeferringAsyncWithAsyncPersistActor] protected def deferringSyncWithAsyncPersistActor: ActorRef = namedPersistentActor[DeferringSyncWithAsyncPersistActor] - protected def deferringAsyncMixedCallsPPADDPADPersistActor: ActorRef = namedPersistentActor[DeferringAsyncMixedCallsPPADDPADPersistActor] + protected def deferringAsyncMixedCallsPPADDPADPersistActor: ActorRef = + namedPersistentActor[DeferringAsyncMixedCallsPPADDPADPersistActor] - protected def deferringSyncMixedCallsPPADDPADPersistActor: ActorRef = namedPersistentActor[DeferringSyncMixedCallsPPADDPADPersistActor] + protected def deferringSyncMixedCallsPPADDPADPersistActor: ActorRef = 
+ namedPersistentActor[DeferringSyncMixedCallsPPADDPADPersistActor] - protected def deferringAsyncWithNoPersistCallsPersistActor: ActorRef = namedPersistentActor[DeferringAsyncWithNoPersistCallsPersistActor] + protected def deferringAsyncWithNoPersistCallsPersistActor: ActorRef = + namedPersistentActor[DeferringAsyncWithNoPersistCallsPersistActor] - protected def deferringSyncWithNoPersistCallsPersistActor: ActorRef = namedPersistentActor[DeferringSyncWithNoPersistCallsPersistActor] + protected def deferringSyncWithNoPersistCallsPersistActor: ActorRef = + namedPersistentActor[DeferringSyncWithNoPersistCallsPersistActor] protected def deferringAsyncActor: ActorRef = namedPersistentActor[DeferringAsyncActor] @@ -1023,7 +1227,7 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi "fail fast if persistenceId is null" in { import akka.testkit.filterEvents filterEvents(EventFilter[ActorInitializationException]()) { - EventFilter.error(message = "requirement failed: persistenceId is [null] for PersistentActor") intercept { + EventFilter.error(message = "requirement failed: persistenceId is [null] for PersistentActor").intercept { val ref = system.actorOf(Props(new NamedPersistentActor(null) { override def receiveRecover: Receive = Actor.emptyBehavior @@ -1037,7 +1241,7 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi "fail fast if persistenceId is an empty string" in { import akka.testkit.filterEvents filterEvents(EventFilter[ActorInitializationException]()) { - EventFilter.error(message = "persistenceId cannot be empty for PersistentActor") intercept { + EventFilter.error(message = "persistenceId cannot be empty for PersistentActor").intercept { val ref = system.actorOf(Props(new NamedPersistentActor(" ") { override def receiveRecover: Receive = Actor.emptyBehavior @@ -1167,9 +1371,11 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi } "support multiple persistAsync calls 
for one command, and execute them 'when possible', not hindering command processing" in { val persistentActor = asyncPersistThreeTimesPersistentActor - val commands = 1 to 10 map { i => Cmd(s"c-$i") } + val commands = (1 to 10).map { i => + Cmd(s"c-$i") + } - commands foreach { i => + commands.foreach { i => Thread.sleep(Random.nextInt(10)) persistentActor ! i } @@ -1179,7 +1385,9 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi val replies = all.filter(r => r.count(_ == '-') == 1) replies should equal(commands.map(_.data)) - val expectedAcks = (3 to 32) map { i => s"a-${i / 3}-${i - 2}" } + val expectedAcks = (3 to 32).map { i => + s"a-${i / 3}-${i - 2}" + } val acks = all.filter(r => r.count(_ == '-') == 2) acks should equal(expectedAcks) } @@ -1188,17 +1396,19 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi // but as we want to remove it soon, keeping the explicit test here. val persistentActor = asyncPersistThreeTimesPersistentActor - val commands = 1 to 10 map { i => Cmd(s"c-$i") } + val commands = (1 to 10).map { i => + Cmd(s"c-$i") + } val probes = Vector.fill(10)(TestProbe()) - (probes zip commands) foreach { + (probes.zip(commands)).foreach { case (p, c) => persistentActor.tell(c, p.ref) } val ackClass = classOf[String] within(3.seconds) { - probes foreach { + probes.foreach { _.expectMsgAllClassOf(ackClass, ackClass, ackClass) } } @@ -1457,8 +1667,8 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi persistentActor ! 
"b" val msgs = receiveN(10).map(_.toString) - val as = msgs.filter(_ startsWith "a") - val bs = msgs.filter(_ startsWith "b") + val as = msgs.filter(_.startsWith("a")) + val bs = msgs.filter(_.startsWith("b")) as should equal(List("a", "a-outer-1", "a-outer-2", "a-inner-1", "a-inner-2")) bs should equal(List("b", "b-outer-1", "b-outer-2", "b-inner-1", "b-inner-2")) } @@ -1580,7 +1790,8 @@ abstract class PersistentActorSpec(config: Config) extends PersistenceSpec(confi } -class LeveldbPersistentActorSpec extends PersistentActorSpec(PersistenceSpec.config("leveldb", "LeveldbPersistentActorSpec")) +class LeveldbPersistentActorSpec + extends PersistentActorSpec(PersistenceSpec.config("leveldb", "LeveldbPersistentActorSpec")) class InmemPersistentActorSpec extends PersistentActorSpec(PersistenceSpec.config("inmem", "InmemPersistentActorSpec")) @@ -1588,205 +1799,377 @@ class InmemPersistentActorSpec extends PersistentActorSpec(PersistenceSpec.confi * Same test suite as [[LeveldbPersistentActorSpec]], the only difference is that all persistent actors are using the * provided [[Config]] instead of the [[Config]] coming from the [[ActorSystem]]. 
*/ -class LeveldbPersistentActorWithRuntimePluginConfigSpec extends PersistentActorSpec( - PersistenceSpec.config("leveldb", "LeveldbPersistentActorWithRuntimePluginConfigSpec") -) { +class LeveldbPersistentActorWithRuntimePluginConfigSpec + extends PersistentActorSpec(PersistenceSpec.config("leveldb", "LeveldbPersistentActorWithRuntimePluginConfigSpec")) { val providedActorConfig: Config = { - ConfigFactory.parseString( - s""" + ConfigFactory + .parseString(s""" | custom.persistence.journal.leveldb.dir = target/journal-LeveldbPersistentActorWithRuntimePluginConfigSpec | custom.persistence.snapshot-store.local.dir = target/snapshots-LeveldbPersistentActorWithRuntimePluginConfigSpec/ - """.stripMargin - ).withValue( - s"custom.persistence.journal.leveldb", - system.settings.config.getValue(s"akka.persistence.journal.leveldb") - ).withValue( - "custom.persistence.snapshot-store.local", - system.settings.config.getValue("akka.persistence.snapshot-store.local") - ) + """.stripMargin) + .withValue(s"custom.persistence.journal.leveldb", + system.settings.config.getValue(s"akka.persistence.journal.leveldb")) + .withValue("custom.persistence.snapshot-store.local", + system.settings.config.getValue("akka.persistence.snapshot-store.local")) } - override protected def behavior1PersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[Behavior1PersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def behavior1PersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[Behavior1PersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def behavior2PersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[Behavior2PersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def behavior2PersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[Behavior2PersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def 
behavior3PersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[Behavior3PersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def behavior3PersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[Behavior3PersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def changeBehaviorInFirstEventHandlerPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[ChangeBehaviorInFirstEventHandlerPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def changeBehaviorInFirstEventHandlerPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ + ChangeBehaviorInFirstEventHandlerPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def changeBehaviorInLastEventHandlerPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[ChangeBehaviorInLastEventHandlerPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def changeBehaviorInLastEventHandlerPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ + ChangeBehaviorInLastEventHandlerPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def changeBehaviorInCommandHandlerFirstPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[ChangeBehaviorInCommandHandlerFirstPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def changeBehaviorInCommandHandlerFirstPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ + ChangeBehaviorInCommandHandlerFirstPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def changeBehaviorInCommandHandlerLastPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[ChangeBehaviorInCommandHandlerLastPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def 
changeBehaviorInCommandHandlerLastPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ + ChangeBehaviorInCommandHandlerLastPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def snapshottingPersistentActor: ActorRef = system.actorOf(Props(classOf[SnapshottingPersistentActorWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def snapshottingPersistentActor: ActorRef = + system.actorOf( + Props(classOf[SnapshottingPersistentActorWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) - override protected def snapshottingBecomingPersistentActor: ActorRef = system.actorOf(Props(classOf[SnapshottingBecomingPersistentActorWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def snapshottingBecomingPersistentActor: ActorRef = + system.actorOf( + Props(classOf[SnapshottingBecomingPersistentActorWithLevelDbRuntimePluginConfig], + name, + testActor, + providedActorConfig)) - override protected def replyInEventHandlerPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[ReplyInEventHandlerPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def replyInEventHandlerPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ReplyInEventHandlerPersistentActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def anyValEventPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[AnyValEventPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def anyValEventPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[AnyValEventPersistentActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def asyncPersistPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[AsyncPersistPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override 
protected def asyncPersistPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[AsyncPersistPersistentActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def asyncPersistThreeTimesPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[AsyncPersistThreeTimesPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def asyncPersistThreeTimesPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[AsyncPersistThreeTimesPersistentActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def asyncPersistSameEventTwicePersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[AsyncPersistSameEventTwicePersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def asyncPersistSameEventTwicePersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[AsyncPersistSameEventTwicePersistentActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def persistAllNilPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[PersistAllNilPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def persistAllNilPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[PersistAllNilPersistentActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def asyncPersistAndPersistMixedSyncAsyncSyncPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def asyncPersistAndPersistMixedSyncAsyncSyncPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ + AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def asyncPersistAndPersistMixedSyncAsyncPersistentActor: ActorRef = 
namedPersistentActorWithProvidedConfig[AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def asyncPersistAndPersistMixedSyncAsyncPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ + AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def asyncPersistHandlerCorrelationCheck: ActorRef = namedPersistentActorWithProvidedConfig[AsyncPersistHandlerCorrelationCheckWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def asyncPersistHandlerCorrelationCheck: ActorRef = + namedPersistentActorWithProvidedConfig[AsyncPersistHandlerCorrelationCheckWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def handleRecoveryFinishedEventPersistentActor: ActorRef = system.actorOf(Props(classOf[HandleRecoveryFinishedEventPersistentActorWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def handleRecoveryFinishedEventPersistentActor: ActorRef = + system.actorOf( + Props(classOf[HandleRecoveryFinishedEventPersistentActorWithLevelDbRuntimePluginConfig], + name, + testActor, + providedActorConfig)) - override protected def stressOrdering: ActorRef = namedPersistentActorWithProvidedConfig[StressOrderingWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def stressOrdering: ActorRef = + namedPersistentActorWithProvidedConfig[StressOrderingWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def stackableTestPersistentActor: ActorRef = system.actorOf(Props(classOf[StackableTestPersistentActorWithLevelDbRuntimePluginConfig], testActor, providedActorConfig)) + override protected def stackableTestPersistentActor: ActorRef = + system.actorOf( + Props(classOf[StackableTestPersistentActorWithLevelDbRuntimePluginConfig], testActor, providedActorConfig)) - override protected def multipleAndNestedPersists: 
ActorRef = system.actorOf(Props(classOf[MultipleAndNestedPersistsWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def multipleAndNestedPersists: ActorRef = + system.actorOf( + Props(classOf[MultipleAndNestedPersistsWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) - override protected def multipleAndNestedPersistAsyncs: ActorRef = system.actorOf(Props(classOf[MultipleAndNestedPersistAsyncsWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def multipleAndNestedPersistAsyncs: ActorRef = + system.actorOf( + Props(classOf[MultipleAndNestedPersistAsyncsWithLevelDbRuntimePluginConfig], + name, + testActor, + providedActorConfig)) - override protected def deeplyNestedPersists(nestedPersists: Int): ActorRef = system.actorOf(Props(classOf[DeeplyNestedPersistsWithLevelDbRuntimePluginConfig], name, nestedPersists, testActor, providedActorConfig)) + override protected def deeplyNestedPersists(nestedPersists: Int): ActorRef = + system.actorOf( + Props(classOf[DeeplyNestedPersistsWithLevelDbRuntimePluginConfig], + name, + nestedPersists, + testActor, + providedActorConfig)) - override protected def deeplyNestedPersistAsyncs(nestedPersistAsyncs: Int): ActorRef = system.actorOf(Props(classOf[DeeplyNestedPersistAsyncsWithLevelDbRuntimePluginConfig], name, nestedPersistAsyncs, testActor, providedActorConfig)) + override protected def deeplyNestedPersistAsyncs(nestedPersistAsyncs: Int): ActorRef = + system.actorOf( + Props(classOf[DeeplyNestedPersistAsyncsWithLevelDbRuntimePluginConfig], + name, + nestedPersistAsyncs, + testActor, + providedActorConfig)) - override protected def nestedPersistNormalAndAsyncs: ActorRef = system.actorOf(Props(classOf[NestedPersistNormalAndAsyncsWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def nestedPersistNormalAndAsyncs: ActorRef = + system.actorOf( + 
Props(classOf[NestedPersistNormalAndAsyncsWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) - override protected def nestedPersistAsyncsAndNormal: ActorRef = system.actorOf(Props(classOf[NestedPersistAsyncsAndNormalWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def nestedPersistAsyncsAndNormal: ActorRef = + system.actorOf( + Props(classOf[NestedPersistAsyncsAndNormalWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) - override protected def nestedPersistInAsyncEnforcesStashing: ActorRef = system.actorOf(Props(classOf[NestedPersistInAsyncEnforcesStashingWithLevelDbRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def nestedPersistInAsyncEnforcesStashing: ActorRef = + system.actorOf( + Props(classOf[NestedPersistInAsyncEnforcesStashingWithLevelDbRuntimePluginConfig], + name, + testActor, + providedActorConfig)) - override protected def persistInRecovery: ActorRef = namedPersistentActorWithProvidedConfig[PersistInRecoveryWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def persistInRecovery: ActorRef = + namedPersistentActorWithProvidedConfig[PersistInRecoveryWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def recoverMessageCausedRestart: ActorRef = namedPersistentActorWithProvidedConfig[RecoverMessageCausedRestartWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def recoverMessageCausedRestart: ActorRef = + namedPersistentActorWithProvidedConfig[RecoverMessageCausedRestartWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def deferringAsyncWithPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringAsyncWithPersistActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def deferringAsyncWithPersistActor: ActorRef = + 
namedPersistentActorWithProvidedConfig[DeferringAsyncWithPersistActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def deferringSyncWithPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringSyncWithPersistActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def deferringSyncWithPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringSyncWithPersistActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def deferringAsyncWithAsyncPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringAsyncWithAsyncPersistActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def deferringAsyncWithAsyncPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringAsyncWithAsyncPersistActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def deferringSyncWithAsyncPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringSyncWithAsyncPersistActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def deferringSyncWithAsyncPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringSyncWithAsyncPersistActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def deferringAsyncMixedCallsPPADDPADPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringAsyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def deferringAsyncMixedCallsPPADDPADPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringAsyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def deferringSyncMixedCallsPPADDPADPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringSyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def 
deferringSyncMixedCallsPPADDPADPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringSyncMixedCallsPPADDPADPersistActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def deferringAsyncWithNoPersistCallsPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringAsyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def deferringAsyncWithNoPersistCallsPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringAsyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def deferringSyncWithNoPersistCallsPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringSyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def deferringSyncWithNoPersistCallsPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringSyncWithNoPersistCallsPersistActorWithLevelDbRuntimePluginConfig]( + providedActorConfig) - override protected def deferringAsyncActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringAsyncActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def deferringAsyncActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringAsyncActorWithLevelDbRuntimePluginConfig](providedActorConfig) - override protected def deferringSyncActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringSyncActorWithLevelDbRuntimePluginConfig](providedActorConfig) + override protected def deferringSyncActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringSyncActorWithLevelDbRuntimePluginConfig](providedActorConfig) } /** * Same test suite as [[InmemPersistentActorSpec]], the only difference is that all persistent actors are using the * provided [[Config]] instead of the [[Config]] coming from the [[ActorSystem]]. 
*/ -class InmemPersistentActorWithRuntimePluginConfigSpec extends PersistentActorSpec( - PersistenceSpec.config("inmem", "InmemPersistentActorWithRuntimePluginConfigSpec") -) { +class InmemPersistentActorWithRuntimePluginConfigSpec + extends PersistentActorSpec(PersistenceSpec.config("inmem", "InmemPersistentActorWithRuntimePluginConfigSpec")) { val providedActorConfig: Config = { - ConfigFactory.parseString( - s""" + ConfigFactory + .parseString(s""" | custom.persistence.snapshot-store.local.dir = target/snapshots-InmemPersistentActorWithRuntimePluginConfigSpec/ - """.stripMargin - ).withValue( - s"custom.persistence.journal.inmem", - system.settings.config.getValue(s"akka.persistence.journal.inmem") - ).withValue( - "custom.persistence.snapshot-store.local", - system.settings.config.getValue("akka.persistence.snapshot-store.local") - ) + """.stripMargin) + .withValue(s"custom.persistence.journal.inmem", + system.settings.config.getValue(s"akka.persistence.journal.inmem")) + .withValue("custom.persistence.snapshot-store.local", + system.settings.config.getValue("akka.persistence.snapshot-store.local")) } - override protected def behavior1PersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[Behavior1PersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def behavior1PersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[Behavior1PersistentActorWithInmemRuntimePluginConfig](providedActorConfig) - override protected def behavior2PersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[Behavior2PersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def behavior2PersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[Behavior2PersistentActorWithInmemRuntimePluginConfig](providedActorConfig) - override protected def behavior3PersistentActor: ActorRef = 
namedPersistentActorWithProvidedConfig[Behavior3PersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def behavior3PersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[Behavior3PersistentActorWithInmemRuntimePluginConfig](providedActorConfig) - override protected def changeBehaviorInFirstEventHandlerPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[ChangeBehaviorInFirstEventHandlerPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def changeBehaviorInFirstEventHandlerPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ + ChangeBehaviorInFirstEventHandlerPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) - override protected def changeBehaviorInLastEventHandlerPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[ChangeBehaviorInLastEventHandlerPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def changeBehaviorInLastEventHandlerPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ChangeBehaviorInLastEventHandlerPersistentActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def changeBehaviorInCommandHandlerFirstPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[ChangeBehaviorInCommandHandlerFirstPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def changeBehaviorInCommandHandlerFirstPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ + ChangeBehaviorInCommandHandlerFirstPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) - override protected def changeBehaviorInCommandHandlerLastPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[ChangeBehaviorInCommandHandlerLastPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def changeBehaviorInCommandHandlerLastPersistentActor: ActorRef = + 
namedPersistentActorWithProvidedConfig[ + ChangeBehaviorInCommandHandlerLastPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) - override protected def snapshottingPersistentActor: ActorRef = system.actorOf(Props(classOf[SnapshottingPersistentActorWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def snapshottingPersistentActor: ActorRef = + system.actorOf( + Props(classOf[SnapshottingPersistentActorWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) - override protected def snapshottingBecomingPersistentActor: ActorRef = system.actorOf(Props(classOf[SnapshottingBecomingPersistentActorWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def snapshottingBecomingPersistentActor: ActorRef = + system.actorOf( + Props(classOf[SnapshottingBecomingPersistentActorWithInmemRuntimePluginConfig], + name, + testActor, + providedActorConfig)) - override protected def replyInEventHandlerPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[ReplyInEventHandlerPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def replyInEventHandlerPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ReplyInEventHandlerPersistentActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def anyValEventPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[AnyValEventPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def anyValEventPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[AnyValEventPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) - override protected def asyncPersistPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[AsyncPersistPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def asyncPersistPersistentActor: ActorRef = + 
namedPersistentActorWithProvidedConfig[AsyncPersistPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) - override protected def asyncPersistThreeTimesPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[AsyncPersistThreeTimesPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def asyncPersistThreeTimesPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[AsyncPersistThreeTimesPersistentActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def asyncPersistSameEventTwicePersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[AsyncPersistSameEventTwicePersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def asyncPersistSameEventTwicePersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[AsyncPersistSameEventTwicePersistentActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def persistAllNilPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[PersistAllNilPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def persistAllNilPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[PersistAllNilPersistentActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def asyncPersistAndPersistMixedSyncAsyncSyncPersistentActor: ActorRef = namedPersistentActorWithProvidedConfig[AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def asyncPersistAndPersistMixedSyncAsyncSyncPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ + AsyncPersistAndPersistMixedSyncAsyncSyncPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) - override protected def asyncPersistAndPersistMixedSyncAsyncPersistentActor: ActorRef = 
namedPersistentActorWithProvidedConfig[AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def asyncPersistAndPersistMixedSyncAsyncPersistentActor: ActorRef = + namedPersistentActorWithProvidedConfig[ + AsyncPersistAndPersistMixedSyncAsyncPersistentActorWithInmemRuntimePluginConfig](providedActorConfig) - override protected def asyncPersistHandlerCorrelationCheck: ActorRef = namedPersistentActorWithProvidedConfig[AsyncPersistHandlerCorrelationCheckWithInmemRuntimePluginConfig](providedActorConfig) + override protected def asyncPersistHandlerCorrelationCheck: ActorRef = + namedPersistentActorWithProvidedConfig[AsyncPersistHandlerCorrelationCheckWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def handleRecoveryFinishedEventPersistentActor: ActorRef = system.actorOf(Props(classOf[HandleRecoveryFinishedEventPersistentActorWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def handleRecoveryFinishedEventPersistentActor: ActorRef = + system.actorOf( + Props(classOf[HandleRecoveryFinishedEventPersistentActorWithInmemRuntimePluginConfig], + name, + testActor, + providedActorConfig)) - override protected def stressOrdering: ActorRef = namedPersistentActorWithProvidedConfig[StressOrderingWithInmemRuntimePluginConfig](providedActorConfig) + override protected def stressOrdering: ActorRef = + namedPersistentActorWithProvidedConfig[StressOrderingWithInmemRuntimePluginConfig](providedActorConfig) - override protected def stackableTestPersistentActor: ActorRef = system.actorOf(Props(classOf[StackableTestPersistentActorWithInmemRuntimePluginConfig], testActor, providedActorConfig)) + override protected def stackableTestPersistentActor: ActorRef = + system.actorOf( + Props(classOf[StackableTestPersistentActorWithInmemRuntimePluginConfig], testActor, providedActorConfig)) - override protected def multipleAndNestedPersists: ActorRef = 
system.actorOf(Props(classOf[MultipleAndNestedPersistsWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def multipleAndNestedPersists: ActorRef = + system.actorOf( + Props(classOf[MultipleAndNestedPersistsWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) - override protected def multipleAndNestedPersistAsyncs: ActorRef = system.actorOf(Props(classOf[MultipleAndNestedPersistAsyncsWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def multipleAndNestedPersistAsyncs: ActorRef = + system.actorOf( + Props(classOf[MultipleAndNestedPersistAsyncsWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) - override protected def deeplyNestedPersists(nestedPersists: Int): ActorRef = system.actorOf(Props(classOf[DeeplyNestedPersistsWithInmemRuntimePluginConfig], name, nestedPersists, testActor, providedActorConfig)) + override protected def deeplyNestedPersists(nestedPersists: Int): ActorRef = + system.actorOf( + Props(classOf[DeeplyNestedPersistsWithInmemRuntimePluginConfig], + name, + nestedPersists, + testActor, + providedActorConfig)) - override protected def deeplyNestedPersistAsyncs(nestedPersistAsyncs: Int): ActorRef = system.actorOf(Props(classOf[DeeplyNestedPersistAsyncsWithInmemRuntimePluginConfig], name, nestedPersistAsyncs, testActor, providedActorConfig)) + override protected def deeplyNestedPersistAsyncs(nestedPersistAsyncs: Int): ActorRef = + system.actorOf( + Props(classOf[DeeplyNestedPersistAsyncsWithInmemRuntimePluginConfig], + name, + nestedPersistAsyncs, + testActor, + providedActorConfig)) - override protected def nestedPersistNormalAndAsyncs: ActorRef = system.actorOf(Props(classOf[NestedPersistNormalAndAsyncsWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def nestedPersistNormalAndAsyncs: ActorRef = + system.actorOf( + Props(classOf[NestedPersistNormalAndAsyncsWithInmemRuntimePluginConfig], name, 
testActor, providedActorConfig)) - override protected def nestedPersistAsyncsAndNormal: ActorRef = system.actorOf(Props(classOf[NestedPersistAsyncsAndNormalWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def nestedPersistAsyncsAndNormal: ActorRef = + system.actorOf( + Props(classOf[NestedPersistAsyncsAndNormalWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) - override protected def nestedPersistInAsyncEnforcesStashing: ActorRef = system.actorOf(Props(classOf[NestedPersistInAsyncEnforcesStashingWithInmemRuntimePluginConfig], name, testActor, providedActorConfig)) + override protected def nestedPersistInAsyncEnforcesStashing: ActorRef = + system.actorOf( + Props(classOf[NestedPersistInAsyncEnforcesStashingWithInmemRuntimePluginConfig], + name, + testActor, + providedActorConfig)) - override protected def persistInRecovery: ActorRef = namedPersistentActorWithProvidedConfig[PersistInRecoveryWithInmemRuntimePluginConfig](providedActorConfig) + override protected def persistInRecovery: ActorRef = + namedPersistentActorWithProvidedConfig[PersistInRecoveryWithInmemRuntimePluginConfig](providedActorConfig) - override protected def recoverMessageCausedRestart: ActorRef = namedPersistentActorWithProvidedConfig[RecoverMessageCausedRestartWithInmemRuntimePluginConfig](providedActorConfig) + override protected def recoverMessageCausedRestart: ActorRef = + namedPersistentActorWithProvidedConfig[RecoverMessageCausedRestartWithInmemRuntimePluginConfig](providedActorConfig) - override protected def deferringAsyncWithPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringAsyncWithPersistActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def deferringAsyncWithPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringAsyncWithPersistActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def deferringSyncWithPersistActor: ActorRef = 
namedPersistentActorWithProvidedConfig[DeferringSyncWithPersistActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def deferringSyncWithPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringSyncWithPersistActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def deferringAsyncWithAsyncPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringAsyncWithAsyncPersistActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def deferringAsyncWithAsyncPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringAsyncWithAsyncPersistActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def deferringSyncWithAsyncPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringSyncWithAsyncPersistActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def deferringSyncWithAsyncPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringSyncWithAsyncPersistActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def deferringAsyncMixedCallsPPADDPADPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringAsyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def deferringAsyncMixedCallsPPADDPADPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringAsyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def deferringSyncMixedCallsPPADDPADPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringSyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def deferringSyncMixedCallsPPADDPADPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringSyncMixedCallsPPADDPADPersistActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected 
def deferringAsyncWithNoPersistCallsPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringAsyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def deferringAsyncWithNoPersistCallsPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringAsyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def deferringSyncWithNoPersistCallsPersistActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringSyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def deferringSyncWithNoPersistCallsPersistActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringSyncWithNoPersistCallsPersistActorWithInmemRuntimePluginConfig]( + providedActorConfig) - override protected def deferringAsyncActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringAsyncActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def deferringAsyncActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringAsyncActorWithInmemRuntimePluginConfig](providedActorConfig) - override protected def deferringSyncActor: ActorRef = namedPersistentActorWithProvidedConfig[DeferringSyncActorWithInmemRuntimePluginConfig](providedActorConfig) + override protected def deferringSyncActor: ActorRef = + namedPersistentActorWithProvidedConfig[DeferringSyncActorWithInmemRuntimePluginConfig](providedActorConfig) } diff --git a/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala b/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala index a620eeb9fc..2109492913 100644 --- a/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/PersistentActorStashingSpec.scala @@ -5,7 +5,7 @@ package akka.persistence import akka.actor.SupervisorStrategy.Resume 
-import akka.actor.{ ActorRef, OneForOneStrategy, Actor, Props } +import akka.actor.{ Actor, ActorRef, OneForOneStrategy, Props } import akka.persistence.journal.SteppingInmemJournal import akka.testkit.ImplicitSender import com.typesafe.config.Config @@ -38,7 +38,7 @@ object PersistentActorStashingSpec { class UserStashPersistentActor(name: String) extends StashExamplePersistentActor(name) { var stashed = false - val receiveCommand: Receive = unstashBehavior orElse { + val receiveCommand: Receive = unstashBehavior.orElse { case Cmd("a") if !stashed => stash(); stashed = true case Cmd("a") => sender() ! "a" @@ -52,39 +52,48 @@ object PersistentActorStashingSpec { class UserStashWithinHandlerPersistentActor(name: String) extends UserStashPersistentActor(name: String) { override def unstashBehavior: Receive = { - case Cmd("c") => persist(Evt("c")) { evt => sender() ! evt.data; unstashAll() } + case Cmd("c") => + persist(Evt("c")) { evt => + sender() ! evt.data; unstashAll() + } } } class UserStashManyPersistentActor(name: String) extends StashExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior orElse { - case Cmd("a") => persist(Evt("a")) { evt => - updateState(evt) - context.become(processC) - } + val receiveCommand: Receive = commonBehavior.orElse { + case Cmd("a") => + persist(Evt("a")) { evt => + updateState(evt) + context.become(processC) + } case Cmd("b-1") => persist(Evt("b-1"))(updateState) case Cmd("b-2") => persist(Evt("b-2"))(updateState) } - val processC: Receive = unstashBehavior orElse { + val processC: Receive = unstashBehavior.orElse { case other => stash() } def unstashBehavior: Receive = { case Cmd("c") => - persist(Evt("c")) { evt => updateState(evt); context.unbecome() } + persist(Evt("c")) { evt => + updateState(evt); context.unbecome() + } unstashAll() } } class UserStashWithinHandlerManyPersistentActor(name: String) extends UserStashManyPersistentActor(name) { override def unstashBehavior: Receive = { - case Cmd("c") 
=> persist(Evt("c")) { evt => updateState(evt); context.unbecome(); unstashAll() } + case Cmd("c") => + persist(Evt("c")) { evt => + updateState(evt); context.unbecome(); unstashAll() + } } } class UserStashFailurePersistentActor(name: String) extends StashExamplePersistentActor(name) { - val receiveCommand: Receive = commonBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse { case Cmd(data) => if (data == "b-2") throw new TestException("boom") persist(Evt(data)) { evt => @@ -93,7 +102,7 @@ object PersistentActorStashingSpec { } } - val otherCommandHandler: Receive = unstashBehavior orElse { + val otherCommandHandler: Receive = unstashBehavior.orElse { case other => stash() } @@ -107,7 +116,8 @@ object PersistentActorStashingSpec { } } - class UserStashWithinHandlerFailureCallbackPersistentActor(name: String) extends UserStashFailurePersistentActor(name) { + class UserStashWithinHandlerFailureCallbackPersistentActor(name: String) + extends UserStashFailurePersistentActor(name) { override def unstashBehavior: Receive = { case Cmd("c") => persist(Evt("c")) { evt => @@ -121,7 +131,7 @@ object PersistentActorStashingSpec { class AsyncStashingPersistentActor(name: String) extends StashExamplePersistentActor(name) { var stashed = false - val receiveCommand: Receive = commonBehavior orElse unstashBehavior orElse { + val receiveCommand: Receive = commonBehavior.orElse(unstashBehavior).orElse { case Cmd("a") => persistAsync(Evt("a"))(updateState) case Cmd("b") if !stashed => stash(); stashed = true @@ -135,7 +145,10 @@ object PersistentActorStashingSpec { class AsyncStashingWithinHandlerPersistentActor(name: String) extends AsyncStashingPersistentActor(name) { override def unstashBehavior: Receive = { - case Cmd("c") => persistAsync(Evt("c")) { evt => updateState(evt); unstashAll() } + case Cmd("c") => + persistAsync(Evt("c")) { evt => + updateState(evt); unstashAll() + } } } @@ -167,15 +180,15 @@ object PersistentActorStashingSpec { case Cmd("a") => 
persist(Evt("a"))(stashWithinHandler) case Cmd("b") => persistAsync(Evt("b"))(stashWithinHandler) case Cmd("c") => - persist(Evt("x")) { _ => } + persist(Evt("x")) { _ => + } deferAsync(Evt("c"))(stashWithinHandler) } } } -abstract class PersistentActorStashingSpec(config: Config) extends PersistenceSpec(config) - with ImplicitSender { +abstract class PersistentActorStashingSpec(config: Config) extends PersistenceSpec(config) with ImplicitSender { import PersistentActorStashingSpec._ def stash[T <: NamedPersistentActor: ClassTag](): Unit = { @@ -194,10 +207,10 @@ abstract class PersistentActorStashingSpec(config: Config) extends PersistenceSp "support user stash operations with several stashed messages" in { val persistentActor = namedPersistentActor[T] val n = 10 - val cmds = 1 to n flatMap (_ => List(Cmd("a"), Cmd("b-1"), Cmd("b-2"), Cmd("c"))) - val evts = 1 to n flatMap (_ => List("a", "c", "b-1", "b-2")) + val cmds = (1 to n).flatMap(_ => List(Cmd("a"), Cmd("b-1"), Cmd("b-2"), Cmd("c"))) + val evts = (1 to n).flatMap(_ => List("a", "c", "b-1", "b-2")) - cmds foreach (persistentActor ! _) + cmds.foreach(persistentActor ! _) persistentActor ! GetState expectMsg(evts.toList) } @@ -206,9 +219,9 @@ abstract class PersistentActorStashingSpec(config: Config) extends PersistenceSp def stashUnderFailures[T <: NamedPersistentActor: ClassTag](): Unit = { "support user stash operations under failures" in { val persistentActor = namedPersistentActor[T] - val bs = 1 to 10 map ("b-" + _) + val bs = (1 to 10).map("b-" + _) persistentActor ! Cmd("a") - bs foreach (persistentActor ! Cmd(_)) + bs.foreach(persistentActor ! Cmd(_)) persistentActor ! Cmd("c") persistentActor ! 
GetState expectMsg(List("a", "c") ++ bs.filter(_ != "b-2")) @@ -216,15 +229,15 @@ abstract class PersistentActorStashingSpec(config: Config) extends PersistenceSp } "Stashing in a persistent actor" must { - behave like stash[UserStashPersistentActor]() - behave like stashWithSeveralMessages[UserStashManyPersistentActor]() - behave like stashUnderFailures[UserStashFailurePersistentActor]() + behave.like(stash[UserStashPersistentActor]()) + behave.like(stashWithSeveralMessages[UserStashManyPersistentActor]()) + behave.like(stashUnderFailures[UserStashFailurePersistentActor]()) } "Stashing(unstashAll called in handler) in a persistent actor" must { - behave like stash[UserStashWithinHandlerPersistentActor]() - behave like stashWithSeveralMessages[UserStashWithinHandlerManyPersistentActor]() - behave like stashUnderFailures[UserStashWithinHandlerFailureCallbackPersistentActor]() + behave.like(stash[UserStashWithinHandlerPersistentActor]()) + behave.like(stashWithSeveralMessages[UserStashWithinHandlerManyPersistentActor]()) + behave.like(stashUnderFailures[UserStashWithinHandlerFailureCallbackPersistentActor]()) } "Stashing(stash called in handler) in a persistent actor" must { @@ -246,9 +259,12 @@ abstract class PersistentActorStashingSpec(config: Config) extends PersistenceSp } } -class SteppingInMemPersistentActorStashingSpec extends PersistenceSpec( - SteppingInmemJournal.config("persistence-stash").withFallback(PersistenceSpec.config("stepping-inmem", "SteppingInMemPersistentActorStashingSpec"))) - with ImplicitSender { +class SteppingInMemPersistentActorStashingSpec + extends PersistenceSpec( + SteppingInmemJournal + .config("persistence-stash") + .withFallback(PersistenceSpec.config("stepping-inmem", "SteppingInMemPersistentActorStashingSpec"))) + with ImplicitSender { import PersistentActorStashingSpec._ def stash[T <: NamedPersistentActor: ClassTag](): Unit = { @@ -281,14 +297,16 @@ class SteppingInMemPersistentActorStashingSpec extends PersistenceSpec( } 
"Stashing in a persistent actor mixed with persistAsync" must { - behave like stash[AsyncStashingPersistentActor]() + behave.like(stash[AsyncStashingPersistentActor]()) } "Stashing(unstashAll called in handler) in a persistent actor mixed with persistAsync" must { - behave like stash[AsyncStashingWithinHandlerPersistentActor]() + behave.like(stash[AsyncStashingWithinHandlerPersistentActor]()) } } -class LeveldbPersistentActorStashingSpec extends PersistentActorStashingSpec(PersistenceSpec.config("leveldb", "LeveldbPersistentActorStashingSpec")) -class InmemPersistentActorStashingSpec extends PersistentActorStashingSpec(PersistenceSpec.config("inmem", "InmemPersistentActorStashingSpec")) +class LeveldbPersistentActorStashingSpec + extends PersistentActorStashingSpec(PersistenceSpec.config("leveldb", "LeveldbPersistentActorStashingSpec")) +class InmemPersistentActorStashingSpec + extends PersistentActorStashingSpec(PersistenceSpec.config("inmem", "InmemPersistentActorStashingSpec")) diff --git a/akka-persistence/src/test/scala/akka/persistence/RecoveryPermitterSpec.scala b/akka-persistence/src/test/scala/akka/persistence/RecoveryPermitterSpec.scala index eaedf6ca44..fc9c91518d 100644 --- a/akka-persistence/src/test/scala/akka/persistence/RecoveryPermitterSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/RecoveryPermitterSpec.scala @@ -21,7 +21,7 @@ object RecoveryPermitterSpec { Props(new TestPersistentActor(name, probe, throwFromRecoveryCompleted)) class TestPersistentActor(name: String, probe: ActorRef, throwFromRecoveryCompleted: Boolean) - extends PersistentActor { + extends PersistentActor { override def persistenceId = name @@ -43,8 +43,7 @@ object RecoveryPermitterSpec { } -class RecoveryPermitterSpec extends PersistenceSpec(ConfigFactory.parseString( - s""" +class RecoveryPermitterSpec extends PersistenceSpec(ConfigFactory.parseString(s""" akka.persistence.max-concurrent-recoveries = 3 akka.persistence.journal.plugin = 
"akka.persistence.journal.inmem" akka.actor.warn-about-java-serializer-usage = off @@ -188,4 +187,3 @@ class RecoveryPermitterSpec extends PersistenceSpec(ConfigFactory.parseString( } } - diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala index 51e6bae73e..4f2ec8d9b3 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotDirectoryFailureSpec.scala @@ -12,8 +12,7 @@ import akka.testkit.{ AkkaSpec, EventFilter, ImplicitSender } object SnapshotDirectoryFailureSpec { val inUseSnapshotPath = "target/inUseSnapshotPath" - class TestPersistentActor(name: String, probe: ActorRef) extends PersistentActor - with TurnOffRecoverOnStart { + class TestPersistentActor(name: String, probe: ActorRef) extends PersistentActor with TurnOffRecoverOnStart { override def persistenceId: String = name @@ -29,10 +28,14 @@ object SnapshotDirectoryFailureSpec { } } -class SnapshotDirectoryFailureSpec extends AkkaSpec(PersistenceSpec.config("leveldb", "SnapshotDirectoryFailureSpec", extraConfig = Some( - s""" +class SnapshotDirectoryFailureSpec + extends AkkaSpec( + PersistenceSpec.config("leveldb", + "SnapshotDirectoryFailureSpec", + extraConfig = Some(s""" akka.persistence.snapshot-store.local.dir = "${SnapshotDirectoryFailureSpec.inUseSnapshotPath}" - """))) with ImplicitSender { + """))) + with ImplicitSender { import SnapshotDirectoryFailureSpec._ diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala index 0af56a700a..9b02f08b64 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotFailureRobustnessSpec.scala @@ -95,12 
+95,17 @@ object SnapshotFailureRobustnessSpec { } } -class SnapshotFailureRobustnessSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", "SnapshotFailureRobustnessSpec", serialization = "off", extraConfig = Some( - s""" +class SnapshotFailureRobustnessSpec + extends PersistenceSpec( + PersistenceSpec.config("leveldb", + "SnapshotFailureRobustnessSpec", + serialization = "off", + extraConfig = Some(s""" akka.persistence.snapshot-store.local.class = "akka.persistence.SnapshotFailureRobustnessSpec$$FailingLocalSnapshotStore" akka.persistence.snapshot-store.local-delete-fail = $${akka.persistence.snapshot-store.local} akka.persistence.snapshot-store.local-delete-fail.class = "akka.persistence.SnapshotFailureRobustnessSpec$$DeleteFailingLocalSnapshotStore" - """))) with ImplicitSender { + """))) + with ImplicitSender { import SnapshotFailureRobustnessSpec._ @@ -114,8 +119,8 @@ class SnapshotFailureRobustnessSpec extends PersistenceSpec(PersistenceSpec.conf expectMsg(1) sPersistentActor ! 
Cmd("kablama") expectMsg(2) - system.eventStream.publish(TestEvent.Mute( - EventFilter[java.io.NotSerializableException](start = "Error loading snapshot"))) + system.eventStream.publish( + TestEvent.Mute(EventFilter[java.io.NotSerializableException](start = "Error loading snapshot"))) system.eventStream.subscribe(testActor, classOf[Logging.Error]) try { val lPersistentActor = system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, testActor)) @@ -130,8 +135,7 @@ class SnapshotFailureRobustnessSpec extends PersistenceSpec(PersistenceSpec.conf expectNoMsg(1 second) } finally { system.eventStream.unsubscribe(testActor, classOf[Logging.Error]) - system.eventStream.publish(TestEvent.UnMute( - EventFilter.error(start = "Error loading snapshot ["))) + system.eventStream.publish(TestEvent.UnMute(EventFilter.error(start = "Error loading snapshot ["))) } } @@ -149,10 +153,10 @@ class SnapshotFailureRobustnessSpec extends PersistenceSpec(PersistenceSpec.conf expectMsg(3) sPersistentActor ! 
Cmd("boom") expectMsg(4) - system.eventStream.publish(TestEvent.Mute( - EventFilter[java.io.NotSerializableException](start = "Error loading snapshot"))) - system.eventStream.publish(TestEvent.Mute( - EventFilter[java.io.NotSerializableException](start = "Persistence failure"))) + system.eventStream.publish( + TestEvent.Mute(EventFilter[java.io.NotSerializableException](start = "Error loading snapshot"))) + system.eventStream.publish( + TestEvent.Mute(EventFilter[java.io.NotSerializableException](start = "Persistence failure"))) system.eventStream.subscribe(testActor, classOf[Logging.Error]) try { val lPersistentActor = system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, testActor)) @@ -164,8 +168,7 @@ class SnapshotFailureRobustnessSpec extends PersistenceSpec(PersistenceSpec.conf expectTerminated(lPersistentActor) } finally { system.eventStream.unsubscribe(testActor, classOf[Logging.Error]) - system.eventStream.publish(TestEvent.UnMute( - EventFilter.error(start = "Error loading snapshot ["))) + system.eventStream.publish(TestEvent.UnMute(EventFilter.error(start = "Error loading snapshot ["))) } } diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotRecoveryLocalStoreSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotRecoveryLocalStoreSpec.scala index bb0b9b6877..c4fb4f13be 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotRecoveryLocalStoreSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotRecoveryLocalStoreSpec.scala @@ -25,8 +25,9 @@ object SnapshotRecoveryLocalStoreSpec { } } - class LoadSnapshotTestPersistentActor(name: String, probe: ActorRef) extends NamedPersistentActor(name) - with ActorLogging { + class LoadSnapshotTestPersistentActor(name: String, probe: ActorRef) + extends NamedPersistentActor(name) + with ActorLogging { override def recovery = Recovery(toSequenceNr = 0) @@ -39,7 +40,9 @@ object SnapshotRecoveryLocalStoreSpec { } } -class 
SnapshotRecoveryLocalStoreSpec extends PersistenceSpec(PersistenceSpec.config("inmem", "SnapshotRecoveryLocalStoreSpec")) with ImplicitSender { +class SnapshotRecoveryLocalStoreSpec + extends PersistenceSpec(PersistenceSpec.config("inmem", "SnapshotRecoveryLocalStoreSpec")) + with ImplicitSender { import SnapshotRecoveryLocalStoreSpec._ diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala index b5b5194350..53abff2577 100644 --- a/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotSerializationSpec.scala @@ -4,7 +4,7 @@ package akka.persistence -import akka.actor.{ Props, ActorRef } +import akka.actor.{ ActorRef, Props } import akka.serialization.Serializer import akka.testkit.{ ImplicitSender } import java.io._ @@ -64,8 +64,13 @@ object SnapshotSerializationSpec { } -class SnapshotSerializationSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", "SnapshotSerializationSpec", serialization = "off", extraConfig = Some( - """ +class SnapshotSerializationSpec + extends PersistenceSpec( + PersistenceSpec.config("leveldb", + "SnapshotSerializationSpec", + serialization = "off", + extraConfig = + Some(""" akka.actor { serializers { my-snapshot = "akka.persistence.SnapshotSerializationSpec$MySerializer" @@ -74,7 +79,8 @@ class SnapshotSerializationSpec extends PersistenceSpec(PersistenceSpec.config(" "akka.persistence.SnapshotSerializationSpec$SerializationMarker" = my-snapshot } } - """))) with ImplicitSender { + """))) + with ImplicitSender { import SnapshotSerializationSpec._ import SnapshotSerializationSpec.XXXXXXXXXXXXXXXXXXXX._ diff --git a/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala b/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala index 027dd9b1db..ac85750099 100644 --- 
a/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/SnapshotSpec.scala @@ -29,7 +29,8 @@ object SnapshotSpec { } } - class LoadSnapshotTestPersistentActor(name: String, _recovery: Recovery, probe: ActorRef) extends NamedPersistentActor(name) { + class LoadSnapshotTestPersistentActor(name: String, _recovery: Recovery, probe: ActorRef) + extends NamedPersistentActor(name) { override def recovery: Recovery = _recovery override def receiveRecover: Receive = { @@ -49,7 +50,8 @@ object SnapshotSpec { } } - class IgnoringSnapshotTestPersistentActor(name: String, _recovery: Recovery, probe: ActorRef) extends NamedPersistentActor(name) { + class IgnoringSnapshotTestPersistentActor(name: String, _recovery: Recovery, probe: ActorRef) + extends NamedPersistentActor(name) { override def recovery: Recovery = _recovery override def receiveRecover: Receive = { @@ -71,9 +73,9 @@ object SnapshotSpec { final case class DeleteN(criteria: SnapshotSelectionCriteria) class DeleteSnapshotTestPersistentActor(name: String, _recovery: Recovery, probe: ActorRef) - extends LoadSnapshotTestPersistentActor(name, _recovery, probe) { + extends LoadSnapshotTestPersistentActor(name, _recovery, probe) { - override def receiveCommand = receiveDelete orElse super.receiveCommand + override def receiveCommand = receiveDelete.orElse(super.receiveCommand) def receiveDelete: Receive = { case Delete1(metadata) => deleteSnapshot(metadata.sequenceNr) case DeleteN(criteria) => deleteSnapshots(criteria) @@ -116,7 +118,8 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", "Sn expectMsg(RecoveryCompleted) } "recover completely if snapshot is not handled" in { - val persistentActor = system.actorOf(Props(classOf[IgnoringSnapshotTestPersistentActor], name, Recovery(), testActor)) + val persistentActor = + system.actorOf(Props(classOf[IgnoringSnapshotTestPersistentActor], name, Recovery(), testActor)) val 
persistenceId = name expectMsg("a-1") @@ -128,7 +131,8 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", "Sn expectMsg(RecoveryCompleted) } "recover state starting from the most recent snapshot matching an upper sequence number bound" in { - val persistentActor = system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, Recovery(toSequenceNr = 3), testActor)) + val persistentActor = + system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, Recovery(toSequenceNr = 3), testActor)) val persistenceId = name expectMsgPF() { @@ -140,7 +144,8 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", "Sn expectMsg(RecoveryCompleted) } "recover state starting from the most recent snapshot matching an upper sequence number bound (without further replay)" in { - val persistentActor = system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, Recovery(toSequenceNr = 4), testActor)) + val persistentActor = + system.actorOf(Props(classOf[LoadSnapshotTestPersistentActor], name, Recovery(toSequenceNr = 4), testActor)) val persistenceId = name persistentActor ! 
"done" @@ -196,7 +201,8 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", "Sn // recover persistentActor from 3rd snapshot and then delete snapshot val recovery = Recovery(toSequenceNr = 4) - val persistentActor1 = system.actorOf(Props(classOf[DeleteSnapshotTestPersistentActor], name, recovery, testActor)) + val persistentActor1 = + system.actorOf(Props(classOf[DeleteSnapshotTestPersistentActor], name, recovery, testActor)) val persistenceId = name system.eventStream.subscribe(deleteProbe.ref, classOf[DeleteSnapshot]) @@ -216,7 +222,8 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", "Sn expectMsgPF() { case m @ DeleteSnapshotSuccess(SnapshotMetadata(`persistenceId`, 4, _)) => } // recover persistentActor from 2nd snapshot (3rd was deleted) plus replayed messages - val persistentActor2 = system.actorOf(Props(classOf[DeleteSnapshotTestPersistentActor], name, recovery, testActor)) + val persistentActor2 = + system.actorOf(Props(classOf[DeleteSnapshotTestPersistentActor], name, recovery, testActor)) expectMsgPF(hint = "" + SnapshotOffer(SnapshotMetadata(`persistenceId`, 2, 0), null)) { case SnapshotOffer(md @ SnapshotMetadata(`persistenceId`, 2, _), state) => @@ -231,7 +238,8 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", "Sn val deleteProbe = TestProbe() val recovery = Recovery(toSequenceNr = 4) - val persistentActor1 = system.actorOf(Props(classOf[DeleteSnapshotTestPersistentActor], name, recovery, testActor)) + val persistentActor1 = + system.actorOf(Props(classOf[DeleteSnapshotTestPersistentActor], name, recovery, testActor)) val persistenceId = name system.eventStream.subscribe(deleteProbe.ref, classOf[DeleteSnapshots]) @@ -248,7 +256,8 @@ class SnapshotSpec extends PersistenceSpec(PersistenceSpec.config("leveldb", "Sn expectMsgPF() { case DeleteSnapshotsSuccess(`criteria`) => } // recover persistentActor from replayed messages (all snapshots deleted) - val 
persistentActor2 = system.actorOf(Props(classOf[DeleteSnapshotTestPersistentActor], name, recovery, testActor)) + val persistentActor2 = + system.actorOf(Props(classOf[DeleteSnapshotTestPersistentActor], name, recovery, testActor)) expectMsg("a-1") expectMsg("b-2") diff --git a/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala b/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala index 83d3ac8210..b9a79b8d1e 100644 --- a/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/TimerPersistentActorSpec.scala @@ -79,8 +79,7 @@ object TimerPersistentActorSpec { } -class TimerPersistentActorSpec extends PersistenceSpec(ConfigFactory.parseString( - s""" +class TimerPersistentActorSpec extends PersistenceSpec(ConfigFactory.parseString(s""" akka.persistence.journal.plugin = "akka.persistence.journal.inmem" akka.actor.warn-about-java-serializer-usage = off """)) with ImplicitSender { @@ -117,4 +116,3 @@ class TimerPersistentActorSpec extends PersistenceSpec(ConfigFactory.parseString } } - diff --git a/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala b/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala index f1324a9341..fb280208c3 100644 --- a/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/fsm/PersistentFSMSpec.scala @@ -31,9 +31,9 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) watch(fsmRef) fsmRef ! SubscribeTransitionCallBack(testActor) - val shirt = Item("1", "Shirt", 59.99F) - val shoes = Item("2", "Shoes", 89.99F) - val coat = Item("3", "Coat", 119.99F) + val shirt = Item("1", "Shirt", 59.99f) + val shoes = Item("2", "Shoes", 89.99f) + val coat = Item("3", "Coat", 119.99f) fsmRef ! GetCurrentCart fsmRef ! 
AddItem(shirt) @@ -67,7 +67,7 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) watch(fsmRef) fsmRef ! SubscribeTransitionCallBack(testActor) - val shirt = Item("1", "Shirt", 59.99F) + val shirt = Item("1", "Shirt", 59.99f) // this isn't quite when the state transition happens, but close enough val before = System.nanoTime @@ -92,9 +92,9 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) watch(fsmRef) fsmRef ! SubscribeTransitionCallBack(testActor) - val shirt = Item("1", "Shirt", 59.99F) - val shoes = Item("2", "Shoes", 89.99F) - val coat = Item("3", "Coat", 119.99F) + val shirt = Item("1", "Shirt", 59.99f) + val shoes = Item("2", "Shoes", 89.99f) + val coat = Item("3", "Coat", 119.99f) fsmRef ! GetCurrentCart fsmRef ! AddItem(shirt) @@ -144,9 +144,9 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) watch(fsmRef) fsmRef ! SubscribeTransitionCallBack(testActor) - val shirt = Item("1", "Shirt", 59.99F) - val shoes = Item("2", "Shoes", 89.99F) - val coat = Item("3", "Coat", 119.99F) + val shirt = Item("1", "Shirt", 59.99f) + val shoes = Item("2", "Shoes", 89.99f) + val coat = Item("3", "Coat", 119.99f) fsmRef ! AddItem(shirt) fsmRef ! AddItem(shoes) @@ -169,9 +169,9 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) watch(fsmRef) fsmRef ! SubscribeTransitionCallBack(testActor) - val shirt = Item("1", "Shirt", 59.99F) - val shoes = Item("2", "Shoes", 89.99F) - val coat = Item("3", "Coat", 119.99F) + val shirt = Item("1", "Shirt", 59.99f) + val shoes = Item("2", "Shoes", 89.99f) + val coat = Item("3", "Coat", 119.99f) fsmRef ! AddItem(shirt) fsmRef ! AddItem(shoes) @@ -191,7 +191,7 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) watch(fsmRef) fsmRef ! SubscribeTransitionCallBack(testActor) - val shirt = Item("1", "Shirt", 59.99F) + val shirt = Item("1", "Shirt", 59.99f) fsmRef ! 
AddItem(shirt) @@ -247,9 +247,9 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) val fsmRef = system.actorOf(WebStoreCustomerFSM.props(persistenceId, dummyReportActorRef)) watch(fsmRef) - val shirt = Item("1", "Shirt", 59.99F) - val shoes = Item("2", "Shoes", 89.99F) - val coat = Item("3", "Coat", 119.99F) + val shirt = Item("1", "Shirt", 59.99f) + val shoes = Item("2", "Shoes", 89.99f) + val coat = Item("3", "Coat", 119.99f) fsmRef ! GetCurrentCart fsmRef ! AddItem(shirt) @@ -270,13 +270,13 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) val persistentEventsStreamer = system.actorOf(PersistentEventsStreamer.props(persistenceId, testActor)) - expectMsg(ItemAdded(Item("1", "Shirt", 59.99F))) + expectMsg(ItemAdded(Item("1", "Shirt", 59.99f))) expectMsgType[StateChangeEvent] //because a timeout is defined, State Change is persisted - expectMsg(ItemAdded(Item("2", "Shoes", 89.99F))) + expectMsg(ItemAdded(Item("2", "Shoes", 89.99f))) expectMsgType[StateChangeEvent] //because a timeout is defined, State Change is persisted - expectMsg(ItemAdded(Item("3", "Coat", 119.99F))) + expectMsg(ItemAdded(Item("3", "Coat", 119.99f))) expectMsgType[StateChangeEvent] //because a timeout is defined, State Change is persisted watch(persistentEventsStreamer) @@ -297,9 +297,9 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) val fsmRef = system.actorOf(WebStoreCustomerFSM.props(persistenceId, dummyReportActorRef)) watch(fsmRef) - val shirt = Item("1", "Shirt", 59.99F) - val shoes = Item("2", "Shoes", 89.99F) - val coat = Item("3", "Coat", 119.99F) + val shirt = Item("1", "Shirt", 59.99f) + val shoes = Item("2", "Shoes", 89.99f) + val coat = Item("3", "Coat", 119.99f) fsmRef ! GetCurrentCart fsmRef ! 
AddItem(shirt) @@ -361,10 +361,8 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) } "save periodical snapshots if akka.persistence.fsm.enable-snapshot-after = on" in { - val sys2 = ActorSystem( - "PersistentFsmSpec2", - ConfigFactory - .parseString(""" + val sys2 = ActorSystem("PersistentFsmSpec2", + ConfigFactory.parseString(""" akka.persistence.fsm.enable-snapshot-after = on akka.persistence.fsm.snapshot-after = 3 """).withFallback(PersistenceSpec.config("leveldb", "PersistentFSMSpec2"))) @@ -387,10 +385,10 @@ abstract class PersistentFSMSpec(config: Config) extends PersistenceSpec(config) probe.expectMsg("SeqNo=8, StateData=List(10, 10, 10, 10, 3, 2, 1)") } finally { - val storageLocations = List( - "akka.persistence.journal.leveldb.dir", - "akka.persistence.journal.leveldb-shared.store.dir", - "akka.persistence.snapshot-store.local.dir").map(s => new File(sys2.settings.config.getString(s))) + val storageLocations = + List("akka.persistence.journal.leveldb.dir", + "akka.persistence.journal.leveldb-shared.store.dir", + "akka.persistence.snapshot-store.local.dir").map(s => new File(sys2.settings.config.getString(s))) shutdown(sys2) storageLocations.foreach(FileUtils.deleteDirectory) } @@ -452,7 +450,9 @@ object PersistentFSMSpec { case class PurchaseWasMade(items: Seq[Item]) extends ReportEvent case object ShoppingCardDiscarded extends ReportEvent - class SimpleTransitionFSM(_persistenceId: String, reportActor: ActorRef)(implicit val domainEventClassTag: ClassTag[DomainEvent]) extends PersistentFSM[UserState, ShoppingCart, DomainEvent] { + class SimpleTransitionFSM(_persistenceId: String, reportActor: ActorRef)( + implicit val domainEventClassTag: ClassTag[DomainEvent]) + extends PersistentFSM[UserState, ShoppingCart, DomainEvent] { override val persistenceId = _persistenceId startWith(LookingAround, EmptyShoppingCart) @@ -474,8 +474,9 @@ object PersistentFSMSpec { Props(new SimpleTransitionFSM(persistenceId, reportActor)) } - 
class WebStoreCustomerFSM(_persistenceId: String, reportActor: ActorRef)(implicit val domainEventClassTag: ClassTag[DomainEvent]) - extends PersistentFSM[UserState, ShoppingCart, DomainEvent] { + class WebStoreCustomerFSM(_persistenceId: String, reportActor: ActorRef)( + implicit val domainEventClassTag: ClassTag[DomainEvent]) + extends PersistentFSM[UserState, ShoppingCart, DomainEvent] { override def persistenceId = _persistenceId @@ -484,17 +485,17 @@ object PersistentFSMSpec { when(LookingAround) { case Event(AddItem(item), _) => - goto(Shopping) applying ItemAdded(item) forMax (1 seconds) + goto(Shopping).applying(ItemAdded(item)).forMax(1 seconds) case Event(GetCurrentCart, data) => - stay replying data + stay.replying(data) } when(Shopping) { case Event(AddItem(item), _) => - stay applying ItemAdded(item) forMax (1 seconds) + stay.applying(ItemAdded(item)).forMax(1 seconds) case Event(Buy, _) => //#customer-andthen-example - goto(Paid) applying OrderExecuted andThen { + goto(Paid).applying(OrderExecuted).andThen { case NonEmptyShoppingCart(items) => reportActor ! PurchaseWasMade(items) //#customer-andthen-example @@ -505,23 +506,23 @@ object PersistentFSMSpec { //#customer-andthen-example case Event(Leave, _) => //#customer-snapshot-example - stop applying OrderDiscarded andThen { + stop.applying(OrderDiscarded).andThen { case _ => reportActor ! ShoppingCardDiscarded saveStateSnapshot() } //#customer-snapshot-example case Event(GetCurrentCart, data) => - stay replying data + stay.replying(data) case Event(StateTimeout, _) => - goto(Inactive) forMax (2 seconds) + goto(Inactive).forMax(2 seconds) } when(Inactive) { case Event(AddItem(item), _) => - goto(Shopping) applying ItemAdded(item) forMax (1 seconds) + goto(Shopping).applying(ItemAdded(item)).forMax(1 seconds) case Event(StateTimeout, _) => - stop applying OrderDiscarded andThen { + stop.applying(OrderDiscarded).andThen { case _ => reportActor ! 
ShoppingCardDiscarded } } @@ -529,7 +530,7 @@ object PersistentFSMSpec { when(Paid) { case Event(Leave, _) => stop() case Event(GetCurrentCart, data) => - stay replying data + stay.replying(data) } //#customer-fsm-body @@ -579,8 +580,9 @@ object PersistentFSMSpec { def props(probe: ActorRef) = Props(new TimeoutFSM(probe)) } - class TimeoutFSM(probe: ActorRef)(implicit val domainEventClassTag: ClassTag[String]) extends Actor - with PersistentFSM[TimeoutFSM.State, String, String] { + class TimeoutFSM(probe: ActorRef)(implicit val domainEventClassTag: ClassTag[String]) + extends Actor + with PersistentFSM[TimeoutFSM.State, String, String] { import TimeoutFSM._ override def applyEvent(domainEvent: String, currentData: String): String = "whatever" @@ -595,7 +597,7 @@ object PersistentFSMSpec { case Event(OverrideTimeoutToInf, _) => probe ! OverrideTimeoutToInf - stay() forMax Duration.Inf + stay().forMax(Duration.Inf) } } @@ -611,7 +613,8 @@ object PersistentFSMSpec { } class SnapshotFSM(probe: ActorRef)(implicit val domainEventClassTag: ClassTag[SnapshotFSMEvent]) - extends Actor with PersistentFSM[SnapshotFSMState, List[Int], SnapshotFSMEvent] { + extends Actor + with PersistentFSM[SnapshotFSMState, List[Int], SnapshotFSMEvent] { override def persistenceId: String = "snapshot-fsm-test" @@ -623,7 +626,7 @@ object PersistentFSMSpec { when(PersistSingleAtOnce) { case Event(i: Int, _) => - stay applying IntAdded(i) + stay.applying(IntAdded(i)) case Event("4x", _) => goto(Persist4xAtOnce) case Event(SaveSnapshotSuccess(metadata), _) => @@ -633,7 +636,7 @@ object PersistentFSMSpec { when(Persist4xAtOnce) { case Event(i: Int, _) => - stay applying (IntAdded(i), IntAdded(i), IntAdded(i), IntAdded(i)) + stay.applying(IntAdded(i), IntAdded(i), IntAdded(i), IntAdded(i)) case Event(SaveSnapshotSuccess(metadata), _) => probe ! 
s"SeqNo=${metadata.sequenceNr}, StateData=${stateData}" stay() diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala b/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala index ee0479b876..d0bd41d94b 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/InmemEventAdaptersSpec.scala @@ -10,8 +10,7 @@ import com.typesafe.config.ConfigFactory class InmemEventAdaptersSpec extends AkkaSpec { - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" |akka.persistence.journal { | plugin = "akka.persistence.journal.inmem" | @@ -69,8 +68,7 @@ class InmemEventAdaptersSpec extends AkkaSpec { } "fail with useful message when binding to not defined adapter" in { - val badConfig = ConfigFactory.parseString( - """ + val badConfig = ConfigFactory.parseString(""" |akka.persistence.journal.inmem { | event-adapter-bindings { | "java.lang.Integer" = undefined-adapter @@ -107,10 +105,8 @@ class InmemEventAdaptersSpec extends AkkaSpec { // combined-read-side only adapter val r: EventAdapter = adapters.get(classOf[ReadMeTwiceEvent]) - r.fromJournal(r.toJournal(ReadMeTwiceEvent()), "").events.map(_.toString) shouldBe Seq( - "from-ReadMeTwiceEvent()", - "again-ReadMeTwiceEvent()" - ) + r.fromJournal(r.toJournal(ReadMeTwiceEvent()), "").events.map(_.toString) shouldBe Seq("from-ReadMeTwiceEvent()", + "again-ReadMeTwiceEvent()") } } @@ -122,12 +118,9 @@ abstract class BaseTestAdapter extends EventAdapter { override def manifest(event: Any): String = "" } -class ExampleEventAdapter extends BaseTestAdapter { -} -class MarkerInterfaceAdapter extends BaseTestAdapter { -} -class PreciseAdapter extends BaseTestAdapter { -} +class ExampleEventAdapter extends BaseTestAdapter {} +class MarkerInterfaceAdapter extends BaseTestAdapter {} +class PreciseAdapter extends BaseTestAdapter 
{} case class ReadMeEvent() case class ReadMeTwiceEvent() @@ -149,4 +142,3 @@ class WriterAdapter extends WriteEventAdapter { trait EventMarkerInterface final case class SampleEvent() extends EventMarkerInterface final case class PreciseAdapterEvent() extends EventMarkerInterface - diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/ReplayFilterSpec.scala b/akka-persistence/src/test/scala/akka/persistence/journal/ReplayFilterSpec.scala index a50cbb9916..e1bc9f1bcf 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/ReplayFilterSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/ReplayFilterSpec.scala @@ -11,7 +11,7 @@ import akka.persistence.PersistentRepr class ReplayFilterSpec extends AkkaSpec with ImplicitSender { import JournalProtocol._ - import ReplayFilter.{ Warn, Fail, RepairByDiscardOld } + import ReplayFilter.{ Fail, RepairByDiscardOld, Warn } val writerA = "writer-A" val writerB = "writer-B" @@ -28,8 +28,9 @@ class ReplayFilterSpec extends AkkaSpec with ImplicitSender { "ReplayFilter in RepairByDiscardOld mode" must { "pass on all replayed messages and then stop" in { - val filter = system.actorOf(ReplayFilter.props( - testActor, mode = RepairByDiscardOld, windowSize = 2, maxOldWriters = 10, debugEnabled = false)) + val filter = system.actorOf( + ReplayFilter + .props(testActor, mode = RepairByDiscardOld, windowSize = 2, maxOldWriters = 10, debugEnabled = false)) filter ! m1 filter ! m2 filter ! m3 @@ -45,8 +46,9 @@ class ReplayFilterSpec extends AkkaSpec with ImplicitSender { } "pass on all replayed messages (when previously no writer id was given, but now is) and then stop" in { - val filter = system.actorOf(ReplayFilter.props( - testActor, mode = RepairByDiscardOld, windowSize = 2, maxOldWriters = 10, debugEnabled = true)) + val filter = system.actorOf( + ReplayFilter + .props(testActor, mode = RepairByDiscardOld, windowSize = 2, maxOldWriters = 10, debugEnabled = true)) filter ! n1 filter ! 
n2 filter ! m3 @@ -62,8 +64,9 @@ class ReplayFilterSpec extends AkkaSpec with ImplicitSender { } "pass on all replayed messages when switching writer" in { - val filter = system.actorOf(ReplayFilter.props( - testActor, mode = RepairByDiscardOld, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) + val filter = system.actorOf( + ReplayFilter + .props(testActor, mode = RepairByDiscardOld, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) filter ! m1 filter ! m2 val m32 = m3.copy(persistent = m3.persistent.update(writerUuid = writerB)) @@ -77,9 +80,10 @@ class ReplayFilterSpec extends AkkaSpec with ImplicitSender { } "discard message with same seqNo from old overlapping writer" in { - val filter = system.actorOf(ReplayFilter.props( - testActor, mode = RepairByDiscardOld, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) - EventFilter.warning(start = "Invalid replayed event", occurrences = 1) intercept { + val filter = system.actorOf( + ReplayFilter + .props(testActor, mode = RepairByDiscardOld, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) + EventFilter.warning(start = "Invalid replayed event", occurrences = 1).intercept { filter ! m1 filter ! m2 filter ! m3 @@ -95,9 +99,10 @@ class ReplayFilterSpec extends AkkaSpec with ImplicitSender { } "discard messages from old writer after switching writer" in { - val filter = system.actorOf(ReplayFilter.props( - testActor, mode = RepairByDiscardOld, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) - EventFilter.warning(start = "Invalid replayed event", occurrences = 2) intercept { + val filter = system.actorOf( + ReplayFilter + .props(testActor, mode = RepairByDiscardOld, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) + EventFilter.warning(start = "Invalid replayed event", occurrences = 2).intercept { filter ! m1 filter ! 
m2 val m3b = m3.copy(persistent = m3.persistent.update(writerUuid = writerB)) @@ -115,9 +120,10 @@ class ReplayFilterSpec extends AkkaSpec with ImplicitSender { } "discard messages from several old writers" in { - val filter = system.actorOf(ReplayFilter.props( - testActor, mode = RepairByDiscardOld, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) - EventFilter.warning(start = "Invalid replayed event", occurrences = 3) intercept { + val filter = system.actorOf( + ReplayFilter + .props(testActor, mode = RepairByDiscardOld, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) + EventFilter.warning(start = "Invalid replayed event", occurrences = 3).intercept { filter ! m1 val m2b = m2.copy(persistent = m2.persistent.update(writerUuid = writerB)) filter ! m2b @@ -143,9 +149,9 @@ class ReplayFilterSpec extends AkkaSpec with ImplicitSender { "ReplayFilter in Fail mode" must { "fail when message with same seqNo from old overlapping writer" in { - val filter = system.actorOf(ReplayFilter.props( - testActor, mode = Fail, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) - EventFilter.error(start = "Invalid replayed event", occurrences = 1) intercept { + val filter = system.actorOf( + ReplayFilter.props(testActor, mode = Fail, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) + EventFilter.error(start = "Invalid replayed event", occurrences = 1).intercept { filter ! m1 filter ! m2 filter ! 
m3 @@ -158,9 +164,9 @@ class ReplayFilterSpec extends AkkaSpec with ImplicitSender { } "fail when messages from old writer after switching writer" in { - val filter = system.actorOf(ReplayFilter.props( - testActor, mode = Fail, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) - EventFilter.error(start = "Invalid replayed event", occurrences = 1) intercept { + val filter = system.actorOf( + ReplayFilter.props(testActor, mode = Fail, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) + EventFilter.error(start = "Invalid replayed event", occurrences = 1).intercept { filter ! m1 filter ! m2 val m3b = m3.copy(persistent = m3.persistent.update(writerUuid = writerB)) @@ -176,9 +182,9 @@ class ReplayFilterSpec extends AkkaSpec with ImplicitSender { "ReplayFilter in Warn mode" must { "warn about message with same seqNo from old overlapping writer" in { - val filter = system.actorOf(ReplayFilter.props( - testActor, mode = Warn, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) - EventFilter.warning(start = "Invalid replayed event", occurrences = 1) intercept { + val filter = system.actorOf( + ReplayFilter.props(testActor, mode = Warn, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) + EventFilter.warning(start = "Invalid replayed event", occurrences = 1).intercept { filter ! m1 filter ! m2 filter ! m3 @@ -195,9 +201,9 @@ class ReplayFilterSpec extends AkkaSpec with ImplicitSender { } "warn about messages from old writer after switching writer" in { - val filter = system.actorOf(ReplayFilter.props( - testActor, mode = Warn, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) - EventFilter.warning(start = "Invalid replayed event", occurrences = 2) intercept { + val filter = system.actorOf( + ReplayFilter.props(testActor, mode = Warn, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) + EventFilter.warning(start = "Invalid replayed event", occurrences = 2).intercept { filter ! m1 filter ! 
m2 val m3b = m3.copy(persistent = m3.persistent.update(writerUuid = writerB)) @@ -216,9 +222,9 @@ class ReplayFilterSpec extends AkkaSpec with ImplicitSender { } "warn about messages from several old writers" in { - val filter = system.actorOf(ReplayFilter.props( - testActor, mode = Warn, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) - EventFilter.warning(start = "Invalid replayed event", occurrences = 3) intercept { + val filter = system.actorOf( + ReplayFilter.props(testActor, mode = Warn, windowSize = 100, maxOldWriters = 10, debugEnabled = false)) + EventFilter.warning(start = "Invalid replayed event", occurrences = 3).intercept { filter ! m1 val m2b = m2.copy(persistent = m2.persistent.update(writerUuid = writerB)) filter ! m2b diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala b/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala index 468ff79e7e..591c630a6f 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/SteppingInmemJournal.scala @@ -4,13 +4,13 @@ package akka.persistence.journal -import akka.actor.{ ActorSystem, ActorRef } +import akka.actor.{ ActorRef, ActorSystem } import akka.pattern.ask import akka.persistence.journal.inmem.InmemJournal import akka.persistence.{ AtomicWrite, PersistentRepr } import akka.util.Timeout import akka.testkit._ -import com.typesafe.config.{ ConfigFactory, Config } +import com.typesafe.config.{ Config, ConfigFactory } import scala.collection.immutable.Seq import scala.concurrent.duration._ import scala.concurrent.{ Await, Future, Promise } @@ -31,8 +31,7 @@ object SteppingInmemJournal { } def config(instanceId: String): Config = - ConfigFactory.parseString( - s""" + ConfigFactory.parseString(s""" |akka.persistence.journal.stepping-inmem.class=${classOf[SteppingInmemJournal].getName} |akka.persistence.journal.plugin = 
"akka.persistence.journal.stepping-inmem" |akka.persistence.journal.stepping-inmem.instance-id = "$instanceId" @@ -49,8 +48,7 @@ object SteppingInmemJournal { private def putRef(instanceId: String, instance: ActorRef): Unit = synchronized { _current = _current + (instanceId -> instance) } - private def remove(instanceId: String): Unit = synchronized( - _current -= instanceId) + private def remove(instanceId: String): Unit = synchronized(_current -= instanceId) } /** @@ -72,7 +70,7 @@ final class SteppingInmemJournal extends InmemJournal { var queuedOps: Seq[() => Future[Unit]] = Seq.empty var queuedTokenRecipients = List.empty[ActorRef] - override def receivePluginInternal = super.receivePluginInternal orElse { + override def receivePluginInternal = super.receivePluginInternal.orElse { case Token if queuedOps.isEmpty => queuedTokenRecipients = queuedTokenRecipients :+ sender() case Token => val op +: rest = queuedOps @@ -128,11 +126,13 @@ final class SteppingInmemJournal extends InmemJournal { future } - override def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(recoveryCallback: (PersistentRepr) => Unit): Future[Unit] = { + override def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( + recoveryCallback: (PersistentRepr) => Unit): Future[Unit] = { val promise = Promise[Unit]() val future = promise.future doOrEnqueue { () => - promise.completeWith(super.asyncReplayMessages(persistenceId, fromSequenceNr, toSequenceNr, max)(recoveryCallback)) + promise.completeWith( + super.asyncReplayMessages(persistenceId, fromSequenceNr, toSequenceNr, max)(recoveryCallback)) future } diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala b/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala index 28dabe6436..12a811a393 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala +++ 
b/akka-persistence/src/test/scala/akka/persistence/journal/chaos/ChaosJournal.scala @@ -14,13 +14,12 @@ import scala.util.Try import scala.util.control.NonFatal class WriteFailedException(ps: Seq[PersistentRepr]) - extends TestException(s"write failed for payloads = [${ps.map(_.payload)}]") + extends TestException(s"write failed for payloads = [${ps.map(_.payload)}]") class ReplayFailedException(ps: Seq[PersistentRepr]) - extends TestException(s"recovery failed after replaying payloads = [${ps.map(_.payload)}]") + extends TestException(s"recovery failed after replaying payloads = [${ps.map(_.payload)}]") -class ReadHighestFailedException - extends TestException(s"recovery failed when reading highest sequence number") +class ReadHighestFailedException extends TestException(s"recovery failed when reading highest sequence number") /** * Keep [[ChaosJournal]] state in an external singleton so that it survives journal restarts. @@ -61,7 +60,8 @@ class ChaosJournal extends AsyncWriteJournal { } } - def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: (PersistentRepr) => Unit): Future[Unit] = + def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( + replayCallback: (PersistentRepr) => Unit): Future[Unit] = if (shouldFail(replayFailureRate)) { val rm = read(persistenceId, fromSequenceNr, toSequenceNr, max) val sm = rm.take(random.nextInt(rm.length + 1)) diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/CompactionSegmentManagementSpec.scala b/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/CompactionSegmentManagementSpec.scala index a905786787..e508da9afc 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/CompactionSegmentManagementSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/CompactionSegmentManagementSpec.scala @@ -10,11 +10,7 @@ class 
CompactionSegmentManagementSpec extends WordSpec { "A CompactionSegmentManagement compatible object" must { "ignore persistence ids without declared compaction intervals" in { - val intervals = Map( - "persistence_id-1" -> 1L, - "persistence_id-2" -> 1L, - "persistence_id-3" -> 1L - ) + val intervals = Map("persistence_id-1" -> 1L, "persistence_id-2" -> 1L, "persistence_id-3" -> 1L) val compactionStub = new CompactionSegmentManagement { override def compactionIntervals: Map[String, Long] = intervals } @@ -25,11 +21,7 @@ class CompactionSegmentManagementSpec extends WordSpec { } "ignore persistence ids whose compaction intervals are less or equal to zero" in { - val intervals = Map( - "persistence_id-1" -> 1L, - "persistence_id-2" -> 0L, - "persistence_id-3" -> -1L - ) + val intervals = Map("persistence_id-1" -> 1L, "persistence_id-2" -> 0L, "persistence_id-3" -> -1L) val compactionStub = new CompactionSegmentManagement { override def compactionIntervals: Map[String, Long] = intervals } @@ -39,11 +31,7 @@ class CompactionSegmentManagementSpec extends WordSpec { } "allow for wildcard configuration" in { - val intervals = Map( - "persistence_id-1" -> 1L, - "persistence_id-2" -> 1L, - "*" -> 1L - ) + val intervals = Map("persistence_id-1" -> 1L, "persistence_id-2" -> 1L, "*" -> 1L) val compactionStub = new CompactionSegmentManagement { override def compactionIntervals: Map[String, Long] = intervals } diff --git a/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala b/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala index 3ee60c4bde..6449503a12 100644 --- a/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala +++ b/akka-persistence/src/test/scala/akka/persistence/journal/leveldb/JournalCompactionSpec.scala @@ -16,7 +16,8 @@ import org.apache.commons.io.FileUtils import scala.util.Random -class JournalNoCompactionSpec extends 
JournalCompactionSpecBase(SpecComponentBuilder("leveldb-JournalNoCompactionSpec")) { +class JournalNoCompactionSpec + extends JournalCompactionSpecBase(SpecComponentBuilder("leveldb-JournalNoCompactionSpec")) { "A LevelDB-based persistent actor" must { "NOT compact the journal if compaction is not activated by configuration" in { @@ -50,7 +51,8 @@ class JournalNoCompactionSpec extends JournalCompactionSpecBase(SpecComponentBui } } -class JournalCompactionSpec extends JournalCompactionSpecBase(SpecComponentBuilder("leveldb-JournalCompactionSpec", 500)) { +class JournalCompactionSpec + extends JournalCompactionSpecBase(SpecComponentBuilder("leveldb-JournalCompactionSpec", 500)) { "A LevelDB-based persistent actor" must { "compact the journal upon message deletions of configured persistence ids" in { @@ -84,7 +86,8 @@ class JournalCompactionSpec extends JournalCompactionSpecBase(SpecComponentBuild } } -class JournalCompactionThresholdSpec extends JournalCompactionSpecBase(SpecComponentBuilder("leveldb-JournalCompactionThresholdSpec", 500)) { +class JournalCompactionThresholdSpec + extends JournalCompactionSpecBase(SpecComponentBuilder("leveldb-JournalCompactionThresholdSpec", 500)) { "A LevelDB-based persistent actor" must { "compact the journal only after the threshold implied by the configured compaction interval has been exceeded" in { @@ -141,11 +144,11 @@ object JournalCompactionSpec { class SpecComponentBuilder(val specId: String, val compactionInterval: Long) { def config: Config = { - PersistenceSpec.config("leveldb", specId, extraConfig = Some( - s""" + PersistenceSpec.config("leveldb", + specId, + extraConfig = Some(s""" | akka.persistence.journal.leveldb.compaction-intervals.$specId = $compactionInterval - """.stripMargin - )) + """.stripMargin)) } def createLogger(system: ActorSystem, watcher: ActorRef): ActorRef = { diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/io/DnsSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/io/DnsSpec.scala index 
937a0614d0..185264d580 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/io/DnsSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/io/DnsSpec.scala @@ -24,9 +24,10 @@ class DnsSpec extends RemotingMultiNodeSpec(DnsSpec) { val ip4Address = InetAddress.getByAddress("localhost", Array[Byte](127, 0, 0, 1)) match { case address: Inet4Address => address } - val ipv6Address = InetAddress.getByAddress("localhost", Array[Byte](0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)) match { - case address: Inet6Address => address - } + val ipv6Address = + InetAddress.getByAddress("localhost", Array[Byte](0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)) match { + case address: Inet6Address => address + } var temporaryValue: Option[String] = None diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/AttemptSysMsgRedeliverySpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/AttemptSysMsgRedeliverySpec.scala index ec8953cf7f..e8c67ca709 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/AttemptSysMsgRedeliverySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/AttemptSysMsgRedeliverySpec.scala @@ -22,8 +22,7 @@ class AttemptSysMsgRedeliveryMultiJvmSpec(artery: Boolean) extends MultiNodeConf val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.remote.artery.enabled = $artery """)).withFallback(RemotingMultiNodeSpec.commonConfig)) @@ -31,19 +30,19 @@ class AttemptSysMsgRedeliveryMultiJvmSpec(artery: Boolean) extends MultiNodeConf } -class AttemptSysMsgRedeliveryMultiJvmNode1 extends AttemptSysMsgRedeliverySpec( - new AttemptSysMsgRedeliveryMultiJvmSpec(artery = false)) -class AttemptSysMsgRedeliveryMultiJvmNode2 extends AttemptSysMsgRedeliverySpec( - new AttemptSysMsgRedeliveryMultiJvmSpec(artery = false)) -class AttemptSysMsgRedeliveryMultiJvmNode3 
extends AttemptSysMsgRedeliverySpec( - new AttemptSysMsgRedeliveryMultiJvmSpec(artery = false)) +class AttemptSysMsgRedeliveryMultiJvmNode1 + extends AttemptSysMsgRedeliverySpec(new AttemptSysMsgRedeliveryMultiJvmSpec(artery = false)) +class AttemptSysMsgRedeliveryMultiJvmNode2 + extends AttemptSysMsgRedeliverySpec(new AttemptSysMsgRedeliveryMultiJvmSpec(artery = false)) +class AttemptSysMsgRedeliveryMultiJvmNode3 + extends AttemptSysMsgRedeliverySpec(new AttemptSysMsgRedeliveryMultiJvmSpec(artery = false)) -class ArteryAttemptSysMsgRedeliveryMultiJvmNode1 extends AttemptSysMsgRedeliverySpec( - new AttemptSysMsgRedeliveryMultiJvmSpec(artery = true)) -class ArteryAttemptSysMsgRedeliveryMultiJvmNode2 extends AttemptSysMsgRedeliverySpec( - new AttemptSysMsgRedeliveryMultiJvmSpec(artery = true)) -class ArteryAttemptSysMsgRedeliveryMultiJvmNode3 extends AttemptSysMsgRedeliverySpec( - new AttemptSysMsgRedeliveryMultiJvmSpec(artery = true)) +class ArteryAttemptSysMsgRedeliveryMultiJvmNode1 + extends AttemptSysMsgRedeliverySpec(new AttemptSysMsgRedeliveryMultiJvmSpec(artery = true)) +class ArteryAttemptSysMsgRedeliveryMultiJvmNode2 + extends AttemptSysMsgRedeliverySpec(new AttemptSysMsgRedeliveryMultiJvmSpec(artery = true)) +class ArteryAttemptSysMsgRedeliveryMultiJvmNode3 + extends AttemptSysMsgRedeliverySpec(new AttemptSysMsgRedeliveryMultiJvmSpec(artery = true)) object AttemptSysMsgRedeliverySpec { class Echo extends Actor { @@ -54,7 +53,7 @@ object AttemptSysMsgRedeliverySpec { } abstract class AttemptSysMsgRedeliverySpec(multiNodeConfig: AttemptSysMsgRedeliveryMultiJvmSpec) - extends RemotingMultiNodeSpec(multiNodeConfig) { + extends RemotingMultiNodeSpec(multiNodeConfig) { import multiNodeConfig._ import AttemptSysMsgRedeliverySpec._ diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala index a07ef67445..2fa08d9218 100644 --- 
a/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/LookupRemoteActorSpec.scala @@ -16,8 +16,7 @@ import com.typesafe.config.ConfigFactory class LookupRemoteActorMultiJvmSpec(artery: Boolean) extends MultiNodeConfig { - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.remote.artery.enabled = $artery """)).withFallback(RemotingMultiNodeSpec.commonConfig)) @@ -29,8 +28,10 @@ class LookupRemoteActorMultiJvmSpec(artery: Boolean) extends MultiNodeConfig { class LookupRemoteActorMultiJvmNode1 extends LookupRemoteActorSpec(new LookupRemoteActorMultiJvmSpec(artery = false)) class LookupRemoteActorMultiJvmNode2 extends LookupRemoteActorSpec(new LookupRemoteActorMultiJvmSpec(artery = false)) -class ArteryLookupRemoteActorMultiJvmNode1 extends LookupRemoteActorSpec(new LookupRemoteActorMultiJvmSpec(artery = true)) -class ArteryLookupRemoteActorMultiJvmNode2 extends LookupRemoteActorSpec(new LookupRemoteActorMultiJvmSpec(artery = true)) +class ArteryLookupRemoteActorMultiJvmNode1 + extends LookupRemoteActorSpec(new LookupRemoteActorMultiJvmSpec(artery = true)) +class ArteryLookupRemoteActorMultiJvmNode2 + extends LookupRemoteActorSpec(new LookupRemoteActorMultiJvmSpec(artery = true)) object LookupRemoteActorSpec { class SomeActor extends Actor { @@ -41,7 +42,7 @@ object LookupRemoteActorSpec { } abstract class LookupRemoteActorSpec(multiNodeConfig: LookupRemoteActorMultiJvmSpec) - extends RemotingMultiNodeSpec(multiNodeConfig) { + extends RemotingMultiNodeSpec(multiNodeConfig) { import multiNodeConfig._ import LookupRemoteActorSpec._ @@ -67,4 +68,3 @@ abstract class LookupRemoteActorSpec(multiNodeConfig: LookupRemoteActorMultiJvmS } } - diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala 
b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala index a5098a6159..45741bd04b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/NewRemoteActorSpec.scala @@ -16,8 +16,7 @@ import scala.concurrent.duration._ class NewRemoteActorMultiJvmSpec(artery: Boolean) extends MultiNodeConfig { - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.remote.log-remote-lifecycle-events = off akka.remote.artery.enabled = $artery """).withFallback(RemotingMultiNodeSpec.commonConfig))) @@ -55,7 +54,7 @@ object NewRemoteActorSpec { } abstract class NewRemoteActorSpec(multiNodeConfig: NewRemoteActorMultiJvmSpec) - extends RemotingMultiNodeSpec(multiNodeConfig) { + extends RemotingMultiNodeSpec(multiNodeConfig) { import multiNodeConfig._ import NewRemoteActorSpec._ diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/PiercingShouldKeepQuarantineSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/PiercingShouldKeepQuarantineSpec.scala index aa09781bb0..601e6e36d5 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/PiercingShouldKeepQuarantineSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/PiercingShouldKeepQuarantineSpec.scala @@ -14,23 +14,22 @@ class PiercingShouldKeepQuarantineConfig(artery: Boolean) extends MultiNodeConfi val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.remote.retry-gate-closed-for = 0.5s akka.remote.artery.enabled = $artery """)).withFallback(RemotingMultiNodeSpec.commonConfig)) } -class PiercingShouldKeepQuarantineSpecMultiJvmNode1 extends PiercingShouldKeepQuarantineSpec( - new 
PiercingShouldKeepQuarantineConfig(artery = false)) -class PiercingShouldKeepQuarantineSpecMultiJvmNode2 extends PiercingShouldKeepQuarantineSpec( - new PiercingShouldKeepQuarantineConfig(artery = false)) +class PiercingShouldKeepQuarantineSpecMultiJvmNode1 + extends PiercingShouldKeepQuarantineSpec(new PiercingShouldKeepQuarantineConfig(artery = false)) +class PiercingShouldKeepQuarantineSpecMultiJvmNode2 + extends PiercingShouldKeepQuarantineSpec(new PiercingShouldKeepQuarantineConfig(artery = false)) -class ArteryPiercingShouldKeepQuarantineSpecMultiJvmNode1 extends PiercingShouldKeepQuarantineSpec( - new PiercingShouldKeepQuarantineConfig(artery = true)) -class ArteryPiercingShouldKeepQuarantineSpecMultiJvmNode2 extends PiercingShouldKeepQuarantineSpec( - new PiercingShouldKeepQuarantineConfig(artery = true)) +class ArteryPiercingShouldKeepQuarantineSpecMultiJvmNode1 + extends PiercingShouldKeepQuarantineSpec(new PiercingShouldKeepQuarantineConfig(artery = true)) +class ArteryPiercingShouldKeepQuarantineSpecMultiJvmNode2 + extends PiercingShouldKeepQuarantineSpec(new PiercingShouldKeepQuarantineConfig(artery = true)) object PiercingShouldKeepQuarantineSpec { class Subject extends Actor { @@ -41,7 +40,7 @@ object PiercingShouldKeepQuarantineSpec { } abstract class PiercingShouldKeepQuarantineSpec(multiNodeConfig: PiercingShouldKeepQuarantineConfig) - extends RemotingMultiNodeSpec(multiNodeConfig) { + extends RemotingMultiNodeSpec(multiNodeConfig) { import multiNodeConfig._ import PiercingShouldKeepQuarantineSpec._ diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeliverySpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeliverySpec.scala index 518adbf8e1..284df13168 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeliverySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeliverySpec.scala @@ -21,8 +21,7 @@ class RemoteDeliveryConfig(artery: Boolean) extends MultiNodeConfig { val 
second = role("second") val third = role("third") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.remote.artery.enabled = $artery """)).withFallback(RemotingMultiNodeSpec.commonConfig)) } @@ -46,7 +45,7 @@ object RemoteDeliverySpec { } abstract class RemoteDeliverySpec(multiNodeConfig: RemoteDeliveryConfig) - extends RemotingMultiNodeSpec(multiNodeConfig) { + extends RemotingMultiNodeSpec(multiNodeConfig) { import multiNodeConfig._ import RemoteDeliverySpec._ diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeploymentDeathWatchSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeploymentDeathWatchSpec.scala index 70010bba43..13d8b21eee 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeploymentDeathWatchSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteDeploymentDeathWatchSpec.scala @@ -20,8 +20,7 @@ class RemoteDeploymentDeathWatchMultiJvmSpec(artery: Boolean) extends MultiNodeC val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.loglevel = INFO akka.remote.log-remote-lifecycle-events = off akka.remote.artery.enabled = $artery @@ -41,8 +40,8 @@ class ArteryRemoteDeploymentDeathWatchFastMultiJvmNode1 extends RemoteDeployment class ArteryRemoteDeploymentDeathWatchFastMultiJvmNode2 extends RemoteDeploymentNodeDeathWatchFastSpec(artery = true) class ArteryRemoteDeploymentDeathWatchFastMultiJvmNode3 extends RemoteDeploymentNodeDeathWatchFastSpec(artery = true) -abstract class RemoteDeploymentNodeDeathWatchFastSpec(artery: Boolean) extends RemoteDeploymentDeathWatchSpec( - new RemoteDeploymentDeathWatchMultiJvmSpec(artery)) { +abstract class RemoteDeploymentNodeDeathWatchFastSpec(artery: 
Boolean) + extends RemoteDeploymentDeathWatchSpec(new RemoteDeploymentDeathWatchMultiJvmSpec(artery)) { override def scenario = "fast" } @@ -54,8 +53,8 @@ class ArteryRemoteDeploymentDeathWatchSlowMultiJvmNode1 extends RemoteDeployment class ArteryRemoteDeploymentDeathWatchSlowMultiJvmNode2 extends RemoteDeploymentNodeDeathWatchSlowSpec(artery = true) class ArteryRemoteDeploymentDeathWatchSlowMultiJvmNode3 extends RemoteDeploymentNodeDeathWatchSlowSpec(artery = true) -abstract class RemoteDeploymentNodeDeathWatchSlowSpec(artery: Boolean) extends RemoteDeploymentDeathWatchSpec( - new RemoteDeploymentDeathWatchMultiJvmSpec(artery)) { +abstract class RemoteDeploymentNodeDeathWatchSlowSpec(artery: Boolean) + extends RemoteDeploymentDeathWatchSpec(new RemoteDeploymentDeathWatchMultiJvmSpec(artery)) { override def scenario = "slow" override def sleep(): Unit = Thread.sleep(3000) } @@ -67,7 +66,7 @@ object RemoteDeploymentDeathWatchSpec { } abstract class RemoteDeploymentDeathWatchSpec(multiNodeConfig: RemoteDeploymentDeathWatchMultiJvmSpec) - extends RemotingMultiNodeSpec(multiNodeConfig) { + extends RemotingMultiNodeSpec(multiNodeConfig) { import multiNodeConfig._ import RemoteDeploymentDeathWatchSpec._ @@ -92,10 +91,13 @@ abstract class RemoteDeploymentDeathWatchSpec(multiNodeConfig: RemoteDeploymentD // if the remote deployed actor is not removed the system will not shutdown val timeout = remainingOrDefault - try Await.ready(system.whenTerminated, timeout) catch { + try Await.ready(system.whenTerminated, timeout) + catch { case _: TimeoutException => - fail("Failed to stop [%s] within [%s] \n%s".format(system.name, timeout, - system.asInstanceOf[ActorSystemImpl].printTree)) + fail( + "Failed to stop [%s] within [%s] \n%s".format(system.name, + timeout, + system.asInstanceOf[ActorSystemImpl].printTree)) } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteGatePiercingSpec.scala 
b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteGatePiercingSpec.scala index 6dce6d8956..f10bc1a854 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteGatePiercingSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteGatePiercingSpec.scala @@ -22,18 +22,17 @@ object RemoteGatePiercingSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" akka.loglevel = INFO akka.remote.log-remote-lifecycle-events = INFO akka.remote.transport-failure-detector.acceptable-heartbeat-pause = 5 s """))) - nodeConfig(first)( - ConfigFactory.parseString("akka.remote.retry-gate-closed-for = 1 d # Keep it long")) + nodeConfig(first)(ConfigFactory.parseString("akka.remote.retry-gate-closed-for = 1 d # Keep it long")) - nodeConfig(second)( - ConfigFactory.parseString("akka.remote.retry-gate-closed-for = 1 s # Keep it short")) + nodeConfig(second)(ConfigFactory.parseString("akka.remote.retry-gate-closed-for = 1 s # Keep it short")) testTransport(on = true) @@ -48,8 +47,7 @@ object RemoteGatePiercingSpec extends MultiNodeConfig { class RemoteGatePiercingSpecMultiJvmNode1 extends RemoteGatePiercingSpec class RemoteGatePiercingSpecMultiJvmNode2 extends RemoteGatePiercingSpec -abstract class RemoteGatePiercingSpec - extends RemotingMultiNodeSpec(RemoteGatePiercingSpec) { +abstract class RemoteGatePiercingSpec extends RemotingMultiNodeSpec(RemoteGatePiercingSpec) { import RemoteGatePiercingSpec._ @@ -73,7 +71,8 @@ abstract class RemoteGatePiercingSpec EventFilter.warning(pattern = "address is now gated", occurrences = 1).intercept { Await.result(RARP(system).provider.transport.managementCommand( - ForceDisassociateExplicitly(node(second).address, AssociationHandle.Unknown)), 3.seconds) + ForceDisassociateExplicitly(node(second).address, 
AssociationHandle.Unknown)), + 3.seconds) } enterBarrier("gated") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeDeathWatchSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeDeathWatchSpec.scala index ad82dc0f99..ff024f8c47 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeDeathWatchSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeDeathWatchSpec.scala @@ -23,8 +23,7 @@ class RemoteNodeDeathWatchConfig(artery: Boolean) extends MultiNodeConfig { val second = role("second") val third = role("third") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.loglevel = INFO akka.remote.log-remote-lifecycle-events = off ## Use a tighter setting than the default, otherwise it takes 20s for DeathWatch to trigger @@ -44,8 +43,8 @@ class ArteryRemoteNodeDeathWatchFastMultiJvmNode1 extends RemoteNodeDeathWatchFa class ArteryRemoteNodeDeathWatchFastMultiJvmNode2 extends RemoteNodeDeathWatchFastSpec(artery = true) class ArteryRemoteNodeDeathWatchFastMultiJvmNode3 extends RemoteNodeDeathWatchFastSpec(artery = true) -abstract class RemoteNodeDeathWatchFastSpec(artery: Boolean) extends RemoteNodeDeathWatchSpec( - new RemoteNodeDeathWatchConfig(artery)) { +abstract class RemoteNodeDeathWatchFastSpec(artery: Boolean) + extends RemoteNodeDeathWatchSpec(new RemoteNodeDeathWatchConfig(artery)) { override def scenario = "fast" } @@ -57,8 +56,8 @@ class ArteryRemoteNodeDeathWatchSlowMultiJvmNode1 extends RemoteNodeDeathWatchSl class ArteryRemoteNodeDeathWatchSlowMultiJvmNode2 extends RemoteNodeDeathWatchSlowSpec(artery = true) class ArteryRemoteNodeDeathWatchSlowMultiJvmNode3 extends RemoteNodeDeathWatchSlowSpec(artery = true) -abstract class RemoteNodeDeathWatchSlowSpec(artery: Boolean) extends RemoteNodeDeathWatchSpec( - new RemoteNodeDeathWatchConfig(artery)) { +abstract 
class RemoteNodeDeathWatchSlowSpec(artery: Boolean) + extends RemoteNodeDeathWatchSpec(new RemoteNodeDeathWatchConfig(artery)) { override def scenario = "slow" override def sleep(): Unit = Thread.sleep(3000) } @@ -77,20 +76,20 @@ object RemoteNodeDeathWatchSpec { class ProbeActor(testActor: ActorRef) extends Actor { def receive = { case WatchIt(watchee) => - context watch watchee + context.watch(watchee) sender() ! Ack case UnwatchIt(watchee) => - context unwatch watchee + context.unwatch(watchee) sender() ! Ack case t: Terminated => - testActor forward WrappedTerminated(t) - case msg => testActor forward msg + testActor.forward(WrappedTerminated(t)) + case msg => testActor.forward(msg) } } } abstract class RemoteNodeDeathWatchSpec(multiNodeConfig: RemoteNodeDeathWatchConfig) - extends RemotingMultiNodeSpec(multiNodeConfig) { + extends RemotingMultiNodeSpec(multiNodeConfig) { import multiNodeConfig._ import RemoteNodeDeathWatchSpec._ import RemoteWatcher._ diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala index 12e41e03b6..0aa72db235 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartDeathWatchSpec.scala @@ -24,8 +24,7 @@ class RemoteNodeRestartDeathWatchConfig(artery: Boolean) extends MultiNodeConfig val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.loglevel = INFO akka.remote.log-remote-lifecycle-events = off akka.remote.transport-failure-detector.heartbeat-interval = 1 s @@ -37,10 +36,10 @@ class RemoteNodeRestartDeathWatchConfig(artery: Boolean) extends MultiNodeConfig } -class RemoteNodeRestartDeathWatchMultiJvmNode1 extends 
RemoteNodeRestartDeathWatchSpec( - new RemoteNodeRestartDeathWatchConfig(artery = false)) -class RemoteNodeRestartDeathWatchMultiJvmNode2 extends RemoteNodeRestartDeathWatchSpec( - new RemoteNodeRestartDeathWatchConfig(artery = false)) +class RemoteNodeRestartDeathWatchMultiJvmNode1 + extends RemoteNodeRestartDeathWatchSpec(new RemoteNodeRestartDeathWatchConfig(artery = false)) +class RemoteNodeRestartDeathWatchMultiJvmNode2 + extends RemoteNodeRestartDeathWatchSpec(new RemoteNodeRestartDeathWatchConfig(artery = false)) // FIXME this is failing with Artery //class ArteryRemoteNodeRestartDeathWatchMultiJvmNode1 extends RemoteNodeRestartDeathWatchSpec( @@ -60,7 +59,7 @@ object RemoteNodeRestartDeathWatchSpec { } abstract class RemoteNodeRestartDeathWatchSpec(multiNodeConfig: RemoteNodeRestartDeathWatchConfig) - extends RemotingMultiNodeSpec(multiNodeConfig) { + extends RemotingMultiNodeSpec(multiNodeConfig) { import multiNodeConfig._ import RemoteNodeRestartDeathWatchSpec._ @@ -108,7 +107,8 @@ abstract class RemoteNodeRestartDeathWatchSpec(multiNodeConfig: RemoteNodeRestar Await.ready(system.whenTerminated, 30.seconds) - val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" + val freshSystem = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.netty.tcp.port = ${address.port.get} akka.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala index d16363313f..40664158de 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeRestartGateSpec.scala @@ -22,8 +22,9 @@ object RemoteNodeRestartGateSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = 
false).withFallback( - ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" akka.loglevel = INFO akka.remote.log-remote-lifecycle-events = INFO akka.remote.retry-gate-closed-for = 1d # Keep it long @@ -43,8 +44,7 @@ object RemoteNodeRestartGateSpec extends MultiNodeConfig { class RemoteNodeRestartGateSpecMultiJvmNode1 extends RemoteNodeRestartGateSpec class RemoteNodeRestartGateSpecMultiJvmNode2 extends RemoteNodeRestartGateSpec -abstract class RemoteNodeRestartGateSpec - extends RemotingMultiNodeSpec(RemoteNodeRestartGateSpec) { +abstract class RemoteNodeRestartGateSpec extends RemotingMultiNodeSpec(RemoteNodeRestartGateSpec) { import RemoteNodeRestartGateSpec._ @@ -69,7 +69,8 @@ abstract class RemoteNodeRestartGateSpec EventFilter.warning(pattern = "address is now gated", occurrences = 1).intercept { Await.result(RARP(system).provider.transport.managementCommand( - ForceDisassociateExplicitly(node(second).address, AssociationHandle.Unknown)), 3.seconds) + ForceDisassociateExplicitly(node(second).address, AssociationHandle.Unknown)), + 3.seconds) } enterBarrier("gated") @@ -94,7 +95,8 @@ abstract class RemoteNodeRestartGateSpec Await.ready(system.whenTerminated, 10.seconds) - val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" + val freshSystem = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.retry-gate-closed-for = 0.5 s akka.remote.netty.tcp { hostname = ${address.host.get} @@ -107,7 +109,9 @@ abstract class RemoteNodeRestartGateSpec // Pierce the gate within(30.seconds) { awaitAssert { - freshSystem.actorSelection(RootActorPath(firstAddress) / "user" / "subject").tell(Identify("subject"), probe.ref) + freshSystem + .actorSelection(RootActorPath(firstAddress) / "user" / "subject") + .tell(Identify("subject"), probe.ref) probe.expectMsgType[ActorIdentity].ref.get } } diff --git 
a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala index 70bc19a897..bc95e8945d 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteNodeShutdownAndComesBackSpec.scala @@ -9,7 +9,7 @@ import scala.concurrent.duration._ import com.typesafe.config.ConfigFactory import akka.actor._ import akka.remote.testconductor.RoleName -import akka.remote.transport.ThrottlerTransportAdapter.{ ForceDisassociate, Direction } +import akka.remote.transport.ThrottlerTransportAdapter.{ Direction, ForceDisassociate } import akka.remote.testkit.MultiNodeConfig import akka.testkit._ import akka.actor.ActorIdentity @@ -21,8 +21,9 @@ object RemoteNodeShutdownAndComesBackSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false).withFallback( + ConfigFactory.parseString(""" akka.loglevel = INFO akka.remote.log-remote-lifecycle-events = INFO ## Keep it tight, otherwise reestablishing a connection takes too much time @@ -45,8 +46,7 @@ object RemoteNodeShutdownAndComesBackSpec extends MultiNodeConfig { class RemoteNodeShutdownAndComesBackMultiJvmNode1 extends RemoteNodeShutdownAndComesBackSpec class RemoteNodeShutdownAndComesBackMultiJvmNode2 extends RemoteNodeShutdownAndComesBackSpec -abstract class RemoteNodeShutdownAndComesBackSpec - extends RemotingMultiNodeSpec(RemoteNodeShutdownAndComesBackSpec) { +abstract class RemoteNodeShutdownAndComesBackSpec extends RemotingMultiNodeSpec(RemoteNodeShutdownAndComesBackSpec) { import RemoteNodeShutdownAndComesBackSpec._ @@ -81,7 +81,8 @@ abstract class RemoteNodeShutdownAndComesBackSpec // Drop all messages from this point so no SHUTDOWN is ever received 
testConductor.blackhole(second, first, Direction.Send).await // Shut down all existing connections so that the system can enter recovery mode (association attempts) - Await.result(RARP(system).provider.transport.managementCommand(ForceDisassociate(node(second).address)), 3.seconds) + Await.result(RARP(system).provider.transport.managementCommand(ForceDisassociate(node(second).address)), + 3.seconds) // Trigger reconnect attempt and also queue up a system message to be in limbo state (UID of remote system // is unknown, and system message is pending) @@ -132,7 +133,8 @@ abstract class RemoteNodeShutdownAndComesBackSpec Await.ready(system.whenTerminated, 30.seconds) - val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" + val freshSystem = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.netty.tcp.port = ${address.port.get} akka.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala index 0218d40ac9..ab839128ec 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteQuarantinePiercingSpec.scala @@ -17,8 +17,7 @@ class RemoteQuarantinePiercingConfig(artery: Boolean) extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.loglevel = INFO akka.remote.log-remote-lifecycle-events = INFO akka.remote.artery.enabled = $artery @@ -26,15 +25,15 @@ class RemoteQuarantinePiercingConfig(artery: Boolean) extends MultiNodeConfig { } -class RemoteQuarantinePiercingMultiJvmNode1 extends RemoteQuarantinePiercingSpec( - new 
RemoteQuarantinePiercingConfig(artery = false)) -class RemoteQuarantinePiercingMultiJvmNode2 extends RemoteQuarantinePiercingSpec( - new RemoteQuarantinePiercingConfig(artery = false)) +class RemoteQuarantinePiercingMultiJvmNode1 + extends RemoteQuarantinePiercingSpec(new RemoteQuarantinePiercingConfig(artery = false)) +class RemoteQuarantinePiercingMultiJvmNode2 + extends RemoteQuarantinePiercingSpec(new RemoteQuarantinePiercingConfig(artery = false)) -class ArteryRemoteQuarantinePiercingMultiJvmNode1 extends RemoteQuarantinePiercingSpec( - new RemoteQuarantinePiercingConfig(artery = true)) -class ArteryRemoteQuarantinePiercingMultiJvmNode2 extends RemoteQuarantinePiercingSpec( - new RemoteQuarantinePiercingConfig(artery = true)) +class ArteryRemoteQuarantinePiercingMultiJvmNode1 + extends RemoteQuarantinePiercingSpec(new RemoteQuarantinePiercingConfig(artery = true)) +class ArteryRemoteQuarantinePiercingMultiJvmNode2 + extends RemoteQuarantinePiercingSpec(new RemoteQuarantinePiercingConfig(artery = true)) object RemoteQuarantinePiercingSpec { class Subject extends Actor { @@ -46,13 +45,15 @@ object RemoteQuarantinePiercingSpec { } abstract class RemoteQuarantinePiercingSpec(multiNodeConfig: RemoteQuarantinePiercingConfig) - extends RemotingMultiNodeSpec(multiNodeConfig) { + extends RemotingMultiNodeSpec(multiNodeConfig) { import multiNodeConfig._ import RemoteQuarantinePiercingSpec._ override def initialParticipants = roles.size - def identifyWithUid(role: RoleName, actorName: String, timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = { + def identifyWithUid(role: RoleName, + actorName: String, + timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = { within(timeout) { system.actorSelection(node(role) / "user" / actorName) ! 
"identify" expectMsgType[(Long, ActorRef)] @@ -106,7 +107,8 @@ abstract class RemoteQuarantinePiercingSpec(multiNodeConfig: RemoteQuarantinePie Await.ready(system.whenTerminated, 30.seconds) - val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" + val freshSystem = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.netty.tcp.port = ${address.port.get} akka.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala index 43bc8d3ff3..7b2b861ee9 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteReDeploymentSpec.scala @@ -23,8 +23,8 @@ class RemoteReDeploymentConfig(artery: Boolean) extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = true).withFallback(ConfigFactory.parseString( - s"""akka.remote.transport-failure-detector { + commonConfig( + debugConfig(on = true).withFallback(ConfigFactory.parseString(s"""akka.remote.transport-failure-detector { threshold=0.1 heartbeat-interval=0.1s acceptable-heartbeat-pause=1s @@ -49,8 +49,8 @@ class RemoteReDeploymentFastMultiJvmNode2 extends RemoteReDeploymentFastMultiJvm class ArteryRemoteReDeploymentFastMultiJvmNode1 extends RemoteReDeploymentFastMultiJvmSpec(artery = true) class ArteryRemoteReDeploymentFastMultiJvmNode2 extends RemoteReDeploymentFastMultiJvmSpec(artery = true) -abstract class RemoteReDeploymentFastMultiJvmSpec(artery: Boolean) extends RemoteReDeploymentMultiJvmSpec( - new RemoteReDeploymentConfig(artery)) { +abstract class RemoteReDeploymentFastMultiJvmSpec(artery: Boolean) + extends RemoteReDeploymentMultiJvmSpec(new RemoteReDeploymentConfig(artery)) { override def sleepAfterKill = 0.seconds // new 
association will come in while old is still “healthy” override def expectQuarantine = false } @@ -61,9 +61,10 @@ class RemoteReDeploymentMediumMultiJvmNode2 extends RemoteReDeploymentMediumMult class ArteryRemoteReDeploymentMediumMultiJvmNode1 extends RemoteReDeploymentMediumMultiJvmSpec(artery = true) class ArteryRemoteReDeploymentMediumMultiJvmNode2 extends RemoteReDeploymentMediumMultiJvmSpec(artery = true) -abstract class RemoteReDeploymentMediumMultiJvmSpec(artery: Boolean) extends RemoteReDeploymentMultiJvmSpec( - new RemoteReDeploymentConfig(artery)) { - override def sleepAfterKill = 1.seconds // new association will come in while old is gated in ReliableDeliverySupervisor +abstract class RemoteReDeploymentMediumMultiJvmSpec(artery: Boolean) + extends RemoteReDeploymentMultiJvmSpec(new RemoteReDeploymentConfig(artery)) { + override def sleepAfterKill = + 1.seconds // new association will come in while old is gated in ReliableDeliverySupervisor override def expectQuarantine = false } @@ -73,8 +74,8 @@ class RemoteReDeploymentSlowMultiJvmNode2 extends RemoteReDeploymentSlowMultiJvm class ArteryRemoteReDeploymentSlowMultiJvmNode1 extends RemoteReDeploymentSlowMultiJvmSpec(artery = true) class ArteryRemoteReDeploymentSlowMultiJvmNode2 extends RemoteReDeploymentSlowMultiJvmSpec(artery = true) -abstract class RemoteReDeploymentSlowMultiJvmSpec(artery: Boolean) extends RemoteReDeploymentMultiJvmSpec( - new RemoteReDeploymentConfig(artery)) { +abstract class RemoteReDeploymentSlowMultiJvmSpec(artery: Boolean) + extends RemoteReDeploymentMultiJvmSpec(new RemoteReDeploymentConfig(artery)) { override def sleepAfterKill = 10.seconds // new association will come in after old has been quarantined override def expectQuarantine = true } @@ -109,7 +110,7 @@ object RemoteReDeploymentMultiJvmSpec { } abstract class RemoteReDeploymentMultiJvmSpec(multiNodeConfig: RemoteReDeploymentConfig) - extends RemotingMultiNodeSpec(multiNodeConfig) { + extends 
RemotingMultiNodeSpec(multiNodeConfig) { def sleepAfterKill: FiniteDuration def expectQuarantine: Boolean @@ -150,8 +151,7 @@ abstract class RemoteReDeploymentMultiJvmSpec(multiNodeConfig: RemoteReDeploymen // The quarantine of node 2, where the Parent lives, should cause the Hello child to be stopped: expectMsg("PostStop") expectNoMsg() - } - else expectNoMsg(sleepAfterKill) + } else expectNoMsg(sleepAfterKill) awaitAssert(node(second), 10.seconds, 100.millis) } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala index 16a3af905b..ed9d1c0130 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemoteRestartedQuarantinedSpec.scala @@ -20,8 +20,9 @@ object RemoteRestartedQuarantinedSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false).withFallback(ConfigFactory.parseString( + """ # Keep it long, we don't want reconnects akka.remote.retry-gate-closed-for = 1 s @@ -48,8 +49,7 @@ object RemoteRestartedQuarantinedSpec extends MultiNodeConfig { class RemoteRestartedQuarantinedSpecMultiJvmNode1 extends RemoteRestartedQuarantinedSpec class RemoteRestartedQuarantinedSpecMultiJvmNode2 extends RemoteRestartedQuarantinedSpec -abstract class RemoteRestartedQuarantinedSpec - extends RemotingMultiNodeSpec(RemoteRestartedQuarantinedSpec) { +abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(RemoteRestartedQuarantinedSpec) { import RemoteRestartedQuarantinedSpec._ @@ -115,7 +115,8 @@ abstract class RemoteRestartedQuarantinedSpec Await.result(system.whenTerminated, 10.seconds) - val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" + val 
freshSystem = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.retry-gate-closed-for = 0.5 s akka.remote.netty.tcp { hostname = ${address.host.get} @@ -125,10 +126,14 @@ abstract class RemoteRestartedQuarantinedSpec // retry because it's possible to loose the initial message here, see issue #17314 val probe = TestProbe()(freshSystem) - probe.awaitAssert({ - freshSystem.actorSelection(RootActorPath(firstAddress) / "user" / "subject").tell(Identify("subject"), probe.ref) - probe.expectMsgType[ActorIdentity](1.second).ref should not be (None) - }, 30.seconds) + probe.awaitAssert( + { + freshSystem + .actorSelection(RootActorPath(firstAddress) / "user" / "subject") + .tell(Identify("subject"), probe.ref) + probe.expectMsgType[ActorIdentity](1.second).ref should not be (None) + }, + 30.seconds) // Now the other system will be able to pass, too freshSystem.actorOf(Props[Subject], "subject") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala index 909d8274de..66a653c22e 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/RemotingMultiNodeSpec.scala @@ -15,24 +15,23 @@ import org.scalatest.{ Outcome, Suite } object RemotingMultiNodeSpec { def commonConfig = - ConfigFactory.parseString( - s""" + ConfigFactory.parseString(s""" akka.actor.warn-about-java-serializer-usage = off akka.remote.artery.advanced.flight-recorder { enabled=on destination=target/flight-recorder-${UUID.randomUUID().toString}.afr } - """) - .withFallback(ArterySpecSupport.tlsConfig) // TLS only used if transport=tls-tcp + """).withFallback(ArterySpecSupport.tlsConfig) // TLS only used if transport=tls-tcp } -abstract class RemotingMultiNodeSpec(config: MultiNodeConfig) extends MultiNodeSpec(config) - with Suite - with STMultiNodeSpec - with FlightRecordingSupport - with 
ImplicitSender - with DefaultTimeout { self: MultiNodeSpec => +abstract class RemotingMultiNodeSpec(config: MultiNodeConfig) + extends MultiNodeSpec(config) + with Suite + with STMultiNodeSpec + with FlightRecordingSupport + with ImplicitSender + with DefaultTimeout { self: MultiNodeSpec => // Keep track of failure so we can print artery flight recording on failure private var failed = false diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/Ticket15109Spec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/Ticket15109Spec.scala index 2e3dfa29bb..c9044a1268 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/Ticket15109Spec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/Ticket15109Spec.scala @@ -21,8 +21,9 @@ object Ticket15109Spec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false).withFallback( + ConfigFactory.parseString(""" akka.loglevel = INFO akka.remote.log-remote-lifecycle-events = INFO ## Keep it tight, otherwise reestablishing a connection takes too much time @@ -85,8 +86,9 @@ abstract class Ticket15109Spec extends RemotingMultiNodeSpec(Ticket15109Spec) { runOn(second) { // Force a disassociation. 
Using the message Shutdown, which is suboptimal here, but this is the only // DisassociateInfo that triggers the code-path we want to test - Await.result(RARP(system).provider.transport.managementCommand( - ForceDisassociateExplicitly(node(first).address, AssociationHandle.Shutdown)), 3.seconds) + Await.result(RARP(system).provider.transport + .managementCommand(ForceDisassociateExplicitly(node(first).address, AssociationHandle.Shutdown)), + 3.seconds) } enterBarrier("disassociated") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/TransportFailSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/TransportFailSpec.scala index cc9a7d5f37..702cae27ea 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/TransportFailSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/TransportFailSpec.scala @@ -25,8 +25,7 @@ object TransportFailConfig extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.loglevel = INFO akka.remote { transport-failure-detector { diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/BenchmarkFileReporter.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/BenchmarkFileReporter.scala index c2172bbcbb..cc37f224f5 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/BenchmarkFileReporter.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/BenchmarkFileReporter.scala @@ -52,22 +52,20 @@ object BenchmarkFileReporter { reportResults(s"Git commit: $gitCommit") val settingsToReport = - Seq( - "akka.test.MaxThroughputSpec.totalMessagesFactor", - "akka.test.MaxThroughputSpec.real-message", - "akka.test.LatencySpec.totalMessagesFactor", - "akka.test.LatencySpec.repeatCount", - "akka.test.LatencySpec.real-message", - "akka.remote.artery.enabled", - 
"akka.remote.artery.advanced.inbound-lanes", - "akka.remote.artery.advanced.idle-cpu-level", - "akka.remote.artery.advanced.buffer-pool-size", - "akka.remote.artery.advanced.embedded-media-driver", - "akka.remote.default-remote-dispatcher.throughput", - "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-factor", - "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-min", - "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-max" - ) + Seq("akka.test.MaxThroughputSpec.totalMessagesFactor", + "akka.test.MaxThroughputSpec.real-message", + "akka.test.LatencySpec.totalMessagesFactor", + "akka.test.LatencySpec.repeatCount", + "akka.test.LatencySpec.real-message", + "akka.remote.artery.enabled", + "akka.remote.artery.advanced.inbound-lanes", + "akka.remote.artery.advanced.idle-cpu-level", + "akka.remote.artery.advanced.buffer-pool-size", + "akka.remote.artery.advanced.embedded-media-driver", + "akka.remote.default-remote-dispatcher.throughput", + "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-factor", + "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-min", + "akka.remote.default-remote-dispatcher.fork-join-executor.parallelism-max") settingsToReport.foreach(reportSetting) def reportResults(result: String): Unit = synchronized { diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/DirectMemorySpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/DirectMemorySpec.scala index 7c5926f78f..a142f7ece2 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/DirectMemorySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/DirectMemorySpec.scala @@ -15,8 +15,9 @@ object DirectMemorySpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + 
.withFallback(ConfigFactory.parseString(""" akka.loglevel = WARNING akka.remote.log-remote-lifecycle-events = WARNING akka.remote.artery.enabled = on @@ -25,7 +26,8 @@ object DirectMemorySpec extends MultiNodeConfig { akka.remote.artery.maximum-frame-size = 256 KiB akka.remote.artery.large-buffer-pool-size = 4 akka.remote.artery.maximum-large-frame-size = 2 MiB - """)).withFallback(RemotingMultiNodeSpec.commonConfig)) + """)) + .withFallback(RemotingMultiNodeSpec.commonConfig)) // buffer pool + large buffer pool = 16M, see DirectMemorySpecMultiJvmNode1.opts @@ -58,7 +60,7 @@ abstract class DirectMemorySpec extends MultiNodeSpec(DirectMemorySpec) with STM "This test" should { "override JVM start-up options" in { // it's important that *.opts files have been processed - assert(System.getProperty("DirectMemorySpec.marker") equals "true") + assert(System.getProperty("DirectMemorySpec.marker").equals("true")) } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala index 92b172c5c7..9fd1d67388 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanInThrougputSpec.scala @@ -28,8 +28,7 @@ object FanInThroughputSpec extends MultiNodeConfig { val barrierTimeout = 5.minutes - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" # for serious measurements you should increase the totalMessagesFactor (20) akka.test.FanInThroughputSpec.totalMessagesFactor = 10.0 akka.test.FanInThroughputSpec.real-message = off @@ -37,9 +36,7 @@ object FanInThroughputSpec extends MultiNodeConfig { akka.remote.artery.advanced { # inbound-lanes = 4 } - """)) - .withFallback(MaxThroughputSpec.cfg) - .withFallback(RemotingMultiNodeSpec.commonConfig)) + 
""")).withFallback(MaxThroughputSpec.cfg).withFallback(RemotingMultiNodeSpec.commonConfig)) } @@ -65,7 +62,8 @@ abstract class FanInThroughputSpec extends RemotingMultiNodeSpec(FanInThroughput override def initialParticipants = roles.size - def remoteSettings = system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].remoteSettings + def remoteSettings = + system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].remoteSettings lazy val reporterExecutor = Executors.newFixedThreadPool(1) def reporter(name: String): TestRateReporter = { @@ -91,34 +89,30 @@ abstract class FanInThroughputSpec extends RemotingMultiNodeSpec(FanInThroughput } val scenarios = List( - TestSettings( - testName = "warmup", - totalMessages = adjustedTotalMessages(20000), - burstSize = 1000, - payloadSize = 100, - senderReceiverPairs = senderReceiverPairs, - realMessage), - TestSettings( - testName = "size-100", - totalMessages = adjustedTotalMessages(50000), - burstSize = 1000, - payloadSize = 100, - senderReceiverPairs = senderReceiverPairs, - realMessage), - TestSettings( - testName = "size-1k", - totalMessages = adjustedTotalMessages(10000), - burstSize = 1000, - payloadSize = 1000, - senderReceiverPairs = senderReceiverPairs, - realMessage), - TestSettings( - testName = "size-10k", - totalMessages = adjustedTotalMessages(2000), - burstSize = 1000, - payloadSize = 10000, - senderReceiverPairs = senderReceiverPairs, - realMessage)) + TestSettings(testName = "warmup", + totalMessages = adjustedTotalMessages(20000), + burstSize = 1000, + payloadSize = 100, + senderReceiverPairs = senderReceiverPairs, + realMessage), + TestSettings(testName = "size-100", + totalMessages = adjustedTotalMessages(50000), + burstSize = 1000, + payloadSize = 100, + senderReceiverPairs = senderReceiverPairs, + realMessage), + TestSettings(testName = "size-1k", + totalMessages = adjustedTotalMessages(10000), + burstSize = 1000, + payloadSize = 1000, + 
senderReceiverPairs = senderReceiverPairs, + realMessage), + TestSettings(testName = "size-10k", + totalMessages = adjustedTotalMessages(2000), + burstSize = 1000, + payloadSize = 10000, + senderReceiverPairs = senderReceiverPairs, + realMessage)) def test(testSettings: TestSettings, resultReporter: BenchmarkFileReporter): Unit = { import testSettings._ @@ -131,9 +125,8 @@ abstract class FanInThroughputSpec extends RemotingMultiNodeSpec(FanInThroughput runOn(roles.head) { val rep = reporter(testName) val receivers = (1 to sendingNodes.size).map { n => - system.actorOf( - receiverProps(rep, payloadSize, printTaskRunnerMetrics = n == 1, senderReceiverPairs), - receiverName + "-" + n) + system.actorOf(receiverProps(rep, payloadSize, printTaskRunnerMetrics = n == 1, senderReceiverPairs), + receiverName + "-" + n) } enterBarrier(receiverName + "-started") enterBarrier(testName + "-done") @@ -144,16 +137,22 @@ abstract class FanInThroughputSpec extends RemotingMultiNodeSpec(FanInThroughput runOn(sendingNodes: _*) { enterBarrier(receiverName + "-started") val ignore = TestProbe() - val receivers = (1 to sendingNodes.size).map { n => - identifyReceiver(receiverName + "-" + n, roles.head) - }.toArray[Target] + val receivers = (1 to sendingNodes.size) + .map { n => + identifyReceiver(receiverName + "-" + n, roles.head) + } + .toArray[Target] val idx = roles.indexOf(myself) - 1 val receiver = receivers(idx) val plotProbe = TestProbe() - val snd = system.actorOf( - senderProps(receiver, receivers, testSettings, plotProbe.ref, printTaskRunnerMetrics = idx == 0, resultReporter), - testName + "-snd" + idx) + val snd = system.actorOf(senderProps(receiver, + receivers, + testSettings, + plotProbe.ref, + printTaskRunnerMetrics = idx == 0, + resultReporter), + testName + "-snd" + idx) val terminationProbe = TestProbe() terminationProbe.watch(snd) snd ! 
Run diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala index 3ac7ee395c..4e4bc3f6c5 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/FanOutThrougputSpec.scala @@ -28,15 +28,12 @@ object FanOutThroughputSpec extends MultiNodeConfig { val barrierTimeout = 5.minutes - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" # for serious measurements you should increase the totalMessagesFactor (20) akka.test.FanOutThroughputSpec.totalMessagesFactor = 10.0 akka.test.FanOutThroughputSpec.real-message = off akka.test.FanOutThroughputSpec.actor-selection = off - """)) - .withFallback(MaxThroughputSpec.cfg) - .withFallback(RemotingMultiNodeSpec.commonConfig)) + """)).withFallback(MaxThroughputSpec.cfg).withFallback(RemotingMultiNodeSpec.commonConfig)) } @@ -62,7 +59,8 @@ abstract class FanOutThroughputSpec extends RemotingMultiNodeSpec(FanOutThroughp override def initialParticipants = roles.size - def remoteSettings = system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].remoteSettings + def remoteSettings = + system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].remoteSettings lazy val reporterExecutor = Executors.newFixedThreadPool(1) def reporter(name: String): TestRateReporter = { @@ -90,34 +88,30 @@ abstract class FanOutThroughputSpec extends RemotingMultiNodeSpec(FanOutThroughp // each sender may have 3 bursts in flight val burstSize = 3000 / senderReceiverPairs / 3 val scenarios = List( - TestSettings( - testName = "warmup", - totalMessages = adjustedTotalMessages(20000), - burstSize = burstSize, - payloadSize = 100, - senderReceiverPairs = 
senderReceiverPairs, - realMessage), - TestSettings( - testName = "size-100", - totalMessages = adjustedTotalMessages(50000), - burstSize = burstSize, - payloadSize = 100, - senderReceiverPairs = senderReceiverPairs, - realMessage), - TestSettings( - testName = "size-1k", - totalMessages = adjustedTotalMessages(10000), - burstSize = burstSize, - payloadSize = 1000, - senderReceiverPairs = senderReceiverPairs, - realMessage), - TestSettings( - testName = "size-10k", - totalMessages = adjustedTotalMessages(2000), - burstSize = burstSize, - payloadSize = 10000, - senderReceiverPairs = senderReceiverPairs, - realMessage)) + TestSettings(testName = "warmup", + totalMessages = adjustedTotalMessages(20000), + burstSize = burstSize, + payloadSize = 100, + senderReceiverPairs = senderReceiverPairs, + realMessage), + TestSettings(testName = "size-100", + totalMessages = adjustedTotalMessages(50000), + burstSize = burstSize, + payloadSize = 100, + senderReceiverPairs = senderReceiverPairs, + realMessage), + TestSettings(testName = "size-1k", + totalMessages = adjustedTotalMessages(10000), + burstSize = burstSize, + payloadSize = 1000, + senderReceiverPairs = senderReceiverPairs, + realMessage), + TestSettings(testName = "size-10k", + totalMessages = adjustedTotalMessages(2000), + burstSize = burstSize, + payloadSize = 10000, + senderReceiverPairs = senderReceiverPairs, + realMessage)) def test(testSettings: TestSettings, resultReporter: BenchmarkFileReporter): Unit = { import testSettings._ @@ -129,9 +123,8 @@ abstract class FanOutThroughputSpec extends RemotingMultiNodeSpec(FanOutThroughp runOn(targetNodes: _*) { val rep = reporter(testName) - val receiver = system.actorOf( - receiverProps(rep, payloadSize, printTaskRunnerMetrics = true, senderReceiverPairs), - receiverName) + val receiver = system.actorOf(receiverProps(rep, payloadSize, printTaskRunnerMetrics = true, senderReceiverPairs), + receiverName) enterBarrier(receiverName + "-started") enterBarrier(testName + 
"-done") receiver ! PoisonPill @@ -145,9 +138,13 @@ abstract class FanOutThroughputSpec extends RemotingMultiNodeSpec(FanOutThroughp val senders = for ((target, i) <- targetNodes.zipWithIndex) yield { val receiver = receivers(i) val plotProbe = TestProbe() - val snd = system.actorOf( - senderProps(receiver, receivers, testSettings, plotProbe.ref, printTaskRunnerMetrics = i == 0, resultReporter), - testName + "-snd" + (i + 1)) + val snd = system.actorOf(senderProps(receiver, + receivers, + testSettings, + plotProbe.ref, + printTaskRunnerMetrics = i == 0, + resultReporter), + testName + "-snd" + (i + 1)) val terminationProbe = TestProbe() terminationProbe.watch(snd) snd ! Run diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala index 7a2a523f94..5a6fc2dcaf 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/HandshakeRestartReceiverSpec.scala @@ -20,8 +20,7 @@ object HandshakeRestartReceiverSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka { loglevel = INFO actor.provider = remote @@ -44,8 +43,9 @@ class HandshakeRestartReceiverSpecMultiJvmNode1 extends HandshakeRestartReceiver class HandshakeRestartReceiverSpecMultiJvmNode2 extends HandshakeRestartReceiverSpec abstract class HandshakeRestartReceiverSpec - extends MultiNodeSpec(HandshakeRestartReceiverSpec) - with STMultiNodeSpec with ImplicitSender { + extends MultiNodeSpec(HandshakeRestartReceiverSpec) + with STMultiNodeSpec + with ImplicitSender { import HandshakeRestartReceiverSpec._ @@ -55,7 +55,9 @@ abstract class HandshakeRestartReceiverSpec 
super.afterAll() } - def identifyWithUid(rootPath: ActorPath, actorName: String, timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = { + def identifyWithUid(rootPath: ActorPath, + actorName: String, + timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = { within(timeout) { system.actorSelection(rootPath / "user" / actorName) ! "identify" expectMsgType[(Long, ActorRef)] @@ -104,7 +106,8 @@ abstract class HandshakeRestartReceiverSpec Await.result(system.whenTerminated, 10.seconds) - val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" + val freshSystem = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) freshSystem.actorOf(Props[Subject], "subject2") diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala index fc9b541f73..028a5a938d 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/LatencySpec.scala @@ -26,8 +26,7 @@ object LatencySpec extends MultiNodeConfig { val barrierTimeout = 5.minutes - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" # for serious measurements you should increase the totalMessagesFactor (30) and repeatCount (3) akka.test.LatencySpec.totalMessagesFactor = 1.0 akka.test.LatencySpec.repeatCount = 1 @@ -83,12 +82,23 @@ object LatencySpec extends MultiNodeConfig { } } - def receiverProps(reporter: RateReporter, settings: TestSettings, totalMessages: Int, - sendTimes: AtomicLongArray, histogram: Histogram, plotsRef: ActorRef, BenchmarkFileReporter: BenchmarkFileReporter): Props = + def receiverProps(reporter: RateReporter, + settings: TestSettings, + totalMessages: Int, + 
sendTimes: AtomicLongArray, + histogram: Histogram, + plotsRef: ActorRef, + BenchmarkFileReporter: BenchmarkFileReporter): Props = Props(new Receiver(reporter, settings, totalMessages, sendTimes, histogram, plotsRef, BenchmarkFileReporter)) - class Receiver(reporter: RateReporter, settings: TestSettings, totalMessages: Int, - sendTimes: AtomicLongArray, histogram: Histogram, plotsRef: ActorRef, BenchmarkFileReporter: BenchmarkFileReporter) extends Actor { + class Receiver(reporter: RateReporter, + settings: TestSettings, + totalMessages: Int, + sendTimes: AtomicLongArray, + histogram: Histogram, + plotsRef: ActorRef, + BenchmarkFileReporter: BenchmarkFileReporter) + extends Actor { import settings._ var count = 0 @@ -128,11 +138,16 @@ object LatencySpec extends MultiNodeConfig { } } - def printTotal(testName: String, payloadSize: Long, histogram: Histogram, totalDurationNanos: Long, reporter: BenchmarkFileReporter): Unit = { + def printTotal(testName: String, + payloadSize: Long, + histogram: Histogram, + totalDurationNanos: Long, + reporter: BenchmarkFileReporter): Unit = { def percentile(p: Double): Double = histogram.getValueAtPercentile(p) / 1000.0 val throughput = 1000.0 * histogram.getTotalCount / math.max(1, totalDurationNanos.nanos.toMillis) - reporter.reportResults(s"=== ${reporter.testName} $testName: RTT " + + reporter.reportResults( + s"=== ${reporter.testName} $testName: RTT " + f"50%%ile: ${percentile(50.0)}%.0f µs, " + f"90%%ile: ${percentile(90.0)}%.0f µs, " + f"99%%ile: ${percentile(99.0)}%.0f µs, " + @@ -142,28 +157,25 @@ object LatencySpec extends MultiNodeConfig { taskRunnerMetrics.printHistograms() - val plots = LatencyPlots( - PlotResult().add(testName, percentile(50.0)), - PlotResult().add(testName, percentile(90.0)), - PlotResult().add(testName, percentile(99.0))) + val plots = LatencyPlots(PlotResult().add(testName, percentile(50.0)), + PlotResult().add(testName, percentile(90.0)), + PlotResult().add(testName, percentile(99.0))) plotsRef ! 
plots } } - final case class TestSettings( - testName: String, - messageRate: Int, // msg/s - payloadSize: Int, - repeat: Int, - realMessage: Boolean) + final case class TestSettings(testName: String, + messageRate: Int, // msg/s + payloadSize: Int, + repeat: Int, + realMessage: Boolean) } class LatencySpecMultiJvmNode1 extends LatencySpec class LatencySpecMultiJvmNode2 extends LatencySpec -abstract class LatencySpec - extends RemotingMultiNodeSpec(LatencySpec) { +abstract class LatencySpec extends RemotingMultiNodeSpec(LatencySpec) { import LatencySpec._ @@ -201,42 +213,32 @@ abstract class LatencySpec } val scenarios = List( - TestSettings( - testName = "warmup", - messageRate = 10000, - payloadSize = 100, - repeat = repeatCount, - realMessage), - TestSettings( - testName = "rate-100-size-100", - messageRate = 100, - payloadSize = 100, - repeat = repeatCount, - realMessage), - TestSettings( - testName = "rate-1000-size-100", - messageRate = 1000, - payloadSize = 100, - repeat = repeatCount, - realMessage), - TestSettings( - testName = "rate-10000-size-100", - messageRate = 10000, - payloadSize = 100, - repeat = repeatCount, - realMessage), - TestSettings( - testName = "rate-20000-size-100", - messageRate = 20000, - payloadSize = 100, - repeat = repeatCount, - realMessage), - TestSettings( - testName = "rate-1000-size-1k", - messageRate = 1000, - payloadSize = 1000, - repeat = repeatCount, - realMessage)) + TestSettings(testName = "warmup", messageRate = 10000, payloadSize = 100, repeat = repeatCount, realMessage), + TestSettings(testName = "rate-100-size-100", + messageRate = 100, + payloadSize = 100, + repeat = repeatCount, + realMessage), + TestSettings(testName = "rate-1000-size-100", + messageRate = 1000, + payloadSize = 100, + repeat = repeatCount, + realMessage), + TestSettings(testName = "rate-10000-size-100", + messageRate = 10000, + payloadSize = 100, + repeat = repeatCount, + realMessage), + TestSettings(testName = "rate-20000-size-100", + messageRate = 
20000, + payloadSize = 100, + repeat = repeatCount, + realMessage), + TestSettings(testName = "rate-1000-size-1k", + messageRate = 1000, + payloadSize = 1000, + repeat = repeatCount, + realMessage)) def test(testSettings: TestSettings, BenchmarkFileReporter: BenchmarkFileReporter): Unit = { import testSettings._ @@ -265,14 +267,13 @@ abstract class LatencySpec echo ! Reset expectMsg(Reset) histogram.reset() - val receiver = system.actorOf(receiverProps(rep, testSettings, totalMessages, sendTimes, histogram, plotProbe.ref, BenchmarkFileReporter)) + val receiver = system.actorOf( + receiverProps(rep, testSettings, totalMessages, sendTimes, histogram, plotProbe.ref, BenchmarkFileReporter)) // warmup for 3 seconds to init compression - val warmup = Source(1 to 30) - .throttle(10, 1.second, 10, ThrottleMode.Shaping) - .runForeach { n => - echo.tell(Array.emptyByteArray, receiver) - } + val warmup = Source(1 to 30).throttle(10, 1.second, 10, ThrottleMode.Shaping).runForeach { n => + echo.tell(Array.emptyByteArray, receiver) + } warmup.foreach { _ => var i = 0 @@ -289,13 +290,12 @@ abstract class LatencySpec val msg = if (testSettings.realMessage) - TestMessage( - id = i, - name = "abc", - status = i % 2 == 0, - description = "ABC", - payload = payload, - items = Vector(TestMessage.Item(1, "A"), TestMessage.Item(2, "B"))) + TestMessage(id = i, + name = "abc", + status = i % 2 == 0, + description = "ABC", + payload = payload, + items = Vector(TestMessage.Item(1, "A"), TestMessage.Item(2, "B"))) else payload echo.tell(payload, receiver) @@ -315,10 +315,9 @@ abstract class LatencySpec val p = plotProbe.expectMsgType[LatencyPlots] // only use the last repeat for the plots if (n == repeat) { - plots = plots.copy( - plot50 = plots.plot50.addAll(p.plot50), - plot90 = plots.plot90.addAll(p.plot90), - plot99 = plots.plot99.addAll(p.plot99)) + plots = plots.copy(plot50 = plots.plot50.addAll(p.plot50), + plot90 = plots.plot90.addAll(p.plot90), + plot99 = 
plots.plot99.addAll(p.plot99)) } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala index 572f1757c8..677d614d0f 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/MaxThroughputSpec.scala @@ -84,8 +84,7 @@ object MaxThroughputSpec extends MultiNodeConfig { } """) - commonConfig(debugConfig(on = false).withFallback( - cfg).withFallback(RemotingMultiNodeSpec.commonConfig)) + commonConfig(debugConfig(on = false).withFallback(cfg).withFallback(RemotingMultiNodeSpec.commonConfig)) case object Run sealed trait Echo extends DeadLetterSuppression with JavaSerializable @@ -109,13 +108,16 @@ object MaxThroughputSpec extends MultiNodeConfig { } def receiverProps(reporter: RateReporter, payloadSize: Int, printTaskRunnerMetrics: Boolean, numSenders: Int): Props = - Props(new Receiver(reporter, payloadSize, printTaskRunnerMetrics, numSenders)).withDispatcher("akka.remote.default-remote-dispatcher") + Props(new Receiver(reporter, payloadSize, printTaskRunnerMetrics, numSenders)) + .withDispatcher("akka.remote.default-remote-dispatcher") - class Receiver(reporter: RateReporter, payloadSize: Int, printTaskRunnerMetrics: Boolean, numSenders: Int) extends Actor { + class Receiver(reporter: RateReporter, payloadSize: Int, printTaskRunnerMetrics: Boolean, numSenders: Int) + extends Actor { private var c = 0L private val taskRunnerMetrics = new TaskRunnerMetrics(context.system) private var endMessagesMissing = numSenders - private var correspondingSender: ActorRef = null // the Actor which send the Start message will also receive the report + private var correspondingSender + : ActorRef = null // the Actor which send the Start message will also receive the report def receive = { case msg: Array[Byte] => @@ -148,12 +150,21 @@ object MaxThroughputSpec extends 
MultiNodeConfig { } } - def senderProps(mainTarget: Target, targets: Array[Target], testSettings: TestSettings, plotRef: ActorRef, - printTaskRunnerMetrics: Boolean, reporter: BenchmarkFileReporter): Props = + def senderProps(mainTarget: Target, + targets: Array[Target], + testSettings: TestSettings, + plotRef: ActorRef, + printTaskRunnerMetrics: Boolean, + reporter: BenchmarkFileReporter): Props = Props(new Sender(mainTarget, targets, testSettings, plotRef, printTaskRunnerMetrics, reporter)) - class Sender(target: Target, targets: Array[Target], testSettings: TestSettings, plotRef: ActorRef, printTaskRunnerMetrics: Boolean, reporter: BenchmarkFileReporter) - extends Actor { + class Sender(target: Target, + targets: Array[Target], + testSettings: TestSettings, + plotRef: ActorRef, + printTaskRunnerMetrics: Boolean, + reporter: BenchmarkFileReporter) + extends Actor { val numTargets = targets.size import testSettings._ @@ -170,7 +181,7 @@ object MaxThroughputSpec extends MultiNodeConfig { val compressionEnabled = RARP(context.system).provider.transport.isInstanceOf[ArteryTransport] && - RARP(context.system).provider.remoteSettings.Artery.Enabled + RARP(context.system).provider.remoteSettings.Artery.Enabled def receive = { case Run => @@ -204,7 +215,8 @@ object MaxThroughputSpec extends MultiNodeConfig { def warmup: Receive = { case Start => - println(s"${self.path.name}: Starting benchmark of $totalMessages messages with burst size " + + println( + s"${self.path.name}: Starting benchmark of $totalMessages messages with burst size " + s"$burstSize and payload size $payloadSize") startTime = System.nanoTime remaining = totalMessages @@ -243,18 +255,17 @@ object MaxThroughputSpec extends MultiNodeConfig { val took = NANOSECONDS.toMillis(System.nanoTime - startTime) val throughput = (totalReceived * 1000.0 / took) - reporter.reportResults( - s"=== ${reporter.testName} ${self.path.name}: " + - f"throughput ${throughput * testSettings.senderReceiverPairs}%,.0f msg/s, " + 
- f"${throughput * payloadSize * testSettings.senderReceiverPairs}%,.0f bytes/s (payload), " + - f"${throughput * totalSize(context.system) * testSettings.senderReceiverPairs}%,.0f bytes/s (total" + - (if (RARP(context.system).provider.remoteSettings.Artery.Advanced.Compression.Enabled) ",compression" else "") + "), " + - (if (testSettings.senderReceiverPairs == 1) s"dropped ${totalMessages - totalReceived}, " else "") + - s"max round-trip $maxRoundTripMillis ms, " + - s"burst size $burstSize, " + - s"payload size $payloadSize, " + - s"total size ${totalSize(context.system)}, " + - s"$took ms to deliver $totalReceived messages.") + reporter.reportResults(s"=== ${reporter.testName} ${self.path.name}: " + + f"throughput ${throughput * testSettings.senderReceiverPairs}%,.0f msg/s, " + + f"${throughput * payloadSize * testSettings.senderReceiverPairs}%,.0f bytes/s (payload), " + + f"${throughput * totalSize(context.system) * testSettings.senderReceiverPairs}%,.0f bytes/s (total" + + (if (RARP(context.system).provider.remoteSettings.Artery.Advanced.Compression.Enabled) ",compression" else "") + "), " + + (if (testSettings.senderReceiverPairs == 1) s"dropped ${totalMessages - totalReceived}, " else "") + + s"max round-trip $maxRoundTripMillis ms, " + + s"burst size $burstSize, " + + s"payload size $payloadSize, " + + s"total size ${totalSize(context.system)}, " + + s"$took ms to deliver $totalReceived messages.") if (printTaskRunnerMetrics) taskRunnerMetrics.printHistograms() @@ -272,13 +283,12 @@ object MaxThroughputSpec extends MultiNodeConfig { while (i < batchSize) { val msg0 = if (realMessage) - TestMessage( - id = totalMessages - remaining + i, - name = "abc", - status = i % 2 == 0, - description = "ABC", - payload = payload, - items = Vector(TestMessage.Item(1, "A"), TestMessage.Item(2, "B"))) + TestMessage(id = totalMessages - remaining + i, + name = "abc", + status = i % 2 == 0, + description = "ABC", + payload = payload, + items = Vector(TestMessage.Item(1, 
"A"), TestMessage.Item(2, "B"))) else payload val msg1 = if (warmup) Warmup(msg0) else msg0 @@ -303,15 +313,15 @@ object MaxThroughputSpec extends MultiNodeConfig { } } - final case class TestSettings( - testName: String, - totalMessages: Long, - burstSize: Int, - payloadSize: Int, - senderReceiverPairs: Int, - realMessage: Boolean) { + final case class TestSettings(testName: String, + totalMessages: Long, + burstSize: Int, + payloadSize: Int, + senderReceiverPairs: Int, + realMessage: Boolean) { // data based on measurement - def totalSize(system: ActorSystem) = payloadSize + (if (RARP(system).provider.remoteSettings.Artery.Advanced.Compression.Enabled) 38 else 110) + def totalSize(system: ActorSystem) = + payloadSize + (if (RARP(system).provider.remoteSettings.Artery.Advanced.Compression.Enabled) 38 else 110) } class TestSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest with ByteBufferSerializer { @@ -370,7 +380,8 @@ abstract class MaxThroughputSpec extends RemotingMultiNodeSpec(MaxThroughputSpec override def initialParticipants = roles.size - def remoteSettings = system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].remoteSettings + def remoteSettings = + system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].remoteSettings lazy val reporterExecutor = Executors.newFixedThreadPool(1) def reporter(name: String): TestRateReporter = { @@ -396,41 +407,36 @@ abstract class MaxThroughputSpec extends RemotingMultiNodeSpec(MaxThroughputSpec } val scenarios = List( - TestSettings( - testName = "warmup", - totalMessages = adjustedTotalMessages(20000), - burstSize = 1000, - payloadSize = 100, - senderReceiverPairs = 1, - realMessage), - TestSettings( - testName = "1-to-1", - totalMessages = adjustedTotalMessages(50000), - burstSize = 1000, - payloadSize = 100, - senderReceiverPairs = 1, - realMessage), - TestSettings( - testName = "1-to-1-size-1k", - totalMessages = 
adjustedTotalMessages(20000), - burstSize = 1000, - payloadSize = 1000, - senderReceiverPairs = 1, - realMessage), - TestSettings( - testName = "1-to-1-size-10k", - totalMessages = adjustedTotalMessages(5000), - burstSize = 1000, - payloadSize = 10000, - senderReceiverPairs = 1, - realMessage), - TestSettings( - testName = "5-to-5", - totalMessages = adjustedTotalMessages(20000), - burstSize = 200, // don't exceed the send queue capacity 200*5*3=3000 - payloadSize = 100, - senderReceiverPairs = 5, - realMessage)) + TestSettings(testName = "warmup", + totalMessages = adjustedTotalMessages(20000), + burstSize = 1000, + payloadSize = 100, + senderReceiverPairs = 1, + realMessage), + TestSettings(testName = "1-to-1", + totalMessages = adjustedTotalMessages(50000), + burstSize = 1000, + payloadSize = 100, + senderReceiverPairs = 1, + realMessage), + TestSettings(testName = "1-to-1-size-1k", + totalMessages = adjustedTotalMessages(20000), + burstSize = 1000, + payloadSize = 1000, + senderReceiverPairs = 1, + realMessage), + TestSettings(testName = "1-to-1-size-10k", + totalMessages = adjustedTotalMessages(5000), + burstSize = 1000, + payloadSize = 10000, + senderReceiverPairs = 1, + realMessage), + TestSettings(testName = "5-to-5", + totalMessages = adjustedTotalMessages(20000), + burstSize = 200, // don't exceed the send queue capacity 200*5*3=3000 + payloadSize = 100, + senderReceiverPairs = 5, + realMessage)) def test(testSettings: TestSettings, resultReporter: BenchmarkFileReporter): Unit = { import testSettings._ @@ -441,9 +447,8 @@ abstract class MaxThroughputSpec extends RemotingMultiNodeSpec(MaxThroughputSpec runOn(second) { val rep = reporter(testName) val receivers = (1 to senderReceiverPairs).map { n => - system.actorOf( - receiverProps(rep, payloadSize, printTaskRunnerMetrics = n == 1, senderReceiverPairs), - receiverName + n) + system.actorOf(receiverProps(rep, payloadSize, printTaskRunnerMetrics = n == 1, senderReceiverPairs), + receiverName + n) } 
enterBarrier(receiverName + "-started") enterBarrier(testName + "-done") @@ -458,9 +463,13 @@ abstract class MaxThroughputSpec extends RemotingMultiNodeSpec(MaxThroughputSpec val senders = for (n <- 1 to senderReceiverPairs) yield { val receiver = receivers(n - 1) val plotProbe = TestProbe() - val snd = system.actorOf( - senderProps(receiver, receivers, testSettings, plotProbe.ref, printTaskRunnerMetrics = n == 1, resultReporter), - testName + "-snd" + n) + val snd = system.actorOf(senderProps(receiver, + receivers, + testSettings, + plotProbe.ref, + printTaskRunnerMetrics = n == 1, + resultReporter), + testName + "-snd" + n) val terminationProbe = TestProbe() terminationProbe.watch(snd) snd ! Run diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/PlotResult.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/PlotResult.scala index ef6f27dbe7..8251137c90 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/PlotResult.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/PlotResult.scala @@ -21,4 +21,6 @@ final case class PlotResult(values: Vector[(String, Number)] = Vector.empty) { } -final case class LatencyPlots(plot50: PlotResult = PlotResult(), plot90: PlotResult = PlotResult(), plot99: PlotResult = PlotResult()) +final case class LatencyPlots(plot50: PlotResult = PlotResult(), + plot90: PlotResult = PlotResult(), + plot99: PlotResult = PlotResult()) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala index a3866b53a9..26ec6e6fd4 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/RemoteRestartedQuarantinedSpec.scala @@ -19,8 +19,8 @@ object RemoteRestartedQuarantinedSpec extends MultiNodeConfig { val first = role("first") val second 
= role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false).withFallback(ConfigFactory.parseString(""" akka.loglevel = WARNING akka.remote.log-remote-lifecycle-events = WARNING akka.remote.artery.enabled = on @@ -44,7 +44,9 @@ abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(Remo override def initialParticipants = 2 - def identifyWithUid(role: RoleName, actorName: String, timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = { + def identifyWithUid(role: RoleName, + actorName: String, + timeout: FiniteDuration = remainingOrDefault): (Long, ActorRef) = { within(timeout) { system.actorSelection(node(role) / "user" / actorName) ! "identify" expectMsgType[(Long, ActorRef)] @@ -106,13 +108,16 @@ abstract class RemoteRestartedQuarantinedSpec extends RemotingMultiNodeSpec(Remo Await.result(system.whenTerminated, 10.seconds) - val freshSystem = ActorSystem(system.name, ConfigFactory.parseString(s""" + val freshSystem = ActorSystem(system.name, + ConfigFactory.parseString(s""" akka.remote.artery.canonical.port = ${address.port.get} """).withFallback(system.settings.config)) val probe = TestProbe()(freshSystem) - freshSystem.actorSelection(RootActorPath(firstAddress) / "user" / "subject").tell(Identify("subject"), probe.ref) + freshSystem + .actorSelection(RootActorPath(firstAddress) / "user" / "subject") + .tell(Identify("subject"), probe.ref) probe.expectMsgType[ActorIdentity](5.seconds).ref should not be (None) // Now the other system will be able to pass, too diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala index 437640d886..44c0edfd27 100644 --- 
a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveInboundStreamRestartWithCompressionInFlightSpec.scala @@ -20,9 +20,9 @@ object SurviveInboundStreamRestartWithCompressionInFlightSpec extends MultiNodeC val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString( - """ + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" akka.loglevel = INFO akka.remote.artery { enabled = on @@ -33,7 +33,8 @@ object SurviveInboundStreamRestartWithCompressionInFlightSpec extends MultiNodeC compression.manifests.advertisement-interval = 1 minute } } - """)).withFallback(RemotingMultiNodeSpec.commonConfig)) + """)) + .withFallback(RemotingMultiNodeSpec.commonConfig)) testTransport(on = true) @@ -57,13 +58,16 @@ object SurviveInboundStreamRestartWithCompressionInFlightSpec extends MultiNodeC } -class SurviveInboundStreamRestartWithCompressionInFlightSpecMultiJvmNode1 extends SurviveInboundStreamRestartWithCompressionInFlightSpec +class SurviveInboundStreamRestartWithCompressionInFlightSpecMultiJvmNode1 + extends SurviveInboundStreamRestartWithCompressionInFlightSpec -class SurviveInboundStreamRestartWithCompressionInFlightSpecMultiJvmNode2 extends SurviveInboundStreamRestartWithCompressionInFlightSpec +class SurviveInboundStreamRestartWithCompressionInFlightSpecMultiJvmNode2 + extends SurviveInboundStreamRestartWithCompressionInFlightSpec -abstract class SurviveInboundStreamRestartWithCompressionInFlightSpec extends RemotingMultiNodeSpec(SurviveInboundStreamRestartWithCompressionInFlightSpec) - with ImplicitSender - with ScalaFutures { +abstract class SurviveInboundStreamRestartWithCompressionInFlightSpec + extends RemotingMultiNodeSpec(SurviveInboundStreamRestartWithCompressionInFlightSpec) + with ImplicitSender + with ScalaFutures { 
import SurviveInboundStreamRestartWithCompressionInFlightSpec._ @@ -93,10 +97,14 @@ abstract class SurviveInboundStreamRestartWithCompressionInFlightSpec extends Re val sendToB = expectMsgType[ActorIdentity].ref.get runOn(second) { - 1 to 100 foreach { i => pingPong(sendToA, s"a$i") } + (1 to 100).foreach { i => + pingPong(sendToA, s"a$i") + } info("done sending to A, first round") - 1 to 100 foreach { i => pingPong(sendToB, s"a$i") } + (1 to 100).foreach { i => + pingPong(sendToB, s"a$i") + } info("done sending to B, first round") } enterBarrier("sender-started") @@ -120,17 +128,19 @@ abstract class SurviveInboundStreamRestartWithCompressionInFlightSpec extends Re Thread.sleep(2000) // we poke the remote system, awaiting its inbound stream recovery, then it should reply - awaitAssert( - { - sendToB ! "alive-again" - expectMsg(300.millis, s"${sendToB.path.name}-alive-again") - }, - max = 5.seconds, interval = 500.millis) + awaitAssert({ + sendToB ! "alive-again" + expectMsg(300.millis, s"${sendToB.path.name}-alive-again") + }, max = 5.seconds, interval = 500.millis) // we continue sending messages using the "old table". 
// if a new table was being built, it would cause the b to be compressed as 1 causing a wrong reply to come back - 1 to 100 foreach { i => pingPong(sendToB, s"b$i") } - 1 to 100 foreach { i => pingPong(sendToA, s"a$i") } + (1 to 100).foreach { i => + pingPong(sendToB, s"b$i") + } + (1 to 100).foreach { i => + pingPong(sendToA, s"a$i") + } info("received correct replies from restarted system!") } @@ -148,4 +158,3 @@ abstract class SurviveInboundStreamRestartWithCompressionInFlightSpec extends Re expectMsg(s"${target.path.name}-$msg") } } - diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveNetworkPartitionSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveNetworkPartitionSpec.scala index 679dec1e09..d64db72e3b 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveNetworkPartitionSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/SurviveNetworkPartitionSpec.scala @@ -8,7 +8,7 @@ import scala.concurrent.duration._ import akka.actor._ import akka.actor.ActorIdentity import akka.actor.Identify -import akka.remote.{ RemotingMultiNodeSpec, QuarantinedEvent, RARP } +import akka.remote.{ QuarantinedEvent, RARP, RemotingMultiNodeSpec } import akka.remote.testkit.MultiNodeConfig import akka.remote.transport.ThrottlerTransportAdapter.Direction import akka.testkit._ @@ -18,12 +18,14 @@ object SurviveNetworkPartitionSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(""" + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" akka.loglevel = INFO akka.remote.artery.enabled = on akka.remote.artery.advanced.give-up-system-message-after = 4s - """)).withFallback(RemotingMultiNodeSpec.commonConfig)) + """)) + .withFallback(RemotingMultiNodeSpec.commonConfig)) testTransport(on = true) } diff --git 
a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TaskRunnerMetrics.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TaskRunnerMetrics.scala index d60137d64d..06b825d344 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TaskRunnerMetrics.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TaskRunnerMetrics.scala @@ -45,12 +45,14 @@ class TaskRunnerMetrics(system: ActorSystem) { entryOffset = c if (aeronSourceHistogram.getTotalCount > 0) { - println(s"Histogram of AeronSource tasks in microseconds. Max count before delegate: $aeronSourceMaxBeforeDelegate") + println( + s"Histogram of AeronSource tasks in microseconds. Max count before delegate: $aeronSourceMaxBeforeDelegate") aeronSourceHistogram.outputPercentileDistribution(System.out, 1000.0) } if (aeronSinkHistogram.getTotalCount > 0) { - println(s"Histogram of AeronSink tasks in microseconds. Max count before delegate: $aeronSinkMaxBeforeDelegate") + println( + s"Histogram of AeronSink tasks in microseconds. 
Max count before delegate: $aeronSinkMaxBeforeDelegate") aeronSinkHistogram.outputPercentileDistribution(System.out, 1000.0) } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestMessage.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestMessage.scala index 2b9c5c9e9d..93835d09b2 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestMessage.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestMessage.scala @@ -13,13 +13,12 @@ object TestMessage { final case class Item(id: Long, name: String) } -final case class TestMessage( - id: Long, - name: String, - status: Boolean, - description: String, - payload: Array[Byte], - items: Vector[TestMessage.Item]) +final case class TestMessage(id: Long, + name: String, + status: Boolean, + description: String, + payload: Array[Byte], + items: Vector[TestMessage.Item]) class TestMessageSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest { @@ -34,7 +33,8 @@ class TestMessageSerializer(val system: ExtendedActorSystem) extends SerializerW override def toBinary(o: AnyRef): Array[Byte] = o match { case msg: TestMessage => - val builder = proto.TestMessage.newBuilder() + val builder = proto.TestMessage + .newBuilder() .setId(msg.id) .setName(msg.name) .setDescription(msg.description) @@ -53,12 +53,11 @@ class TestMessageSerializer(val system: ExtendedActorSystem) extends SerializerW TestMessage.Item(item.getId, item.getName) }.toVector - TestMessage( - id = protoMsg.getId, - name = protoMsg.getName, - description = protoMsg.getDescription, - status = protoMsg.getStatus, - payload = protoMsg.getPayload.toByteArray(), - items = items) + TestMessage(id = protoMsg.getId, + name = protoMsg.getName, + description = protoMsg.getDescription, + status = protoMsg.getStatus, + payload = protoMsg.getPayload.toByteArray(), + items = items) } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala 
b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala index 77c0101e1a..b017597bb8 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/TestRateReporter.scala @@ -6,14 +6,15 @@ package akka.remote.artery import java.util.concurrent.TimeUnit.SECONDS -class TestRateReporter(name: String) extends RateReporter( - SECONDS.toNanos(1), - new RateReporter.Reporter { - override def onReport(messagesPerSec: Double, bytesPerSec: Double, totalMessages: Long, totalBytes: Long): Unit = { - println(name + - f": ${messagesPerSec}%,.0f msgs/sec, ${bytesPerSec}%,.0f bytes/sec, " + - f"totals ${totalMessages}%,d messages ${totalBytes / (1024 * 1024)}%,d MB") - } - }) { - -} +class TestRateReporter(name: String) + extends RateReporter(SECONDS.toNanos(1), new RateReporter.Reporter { + override def onReport(messagesPerSec: Double, + bytesPerSec: Double, + totalMessages: Long, + totalBytes: Long): Unit = { + println( + name + + f": ${messagesPerSec}%,.0f msgs/sec, ${bytesPerSec}%,.0f bytes/sec, " + + f"totals ${totalMessages}%,d messages ${totalBytes / (1024 * 1024)}%,d MB") + } + }) {} diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/UdpPortActor.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/UdpPortActor.scala index d3ed23f875..c5d7f1db37 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/UdpPortActor.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/UdpPortActor.scala @@ -18,8 +18,8 @@ object UdpPortActor { class UdpPortActor extends Actor { import UdpPortActor._ - val port = SocketUtil.temporaryServerAddress(RARP(context.system).provider - .getDefaultAddress.host.get, udp = true).getPort + val port = + SocketUtil.temporaryServerAddress(RARP(context.system).provider.getDefaultAddress.host.get, udp = true).getPort def receive = { case GetUdpPort => sender() ! 
port diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamConcistencySpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamConcistencySpec.scala index 26e587a7a7..81de6793ed 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamConcistencySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamConcistencySpec.scala @@ -36,8 +36,7 @@ object AeronStreamConsistencySpec extends MultiNodeConfig { val barrierTimeout = 5.minutes - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka { loglevel = INFO actor { @@ -52,8 +51,9 @@ class AeronStreamConsistencySpecMultiJvmNode1 extends AeronStreamConsistencySpec class AeronStreamConsistencySpecMultiJvmNode2 extends AeronStreamConsistencySpec abstract class AeronStreamConsistencySpec - extends MultiNodeSpec(AeronStreamConsistencySpec) - with STMultiNodeSpec with ImplicitSender { + extends MultiNodeSpec(AeronStreamConsistencySpec) + with STMultiNodeSpec + with ImplicitSender { import AeronStreamConsistencySpec._ @@ -107,8 +107,10 @@ abstract class AeronStreamConsistencySpec "start echo" in { runOn(second) { // just echo back - Source.fromGraph(new AeronSource(channel(second), streamId, aeron, taskRunner, pool, IgnoreEventSink, 0)) - .runWith(new AeronSink(channel(first), streamId, aeron, taskRunner, pool, giveUpMessageAfter, IgnoreEventSink)) + Source + .fromGraph(new AeronSource(channel(second), streamId, aeron, taskRunner, pool, IgnoreEventSink, 0)) + .runWith( + new AeronSink(channel(first), streamId, aeron, taskRunner, pool, giveUpMessageAfter, IgnoreEventSink)) } enterBarrier("echo-started") } @@ -121,7 +123,8 @@ abstract class AeronStreamConsistencySpec val killSwitch = KillSwitches.shared("test") val started = TestProbe() val startMsg = "0".getBytes("utf-8") - 
Source.fromGraph(new AeronSource(channel(first), streamId, aeron, taskRunner, pool, IgnoreEventSink, 0)) + Source + .fromGraph(new AeronSource(channel(first), streamId, aeron, taskRunner, pool, IgnoreEventSink, 0)) .via(killSwitch.flow) .runForeach { envelope => val bytes = ByteString.fromByteBuffer(envelope.byteBuffer) @@ -137,17 +140,21 @@ abstract class AeronStreamConsistencySpec done.countDown() } pool.release(envelope) - }.failed.foreach { _.printStackTrace } + } + .failed + .foreach { _.printStackTrace } within(10.seconds) { - Source(1 to 100).map { _ => - val envelope = pool.acquire() - envelope.byteBuffer.put(startMsg) - envelope.byteBuffer.flip() - envelope - } + Source(1 to 100) + .map { _ => + val envelope = pool.acquire() + envelope.byteBuffer.put(startMsg) + envelope.byteBuffer.flip() + envelope + } .throttle(1, 200.milliseconds, 1, ThrottleMode.Shaping) - .runWith(new AeronSink(channel(second), streamId, aeron, taskRunner, pool, giveUpMessageAfter, IgnoreEventSink)) + .runWith( + new AeronSink(channel(second), streamId, aeron, taskRunner, pool, giveUpMessageAfter, IgnoreEventSink)) started.expectMsg(Done) } @@ -159,7 +166,8 @@ abstract class AeronStreamConsistencySpec envelope.byteBuffer.flip() envelope } - .runWith(new AeronSink(channel(second), streamId, aeron, taskRunner, pool, giveUpMessageAfter, IgnoreEventSink)) + .runWith( + new AeronSink(channel(second), streamId, aeron, taskRunner, pool, giveUpMessageAfter, IgnoreEventSink)) Await.ready(done, 20.seconds) killSwitch.shutdown() diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamLatencySpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamLatencySpec.scala index 8a30a7b8ad..0f5c120065 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamLatencySpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamLatencySpec.scala @@ -43,8 +43,7 @@ object 
AeronStreamLatencySpec extends MultiNodeConfig { val barrierTimeout = 5.minutes - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" # for serious measurements you should increase the totalMessagesFactor (10) and repeatCount (3) akka.test.AeronStreamLatencySpec.totalMessagesFactor = 1.0 akka.test.AeronStreamLatencySpec.repeatCount = 1 @@ -63,11 +62,10 @@ object AeronStreamLatencySpec extends MultiNodeConfig { } """))) - final case class TestSettings( - testName: String, - messageRate: Int, // msg/s - payloadSize: Int, - repeat: Int) + final case class TestSettings(testName: String, + messageRate: Int, // msg/s + payloadSize: Int, + repeat: Int) } @@ -75,8 +73,9 @@ class AeronStreamLatencySpecMultiJvmNode1 extends AeronStreamLatencySpec class AeronStreamLatencySpecMultiJvmNode2 extends AeronStreamLatencySpec abstract class AeronStreamLatencySpec - extends MultiNodeSpec(AeronStreamLatencySpec) - with STMultiNodeSpec with ImplicitSender { + extends MultiNodeSpec(AeronStreamLatencySpec) + with STMultiNodeSpec + with ImplicitSender { import AeronStreamLatencySpec._ @@ -142,11 +141,16 @@ abstract class AeronStreamLatencySpec super.afterAll() } - def printTotal(testName: String, payloadSize: Long, histogram: Histogram, totalDurationNanos: Long, lastRepeat: Boolean): Unit = { + def printTotal(testName: String, + payloadSize: Long, + histogram: Histogram, + totalDurationNanos: Long, + lastRepeat: Boolean): Unit = { def percentile(p: Double): Double = histogram.getValueAtPercentile(p) / 1000.0 val throughput = 1000.0 * histogram.getTotalCount / totalDurationNanos.nanos.toMillis - println(s"=== AeronStreamLatency $testName: RTT " + + println( + s"=== AeronStreamLatency $testName: RTT " + f"50%%ile: ${percentile(50.0)}%.0f µs, " + f"90%%ile: ${percentile(90.0)}%.0f µs, " + f"99%%ile: ${percentile(99.0)}%.0f µs, " + @@ -156,10 +160,9 @@ abstract class 
AeronStreamLatencySpec // only use the last repeat for the plots if (lastRepeat) { - plots = plots.copy( - plot50 = plots.plot50.add(testName, percentile(50.0)), - plot90 = plots.plot90.add(testName, percentile(90.0)), - plot99 = plots.plot99.add(testName, percentile(99.0))) + plots = plots.copy(plot50 = plots.plot50.add(testName, percentile(50.0)), + plot90 = plots.plot90.add(testName, percentile(90.0)), + plot99 = plots.plot99.add(testName, percentile(99.0))) } } @@ -172,31 +175,11 @@ abstract class AeronStreamLatencySpec pending.foreach(system.deadLetters ! _) val scenarios = List( - TestSettings( - testName = "rate-100-size-100", - messageRate = 100, - payloadSize = 100, - repeat = repeatCount), - TestSettings( - testName = "rate-1000-size-100", - messageRate = 1000, - payloadSize = 100, - repeat = repeatCount), - TestSettings( - testName = "rate-10000-size-100", - messageRate = 10000, - payloadSize = 100, - repeat = repeatCount), - TestSettings( - testName = "rate-20000-size-100", - messageRate = 20000, - payloadSize = 100, - repeat = repeatCount), - TestSettings( - testName = "rate-1000-size-1k", - messageRate = 1000, - payloadSize = 1000, - repeat = repeatCount)) + TestSettings(testName = "rate-100-size-100", messageRate = 100, payloadSize = 100, repeat = repeatCount), + TestSettings(testName = "rate-1000-size-100", messageRate = 1000, payloadSize = 100, repeat = repeatCount), + TestSettings(testName = "rate-10000-size-100", messageRate = 10000, payloadSize = 100, repeat = repeatCount), + TestSettings(testName = "rate-20000-size-100", messageRate = 20000, payloadSize = 100, repeat = repeatCount), + TestSettings(testName = "rate-1000-size-1k", messageRate = 1000, payloadSize = 1000, repeat = repeatCount)) def test(testSettings: TestSettings): Unit = { import testSettings._ @@ -216,7 +199,8 @@ abstract class AeronStreamLatencySpec val killSwitch = KillSwitches.shared(testName) val started = TestProbe() val startMsg = "0".getBytes("utf-8") - 
Source.fromGraph(new AeronSource(channel(first), streamId, aeron, taskRunner, pool, IgnoreEventSink, 0)) + Source + .fromGraph(new AeronSource(channel(first), streamId, aeron, taskRunner, pool, IgnoreEventSink, 0)) .via(killSwitch.flow) .runForeach { envelope => val bytes = ByteString.fromByteBuffer(envelope.byteBuffer) @@ -238,14 +222,16 @@ abstract class AeronStreamLatencySpec } within(10.seconds) { - Source(1 to 50).map { _ => - val envelope = pool.acquire() - envelope.byteBuffer.put(startMsg) - envelope.byteBuffer.flip() - envelope - } + Source(1 to 50) + .map { _ => + val envelope = pool.acquire() + envelope.byteBuffer.put(startMsg) + envelope.byteBuffer.flip() + envelope + } .throttle(1, 200.milliseconds, 1, ThrottleMode.Shaping) - .runWith(new AeronSink(channel(second), streamId, aeron, taskRunner, pool, giveUpMessageAfter, IgnoreEventSink)) + .runWith( + new AeronSink(channel(second), streamId, aeron, taskRunner, pool, giveUpMessageAfter, IgnoreEventSink)) started.expectMsg(Done) } @@ -254,15 +240,15 @@ abstract class AeronStreamLatencySpec count.set(0) lastRepeat.set(rep == repeat) - val sendFlow = Flow[Unit] - .map { _ => - val envelope = pool.acquire() - envelope.byteBuffer.put(payload) - envelope.byteBuffer.flip() - envelope - } + val sendFlow = Flow[Unit].map { _ => + val envelope = pool.acquire() + envelope.byteBuffer.put(payload) + envelope.byteBuffer.flip() + envelope + } - val queueValue = Source.fromGraph(new SendQueue[Unit](sendToDeadLetters)) + val queueValue = Source + .fromGraph(new SendQueue[Unit](sendToDeadLetters)) .via(sendFlow) .to(new AeronSink(channel(second), streamId, aeron, taskRunner, pool, giveUpMessageAfter, IgnoreEventSink)) .run() @@ -318,8 +304,10 @@ abstract class AeronStreamLatencySpec "start echo" in { runOn(second) { // just echo back - Source.fromGraph(new AeronSource(channel(second), streamId, aeron, taskRunner, pool, IgnoreEventSink, 0)) - .runWith(new AeronSink(channel(first), streamId, aeron, taskRunner, pool, 
giveUpMessageAfter, IgnoreEventSink)) + Source + .fromGraph(new AeronSource(channel(second), streamId, aeron, taskRunner, pool, IgnoreEventSink, 0)) + .runWith( + new AeronSink(channel(first), streamId, aeron, taskRunner, pool, giveUpMessageAfter, IgnoreEventSink)) } enterBarrier("echo-started") } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamMaxThroughputSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamMaxThroughputSpec.scala index e620a41903..18436b9ac6 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamMaxThroughputSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/artery/aeron/AeronStreamMaxThroughputSpec.scala @@ -36,8 +36,7 @@ object AeronStreamMaxThroughputSpec extends MultiNodeConfig { val barrierTimeout = 5.minutes - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" # for serious measurements you should increase the totalMessagesFactor (20) akka.test.AeronStreamMaxThroughputSpec.totalMessagesFactor = 1.0 akka { @@ -52,10 +51,7 @@ object AeronStreamMaxThroughputSpec extends MultiNodeConfig { } """))) - final case class TestSettings( - testName: String, - totalMessages: Long, - payloadSize: Int) + final case class TestSettings(testName: String, totalMessages: Long, payloadSize: Int) def iterate(start: Long, end: Long): Iterator[Long] = new AbstractIterator[Long] { private[this] var first = true @@ -76,12 +72,14 @@ class AeronStreamMaxThroughputSpecMultiJvmNode1 extends AeronStreamMaxThroughput class AeronStreamMaxThroughputSpecMultiJvmNode2 extends AeronStreamMaxThroughputSpec abstract class AeronStreamMaxThroughputSpec - extends MultiNodeSpec(AeronStreamMaxThroughputSpec) - with STMultiNodeSpec with ImplicitSender { + extends MultiNodeSpec(AeronStreamMaxThroughputSpec) + with STMultiNodeSpec + with ImplicitSender 
{ import AeronStreamMaxThroughputSpec._ - val totalMessagesFactor = system.settings.config.getDouble("akka.test.AeronStreamMaxThroughputSpec.totalMessagesFactor") + val totalMessagesFactor = + system.settings.config.getDouble("akka.test.AeronStreamMaxThroughputSpec.totalMessagesFactor") var plot = PlotResult() @@ -146,7 +144,8 @@ abstract class AeronStreamMaxThroughputSpec def printTotal(testName: String, total: Long, startTime: Long, payloadSize: Long): Unit = { val d = (System.nanoTime - startTime).nanos.toMillis val throughput = 1000.0 * total / d - println(f"=== AeronStreamMaxThroughput $testName: " + + println( + f"=== AeronStreamMaxThroughput $testName: " + f"${throughput}%,.0f msg/s, ${throughput * payloadSize}%,.0f bytes/s, " + s"payload size $payloadSize, " + s"$d ms to deliver $total messages") @@ -159,18 +158,9 @@ abstract class AeronStreamMaxThroughputSpec } val scenarios = List( - TestSettings( - testName = "size-100", - totalMessages = adjustedTotalMessages(1000000), - payloadSize = 100), - TestSettings( - testName = "size-1k", - totalMessages = adjustedTotalMessages(100000), - payloadSize = 1000), - TestSettings( - testName = "size-10k", - totalMessages = adjustedTotalMessages(10000), - payloadSize = 10000)) + TestSettings(testName = "size-100", totalMessages = adjustedTotalMessages(1000000), payloadSize = 100), + TestSettings(testName = "size-1k", totalMessages = adjustedTotalMessages(100000), payloadSize = 1000), + TestSettings(testName = "size-10k", totalMessages = adjustedTotalMessages(10000), payloadSize = 10000)) def test(testSettings: TestSettings): Unit = { import testSettings._ @@ -182,7 +172,8 @@ abstract class AeronStreamMaxThroughputSpec var count = 0L val done = TestLatch(1) val killSwitch = KillSwitches.shared(testName) - Source.fromGraph(new AeronSource(channel(second), streamId, aeron, taskRunner, pool, IgnoreEventSink, 0)) + Source + .fromGraph(new AeronSource(channel(second), streamId, aeron, taskRunner, pool, IgnoreEventSink, 0)) 
.via(killSwitch.flow) .runForeach { envelope => val bytes = ByteString.fromByteBuffer(envelope.byteBuffer) @@ -196,7 +187,9 @@ abstract class AeronStreamMaxThroughputSpec killSwitch.shutdown() } pool.release(envelope) - }.failed.foreach { _.printStackTrace } + } + .failed + .foreach { _.printStackTrace } enterBarrier(receiverName + "-started") Await.ready(done, barrierTimeout) @@ -210,7 +203,8 @@ abstract class AeronStreamMaxThroughputSpec val payload = ("0" * payloadSize).getBytes("utf-8") val t0 = System.nanoTime() - Source.fromIterator(() => iterate(1, totalMessages)) + Source + .fromIterator(() => iterate(1, totalMessages)) .map { n => val envelope = pool.acquire() envelope.byteBuffer.put(payload) diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala index 1136b0c9eb..bfd71a91c4 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRandomSpec.scala @@ -25,8 +25,7 @@ class RemoteRandomConfig(artery: Boolean) extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.remote.artery.enabled = $artery """)).withFallback(RemotingMultiNodeSpec.commonConfig)) @@ -57,8 +56,9 @@ object RemoteRandomSpec { } } -class RemoteRandomSpec(multiNodeConfig: RemoteRandomConfig) extends RemotingMultiNodeSpec(multiNodeConfig) - with DefaultTimeout { +class RemoteRandomSpec(multiNodeConfig: RemoteRandomConfig) + extends RemotingMultiNodeSpec(multiNodeConfig) + with DefaultTimeout { import multiNodeConfig._ import RemoteRandomSpec._ @@ -94,7 +94,7 @@ class RemoteRandomSpec(multiNodeConfig: RemoteRandomConfig) extends RemotingMult enterBarrier("end") // since it's 
random we can't be too strict in the assert - replies.values count (_ > 0) should be > (connectionCount - 2) + replies.values.count(_ > 0) should be > (connectionCount - 2) replies.get(node(fourth).address) should ===(None) // shut down the actor before we let the other node(s) shut down so we don't try to send diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala index ef02567e6c..5855acfc4a 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteRoundRobinSpec.scala @@ -24,8 +24,7 @@ class RemoteRoundRobinConfig(artery: Boolean) extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.remote.artery.enabled = $artery """)).withFallback(RemotingMultiNodeSpec.commonConfig)) @@ -74,8 +73,9 @@ object RemoteRoundRobinSpec { } } -class RemoteRoundRobinSpec(multiNodeConfig: RemoteRoundRobinConfig) extends RemotingMultiNodeSpec(multiNodeConfig) - with DefaultTimeout { +class RemoteRoundRobinSpec(multiNodeConfig: RemoteRoundRobinConfig) + extends RemotingMultiNodeSpec(multiNodeConfig) + with DefaultTimeout { import RemoteRoundRobinSpec._ import multiNodeConfig._ @@ -112,7 +112,7 @@ class RemoteRoundRobinSpec(multiNodeConfig: RemoteRoundRobinConfig) extends Remo actor ! 
Broadcast(PoisonPill) enterBarrier("end") - replies.values foreach { _ should ===(iterationCount) } + replies.values.foreach { _ should ===(iterationCount) } replies.get(node(fourth).address) should ===(None) // shut down the actor before we let the other node(s) shut down so we don't try to send @@ -133,9 +133,9 @@ class RemoteRoundRobinSpec(multiNodeConfig: RemoteRoundRobinConfig) extends Remo runOn(fourth) { enterBarrier("start") - val actor = system.actorOf(RoundRobinPool( - nrOfInstances = 1, - resizer = Some(new TestResizer)).props(Props[SomeActor]), "service-hello2") + val actor = + system.actorOf(RoundRobinPool(nrOfInstances = 1, resizer = Some(new TestResizer)).props(Props[SomeActor]), + "service-hello2") actor.isInstanceOf[RoutedActorRef] should ===(true) actor ! GetRoutees @@ -194,7 +194,7 @@ class RemoteRoundRobinSpec(multiNodeConfig: RemoteRoundRobinConfig) extends Remo } enterBarrier("end") - replies.values foreach { _ should ===(iterationCount) } + replies.values.foreach { _ should ===(iterationCount) } replies.get(node(fourth).address) should ===(None) } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala index 1af84f64a6..f9963e8265 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/routing/RemoteScatterGatherSpec.scala @@ -26,8 +26,7 @@ class RemoteScatterGatherConfig(artery: Boolean) extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = false).withFallback( - ConfigFactory.parseString(s""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.remote.artery.enabled = $artery """)).withFallback(RemotingMultiNodeSpec.commonConfig)) @@ -45,10 +44,14 @@ class RemoteScatterGatherMultiJvmNode2 extends 
RemoteScatterGatherSpec(new Remot class RemoteScatterGatherMultiJvmNode3 extends RemoteScatterGatherSpec(new RemoteScatterGatherConfig(artery = false)) class RemoteScatterGatherMultiJvmNode4 extends RemoteScatterGatherSpec(new RemoteScatterGatherConfig(artery = false)) -class ArteryRemoteScatterGatherMultiJvmNode1 extends RemoteScatterGatherSpec(new RemoteScatterGatherConfig(artery = true)) -class ArteryRemoteScatterGatherMultiJvmNode2 extends RemoteScatterGatherSpec(new RemoteScatterGatherConfig(artery = true)) -class ArteryRemoteScatterGatherMultiJvmNode3 extends RemoteScatterGatherSpec(new RemoteScatterGatherConfig(artery = true)) -class ArteryRemoteScatterGatherMultiJvmNode4 extends RemoteScatterGatherSpec(new RemoteScatterGatherConfig(artery = true)) +class ArteryRemoteScatterGatherMultiJvmNode1 + extends RemoteScatterGatherSpec(new RemoteScatterGatherConfig(artery = true)) +class ArteryRemoteScatterGatherMultiJvmNode2 + extends RemoteScatterGatherSpec(new RemoteScatterGatherConfig(artery = true)) +class ArteryRemoteScatterGatherMultiJvmNode3 + extends RemoteScatterGatherSpec(new RemoteScatterGatherConfig(artery = true)) +class ArteryRemoteScatterGatherMultiJvmNode4 + extends RemoteScatterGatherSpec(new RemoteScatterGatherConfig(artery = true)) object RemoteScatterGatherSpec { class SomeActor extends Actor { @@ -58,8 +61,9 @@ object RemoteScatterGatherSpec { } } -class RemoteScatterGatherSpec(multiNodeConfig: RemoteScatterGatherConfig) extends RemotingMultiNodeSpec(multiNodeConfig) - with DefaultTimeout { +class RemoteScatterGatherSpec(multiNodeConfig: RemoteScatterGatherConfig) + extends RemotingMultiNodeSpec(multiNodeConfig) + with DefaultTimeout { import multiNodeConfig._ import RemoteScatterGatherSpec._ @@ -76,7 +80,9 @@ class RemoteScatterGatherSpec(multiNodeConfig: RemoteScatterGatherConfig) extend runOn(fourth) { enterBarrier("start") - val actor = system.actorOf(ScatterGatherFirstCompletedPool(nrOfInstances = 1, within = 
10.seconds).props(Props[SomeActor]), "service-hello") + val actor = system.actorOf( + ScatterGatherFirstCompletedPool(nrOfInstances = 1, within = 10.seconds).props(Props[SomeActor]), + "service-hello") actor.isInstanceOf[RoutedActorRef] should ===(true) val connectionCount = 3 diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala index f9b0e4b935..74b3972360 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/sample/MultiNodeSample.scala @@ -32,8 +32,7 @@ object MultiNodeSample { } } -class MultiNodeSample extends MultiNodeSpec(MultiNodeSampleConfig) - with STMultiNodeSpec with ImplicitSender { +class MultiNodeSample extends MultiNodeSpec(MultiNodeSampleConfig) with STMultiNodeSpec with ImplicitSender { import MultiNodeSample._ import MultiNodeSampleConfig._ diff --git a/akka-remote-tests/src/test/scala/akka/remote/RemotingFailedToBindSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/RemotingFailedToBindSpec.scala index e7cf9c0994..1343eb8368 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/RemotingFailedToBindSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/RemotingFailedToBindSpec.scala @@ -15,8 +15,7 @@ class RemotingFailedToBindSpec extends WordSpec with Matchers { "an ActorSystem" must { "not start if port is taken" in { val port = SocketUtil.temporaryLocalPort() - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" |akka { | actor { | provider = remote diff --git a/akka-remote-tests/src/test/scala/akka/remote/artery/ArteryFailedToBindSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/artery/ArteryFailedToBindSpec.scala index fde236a18a..39160dc547 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/artery/ArteryFailedToBindSpec.scala +++ 
b/akka-remote-tests/src/test/scala/akka/remote/artery/ArteryFailedToBindSpec.scala @@ -17,8 +17,7 @@ class ArteryFailedToBindSpec extends WordSpec with Matchers { "an ActorSystem" must { "not start if port is taken" in { val port = SocketUtil.temporaryLocalPort(true) - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" |akka { | actor { | provider = remote diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index 90acc7a121..feceb3c5e4 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -7,9 +7,9 @@ package akka.remote.testconductor import language.postfixOps import akka.actor._ -import akka.testkit.{ AkkaSpec, ImplicitSender, EventFilter, TestProbe, TimingTest } +import akka.testkit.{ AkkaSpec, EventFilter, ImplicitSender, TestProbe, TimingTest } import scala.concurrent.duration._ -import java.net.{ InetSocketAddress, InetAddress } +import java.net.{ InetAddress, InetSocketAddress } object BarrierSpec { final case class Failed(ref: ActorRef, thr: Throwable) @@ -38,7 +38,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { b ! NodeInfo(A, AddressFromURIString("akka://sys"), system.deadLetters) b ! RemoveClient(B) b ! RemoveClient(A) - EventFilter[BarrierEmpty](occurrences = 1) intercept { + EventFilter[BarrierEmpty](occurrences = 1).intercept { b ! RemoveClient(A) } expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil, null), "cannot remove RoleName(a): no client to remove"))) @@ -127,13 +127,16 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { barrier ! nodeA barrier ! 
NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) a.send(barrier, EnterBarrier("bar6", None)) - EventFilter[ClientLost](occurrences = 1) intercept { + EventFilter[ClientLost](occurrences = 1).intercept { barrier ! ClientDisconnected(B) } val msg = expectMsgType[Failed] msg match { - case Failed(barrier, thr: ClientLost) if (thr == ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, thr.data.deadline), B)) => - case x => fail("Expected " + Failed(barrier, ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, null), B)) + " but got " + x) + case Failed(barrier, thr: ClientLost) + if (thr == ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, thr.data.deadline), B)) => + case x => + fail( + "Expected " + Failed(barrier, ClientLost(Data(Set(nodeA), "bar6", a.ref :: Nil, null), B)) + " but got " + x) } } @@ -147,13 +150,16 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { barrier ! nodeC a.send(barrier, EnterBarrier("bar7", None)) b.send(barrier, EnterBarrier("bar7", None)) - EventFilter[ClientLost](occurrences = 1) intercept { + EventFilter[ClientLost](occurrences = 1).intercept { barrier ! ClientDisconnected(B) } val msg = expectMsgType[Failed] msg match { - case Failed(barrier, thr: ClientLost) if (thr == ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, thr.data.deadline), B)) => - case x => fail("Expected " + Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, null), B)) + " but got " + x) + case Failed(barrier, thr: ClientLost) + if (thr == ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, thr.data.deadline), B)) => + case x => + fail( + "Expected " + Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar7", a.ref :: Nil, null), B)) + " but got " + x) } } @@ -165,26 +171,37 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { val nodeB = NodeInfo(B, AddressFromURIString("akka://sys"), b.ref) barrier ! 
nodeB a.send(barrier, EnterBarrier("bar8", None)) - EventFilter[WrongBarrier](occurrences = 1) intercept { + EventFilter[WrongBarrier](occurrences = 1).intercept { b.send(barrier, EnterBarrier("foo", None)) } val msg = expectMsgType[Failed] msg match { - case Failed(barrier, thr: WrongBarrier) if (thr == WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar8", a.ref :: Nil, thr.data.deadline))) => - case x => fail("Expected " + Failed(barrier, WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar8", a.ref :: Nil, null))) + " but got " + x) + case Failed(barrier, thr: WrongBarrier) + if (thr == WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar8", a.ref :: Nil, thr.data.deadline))) => + case x => + fail( + "Expected " + Failed( + barrier, + WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar8", a.ref :: Nil, null))) + " but got " + x) } } "fail barrier after first failure" taggedAs TimingTest in { val barrier = getBarrier() val a = TestProbe() - EventFilter[BarrierEmpty](occurrences = 1) intercept { + EventFilter[BarrierEmpty](occurrences = 1).intercept { barrier ! RemoveClient(A) } val msg = expectMsgType[Failed] msg match { - case Failed(barrier, thr: BarrierEmpty) if (thr == BarrierEmpty(Data(Set(), "", Nil, thr.data.deadline), "cannot remove RoleName(a): no client to remove")) => - case x => fail("Expected " + Failed(barrier, BarrierEmpty(Data(Set(), "", Nil, null), "cannot remove RoleName(a): no client to remove")) + " but got " + x) + case Failed(barrier, thr: BarrierEmpty) + if (thr == BarrierEmpty(Data(Set(), "", Nil, thr.data.deadline), + "cannot remove RoleName(a): no client to remove")) => + case x => + fail( + "Expected " + Failed(barrier, + BarrierEmpty(Data(Set(), "", Nil, null), + "cannot remove RoleName(a): no client to remove")) + " but got " + x) } barrier ! 
NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) a.send(barrier, EnterBarrier("bar9", None)) @@ -199,11 +216,14 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { barrier ! nodeA barrier ! nodeB a.send(barrier, EnterBarrier("bar10", None)) - EventFilter[BarrierTimeout](occurrences = 1) intercept { + EventFilter[BarrierTimeout](occurrences = 1).intercept { val msg = expectMsgType[Failed](7 seconds) msg match { - case Failed(barrier, thr: BarrierTimeout) if (thr == BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, thr.data.deadline))) => - case x => fail("Expected " + Failed(barrier, BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, null))) + " but got " + x) + case Failed(barrier, thr: BarrierTimeout) + if (thr == BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, thr.data.deadline))) => + case x => + fail( + "Expected " + Failed(barrier, BarrierTimeout(Data(Set(nodeA, nodeB), "bar10", a.ref :: Nil, null))) + " but got " + x) } } } @@ -214,13 +234,15 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) val nodeB = NodeInfo(A, AddressFromURIString("akka://sys"), b.ref) barrier ! nodeA - EventFilter[DuplicateNode](occurrences = 1) intercept { + EventFilter[DuplicateNode](occurrences = 1).intercept { barrier ! 
nodeB } val msg = expectMsgType[Failed] msg match { - case Failed(barrier, thr: DuplicateNode) if (thr == DuplicateNode(Data(Set(nodeA), "", Nil, thr.data.deadline), nodeB)) => - case x => fail("Expected " + Failed(barrier, DuplicateNode(Data(Set(nodeA), "", Nil, null), nodeB)) + " but got " + x) + case Failed(barrier, thr: DuplicateNode) + if (thr == DuplicateNode(Data(Set(nodeA), "", Nil, thr.data.deadline), nodeB)) => + case x => + fail("Expected " + Failed(barrier, DuplicateNode(Data(Set(nodeA), "", Nil, null), nodeB)) + " but got " + x) } } @@ -238,7 +260,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { expectMsg(ToClient(Done)) b ! Remove(B) b ! Remove(A) - EventFilter.warning(start = "cannot remove", occurrences = 1) intercept { + EventFilter.warning(start = "cannot remove", occurrences = 1).intercept { b ! Remove(A) } Thread.sleep(5000) @@ -349,7 +371,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { a.send(barrier, EnterBarrier("bar15", None)) barrier ! ClientDisconnected(RoleName("unknown")) noMsg(a) - EventFilter[ClientLost](occurrences = 1) intercept { + EventFilter[ClientLost](occurrences = 1).intercept { barrier ! ClientDisconnected(B) } a.expectMsg(ToClient(BarrierResult("bar15", false))) @@ -369,7 +391,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { c.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar16", None)) b.send(barrier, EnterBarrier("bar16", None)) - EventFilter[ClientLost](occurrences = 1) intercept { + EventFilter[ClientLost](occurrences = 1).intercept { barrier ! 
ClientDisconnected(B) } a.expectMsg(ToClient(BarrierResult("bar16", false))) @@ -386,7 +408,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar17", None)) - EventFilter[WrongBarrier](occurrences = 1) intercept { + EventFilter[WrongBarrier](occurrences = 1).intercept { b.send(barrier, EnterBarrier("foo", None)) } a.expectMsg(ToClient(BarrierResult("bar17", false))) @@ -404,7 +426,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar18", Option(2 seconds))) - EventFilter[BarrierTimeout](occurrences = 1) intercept { + EventFilter[BarrierTimeout](occurrences = 1).intercept { Thread.sleep(4000) } b.send(barrier, EnterBarrier("bar18", None)) @@ -419,7 +441,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref) val nodeB = NodeInfo(A, AddressFromURIString("akka://sys"), b.ref) controller ! nodeA - EventFilter[DuplicateNode](occurrences = 1) intercept { + EventFilter[DuplicateNode](occurrences = 1).intercept { controller ! nodeB } a.expectMsg(ToClient(BarrierResult("initial startup", false))) @@ -434,7 +456,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { val nodeB = NodeInfo(A, AddressFromURIString("akka://sys"), b.ref) controller ! nodeA a.expectMsg(ToClient(Done)) - EventFilter[DuplicateNode](occurrences = 1) intercept { + EventFilter[DuplicateNode](occurrences = 1).intercept { controller ! 
nodeB b.expectMsg(ToClient(BarrierResult("initial startup", false))) } @@ -453,7 +475,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { a.expectMsg(ToClient(Done)) b.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar20", Option(2 seconds))) - EventFilter[FailedBarrier](occurrences = 1) intercept { + EventFilter[FailedBarrier](occurrences = 1).intercept { b.send(barrier, FailBarrier("bar20")) a.expectMsg(ToClient(BarrierResult("bar20", false))) b.expectNoMsg(1 second) @@ -479,7 +501,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { c.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar22", Option(10 seconds))) b.send(barrier, EnterBarrier("bar22", Option(2 seconds))) - EventFilter[BarrierTimeout](occurrences = 1) intercept { + EventFilter[BarrierTimeout](occurrences = 1).intercept { Thread.sleep(4000) } c.send(barrier, EnterBarrier("bar22", None)) @@ -503,7 +525,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { c.expectMsg(ToClient(Done)) a.send(barrier, EnterBarrier("bar23", Option(2 seconds))) b.send(barrier, EnterBarrier("bar23", Option(10 seconds))) - EventFilter[BarrierTimeout](occurrences = 1) intercept { + EventFilter[BarrierTimeout](occurrences = 1).intercept { Thread.sleep(4000) } c.send(barrier, EnterBarrier("bar23", None)) @@ -521,7 +543,8 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { private def withController(participants: Int)(f: (ActorRef) => Unit): Unit = { system.actorOf(Props(new Actor { - val controller = context.actorOf(Props(classOf[Controller], participants, new InetSocketAddress(InetAddress.getLocalHost, 0))) + val controller = + context.actorOf(Props(classOf[Controller], participants, new InetSocketAddress(InetAddress.getLocalHost, 0))) controller ! GetSockAddr override def supervisorStrategy = OneForOneStrategy() { case x => testActor ! 
Failed(controller, x); SupervisorStrategy.Restart @@ -554,10 +577,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender { private def noMsg(probes: TestProbe*): Unit = { expectNoMsg(1 second) - probes foreach (_.msgAvailable should ===(false)) + probes.foreach(_.msgAvailable should ===(false)) } - private def data(clients: Set[Controller.NodeInfo], barrier: String, arrived: List[ActorRef], previous: Data): Data = { + private def data(clients: Set[Controller.NodeInfo], + barrier: String, + arrived: List[ActorRef], + previous: Data): Data = { Data(clients, barrier, arrived, previous.deadline) } } diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala index b04a7e4992..daf6845fa9 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/ControllerSpec.scala @@ -5,7 +5,7 @@ package akka.remote.testconductor import akka.testkit.AkkaSpec -import akka.actor.{ PoisonPill, Props, AddressFromURIString } +import akka.actor.{ AddressFromURIString, PoisonPill, Props } import akka.testkit.ImplicitSender import akka.remote.testconductor.Controller.NodeInfo import java.net.InetSocketAddress diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala index 51d8177071..085c8755ce 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala @@ -45,9 +45,8 @@ object LogRoleReplace extends ClipboardOwner { val replacer = new LogRoleReplace if (args.length == 0) { - replacer.process( - new BufferedReader(new InputStreamReader(System.in)), - new PrintWriter(new OutputStreamWriter(System.out))) + replacer.process(new BufferedReader(new 
InputStreamReader(System.in)), + new PrintWriter(new OutputStreamWriter(System.out))) } else if (args(0) == "clipboard") { val clipboard = Toolkit.getDefaultToolkit.getSystemClipboard @@ -55,9 +54,7 @@ object LogRoleReplace extends ClipboardOwner { if (contents != null && contents.isDataFlavorSupported(DataFlavor.stringFlavor)) { val text = contents.getTransferData(DataFlavor.stringFlavor).asInstanceOf[String] val result = new StringWriter - replacer.process( - new BufferedReader(new StringReader(text)), - new PrintWriter(result)) + replacer.process(new BufferedReader(new StringReader(text)), new PrintWriter(result)) clipboard.setContents(new StringSelection(result.toString), this) println("Replaced clipboard contents") } @@ -65,9 +62,7 @@ object LogRoleReplace extends ClipboardOwner { } else if (args.length == 1) { val inputFile = new BufferedReader(new FileReader(args(0))) try { - replacer.process( - inputFile, - new PrintWriter(new OutputStreamWriter(System.out))) + replacer.process(inputFile, new PrintWriter(new OutputStreamWriter(System.out))) } finally { inputFile.close() } @@ -92,7 +87,8 @@ object LogRoleReplace extends ClipboardOwner { class LogRoleReplace { - private val RoleStarted = """\[([\w\-]+)\].*Role \[([\w]+)\] started with address \[[\w\-\+\.]+://.*@([\w\-\.]+):([0-9]+)\]""".r + private val RoleStarted = + """\[([\w\-]+)\].*Role \[([\w]+)\] started with address \[[\w\-\+\.]+://.*@([\w\-\.]+):([0-9]+)\]""".r private val ColorCode = """\u001B?\[[0-9]+m""" private var replacements: Map[String, String] = Map.empty diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala index 844de9349c..6d1a8e277c 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/STMultiNodeSpec.scala @@ -13,14 +13,15 @@ import org.scalatest.Matchers /** * Hooks up MultiNodeSpec 
with ScalaTest */ -trait STMultiNodeSpec extends MultiNodeSpecCallbacks - with WordSpecLike with Matchers with BeforeAndAfterAll { self: MultiNodeSpec => +trait STMultiNodeSpec extends MultiNodeSpecCallbacks with WordSpecLike with Matchers with BeforeAndAfterAll { + self: MultiNodeSpec => override def beforeAll() = multiNodeSpecBeforeAll() override def afterAll() = multiNodeSpecAfterAll() // Might not be needed anymore if we find a nice way to tag all logging from a node - override implicit def convertToWordSpecStringWrapper(s: String): WordSpecStringWrapper = new WordSpecStringWrapper(s"$s (on node '${self.myself.name}', $getClass)") + override implicit def convertToWordSpecStringWrapper(s: String): WordSpecStringWrapper = + new WordSpecStringWrapper(s"$s (on node '${self.myself.name}', $getClass)") } //#example diff --git a/akka-remote-tests/src/test/scala/org/scalatest/extra/QuietReporter.scala b/akka-remote-tests/src/test/scala/org/scalatest/extra/QuietReporter.scala index 436b4ec1f6..7fea9196c5 100644 --- a/akka-remote-tests/src/test/scala/org/scalatest/extra/QuietReporter.scala +++ b/akka-remote-tests/src/test/scala/org/scalatest/extra/QuietReporter.scala @@ -9,7 +9,7 @@ import org.scalatest.events._ import java.lang.Boolean.getBoolean class QuietReporter(inColor: Boolean, withDurations: Boolean = false) - extends StandardOutReporter(withDurations, inColor, false, true, false, false, false, false, false, false) { + extends StandardOutReporter(withDurations, inColor, false, true, false, false, false, false, false, false) { def this() = this(!getBoolean("akka.test.nocolor"), !getBoolean("akka.test.nodurations")) diff --git a/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala b/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala index a780dae6f3..6076431fe4 100644 --- a/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala +++ b/akka-remote/src/main/scala/akka/remote/AckedDelivery.scala @@ -71,11 +71,12 @@ final case class Ack(cumulativeAck: 
SeqNo, nacks: Set[SeqNo] = Set.empty) { } class ResendBufferCapacityReachedException(c: Int) - extends AkkaException(s"Resend buffer capacity of [$c] has been reached.") + extends AkkaException(s"Resend buffer capacity of [$c] has been reached.") class ResendUnfulfillableException - extends AkkaException("Unable to fulfill resend request since negatively acknowledged payload is no longer in buffer. " + - "The resend states between two systems are compromised and cannot be recovered.") + extends AkkaException( + "Unable to fulfill resend request since negatively acknowledged payload is no longer in buffer. " + + "The resend states between two systems are compromised and cannot be recovered.") /** * Implements an immutable resend buffer that buffers messages until they have been acknowledged. Properly removes messages @@ -88,11 +89,10 @@ class ResendUnfulfillableException * @param maxSeq The maximum sequence number that has been stored in this buffer. Messages having lower sequence number * will be not stored but rejected with [[java.lang.IllegalArgumentException]] */ -final case class AckedSendBuffer[T <: HasSequenceNumber]( - capacity: Int, - nonAcked: IndexedSeq[T] = Vector.empty[T], - nacked: IndexedSeq[T] = Vector.empty[T], - maxSeq: SeqNo = SeqNo(-1)) { +final case class AckedSendBuffer[T <: HasSequenceNumber](capacity: Int, + nonAcked: IndexedSeq[T] = Vector.empty[T], + nacked: IndexedSeq[T] = Vector.empty[T], + maxSeq: SeqNo = SeqNo(-1)) { /** * Processes an incoming acknowledgement and returns a new buffer with only unacknowledged elements remaining. 
@@ -104,11 +104,15 @@ final case class AckedSendBuffer[T <: HasSequenceNumber]( throw new IllegalArgumentException(s"Highest SEQ so far was $maxSeq but cumulative ACK is ${ack.cumulativeAck}") val newNacked = if (ack.nacks.isEmpty) Vector.empty - else (nacked ++ nonAcked) filter { m => ack.nacks(m.seq) } + else + (nacked ++ nonAcked).filter { m => + ack.nacks(m.seq) + } if (newNacked.size < ack.nacks.size) throw new ResendUnfulfillableException - else this.copy( - nonAcked = nonAcked.filter { m => m.seq > ack.cumulativeAck }, - nacked = newNacked) + else + this.copy(nonAcked = nonAcked.filter { m => + m.seq > ack.cumulativeAck + }, nacked = newNacked) } /** @@ -118,8 +122,10 @@ final case class AckedSendBuffer[T <: HasSequenceNumber]( * @return The updated buffer */ def buffer(msg: T): AckedSendBuffer[T] = { - if (msg.seq <= maxSeq) throw new IllegalArgumentException(s"Sequence number must be monotonic. Received [${msg.seq}] " + - s"which is smaller than [$maxSeq]") + if (msg.seq <= maxSeq) + throw new IllegalArgumentException( + s"Sequence number must be monotonic. Received [${msg.seq}] " + + s"which is smaller than [$maxSeq]") if (nonAcked.size == capacity) throw new ResendBufferCapacityReachedException(capacity) @@ -138,9 +144,9 @@ final case class AckedSendBuffer[T <: HasSequenceNumber]( * @param buf Buffer of messages that are waiting for delivery */ final case class AckedReceiveBuffer[T <: HasSequenceNumber]( - lastDelivered: SeqNo = SeqNo(-1), - cumulativeAck: SeqNo = SeqNo(-1), - buf: SortedSet[T] = TreeSet.empty[T])(implicit val seqOrdering: Ordering[T]) { + lastDelivered: SeqNo = SeqNo(-1), + cumulativeAck: SeqNo = SeqNo(-1), + buf: SortedSet[T] = TreeSet.empty[T])(implicit val seqOrdering: Ordering[T]) { import SeqNo.ord.max @@ -150,9 +156,8 @@ final case class AckedReceiveBuffer[T <: HasSequenceNumber]( * @return The updated buffer containing the message. 
*/ def receive(arrivedMsg: T): AckedReceiveBuffer[T] = { - this.copy( - cumulativeAck = max(arrivedMsg.seq, cumulativeAck), - buf = if (arrivedMsg.seq > lastDelivered && !buf.contains(arrivedMsg)) buf + arrivedMsg else buf) + this.copy(cumulativeAck = max(arrivedMsg.seq, cumulativeAck), + buf = if (arrivedMsg.seq > lastDelivered && !buf.contains(arrivedMsg)) buf + arrivedMsg else buf) } /** @@ -196,10 +201,9 @@ final case class AckedReceiveBuffer[T <: HasSequenceNumber]( */ def mergeFrom(that: AckedReceiveBuffer[T]): AckedReceiveBuffer[T] = { val mergedLastDelivered = max(this.lastDelivered, that.lastDelivered) - this.copy( - lastDelivered = mergedLastDelivered, - cumulativeAck = max(this.cumulativeAck, that.cumulativeAck), - buf = (this.buf union that.buf).filter { _.seq > mergedLastDelivered }) + this.copy(lastDelivered = mergedLastDelivered, + cumulativeAck = max(this.cumulativeAck, that.cumulativeAck), + buf = this.buf.union(that.buf).filter { _.seq > mergedLastDelivered }) } override def toString = buf.map { _.seq }.mkString("[", ", ", "]") diff --git a/akka-remote/src/main/scala/akka/remote/BoundAddressesExtension.scala b/akka-remote/src/main/scala/akka/remote/BoundAddressesExtension.scala index 7212eae09e..772b8dc377 100644 --- a/akka-remote/src/main/scala/akka/remote/BoundAddressesExtension.scala +++ b/akka-remote/src/main/scala/akka/remote/BoundAddressesExtension.scala @@ -25,12 +25,12 @@ object BoundAddressesExtension extends ExtensionId[BoundAddressesExtension] with } class BoundAddressesExtension(val system: ExtendedActorSystem) extends Extension { + /** * Returns a mapping from a protocol to a set of bound addresses. 
*/ - def boundAddresses: Map[String, Set[Address]] = system.provider - .asInstanceOf[RemoteActorRefProvider].transport match { - case artery: ArteryTransport => Map(ArteryTransport.ProtocolName -> Set(artery.bindAddress.address)) - case remoting: Remoting => remoting.boundAddresses - } + def boundAddresses: Map[String, Set[Address]] = system.provider.asInstanceOf[RemoteActorRefProvider].transport match { + case artery: ArteryTransport => Map(ArteryTransport.ProtocolName -> Set(artery.bindAddress.address)) + case remoting: Remoting => remoting.boundAddresses + } } diff --git a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala index bcefe527e7..1513dc1798 100644 --- a/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala +++ b/akka-remote/src/main/scala/akka/remote/DeadlineFailureDetector.scala @@ -27,20 +27,18 @@ import akka.util.Helpers.ConfigOps * @param clock The clock, returning current time in milliseconds, but can be faked for testing * purposes. It is only used for measuring intervals (duration). */ -class DeadlineFailureDetector( - val acceptableHeartbeatPause: FiniteDuration, - val heartbeatInterval: FiniteDuration)( - implicit - clock: Clock) extends FailureDetector { +class DeadlineFailureDetector(val acceptableHeartbeatPause: FiniteDuration, val heartbeatInterval: FiniteDuration)( + implicit + clock: Clock) + extends FailureDetector { /** * Constructor that reads parameters from config. * Expecting config properties named `acceptable-heartbeat-pause`. 
*/ def this(config: Config, ev: EventStream) = - this( - acceptableHeartbeatPause = config.getMillisDuration("acceptable-heartbeat-pause"), - heartbeatInterval = config.getMillisDuration("heartbeat-interval")) + this(acceptableHeartbeatPause = config.getMillisDuration("acceptable-heartbeat-pause"), + heartbeatInterval = config.getMillisDuration("heartbeat-interval")) require(acceptableHeartbeatPause >= Duration.Zero, "failure-detector.acceptable-heartbeat-pause must be >= 0 s") require(heartbeatInterval > Duration.Zero, "failure-detector.heartbeat-interval must be > 0 s") @@ -63,4 +61,3 @@ class DeadlineFailureDetector( } } - diff --git a/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala index aaa434838d..a53fc4990a 100644 --- a/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala +++ b/akka-remote/src/main/scala/akka/remote/DefaultFailureDetectorRegistry.scala @@ -7,7 +7,7 @@ package akka.remote import java.util.concurrent.atomic.AtomicReference import scala.annotation.tailrec import scala.collection.immutable.Map -import java.util.concurrent.locks.{ ReentrantLock, Lock } +import java.util.concurrent.locks.{ Lock, ReentrantLock } /** * A lock-less thread-safe implementation of [[akka.remote.FailureDetectorRegistry]]. 
@@ -35,7 +35,7 @@ class DefaultFailureDetectorRegistry[A](detectorFactory: () => FailureDetector) resourceToFailureDetector.get.get(resource) match { case Some(failureDetector) => failureDetector.heartbeat() - case None => + case None => // First one wins and creates the new FailureDetector failureDetectorCreationLock.lock() try { @@ -89,4 +89,3 @@ class DefaultFailureDetectorRegistry[A](detectorFactory: () => FailureDetector) resourceToFailureDetector.get.get(resource) } - diff --git a/akka-remote/src/main/scala/akka/remote/Endpoint.scala b/akka-remote/src/main/scala/akka/remote/Endpoint.scala index 3bff8fd91b..973b477bfe 100644 --- a/akka-remote/src/main/scala/akka/remote/Endpoint.scala +++ b/akka-remote/src/main/scala/akka/remote/Endpoint.scala @@ -15,7 +15,12 @@ import akka.remote.EndpointManager.{ Link, ResendState, Send } import akka.remote.EndpointWriter.{ FlushAndStop, StoppedReading } import akka.remote.WireFormats.SerializedMessage import akka.remote.transport.AkkaPduCodec.Message -import akka.remote.transport.AssociationHandle.{ ActorHandleEventListener, DisassociateInfo, Disassociated, InboundPayload } +import akka.remote.transport.AssociationHandle.{ + ActorHandleEventListener, + DisassociateInfo, + Disassociated, + InboundPayload +} import akka.remote.transport.Transport.InvalidAssociationException import akka.remote.transport._ import akka.serialization.Serialization @@ -37,28 +42,26 @@ import akka.util.OptionVal * INTERNAL API */ private[remote] trait InboundMessageDispatcher { - def dispatch( - recipient: InternalActorRef, - recipientAddress: Address, - serializedMessage: SerializedMessage, - senderOption: OptionVal[ActorRef]): Unit + def dispatch(recipient: InternalActorRef, + recipientAddress: Address, + serializedMessage: SerializedMessage, + senderOption: OptionVal[ActorRef]): Unit } /** * INTERNAL API */ -private[remote] class DefaultMessageDispatcher( - private val system: ExtendedActorSystem, - private val provider: RemoteActorRefProvider, - 
private val log: MarkerLoggingAdapter) extends InboundMessageDispatcher { +private[remote] class DefaultMessageDispatcher(private val system: ExtendedActorSystem, + private val provider: RemoteActorRefProvider, + private val log: MarkerLoggingAdapter) + extends InboundMessageDispatcher { private val remoteDaemon = provider.remoteDaemon - override def dispatch( - recipient: InternalActorRef, - recipientAddress: Address, - serializedMessage: SerializedMessage, - senderOption: OptionVal[ActorRef]): Unit = { + override def dispatch(recipient: InternalActorRef, + recipientAddress: Address, + serializedMessage: SerializedMessage, + senderOption: OptionVal[ActorRef]): Unit = { import provider.remoteSettings._ @@ -69,7 +72,11 @@ private[remote] class DefaultMessageDispatcher( def logMessageReceived(messageType: String): Unit = { if (LogReceive && log.isDebugEnabled) - log.debug(s"received $messageType RemoteMessage: [{}] to [{}]<+[{}] from [{}]", payload, recipient, originalReceiver, sender) + log.debug(s"received $messageType RemoteMessage: [{}] to [{}]<+[{}] from [{}]", + payload, + recipient, + originalReceiver, + sender) } recipient match { @@ -86,17 +93,18 @@ private[remote] class DefaultMessageDispatcher( payload match { case sel: ActorSelectionMessage => if (UntrustedMode && (!TrustedSelectionPaths.contains(sel.elements.mkString("/", "/", "")) || - sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian)) - log.debug( - LogMarker.Security, - "operating in UntrustedMode, dropping inbound actor selection to [{}], " + - "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration", - sel.elements.mkString("/", "/", "")) + sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian)) + log.debug(LogMarker.Security, + "operating in UntrustedMode, dropping inbound actor selection to [{}], " + + "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration", + sel.elements.mkString("/", "/", "")) else // run 
the receive logic for ActorSelectionMessage here to make sure it is not stuck on busy user actor ActorSelection.deliverSelection(l, sender, sel) case msg: PossiblyHarmful if UntrustedMode => - log.debug(LogMarker.Security, "operating in UntrustedMode, dropping inbound PossiblyHarmful message of type [{}]", msg.getClass.getName) + log.debug(LogMarker.Security, + "operating in UntrustedMode, dropping inbound PossiblyHarmful message of type [{}]", + msg.getClass.getName) case msg: SystemMessage => l.sendSystemMessage(msg) case msg => l.!(msg)(sender) } @@ -107,13 +115,18 @@ private[remote] class DefaultMessageDispatcher( // if it was originally addressed to us but is in fact remote from our point of view (i.e. remote-deployed) r.!(payload)(sender) else - log.error( - "dropping message [{}] for non-local recipient [{}] arriving at [{}] inbound addresses are [{}]", - payloadClass, r, recipientAddress, provider.transport.addresses.mkString(", ")) + log.error("dropping message [{}] for non-local recipient [{}] arriving at [{}] inbound addresses are [{}]", + payloadClass, + r, + recipientAddress, + provider.transport.addresses.mkString(", ")) - case r => log.error( - "dropping message [{}] for unknown recipient [{}] arriving at [{}] inbound addresses are [{}]", - payloadClass, r, recipientAddress, provider.transport.addresses.mkString(", ")) + case r => + log.error("dropping message [{}] for unknown recipient [{}] arriving at [{}] inbound addresses are [{}]", + payloadClass, + r, + recipientAddress, + provider.transport.addresses.mkString(", ")) } } @@ -124,7 +137,9 @@ private[remote] class DefaultMessageDispatcher( * INTERNAL API */ @SerialVersionUID(1L) -private[remote] class EndpointException(msg: String, cause: Throwable) extends AkkaException(msg, cause) with OnlyCauseStackTrace { +private[remote] class EndpointException(msg: String, cause: Throwable) + extends AkkaException(msg, cause) + with OnlyCauseStackTrace { def this(msg: String) = this(msg, null) } @@ -138,25 
+153,30 @@ private[remote] trait AssociationProblem */ @SerialVersionUID(1L) private[remote] final case class ShutDownAssociation(localAddress: Address, remoteAddress: Address, cause: Throwable) - extends EndpointException("Shut down address: " + remoteAddress, cause) with AssociationProblem + extends EndpointException("Shut down address: " + remoteAddress, cause) + with AssociationProblem /** * INTERNAL API */ @SerialVersionUID(2L) -private[remote] final case class InvalidAssociation( - localAddress: Address, - remoteAddress: Address, - cause: Throwable, - disassociationInfo: Option[DisassociateInfo] = None) - extends EndpointException("Invalid address: " + remoteAddress, cause) with AssociationProblem +private[remote] final case class InvalidAssociation(localAddress: Address, + remoteAddress: Address, + cause: Throwable, + disassociationInfo: Option[DisassociateInfo] = None) + extends EndpointException("Invalid address: " + remoteAddress, cause) + with AssociationProblem /** * INTERNAL API */ @SerialVersionUID(1L) -private[remote] final case class HopelessAssociation(localAddress: Address, remoteAddress: Address, uid: Option[Int], cause: Throwable) - extends EndpointException("Catastrophic association error.") with AssociationProblem +private[remote] final case class HopelessAssociation(localAddress: Address, + remoteAddress: Address, + uid: Option[Int], + cause: Throwable) + extends EndpointException("Catastrophic association error.") + with AssociationProblem /** * INTERNAL API @@ -188,44 +208,55 @@ private[remote] object ReliableDeliverySupervisor { case object Idle case object TooLongIdle - def props( - handleOrActive: Option[AkkaProtocolHandle], - localAddress: Address, - remoteAddress: Address, - refuseUid: Option[Int], - transport: AkkaProtocolTransport, - settings: RemoteSettings, - codec: AkkaPduCodec, - receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props = - Props(classOf[ReliableDeliverySupervisor], handleOrActive, localAddress, remoteAddress, 
refuseUid, transport, settings, - codec, receiveBuffers) + def props(handleOrActive: Option[AkkaProtocolHandle], + localAddress: Address, + remoteAddress: Address, + refuseUid: Option[Int], + transport: AkkaProtocolTransport, + settings: RemoteSettings, + codec: AkkaPduCodec, + receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props = + Props(classOf[ReliableDeliverySupervisor], + handleOrActive, + localAddress, + remoteAddress, + refuseUid, + transport, + settings, + codec, + receiveBuffers) } /** * INTERNAL API */ -private[remote] class ReliableDeliverySupervisor( - handleOrActive: Option[AkkaProtocolHandle], - val localAddress: Address, - val remoteAddress: Address, - val refuseUid: Option[Int], - val transport: AkkaProtocolTransport, - val settings: RemoteSettings, - val codec: AkkaPduCodec, - val receiveBuffers: ConcurrentHashMap[Link, ResendState]) extends Actor with ActorLogging { +private[remote] class ReliableDeliverySupervisor(handleOrActive: Option[AkkaProtocolHandle], + val localAddress: Address, + val remoteAddress: Address, + val refuseUid: Option[Int], + val transport: AkkaProtocolTransport, + val settings: RemoteSettings, + val codec: AkkaPduCodec, + val receiveBuffers: ConcurrentHashMap[Link, ResendState]) + extends Actor + with ActorLogging { import ReliableDeliverySupervisor._ import context.dispatcher - val autoResendTimer = context.system.scheduler.schedule( - settings.SysResendTimeout, settings.SysResendTimeout, self, AttemptSysMsgRedelivery) + val autoResendTimer = context.system.scheduler.schedule(settings.SysResendTimeout, + settings.SysResendTimeout, + self, + AttemptSysMsgRedelivery) override val supervisorStrategy = OneForOneStrategy(loggingEnabled = false) { case _: AssociationProblem => Escalate case NonFatal(e) => val causedBy = if (e.getCause == null) "" else s"Caused by: [${e.getCause.getMessage}]" - log.warning( - "Association with remote system [{}] has failed, address is now gated for [{}] ms. 
Reason: [{}] {}", - remoteAddress, settings.RetryGateClosedFor.toMillis, e.getMessage, causedBy) + log.warning("Association with remote system [{}] has failed, address is now gated for [{}] ms. Reason: [{}] {}", + remoteAddress, + settings.RetryGateClosedFor.toMillis, + e.getMessage, + causedBy) uidConfirmed = false // Need confirmation of UID again if ((resendBuffer.nacked.nonEmpty || resendBuffer.nonAcked.nonEmpty) && bailoutAt.isEmpty) bailoutAt = Some(Deadline.now + settings.InitialSysMsgDeliveryTimeout) @@ -255,7 +286,7 @@ private[remote] class ReliableDeliverySupervisor( } var writer: ActorRef = createWriter() - var uid: Option[Int] = handleOrActive map { _.handshakeInfo.uid } + var uid: Option[Int] = handleOrActive.map { _.handshakeInfo.uid } var bailoutAt: Option[Deadline] = None var maxSilenceTimer: Option[Cancellable] = None // Processing of Acks has to be delayed until the UID after a reconnect is discovered. Depending whether the @@ -267,7 +298,10 @@ private[remote] class ReliableDeliverySupervisor( var uidConfirmed: Boolean = uid.isDefined && (uid != refuseUid) if (uid.isDefined && (uid == refuseUid)) - throw new HopelessAssociation(localAddress, remoteAddress, uid, + throw new HopelessAssociation( + localAddress, + remoteAddress, + uid, new IllegalStateException( s"The remote system [$remoteAddress] has a UID [${uid.get}] that has been quarantined. Association aborted.")) @@ -278,7 +312,9 @@ private[remote] class ReliableDeliverySupervisor( // Such a situation may arise when the EndpointWriter is shut down, and all of its mailbox contents are delivered // to dead letters. These messages should be ignored, as they still live in resendBuffer and might be delivered to // the remote system later. - (resendBuffer.nacked ++ resendBuffer.nonAcked) foreach { s => context.system.deadLetters ! s.copy(seqOpt = None) } + (resendBuffer.nacked ++ resendBuffer.nonAcked).foreach { s => + context.system.deadLetters ! 
s.copy(seqOpt = None) + } receiveBuffers.remove(Link(localAddress, remoteAddress)) autoResendTimer.cancel() maxSilenceTimer.foreach(_.cancel()) @@ -304,9 +340,13 @@ private[remote] class ReliableDeliverySupervisor( try resendBuffer = resendBuffer.acknowledge(ack) catch { case NonFatal(e) => - throw new HopelessAssociation(localAddress, remoteAddress, uid, - new IllegalStateException(s"Error encountered while processing system message " + - s"acknowledgement buffer: $resendBuffer ack: $ack", e)) + throw new HopelessAssociation(localAddress, + remoteAddress, + uid, + new IllegalStateException( + s"Error encountered while processing system message " + + s"acknowledgement buffer: $resendBuffer ack: $ack", + e)) } resendNacked() @@ -329,7 +369,7 @@ private[remote] class ReliableDeliverySupervisor( resendAll() case s: EndpointWriter.StopReading => - writer forward s + writer.forward(s) case Ungate => // ok, not gated } @@ -353,7 +393,10 @@ private[remote] class ReliableDeliverySupervisor( // again it will be immediately quarantined due to out-of-sync system message buffer and becomes quarantined. // In other words, this action is safe. if (bailoutAt.exists(_.isOverdue())) - throw new HopelessAssociation(localAddress, remoteAddress, uid, + throw new HopelessAssociation( + localAddress, + remoteAddress, + uid, new java.util.concurrent.TimeoutException("Delivery of system messages timed out and they were dropped.")) writer = createWriter() // Resending will be triggered by the incoming GotUid message after the connection finished @@ -382,8 +425,12 @@ private[remote] class ReliableDeliverySupervisor( goToActive() } case TooLongIdle => - throw new HopelessAssociation(localAddress, remoteAddress, uid, - new TimeoutException("Remote system has been silent for too long. " + + throw new HopelessAssociation( + localAddress, + remoteAddress, + uid, + new TimeoutException( + "Remote system has been silent for too long. 
" + s"(more than ${settings.QuarantineSilentSystemTimeout.toUnit(TimeUnit.HOURS)} hours)")) case EndpointWriter.FlushAndStop => context.stop(self) case EndpointWriter.StopReading(w, replyTo) => @@ -393,7 +440,8 @@ private[remote] class ReliableDeliverySupervisor( private def goToIdle(): Unit = { if (maxSilenceTimer.isEmpty) - maxSilenceTimer = Some(context.system.scheduler.scheduleOnce(settings.QuarantineSilentSystemTimeout, self, TooLongIdle)) + maxSilenceTimer = Some( + context.system.scheduler.scheduleOnce(settings.QuarantineSilentSystemTimeout, self, TooLongIdle)) context.become(idle) } @@ -404,7 +452,7 @@ private[remote] class ReliableDeliverySupervisor( } def flushWait: Receive = { - case IsIdle => // Do not reply, we will Terminate soon, which will do the inbound connection unstashing + case IsIdle => // Do not reply, we will Terminate soon, which will do the inbound connection unstashing case Terminated(_) => // Clear buffer to prevent sending system messages to dead letters -- at this point we are shutting down // and don't really know if they were properly delivered or not. @@ -424,43 +472,48 @@ private[remote] class ReliableDeliverySupervisor( writer ! sequencedSend } else writer ! send - private def resendNacked(): Unit = resendBuffer.nacked foreach { writer ! _ } + private def resendNacked(): Unit = resendBuffer.nacked.foreach { writer ! _ } private def resendAll(): Unit = { resendNacked() - resendBuffer.nonAcked.take(settings.SysResendLimit) foreach { writer ! _ } + resendBuffer.nonAcked.take(settings.SysResendLimit).foreach { writer ! 
_ } } private def tryBuffer(s: Send): Unit = try { - resendBuffer = resendBuffer buffer s + resendBuffer = resendBuffer.buffer(s) } catch { case NonFatal(e) => throw new HopelessAssociation(localAddress, remoteAddress, uid, e) } private def createWriter(): ActorRef = { - context.watch(context.actorOf(RARP(context.system).configureDispatcher(EndpointWriter.props( - handleOrActive = currentHandle, - localAddress = localAddress, - remoteAddress = remoteAddress, - refuseUid, - transport = transport, - settings = settings, - AkkaPduProtobufCodec, - receiveBuffers = receiveBuffers, - reliableDeliverySupervisor = Some(self))).withDeploy(Deploy.local), "endpointWriter")) + context.watch( + context.actorOf( + RARP(context.system) + .configureDispatcher(EndpointWriter.props(handleOrActive = currentHandle, + localAddress = localAddress, + remoteAddress = remoteAddress, + refuseUid, + transport = transport, + settings = settings, + AkkaPduProtobufCodec, + receiveBuffers = receiveBuffers, + reliableDeliverySupervisor = Some(self))) + .withDeploy(Deploy.local), + "endpointWriter")) } } /** * INTERNAL API */ -private[remote] abstract class EndpointActor( - val localAddress: Address, - val remoteAddress: Address, - val transport: Transport, - val settings: RemoteSettings, - val codec: AkkaPduCodec) extends Actor with ActorLogging { +private[remote] abstract class EndpointActor(val localAddress: Address, + val remoteAddress: Address, + val transport: Transport, + val settings: RemoteSettings, + val codec: AkkaPduCodec) + extends Actor + with ActorLogging { def inbound: Boolean @@ -471,9 +524,9 @@ private[remote] abstract class EndpointActor( def publishDisassociated(): Unit = tryPublish(DisassociatedEvent(localAddress, remoteAddress, inbound)) - private def tryPublish(ev: AssociationEvent): Unit = try - eventPublisher.notifyListeners(ev) - catch { case NonFatal(e) => log.error(e, "Unable to publish error event to EventStream.") } + private def tryPublish(ev: AssociationEvent): Unit 
= + try eventPublisher.notifyListeners(ev) + catch { case NonFatal(e) => log.error(e, "Unable to publish error event to EventStream.") } } /** @@ -481,18 +534,25 @@ private[remote] abstract class EndpointActor( */ private[remote] object EndpointWriter { - def props( - handleOrActive: Option[AkkaProtocolHandle], - localAddress: Address, - remoteAddress: Address, - refuseUid: Option[Int], - transport: AkkaProtocolTransport, - settings: RemoteSettings, - codec: AkkaPduCodec, - receiveBuffers: ConcurrentHashMap[Link, ResendState], - reliableDeliverySupervisor: Option[ActorRef]): Props = - Props(classOf[EndpointWriter], handleOrActive, localAddress, remoteAddress, refuseUid, transport, settings, codec, - receiveBuffers, reliableDeliverySupervisor) + def props(handleOrActive: Option[AkkaProtocolHandle], + localAddress: Address, + remoteAddress: Address, + refuseUid: Option[Int], + transport: AkkaProtocolTransport, + settings: RemoteSettings, + codec: AkkaPduCodec, + receiveBuffers: ConcurrentHashMap[Link, ResendState], + reliableDeliverySupervisor: Option[ActorRef]): Props = + Props(classOf[EndpointWriter], + handleOrActive, + localAddress, + remoteAddress, + refuseUid, + transport, + settings, + codec, + receiveBuffers, + reliableDeliverySupervisor) /** * This message signals that the current association maintained by the local EndpointWriter and EndpointReader is @@ -526,17 +586,16 @@ private[remote] object EndpointWriter { /** * INTERNAL API */ -private[remote] class EndpointWriter( - handleOrActive: Option[AkkaProtocolHandle], - localAddress: Address, - remoteAddress: Address, - refuseUid: Option[Int], - transport: AkkaProtocolTransport, - settings: RemoteSettings, - codec: AkkaPduCodec, - val receiveBuffers: ConcurrentHashMap[Link, ResendState], - val reliableDeliverySupervisor: Option[ActorRef]) - extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) { +private[remote] class EndpointWriter(handleOrActive: Option[AkkaProtocolHandle], + 
localAddress: Address, + remoteAddress: Address, + refuseUid: Option[Int], + transport: AkkaProtocolTransport, + settings: RemoteSettings, + codec: AkkaPduCodec, + val receiveBuffers: ConcurrentHashMap[Link, ResendState], + val reliableDeliverySupervisor: Option[ActorRef]) + extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) { import EndpointWriter._ import context.dispatcher @@ -548,7 +607,7 @@ private[remote] class EndpointWriter( var reader: Option[ActorRef] = None var handle: Option[AkkaProtocolHandle] = handleOrActive - val readerId = Iterator from 0 + val readerId = Iterator.from(0) def newAckDeadline: Deadline = Deadline.now + settings.SysMsgAckTimeout var ackDeadline: Deadline = newAckDeadline @@ -591,7 +650,7 @@ private[remote] class EndpointWriter( case Some(h) => reader = startReadEndpoint(h) case None => - transport.associate(remoteAddress, refuseUid).map(Handle(_)) pipeTo self + transport.associate(remoteAddress, refuseUid).map(Handle(_)).pipeTo(self) } } @@ -600,11 +659,9 @@ private[remote] class EndpointWriter( override def postStop(): Unit = { ackIdleTimer.cancel() - while (!prioBuffer.isEmpty) - extendedSystem.deadLetters ! prioBuffer.poll - while (!buffer.isEmpty) - extendedSystem.deadLetters ! buffer.poll - handle foreach { _.disassociate(stopReason) } + while (!prioBuffer.isEmpty) extendedSystem.deadLetters ! prioBuffer.poll + while (!buffer.isEmpty) extendedSystem.deadLetters ! 
buffer.poll + handle.foreach { _.disassociate(stopReason) } eventPublisher.notifyListeners(DisassociatedEvent(localAddress, remoteAddress, inbound)) } @@ -616,7 +673,8 @@ private[remote] class EndpointWriter( case Status.Failure(e: InvalidAssociationException) => publishAndThrow(new InvalidAssociation(localAddress, remoteAddress, e), Logging.WarningLevel) case Status.Failure(e) => - publishAndThrow(new EndpointAssociationException(s"Association failed with [$remoteAddress]", e), Logging.DebugLevel) + publishAndThrow(new EndpointAssociationException(s"Association failed with [$remoteAddress]", e), + Logging.DebugLevel) case Handle(inboundHandle) => // Assert handle == None? context.parent ! ReliableDeliverySupervisor.GotUid(inboundHandle.handshakeInfo.uid, remoteAddress) @@ -627,9 +685,9 @@ private[remote] class EndpointWriter( } def enqueueInBuffer(msg: AnyRef): Unit = msg match { - case s @ Send(_: PriorityMessage, _, _, _) => prioBuffer offer s - case s @ Send(ActorSelectionMessage(_: PriorityMessage, _, _), _, _, _) => prioBuffer offer s - case _ => buffer offer msg + case s @ Send(_: PriorityMessage, _, _, _) => prioBuffer.offer(s) + case s @ Send(ActorSelectionMessage(_: PriorityMessage, _, _), _, _, _) => prioBuffer.offer(s) + case _ => buffer.offer(msg) } val buffering: Receive = { @@ -637,7 +695,7 @@ private[remote] class EndpointWriter( case BackoffTimer => sendBufferedMessages() case FlushAndStop => // Flushing is postponed after the pending writes - buffer offer FlushAndStop + buffer.offer(FlushAndStop) context.system.scheduler.scheduleOnce(settings.FlushWait, self, FlushAndStopTimeout) case FlushAndStopTimeout => // enough @@ -707,7 +765,8 @@ private[remote] class EndpointWriter( if (buffer.isEmpty && prioBuffer.isEmpty) { // FIXME remove this when testing/tuning is completed if (log.isDebugEnabled) - log.debug(s"Drained buffer with maxWriteCount: $maxWriteCount, fullBackoffCount: $fullBackoffCount" + + log.debug( + s"Drained buffer with maxWriteCount: 
$maxWriteCount, fullBackoffCount: $fullBackoffCount" + s", smallBackoffCount: $smallBackoffCount, noBackoffCount: $noBackoffCount " + s", adaptiveBackoff: ${adaptiveBackoffNanos / 1000}") fullBackoffCount = 1 @@ -725,10 +784,10 @@ private[remote] class EndpointWriter( if (size > settings.LogBufferSizeExceeding) { val now = System.nanoTime() if (now - largeBufferLogTimestamp >= LogBufferSizeInterval) { - log.warning( - "[{}] buffered messages in EndpointWriter for [{}]. " + - "You should probably implement flow control to avoid flooding the remote connection.", - size, remoteAddress) + log.warning("[{}] buffered messages in EndpointWriter for [{}]. " + + "You should probably implement flow control to avoid flooding the remote connection.", + size, + remoteAddress) largeBufferLogTimestamp = now } } @@ -779,56 +838,66 @@ private[remote] class EndpointWriter( trySendPureAck() } - def writeSend(s: Send): Boolean = try { - handle match { - case Some(h) => - if (provider.remoteSettings.LogSend && log.isDebugEnabled) { - def msgLog = s"RemoteMessage: [${s.message}] to [${s.recipient}]<+[${s.recipient.path}] from [${s.senderOption.getOrElse(extendedSystem.deadLetters)}]" - log.debug("sending message {}", msgLog) - } - - val pdu = codec.constructMessage( - s.recipient.localAddressToUse, - s.recipient, - serializeMessage(s.message), - s.senderOption, - seqOption = s.seqOpt, - ackOption = lastAck) - - val pduSize = pdu.size - remoteMetrics.logPayloadBytes(s.message, pduSize) - - if (pduSize > transport.maximumPayloadBytes) { - val reason = new OversizedPayloadException(s"Discarding oversized payload sent to ${s.recipient}: max allowed size ${transport.maximumPayloadBytes} bytes, actual size of encoded ${s.message.getClass} was ${pdu.size} bytes.") - log.error(reason, "Transient association error (association remains live)") - true - } else { - val ok = h.write(pdu) - if (ok) { - ackDeadline = newAckDeadline - lastAck = None + def writeSend(s: Send): Boolean = + try { + handle 
match { + case Some(h) => + if (provider.remoteSettings.LogSend && log.isDebugEnabled) { + def msgLog = + s"RemoteMessage: [${s.message}] to [${s.recipient}]<+[${s.recipient.path}] from [${s.senderOption + .getOrElse(extendedSystem.deadLetters)}]" + log.debug("sending message {}", msgLog) } - ok - } - case None => - throw new EndpointException("Internal error: Endpoint is in state Writing, but no association handle is present.") + val pdu = codec.constructMessage(s.recipient.localAddressToUse, + s.recipient, + serializeMessage(s.message), + s.senderOption, + seqOption = s.seqOpt, + ackOption = lastAck) + + val pduSize = pdu.size + remoteMetrics.logPayloadBytes(s.message, pduSize) + + if (pduSize > transport.maximumPayloadBytes) { + val reason = new OversizedPayloadException( + s"Discarding oversized payload sent to ${s.recipient}: max allowed size ${transport.maximumPayloadBytes} bytes, actual size of encoded ${s.message.getClass} was ${pdu.size} bytes.") + log.error(reason, "Transient association error (association remains live)") + true + } else { + val ok = h.write(pdu) + if (ok) { + ackDeadline = newAckDeadline + lastAck = None + } + ok + } + + case None => + throw new EndpointException( + "Internal error: Endpoint is in state Writing, but no association handle is present.") + } + } catch { + case e: NotSerializableException => + log.error( + e, + "Serializer not defined for message type [{}]. Transient association error (association remains live)", + s.message.getClass) + true + case e: IllegalArgumentException => + log.error( + e, + "Serializer not defined for message type [{}]. 
Transient association error (association remains live)", + s.message.getClass) + true + case e: MessageSerializer.SerializationException => + log.error(e, "{} Transient association error (association remains live)", e.getMessage) + true + case e: EndpointException => + publishAndThrow(e, Logging.ErrorLevel) + case NonFatal(e) => + publishAndThrow(new EndpointException("Failed to write message to the transport", e), Logging.ErrorLevel) } - } catch { - case e: NotSerializableException => - log.error(e, "Serializer not defined for message type [{}]. Transient association error (association remains live)", s.message.getClass) - true - case e: IllegalArgumentException => - log.error(e, "Serializer not defined for message type [{}]. Transient association error (association remains live)", s.message.getClass) - true - case e: MessageSerializer.SerializationException => - log.error(e, "{} Transient association error (association remains live)", e.getMessage) - true - case e: EndpointException => - publishAndThrow(e, Logging.ErrorLevel) - case NonFatal(e) => - publishAndThrow(new EndpointException("Failed to write message to the transport", e), Logging.ErrorLevel) - } def handoff: Receive = { case Terminated(_) => @@ -858,7 +927,7 @@ private[remote] class EndpointWriter( } case TakeOver(newHandle, replyTo) => // Shutdown old reader - handle foreach { _.disassociate("the association was replaced by a new one", log) } + handle.foreach { _.disassociate("the association was replaced by a new one", log) } handle = Some(newHandle) replyTo ! 
TookOver(self, newHandle) context.become(handoff) @@ -892,10 +961,22 @@ private[remote] class EndpointWriter( private def startReadEndpoint(handle: AkkaProtocolHandle): Some[ActorRef] = { val newReader = - context.watch(context.actorOf( - RARP(context.system).configureDispatcher(EndpointReader.props(localAddress, remoteAddress, transport, settings, codec, - msgDispatch, inbound, handle.handshakeInfo.uid, reliableDeliverySupervisor, receiveBuffers)).withDeploy(Deploy.local), - "endpointReader-" + AddressUrlEncoder(remoteAddress) + "-" + readerId.next())) + context.watch( + context.actorOf( + RARP(context.system) + .configureDispatcher( + EndpointReader.props(localAddress, + remoteAddress, + transport, + settings, + codec, + msgDispatch, + inbound, + handle.handshakeInfo.uid, + reliableDeliverySupervisor, + receiveBuffers)) + .withDeploy(Deploy.local), + "endpointReader-" + AddressUrlEncoder(remoteAddress) + "-" + readerId.next())) handle.readHandlerPromise.success(ActorHandleEventListener(newReader)) Some(newReader) } @@ -916,36 +997,44 @@ private[remote] class EndpointWriter( */ private[remote] object EndpointReader { - def props( - localAddress: Address, - remoteAddress: Address, - transport: Transport, - settings: RemoteSettings, - codec: AkkaPduCodec, - msgDispatch: InboundMessageDispatcher, - inbound: Boolean, - uid: Int, - reliableDeliverySupervisor: Option[ActorRef], - receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props = - Props(classOf[EndpointReader], localAddress, remoteAddress, transport, settings, codec, msgDispatch, inbound, - uid, reliableDeliverySupervisor, receiveBuffers) + def props(localAddress: Address, + remoteAddress: Address, + transport: Transport, + settings: RemoteSettings, + codec: AkkaPduCodec, + msgDispatch: InboundMessageDispatcher, + inbound: Boolean, + uid: Int, + reliableDeliverySupervisor: Option[ActorRef], + receiveBuffers: ConcurrentHashMap[Link, ResendState]): Props = + Props(classOf[EndpointReader], + localAddress, + 
remoteAddress, + transport, + settings, + codec, + msgDispatch, + inbound, + uid, + reliableDeliverySupervisor, + receiveBuffers) } /** * INTERNAL API */ -private[remote] class EndpointReader( - localAddress: Address, - remoteAddress: Address, - transport: Transport, - settings: RemoteSettings, - codec: AkkaPduCodec, - msgDispatch: InboundMessageDispatcher, - val inbound: Boolean, - val uid: Int, - val reliableDeliverySupervisor: Option[ActorRef], - val receiveBuffers: ConcurrentHashMap[Link, ResendState]) extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) { +private[remote] class EndpointReader(localAddress: Address, + remoteAddress: Address, + transport: Transport, + settings: RemoteSettings, + codec: AkkaPduCodec, + msgDispatch: InboundMessageDispatcher, + val inbound: Boolean, + val uid: Int, + val reliableDeliverySupervisor: Option[ActorRef], + val receiveBuffers: ConcurrentHashMap[Link, ResendState]) + extends EndpointActor(localAddress, remoteAddress, transport, settings, codec) { import EndpointWriter.{ OutboundAck, StopReading, StoppedReading } @@ -974,7 +1063,9 @@ private[remote] class EndpointReader( if (expectedState eq null) { if (receiveBuffers.putIfAbsent(key, ResendState(uid, ackedReceiveBuffer)) ne null) updateSavedState(key, receiveBuffers.get(key)) - } else if (!receiveBuffers.replace(key, expectedState, merge(ResendState(uid, ackedReceiveBuffer), expectedState))) + } else if (!receiveBuffers.replace(key, + expectedState, + merge(ResendState(uid, ackedReceiveBuffer), expectedState))) updateSavedState(key, receiveBuffers.get(key)) } @@ -995,19 +1086,20 @@ private[remote] class EndpointReader( if (msg.reliableDeliveryEnabled) { ackedReceiveBuffer = ackedReceiveBuffer.receive(msg) deliverAndAck() - } else try - msgDispatch.dispatch(msg.recipient, msg.recipientAddress, msg.serializedMessage, msg.senderOption) - catch { - case e: NotSerializableException => logTransientSerializationError(msg, e) - case e: 
IllegalArgumentException => logTransientSerializationError(msg, e) - } + } else + try msgDispatch.dispatch(msg.recipient, msg.recipientAddress, msg.serializedMessage, msg.senderOption) + catch { + case e: NotSerializableException => logTransientSerializationError(msg, e) + case e: IllegalArgumentException => logTransientSerializationError(msg, e) + } case None => } case InboundPayload(oversized) => log.error( - new OversizedPayloadException(s"Discarding oversized payload received: " + + new OversizedPayloadException( + s"Discarding oversized payload received: " + s"max allowed size [${transport.maximumPayloadBytes}] bytes, actual size [${oversized.size}] bytes."), "Transient error while reading from association (association remains live)") @@ -1020,12 +1112,11 @@ private[remote] class EndpointReader( private def logTransientSerializationError(msg: AkkaPduCodec.Message, error: Exception): Unit = { val sm = msg.serializedMessage - log.warning( - "Serializer not defined for message with serializer id [{}] and manifest [{}]. " + - "Transient association error (association remains live). {}", - sm.getSerializerId, - if (sm.hasMessageManifest) sm.getMessageManifest.toStringUtf8 else "", - error.getMessage) + log.warning("Serializer not defined for message with serializer id [{}] and manifest [{}]. " + + "Transient association error (association remains live). {}", + sm.getSerializerId, + if (sm.hasMessageManifest) sm.getMessageManifest.toStringUtf8 else "", + error.getMessage) } def notReading: Receive = { @@ -1040,12 +1131,15 @@ private[remote] class EndpointReader( if (log.isWarningEnabled) log.warning("Discarding inbound message to [{}] in read-only association to [{}]. 
" + - "If this happens often you may consider using akka.remote.use-passive-connections=off " + - "or use Artery TCP.", msgOption.map(_.recipient).getOrElse("unknown"), remoteAddress) + "If this happens often you may consider using akka.remote.use-passive-connections=off " + + "or use Artery TCP.", + msgOption.map(_.recipient).getOrElse("unknown"), + remoteAddress) case InboundPayload(oversized) => log.error( - new OversizedPayloadException(s"Discarding oversized payload received in read-only association: " + + new OversizedPayloadException( + s"Discarding oversized payload received in read-only association: " + s"max allowed size [${transport.maximumPayloadBytes}] bytes, actual size [${oversized.size}] bytes."), "Transient error while reading from association (association remains live)") @@ -1061,12 +1155,12 @@ private[remote] class EndpointReader( remoteAddress, InvalidAssociationException("The remote system terminated the association because it is shutting down.")) case AssociationHandle.Quarantined => - throw InvalidAssociation( - localAddress, - remoteAddress, - InvalidAssociationException("The remote system has quarantined this system. No further associations " + - "to the remote system are possible until this system is restarted."), - Some(AssociationHandle.Quarantined)) + throw InvalidAssociation(localAddress, + remoteAddress, + InvalidAssociationException( + "The remote system has quarantined this system. No further associations " + + "to the remote system are possible until this system is restarted."), + Some(AssociationHandle.Quarantined)) } private def deliverAndAck(): Unit = { @@ -1075,14 +1169,15 @@ private[remote] class EndpointReader( // Notify writer that some messages can be acked context.parent ! 
OutboundAck(ack) - deliver foreach { m => + deliver.foreach { m => msgDispatch.dispatch(m.recipient, m.recipientAddress, m.serializedMessage, m.senderOption) } } - private def tryDecodeMessageAndAck(pdu: ByteString): (Option[Ack], Option[Message]) = try { - codec.decodeMessage(pdu, provider, localAddress) - } catch { - case NonFatal(e) => throw new EndpointException("Error while decoding incoming Akka PDU", e) - } + private def tryDecodeMessageAndAck(pdu: ByteString): (Option[Ack], Option[Message]) = + try { + codec.decodeMessage(pdu, provider, localAddress) + } catch { + case NonFatal(e) => throw new EndpointException("Error while decoding incoming Akka PDU", e) + } } diff --git a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala index f8ecca7969..feff8ff414 100644 --- a/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala +++ b/akka-remote/src/main/scala/akka/remote/FailureDetectorRegistry.scala @@ -65,13 +65,16 @@ private[akka] object FailureDetectorLoader { * @return A configured instance of the given [[FailureDetector]] implementation */ def load(fqcn: String, config: Config, system: ActorSystem): FailureDetector = { - system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[FailureDetector]( - fqcn, List( - classOf[Config] -> config, - classOf[EventStream] -> system.eventStream)).recover({ - case e => throw new ConfigurationException( - s"Could not create custom failure detector [$fqcn] due to: ${e.toString}", e) - }).get + system + .asInstanceOf[ExtendedActorSystem] + .dynamicAccess + .createInstanceFor[FailureDetector](fqcn, + List(classOf[Config] -> config, classOf[EventStream] -> system.eventStream)) + .recover({ + case e => + throw new ConfigurationException(s"Could not create custom failure detector [$fqcn] due to: ${e.toString}", e) + }) + .get } /** diff --git a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala 
b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala index a9b81d9a62..8518f0334c 100644 --- a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala @@ -28,10 +28,11 @@ private[akka] object MessageSerializer { * Uses Akka Serialization for the specified ActorSystem to transform the given MessageProtocol to a message */ def deserialize(system: ExtendedActorSystem, messageProtocol: SerializedMessage): AnyRef = { - SerializationExtension(system).deserialize( - messageProtocol.getMessage.toByteArray, - messageProtocol.getSerializerId, - if (messageProtocol.hasMessageManifest) messageProtocol.getMessageManifest.toStringUtf8 else "").get + SerializationExtension(system) + .deserialize(messageProtocol.getMessage.toByteArray, + messageProtocol.getSerializerId, + if (messageProtocol.hasMessageManifest) messageProtocol.getMessageManifest.toStringUtf8 else "") + .get } /** @@ -60,11 +61,15 @@ private[akka] object MessageSerializer { } catch { case NonFatal(e) => throw new SerializationException(s"Failed to serialize remote message [${message.getClass}] " + - s"using serializer [${serializer.getClass}].", e) + s"using serializer [${serializer.getClass}].", + e) } finally Serialization.currentTransportInformation.value = oldInfo } - def serializeForArtery(serialization: Serialization, outboundEnvelope: OutboundEnvelope, headerBuilder: HeaderBuilder, envelope: EnvelopeBuffer): Unit = { + def serializeForArtery(serialization: Serialization, + outboundEnvelope: OutboundEnvelope, + headerBuilder: HeaderBuilder, + envelope: EnvelopeBuffer): Unit = { val message = outboundEnvelope.message val serializer = serialization.findSerializerFor(message) val oldInfo = Serialization.currentTransportInformation.value @@ -84,13 +89,12 @@ private[akka] object MessageSerializer { } finally Serialization.currentTransportInformation.value = oldInfo } - def deserializeForArtery( - @unused system: 
ExtendedActorSystem, - @unused originUid: Long, - serialization: Serialization, - serializer: Int, - classManifest: String, - envelope: EnvelopeBuffer): AnyRef = { + def deserializeForArtery(@unused system: ExtendedActorSystem, + @unused originUid: Long, + serialization: Serialization, + serializer: Int, + classManifest: String, + envelope: EnvelopeBuffer): AnyRef = { serialization.deserializeByteBuffer(envelope.byteBuffer, serializer, classManifest) } } diff --git a/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala index ad8efcf1c2..ebf16e04ae 100644 --- a/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala +++ b/akka-remote/src/main/scala/akka/remote/PhiAccrualFailureDetector.scala @@ -50,27 +50,24 @@ import akka.util.Helpers.ConfigOps * @param clock The clock, returning current time in milliseconds, but can be faked for testing * purposes. It is only used for measuring intervals (duration). 
*/ -class PhiAccrualFailureDetector( - val threshold: Double, - val maxSampleSize: Int, - val minStdDeviation: FiniteDuration, - val acceptableHeartbeatPause: FiniteDuration, - val firstHeartbeatEstimate: FiniteDuration, - eventStream: Option[EventStream])( - implicit - clock: Clock) extends FailureDetector { +class PhiAccrualFailureDetector(val threshold: Double, + val maxSampleSize: Int, + val minStdDeviation: FiniteDuration, + val acceptableHeartbeatPause: FiniteDuration, + val firstHeartbeatEstimate: FiniteDuration, + eventStream: Option[EventStream])(implicit + clock: Clock) + extends FailureDetector { /** * Constructor without eventStream to support backwards compatibility */ - def this( - threshold: Double, - maxSampleSize: Int, - minStdDeviation: FiniteDuration, - acceptableHeartbeatPause: FiniteDuration, - firstHeartbeatEstimate: FiniteDuration)(implicit clock: Clock) = - this( - threshold, maxSampleSize, minStdDeviation, acceptableHeartbeatPause, firstHeartbeatEstimate, None)(clock) + def this(threshold: Double, + maxSampleSize: Int, + minStdDeviation: FiniteDuration, + acceptableHeartbeatPause: FiniteDuration, + firstHeartbeatEstimate: FiniteDuration)(implicit clock: Clock) = + this(threshold, maxSampleSize, minStdDeviation, acceptableHeartbeatPause, firstHeartbeatEstimate, None)(clock) /** * Constructor that reads parameters from config. @@ -79,13 +76,12 @@ class PhiAccrualFailureDetector( * `heartbeat-interval`. 
*/ def this(config: Config, ev: EventStream) = - this( - threshold = config.getDouble("threshold"), - maxSampleSize = config.getInt("max-sample-size"), - minStdDeviation = config.getMillisDuration("min-std-deviation"), - acceptableHeartbeatPause = config.getMillisDuration("acceptable-heartbeat-pause"), - firstHeartbeatEstimate = config.getMillisDuration("heartbeat-interval"), - Some(ev)) + this(threshold = config.getDouble("threshold"), + maxSampleSize = config.getInt("max-sample-size"), + minStdDeviation = config.getMillisDuration("min-std-deviation"), + acceptableHeartbeatPause = config.getMillisDuration("acceptable-heartbeat-pause"), + firstHeartbeatEstimate = config.getMillisDuration("heartbeat-interval"), + Some(ev)) require(threshold > 0.0, "failure-detector.threshold must be > 0") require(maxSampleSize > 0, "failure-detector.max-sample-size must be > 0") @@ -138,7 +134,10 @@ class PhiAccrualFailureDetector( // don't use the first heartbeat after failure for the history, since a long pause will skew the stats if (isAvailable(timestamp)) { if (interval >= (acceptableHeartbeatPauseMillis / 3 * 2) && eventStream.isDefined) - eventStream.get.publish(Warning(this.toString, getClass, s"heartbeat interval is growing too large for address $address: $interval millis")) + eventStream.get.publish( + Warning(this.toString, + getClass, + s"heartbeat interval is growing too large for address $address: $interval millis")) oldState.history :+ interval } else oldState.history } @@ -205,11 +204,11 @@ private[akka] object HeartbeatHistory { * The stats (mean, variance, stdDeviation) are not defined for * for empty HeartbeatHistory, i.e. throws ArithmeticException. 
*/ - def apply(maxSampleSize: Int): HeartbeatHistory = HeartbeatHistory( - maxSampleSize = maxSampleSize, - intervals = immutable.IndexedSeq.empty, - intervalSum = 0L, - squaredIntervalSum = 0L) + def apply(maxSampleSize: Int): HeartbeatHistory = + HeartbeatHistory(maxSampleSize = maxSampleSize, + intervals = immutable.IndexedSeq.empty, + intervalSum = 0L, + squaredIntervalSum = 0L) } @@ -220,11 +219,10 @@ private[akka] object HeartbeatHistory { * The stats (mean, variance, stdDeviation) are not defined for * for empty HeartbeatHistory, i.e. throws ArithmeticException. */ -private[akka] final case class HeartbeatHistory private ( - maxSampleSize: Int, - intervals: immutable.IndexedSeq[Long], - intervalSum: Long, - squaredIntervalSum: Long) { +private[akka] final case class HeartbeatHistory private (maxSampleSize: Int, + intervals: immutable.IndexedSeq[Long], + intervalSum: Long, + squaredIntervalSum: Long) { // Heartbeat histories are created trough the firstHeartbeat variable of the PhiAccrualFailureDetector // which always have intervals.size > 0. 
@@ -244,20 +242,19 @@ private[akka] final case class HeartbeatHistory private ( @tailrec final def :+(interval: Long): HeartbeatHistory = { if (intervals.size < maxSampleSize) - HeartbeatHistory( - maxSampleSize, - intervals = intervals :+ interval, - intervalSum = intervalSum + interval, - squaredIntervalSum = squaredIntervalSum + pow2(interval)) + HeartbeatHistory(maxSampleSize, + intervals = intervals :+ interval, + intervalSum = intervalSum + interval, + squaredIntervalSum = squaredIntervalSum + pow2(interval)) else dropOldest :+ interval // recur } - private def dropOldest: HeartbeatHistory = HeartbeatHistory( - maxSampleSize, - intervals = intervals drop 1, - intervalSum = intervalSum - intervals.head, - squaredIntervalSum = squaredIntervalSum - pow2(intervals.head)) + private def dropOldest: HeartbeatHistory = + HeartbeatHistory(maxSampleSize, + intervals = intervals.drop(1), + intervalSum = intervalSum - intervals.head, + squaredIntervalSum = squaredIntervalSum - pow2(intervals.head)) private def pow2(x: Long) = x * x } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 247035560e..e0338a1277 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -35,7 +35,7 @@ import akka.serialization.Serialization @InternalApi private[akka] object RemoteActorRefProvider { private final case class Internals(transport: RemoteTransport, remoteDaemon: InternalActorRef) - extends NoSerializationVerificationNeeded + extends NoSerializationVerificationNeeded sealed trait TerminatorState case object Uninitialized extends TerminatorState @@ -44,8 +44,10 @@ private[akka] object RemoteActorRefProvider { case object WaitTransportShutdown extends TerminatorState case object Finished extends TerminatorState - private class RemotingTerminator(systemGuardian: ActorRef) extends 
Actor with FSM[TerminatorState, Option[Internals]] - with RequiresMessageQueue[UnboundedMessageQueueSemantics] { + private class RemotingTerminator(systemGuardian: ActorRef) + extends Actor + with FSM[TerminatorState, Option[Internals]] + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import context.dispatcher startWith(Uninitialized, None) @@ -53,7 +55,7 @@ private[akka] object RemoteActorRefProvider { when(Uninitialized) { case Event(i: Internals, _) => systemGuardian ! RegisterTerminationHook - goto(Idle) using Some(i) + goto(Idle).using(Some(i)) } when(Idle) { @@ -67,7 +69,7 @@ private[akka] object RemoteActorRefProvider { when(WaitDaemonShutdown) { case Event(TerminationHookDone, Some(internals)) => log.info("Remote daemon shut down; proceeding with flushing remote transports.") - internals.transport.shutdown() pipeTo self + internals.transport.shutdown().pipeTo(self) goto(WaitTransportShutdown) } @@ -91,10 +93,8 @@ private[akka] object RemoteActorRefProvider { * and handled as dead letters to the original (remote) destination. Without this special case, DeathWatch related * functionality breaks, like the special handling of Watch messages arriving to dead letters. 
*/ - private class RemoteDeadLetterActorRef( - _provider: ActorRefProvider, - _path: ActorPath, - _eventStream: EventStream) extends DeadLetterActorRef(_provider, _path, _eventStream) { + private class RemoteDeadLetterActorRef(_provider: ActorRefProvider, _path: ActorPath, _eventStream: EventStream) + extends DeadLetterActorRef(_provider, _path, _eventStream) { import EndpointManager.Send override def !(message: Any)(implicit sender: ActorRef): Unit = message match { @@ -107,11 +107,15 @@ private[akka] object RemoteActorRefProvider { // the dead letter status if (seqOpt.isEmpty) super.!(DeadLetter(m, senderOption.getOrElse(_provider.deadLetters), recipient)) case env: OutboundEnvelope => - super.!(DeadLetter(unwrapSystemMessageEnvelope(env.message), env.sender.getOrElse(_provider.deadLetters), - env.recipient.getOrElse(_provider.deadLetters))) + super.!( + DeadLetter(unwrapSystemMessageEnvelope(env.message), + env.sender.getOrElse(_provider.deadLetters), + env.recipient.getOrElse(_provider.deadLetters))) case DeadLetter(env: OutboundEnvelope, _, _) => - super.!(DeadLetter(unwrapSystemMessageEnvelope(env.message), env.sender.getOrElse(_provider.deadLetters), - env.recipient.getOrElse(_provider.deadLetters))) + super.!( + DeadLetter(unwrapSystemMessageEnvelope(env.message), + env.sender.getOrElse(_provider.deadLetters), + env.recipient.getOrElse(_provider.deadLetters))) case _ => super.!(message)(sender) } @@ -132,11 +136,11 @@ private[akka] object RemoteActorRefProvider { * Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it. 
* */ -private[akka] class RemoteActorRefProvider( - val systemName: String, - val settings: ActorSystem.Settings, - val eventStream: EventStream, - val dynamicAccess: DynamicAccess) extends ActorRefProvider { +private[akka] class RemoteActorRefProvider(val systemName: String, + val settings: ActorSystem.Settings, + val eventStream: EventStream, + val dynamicAccess: DynamicAccess) + extends ActorRefProvider { import RemoteActorRefProvider._ val remoteSettings: RemoteSettings = new RemoteSettings(settings.config) @@ -149,7 +153,12 @@ private[akka] class RemoteActorRefProvider( */ protected def createDeployer: RemoteDeployer = new RemoteDeployer(settings, dynamicAccess) - private val local = new LocalActorRefProvider(systemName, settings, eventStream, dynamicAccess, deployer, + private val local = new LocalActorRefProvider( + systemName, + settings, + eventStream, + dynamicAccess, + deployer, Some(deadLettersPath => new RemoteDeadLetterActorRef(this, deadLettersPath, eventStream))) @volatile @@ -164,7 +173,8 @@ private[akka] class RemoteActorRefProvider( override def guardian: LocalActorRef = local.guardian override def systemGuardian: LocalActorRef = local.systemGuardian override def terminationFuture: Future[Terminated] = local.terminationFuture - override def registerTempActor(actorRef: InternalActorRef, path: ActorPath): Unit = local.registerTempActor(actorRef, path) + override def registerTempActor(actorRef: InternalActorRef, path: ActorPath): Unit = + local.registerTempActor(actorRef, path) override def unregisterTempActor(path: ActorPath): Unit = local.unregisterTempActor(path) override def tempPath(): ActorPath = local.tempPath() override def tempContainer: VirtualPathContainer = local.tempContainer @@ -195,23 +205,20 @@ private[akka] class RemoteActorRefProvider( val internals = Internals( remoteDaemon = { - val d = new RemoteSystemDaemon( - system, - local.rootPath / "remote", - rootGuardian, - remotingTerminator, - _log, - untrustedMode = 
remoteSettings.UntrustedMode) + val d = new RemoteSystemDaemon(system, + local.rootPath / "remote", + rootGuardian, + remotingTerminator, + _log, + untrustedMode = remoteSettings.UntrustedMode) local.registerExtraNames(Map(("remote", d))) d }, - transport = - if (remoteSettings.Artery.Enabled) remoteSettings.Artery.Transport match { - case ArterySettings.AeronUpd => new ArteryAeronUdpTransport(system, this) - case ArterySettings.Tcp => new ArteryTcpTransport(system, this, tlsEnabled = false) - case ArterySettings.TlsTcp => new ArteryTcpTransport(system, this, tlsEnabled = true) - } - else new Remoting(system, this)) + transport = if (remoteSettings.Artery.Enabled) remoteSettings.Artery.Transport match { + case ArterySettings.AeronUpd => new ArteryAeronUdpTransport(system, this) + case ArterySettings.Tcp => new ArteryTcpTransport(system, this, tlsEnabled = false) + case ArterySettings.TlsTcp => new ArteryTcpTransport(system, this, tlsEnabled = true) + } else new Remoting(system, this)) _internals = internals remotingTerminator ! 
internals @@ -230,26 +237,34 @@ private[akka] class RemoteActorRefProvider( val failureDetector = createRemoteWatcherFailureDetector(system) system.systemActorOf( configureDispatcher( - RemoteWatcher.props( - failureDetector, - heartbeatInterval = WatchHeartBeatInterval, - unreachableReaperInterval = WatchUnreachableReaperInterval, - heartbeatExpectedResponseAfter = WatchHeartbeatExpectedResponseAfter)), + RemoteWatcher.props(failureDetector, + heartbeatInterval = WatchHeartBeatInterval, + unreachableReaperInterval = WatchUnreachableReaperInterval, + heartbeatExpectedResponseAfter = WatchHeartbeatExpectedResponseAfter)), "remote-watcher") } protected def createRemoteWatcherFailureDetector(system: ExtendedActorSystem): FailureDetectorRegistry[Address] = { def createFailureDetector(): FailureDetector = - FailureDetectorLoader.load(remoteSettings.WatchFailureDetectorImplementationClass, remoteSettings.WatchFailureDetectorConfig, system) + FailureDetectorLoader.load(remoteSettings.WatchFailureDetectorImplementationClass, + remoteSettings.WatchFailureDetectorConfig, + system) new DefaultFailureDetectorRegistry(() => createFailureDetector()) } protected def createRemoteDeploymentWatcher(system: ActorSystemImpl): ActorRef = - system.systemActorOf(remoteSettings.configureDispatcher(Props[RemoteDeploymentWatcher]()), "remote-deployment-watcher") + system.systemActorOf(remoteSettings.configureDispatcher(Props[RemoteDeploymentWatcher]()), + "remote-deployment-watcher") - def actorOf(system: ActorSystemImpl, props: Props, supervisor: InternalActorRef, path: ActorPath, - systemService: Boolean, deploy: Option[Deploy], lookupDeploy: Boolean, async: Boolean): InternalActorRef = + def actorOf(system: ActorSystemImpl, + props: Props, + supervisor: InternalActorRef, + path: ActorPath, + systemService: Boolean, + deploy: Option[Deploy], + lookupDeploy: Boolean, + async: Boolean): InternalActorRef = if (systemService) local.actorOf(system, props, supervisor, path, systemService, 
deploy, lookupDeploy, async) else { @@ -298,38 +313,42 @@ private[akka] class RemoteActorRefProvider( case "user" | "system" => deployer.lookup(elems.drop(1)) case "remote" => lookupRemotes(elems) case _ => None - } - else None + } else None val deployment = { deploy.toList ::: lookup.toList match { case Nil => Nil - case l => List(l reduce ((a, b) => b withFallback a)) + case l => List(l.reduce((a, b) => b.withFallback(a))) } } - Iterator(props.deploy) ++ deployment.iterator reduce ((a, b) => b withFallback a) match { + (Iterator(props.deploy) ++ deployment.iterator).reduce((a, b) => b.withFallback(a)) match { case d @ Deploy(_, _, _, RemoteScope(address), _, _) => if (hasAddress(address)) { local.actorOf(system, props, supervisor, path, false, deployment.headOption, false, async) } else if (props.deploy.scope == LocalScope) { - throw new ConfigurationException(s"configuration requested remote deployment for local-only Props at [$path]") - } else try { + throw new ConfigurationException( + s"configuration requested remote deployment for local-only Props at [$path]") + } else try { - // for consistency we check configuration of dispatcher and mailbox locally - val dispatcher = system.dispatchers.lookup(props.dispatcher) - system.mailboxes.getMailboxType(props, dispatcher.configurator.config) + try { + // for consistency we check configuration of dispatcher and mailbox locally + val dispatcher = system.dispatchers.lookup(props.dispatcher) + system.mailboxes.getMailboxType(props, dispatcher.configurator.config) + } catch { + case NonFatal(e) => + throw new ConfigurationException( + s"configuration problem while creating [$path] with dispatcher [${props.dispatcher}] and mailbox [${props.mailbox}]", + e) + } + val localAddress = transport.localAddressForRemote(address) + val rpath = + (RootActorPath(address) / "remote" / localAddress.protocol / localAddress.hostPort / path.elements) + .withUid(path.uid) + new RemoteActorRef(transport, localAddress, rpath, supervisor, 
Some(props), Some(d)) } catch { - case NonFatal(e) => throw new ConfigurationException( - s"configuration problem while creating [$path] with dispatcher [${props.dispatcher}] and mailbox [${props.mailbox}]", e) + case NonFatal(e) => throw new IllegalArgumentException(s"remote deployment failed for [$path]", e) } - val localAddress = transport.localAddressForRemote(address) - val rpath = (RootActorPath(address) / "remote" / localAddress.protocol / localAddress.hostPort / path.elements). - withUid(path.uid) - new RemoteActorRef(transport, localAddress, rpath, supervisor, Some(props), Some(d)) - } catch { - case NonFatal(e) => throw new IllegalArgumentException(s"remote deployment failed for [$path]", e) - } case _ => local.actorOf(system, props, supervisor, path, systemService, deployment.headOption, false, async) @@ -339,14 +358,19 @@ private[akka] class RemoteActorRefProvider( @deprecated("use actorSelection instead of actorFor", "2.2") override private[akka] def actorFor(path: ActorPath): InternalActorRef = { if (hasAddress(path.address)) actorFor(rootGuardian, path.elements) - else try { - new RemoteActorRef(transport, transport.localAddressForRemote(path.address), - path, Nobody, props = None, deploy = None) - } catch { - case NonFatal(e) => - log.error(e, "Error while looking up address [{}]", path.address) - new EmptyLocalActorRef(this, path, eventStream) - } + else + try { + new RemoteActorRef(transport, + transport.localAddressForRemote(path.address), + path, + Nobody, + props = None, + deploy = None) + } catch { + case NonFatal(e) => + log.error(e, "Error while looking up address [{}]", path.address) + new EmptyLocalActorRef(this, path, eventStream) + } } @deprecated("use actorSelection instead of actorFor", "2.2") @@ -356,8 +380,12 @@ private[akka] class RemoteActorRefProvider( else { val rootPath = RootActorPath(address) / elems try { - new RemoteActorRef(transport, transport.localAddressForRemote(address), - rootPath, Nobody, props = None, deploy = None) 
+ new RemoteActorRef(transport, + transport.localAddressForRemote(address), + rootPath, + Nobody, + props = None, + deploy = None) } catch { case NonFatal(e) => log.error(e, "Error while looking up address [{}]", rootPath.address) @@ -373,14 +401,19 @@ private[akka] class RemoteActorRefProvider( def rootGuardianAt(address: Address): ActorRef = { if (hasAddress(address)) rootGuardian - else try { - new RemoteActorRef(transport, transport.localAddressForRemote(address), - RootActorPath(address), Nobody, props = None, deploy = None) - } catch { - case NonFatal(e) => - log.error(e, "No root guardian at [{}]", address) - new EmptyLocalActorRef(this, RootActorPath(address), eventStream) - } + else + try { + new RemoteActorRef(transport, + transport.localAddressForRemote(address), + RootActorPath(address), + Nobody, + props = None, + deploy = None) + } catch { + case NonFatal(e) => + log.error(e, "No root guardian at [{}]", address) + new EmptyLocalActorRef(this, RootActorPath(address), eventStream) + } } /** @@ -392,13 +425,19 @@ private[akka] class RemoteActorRefProvider( case ActorPathExtractor(address, elems) => if (hasAddress(address)) local.resolveActorRef(rootGuardian, elems) - else try { - new RemoteActorRef(transport, localAddress, RootActorPath(address) / elems, Nobody, props = None, deploy = None) - } catch { - case NonFatal(e) => - log.warning("Error while resolving ActorRef [{}] due to [{}]", path, e.getMessage) - new EmptyLocalActorRef(this, RootActorPath(address) / elems, eventStream) - } + else + try { + new RemoteActorRef(transport, + localAddress, + RootActorPath(address) / elems, + Nobody, + props = None, + deploy = None) + } catch { + case NonFatal(e) => + log.warning("Error while resolving ActorRef [{}] due to [{}]", path, e.getMessage) + new EmptyLocalActorRef(this, RootActorPath(address) / elems, eventStream) + } case _ => log.debug("Resolve (deserialization) of unknown (invalid) path [{}], using deadLetters.", path) deadLetters @@ -424,8 +463,12 @@ 
private[akka] class RemoteActorRefProvider( else { val rootPath = RootActorPath(address) / elems try { - new RemoteActorRef(transport, transport.localAddressForRemote(address), - rootPath, Nobody, props = None, deploy = None) + new RemoteActorRef(transport, + transport.localAddressForRemote(address), + rootPath, + Nobody, + props = None, + deploy = None) } catch { case NonFatal(e) => log.warning("Error while resolving ActorRef [{}] due to [{}]", path, e.getMessage) @@ -439,14 +482,19 @@ private[akka] class RemoteActorRefProvider( def resolveActorRef(path: ActorPath): ActorRef = { if (hasAddress(path.address)) local.resolveActorRef(rootGuardian, path.elements) - else try { - new RemoteActorRef(transport, transport.localAddressForRemote(path.address), - path, Nobody, props = None, deploy = None) - } catch { - case NonFatal(e) => - log.warning("Error while resolving ActorRef [{}] due to [{}]", path, e.getMessage) - new EmptyLocalActorRef(this, path, eventStream) - } + else + try { + new RemoteActorRef(transport, + transport.localAddressForRemote(path.address), + path, + Nobody, + props = None, + deploy = None) + } catch { + case NonFatal(e) => + log.warning("Error while resolving ActorRef [{}] due to [{}]", path, e.getMessage) + new EmptyLocalActorRef(this, path, eventStream) + } } /** @@ -459,16 +507,18 @@ private[akka] class RemoteActorRefProvider( // actorSelection can't be used here because then it is not guaranteed that the actor is created // before someone can send messages to it resolveActorRef(RootActorPath(ref.path.address) / "remote") ! - DaemonMsgCreate(props, deploy, ref.path.toSerializationFormat, supervisor) + DaemonMsgCreate(props, deploy, ref.path.toSerializationFormat, supervisor) remoteDeploymentWatcher ! 
RemoteDeploymentWatcher.WatchRemote(ref, supervisor) } def getExternalAddressFor(addr: Address): Option[Address] = { addr match { - case _ if hasAddress(addr) => Some(local.rootPath.address) - case Address(_, _, Some(_), Some(_)) => try Some(transport.localAddressForRemote(addr)) catch { case NonFatal(_) => None } - case _ => None + case _ if hasAddress(addr) => Some(local.rootPath.address) + case Address(_, _, Some(_), Some(_)) => + try Some(transport.localAddressForRemote(addr)) + catch { case NonFatal(_) => None } + case _ => None } } @@ -513,14 +563,14 @@ private[akka] trait RemoteRef extends ActorRefScope { * Remote ActorRef that is used when referencing the Actor on a different node than its "home" node. * This reference is network-aware (remembers its origin) and immutable. */ -private[akka] class RemoteActorRef private[akka] ( - remote: RemoteTransport, - val localAddressToUse: Address, - val path: ActorPath, - val getParent: InternalActorRef, - props: Option[Props], - deploy: Option[Deploy]) - extends InternalActorRef with RemoteRef { +private[akka] class RemoteActorRef private[akka] (remote: RemoteTransport, + val localAddressToUse: Address, + val path: ActorPath, + val getParent: InternalActorRef, + props: Option[Props], + deploy: Option[Deploy]) + extends InternalActorRef + with RemoteRef { if (path.address.hasLocalScope) throw new IllegalArgumentException(s"Unexpected local address in RemoteActorRef [$this]") @@ -542,7 +592,7 @@ private[akka] class RemoteActorRef private[akka] ( val s = name.toStream s.headOption match { case None => this - case Some("..") => getParent getChild name + case Some("..") => getParent.getChild(name) case _ => new RemoteActorRef(remote, localAddressToUse, path / s, Nobody, props = None, deploy = None) } } @@ -565,7 +615,9 @@ private[akka] class RemoteActorRef private[akka] ( */ def isWatchIntercepted(watchee: ActorRef, watcher: ActorRef) = if (watchee.path.uid == akka.actor.ActorCell.undefinedUid) { - 
provider.log.debug("actorFor is deprecated, and watching a remote ActorRef acquired with actorFor is not reliable: [{}]", watchee.path) + provider.log.debug( + "actorFor is deprecated, and watching a remote ActorRef acquired with actorFor is not reliable: [{}]", + watchee.path) false // Not managed by the remote watcher, so not reliable to communication failure or remote system crash } else { // If watchee != this then watcher should == this. This is a reverse watch, and it is not intercepted @@ -588,7 +640,8 @@ private[akka] class RemoteActorRef private[akka] ( override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = { if (message == null) throw InvalidMessageException("Message is null") - try remote.send(message, OptionVal(sender), this) catch handleException(message, sender) + try remote.send(message, OptionVal(sender), this) + catch handleException(message, sender) } override def provider: RemoteActorRefProvider = remote.provider diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala index 50d847cdc4..a0d02eddc9 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala @@ -6,7 +6,19 @@ package akka.remote import scala.annotation.tailrec import scala.util.control.NonFatal -import akka.actor.{ Actor, ActorPath, ActorPathExtractor, ActorRef, ActorSystemImpl, AddressTerminated, Deploy, InternalActorRef, Nobody, Props, VirtualPathContainer } +import akka.actor.{ + Actor, + ActorPath, + ActorPathExtractor, + ActorRef, + ActorSystemImpl, + AddressTerminated, + Deploy, + InternalActorRef, + Nobody, + Props, + VirtualPathContainer +} import akka.event.{ AddressTerminatedTopic, LogMarker, MarkerLoggingAdapter } import akka.dispatch.sysmsg.{ DeathWatchNotification, SystemMessage, Watch } import akka.actor.ActorRefWithCell @@ -33,7 +45,8 @@ private[akka] sealed trait DaemonMsg * INTERNAL API */ 
@SerialVersionUID(1L) -private[akka] final case class DaemonMsgCreate(props: Props, deploy: Deploy, path: String, supervisor: ActorRef) extends DaemonMsg +private[akka] final case class DaemonMsgCreate(props: Props, deploy: Deploy, path: String, supervisor: ActorRef) + extends DaemonMsg /** * INTERNAL API @@ -42,14 +55,13 @@ private[akka] final case class DaemonMsgCreate(props: Props, deploy: Deploy, pat * * It acts as the brain of the remote that responds to system remote events (messages) and undertakes action. */ -private[akka] class RemoteSystemDaemon( - system: ActorSystemImpl, - _path: ActorPath, - _parent: InternalActorRef, - terminator: ActorRef, - _log: MarkerLoggingAdapter, - val untrustedMode: Boolean) - extends VirtualPathContainer(system.provider, _path, _parent, _log) { +private[akka] class RemoteSystemDaemon(system: ActorSystemImpl, + _path: ActorPath, + _parent: InternalActorRef, + terminator: ActorRef, + _log: MarkerLoggingAdapter, + val untrustedMode: Boolean) + extends VirtualPathContainer(system.provider, _path, _parent, _log) { import akka.actor.SystemGuardian._ @@ -107,7 +119,7 @@ private[akka] class RemoteSystemDaemon( if (last == -1) (Nobody, n) else rec(s.substring(0, last), n + 1) case ref if uid != undefinedUid && uid != ref.path.uid => (Nobody, n) - case ref => (ref, n) + case ref => (ref, n) } } @@ -142,73 +154,78 @@ private[akka] class RemoteSystemDaemon( case _ => super.sendSystemMessage(message) } - override def !(msg: Any)(implicit sender: ActorRef = Actor.noSender): Unit = try msg match { - case message: DaemonMsg => - log.debug("Received command [{}] to RemoteSystemDaemon on [{}]", message, path.address) - message match { - case DaemonMsgCreate(_, _, path, _) if untrustedMode => - log.debug("does not accept deployments (untrusted) for [{}]", path) // TODO add security marker? 
+ override def !(msg: Any)(implicit sender: ActorRef = Actor.noSender): Unit = + try msg match { + case message: DaemonMsg => + log.debug("Received command [{}] to RemoteSystemDaemon on [{}]", message, path.address) + message match { + case DaemonMsgCreate(_, _, path, _) if untrustedMode => + log.debug("does not accept deployments (untrusted) for [{}]", path) // TODO add security marker? - case DaemonMsgCreate(props, deploy, path, supervisor) if whitelistEnabled => - val name = props.clazz.getCanonicalName - if (remoteDeploymentWhitelist.contains(name)) - doCreateActor(message, props, deploy, path, supervisor) - else { - val ex = new NotWhitelistedClassRemoteDeploymentAttemptException(props.actorClass, remoteDeploymentWhitelist) - log.error(LogMarker.Security, ex, - "Received command to create remote Actor, but class [{}] is not white-listed! " + - "Target path: [{}]", props.actorClass, path) - } - case DaemonMsgCreate(props, deploy, path, supervisor) => - doCreateActor(message, props, deploy, path, supervisor) - } - - case sel: ActorSelectionMessage => - val (concatenatedChildNames, m) = { - val iter = sel.elements.iterator - // find child elements, and the message to send, which is a remaining ActorSelectionMessage - // in case of SelectChildPattern, otherwise the actual message of the selection - @tailrec def rec(acc: List[String]): (List[String], Any) = - if (iter.isEmpty) - (acc.reverse, sel.msg) - else { - iter.next() match { - case SelectChildName(name) => rec(name :: acc) - case SelectParent if acc.isEmpty => rec(acc) - case SelectParent => rec(acc.tail) - case pat: SelectChildPattern => (acc.reverse, sel.copy(elements = pat +: iter.toVector)) + case DaemonMsgCreate(props, deploy, path, supervisor) if whitelistEnabled => + val name = props.clazz.getCanonicalName + if (remoteDeploymentWhitelist.contains(name)) + doCreateActor(message, props, deploy, path, supervisor) + else { + val ex = + new 
NotWhitelistedClassRemoteDeploymentAttemptException(props.actorClass, remoteDeploymentWhitelist) + log.error(LogMarker.Security, + ex, + "Received command to create remote Actor, but class [{}] is not white-listed! " + + "Target path: [{}]", + props.actorClass, + path) } - } - rec(Nil) - } - getChild(concatenatedChildNames.iterator) match { - case Nobody => - val emptyRef = new EmptyLocalActorRef(system.provider, path / sel.elements.map(_.toString), - system.eventStream) - emptyRef.tell(sel, sender) - case child => - child.tell(m, sender) - } + case DaemonMsgCreate(props, deploy, path, supervisor) => + doCreateActor(message, props, deploy, path, supervisor) + } - case Identify(messageId) => sender ! ActorIdentity(messageId, Some(this)) + case sel: ActorSelectionMessage => + val (concatenatedChildNames, m) = { + val iter = sel.elements.iterator + // find child elements, and the message to send, which is a remaining ActorSelectionMessage + // in case of SelectChildPattern, otherwise the actual message of the selection + @tailrec def rec(acc: List[String]): (List[String], Any) = + if (iter.isEmpty) + (acc.reverse, sel.msg) + else { + iter.next() match { + case SelectChildName(name) => rec(name :: acc) + case SelectParent if acc.isEmpty => rec(acc) + case SelectParent => rec(acc.tail) + case pat: SelectChildPattern => (acc.reverse, sel.copy(elements = pat +: iter.toVector)) + } + } + rec(Nil) + } + getChild(concatenatedChildNames.iterator) match { + case Nobody => + val emptyRef = + new EmptyLocalActorRef(system.provider, path / sel.elements.map(_.toString), system.eventStream) + emptyRef.tell(sel, sender) + case child => + child.tell(m, sender) + } - case TerminationHook => - terminating.switchOn { - terminationHookDoneWhenNoChildren() - foreachChild { system.stop } - } + case Identify(messageId) => sender ! 
ActorIdentity(messageId, Some(this)) - case AddressTerminated(address) => - foreachChild { - case a: InternalActorRef if a.getParent.path.address == address => system.stop(a) - case _ => // skip, this child doesn't belong to the terminated address - } + case TerminationHook => + terminating.switchOn { + terminationHookDoneWhenNoChildren() + foreachChild { system.stop } + } - case unknown => log.warning(LogMarker.Security, "Unknown message [{}] received by [{}]", unknown, this) + case AddressTerminated(address) => + foreachChild { + case a: InternalActorRef if a.getParent.path.address == address => system.stop(a) + case _ => // skip, this child doesn't belong to the terminated address + } - } catch { - case NonFatal(e) => log.error(e, "exception while processing remote command [{}] from [{}]", msg, sender) - } + case unknown => log.warning(LogMarker.Security, "Unknown message [{}] received by [{}]", unknown, this) + + } catch { + case NonFatal(e) => log.error(e, "exception while processing remote command [{}] from [{}]", msg, sender) + } private def doCreateActor(message: DaemonMsg, props: Props, deploy: Deploy, path: String, supervisor: ActorRef) = { path match { @@ -225,14 +242,21 @@ private[akka] class RemoteSystemDaemon( } val isTerminating = !terminating.whileOff { val parent = supervisor.asInstanceOf[InternalActorRef] - val actor = system.provider.actorOf(system, props, parent, - p, systemService = false, Some(deploy), lookupDeploy = true, async = false) + val actor = system.provider.actorOf(system, + props, + parent, + p, + systemService = false, + Some(deploy), + lookupDeploy = true, + async = false) addChild(childName, actor) actor.sendSystemMessage(Watch(actor, this)) actor.start() if (addChildParentNeedsWatch(parent, actor)) parent.sendSystemMessage(Watch(parent, this)) } - if (isTerminating) log.error("Skipping [{}] to RemoteSystemDaemon on [{}] while terminating", message, p.address) + if (isTerminating) + log.error("Skipping [{}] to RemoteSystemDaemon 
on [{}] while terminating", message, p.address) case _ => log.debug("remote path does not match path from message [{}]", message) } @@ -246,7 +270,7 @@ private[akka] class RemoteSystemDaemon( /** INTERNAL API */ final class NotWhitelistedClassRemoteDeploymentAttemptException(illegal: Class[_], whitelist: immutable.Set[String]) - extends RuntimeException( - s"Attempted to deploy not whitelisted Actor class: " + + extends RuntimeException( + s"Attempted to deploy not whitelisted Actor class: " + s"[$illegal], " + s"whitelisted classes: [${whitelist.mkString(", ")}]") diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index c1829311f6..9ae41408f7 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -21,7 +21,8 @@ final case class RemoteScope(node: Address) extends Scope { /** * INTERNAL API */ -private[akka] class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends Deployer(_settings, _pm) { +private[akka] class RemoteDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) + extends Deployer(_settings, _pm) { override def parseConfig(path: String, config: Config): Option[Deploy] = { super.parseConfig(path, config) match { @@ -32,10 +33,11 @@ private[akka] class RemoteDeployer(_settings: ActorSystem.Settings, _pm: Dynamic case _ => val nodes = immutableSeq(deploy.config.getStringList("target.nodes")).map(AddressFromURIString(_)) if (nodes.isEmpty || deploy.routerConfig == NoRouter) d - else deploy.routerConfig match { - case r: Pool => Some(deploy.copy(routerConfig = RemoteRouterConfig(r, nodes))) - case _ => d - } + else + deploy.routerConfig match { + case r: Pool => Some(deploy.copy(routerConfig = RemoteRouterConfig(r, nodes))) + case _ => d + } } case None => None } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala 
b/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala index 764cc4ea85..0b6f4d088b 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeploymentWatcher.scala @@ -9,7 +9,7 @@ import akka.actor.Terminated import akka.actor.Actor import akka.actor.ActorRef import akka.dispatch.sysmsg.DeathWatchNotification -import akka.dispatch.{ UnboundedMessageQueueSemantics, RequiresMessageQueue } +import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } /** * INTERNAL API @@ -33,10 +33,10 @@ private[akka] class RemoteDeploymentWatcher extends Actor with RequiresMessageQu supervisors += (a -> supervisor) context.watch(a) - case t @ Terminated(a) if supervisors isDefinedAt a => + case t @ Terminated(a) if supervisors.isDefinedAt(a) => // send extra DeathWatchNotification to the supervisor so that it will remove the child - supervisors(a).sendSystemMessage(DeathWatchNotification(a, existenceConfirmed = t.existenceConfirmed, - addressTerminated = t.addressTerminated)) + supervisors(a).sendSystemMessage( + DeathWatchNotification(a, existenceConfirmed = t.existenceConfirmed, addressTerminated = t.addressTerminated)) supervisors -= a case _: Terminated => diff --git a/akka-remote/src/main/scala/akka/remote/RemoteMetricsExtension.scala b/akka-remote/src/main/scala/akka/remote/RemoteMetricsExtension.scala index 00ec176f1e..63e397800a 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteMetricsExtension.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteMetricsExtension.scala @@ -36,6 +36,7 @@ private[akka] object RemoteMetricsExtension extends ExtensionId[RemoteMetrics] w * INTERNAL API */ private[akka] trait RemoteMetrics extends Extension { + /** * Logging of the size of different message types. 
* Maximum detected size per message type is logged once, with @@ -56,8 +57,8 @@ private[akka] class RemoteMetricsOff extends RemoteMetrics { */ private[akka] class RemoteMetricsOn(system: ExtendedActorSystem) extends RemoteMetrics { - private val logFrameSizeExceeding: Int = RARP(system).provider.remoteSettings.LogFrameSizeExceeding - .getOrElse(Int.MaxValue) + private val logFrameSizeExceeding: Int = + RARP(system).provider.remoteSettings.LogFrameSizeExceeding.getOrElse(Int.MaxValue) private val log = Logging(system, this.getClass) private val maxPayloadBytes: ConcurrentHashMap[Class[_], Integer] = new ConcurrentHashMap diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index b2dae862c1..02d22d4f28 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -8,7 +8,7 @@ import com.typesafe.config.Config import scala.concurrent.duration._ import akka.util.Timeout import scala.collection.immutable -import akka.util.Helpers.{ ConfigOps, Requiring, toRootLowerCase } +import akka.util.Helpers.{ toRootLowerCase, ConfigOps, Requiring } import akka.japi.Util._ import akka.actor.Props import akka.event.Logging @@ -36,12 +36,15 @@ final class RemoteSettings(val config: Config) { val TrustedSelectionPaths: Set[String] = immutableSeq(getStringList("akka.remote.trusted-selection-paths")).toSet - val RemoteLifecycleEventsLogLevel: LogLevel = toRootLowerCase(getString("akka.remote.log-remote-lifecycle-events")) match { + val RemoteLifecycleEventsLogLevel + : LogLevel = toRootLowerCase(getString("akka.remote.log-remote-lifecycle-events")) match { case "on" => Logging.DebugLevel - case other => Logging.levelFor(other) match { - case Some(level) => level - case None => throw new ConfigurationException("Logging level must be one of (on, off, debug, info, warning, error)") - } + case other => + Logging.levelFor(other) 
match { + case Some(level) => level + case None => + throw new ConfigurationException("Logging level must be one of (on, off, debug, info, warning, error)") + } } val Dispatcher: String = getString("akka.remote.use-dispatcher") @@ -50,25 +53,25 @@ final class RemoteSettings(val config: Config) { val ShutdownTimeout: Timeout = { Timeout(config.getMillisDuration("akka.remote.shutdown-timeout")) - } requiring (_.duration > Duration.Zero, "shutdown-timeout must be > 0") + }.requiring(_.duration > Duration.Zero, "shutdown-timeout must be > 0") val FlushWait: FiniteDuration = { config.getMillisDuration("akka.remote.flush-wait-on-shutdown") - } requiring (_ > Duration.Zero, "flush-wait-on-shutdown must be > 0") + }.requiring(_ > Duration.Zero, "flush-wait-on-shutdown must be > 0") val StartupTimeout: Timeout = { Timeout(config.getMillisDuration("akka.remote.startup-timeout")) - } requiring (_.duration > Duration.Zero, "startup-timeout must be > 0") + }.requiring(_.duration > Duration.Zero, "startup-timeout must be > 0") val RetryGateClosedFor: FiniteDuration = { config.getMillisDuration("akka.remote.retry-gate-closed-for") - } requiring (_ >= Duration.Zero, "retry-gate-closed-for must be >= 0") + }.requiring(_ >= Duration.Zero, "retry-gate-closed-for must be >= 0") val UsePassiveConnections: Boolean = getBoolean("akka.remote.use-passive-connections") val BackoffPeriod: FiniteDuration = { config.getMillisDuration("akka.remote.backoff-interval") - } requiring (_ > Duration.Zero, "backoff-interval must be > 0") + }.requiring(_ > Duration.Zero, "backoff-interval must be > 0") val LogBufferSizeExceeding: Int = { val key = "akka.remote.log-buffer-size-exceeding" @@ -80,56 +83,55 @@ final class RemoteSettings(val config: Config) { val SysMsgAckTimeout: FiniteDuration = { config.getMillisDuration("akka.remote.system-message-ack-piggyback-timeout") - } requiring (_ > Duration.Zero, "system-message-ack-piggyback-timeout must be > 0") + }.requiring(_ > Duration.Zero, 
"system-message-ack-piggyback-timeout must be > 0") val SysResendTimeout: FiniteDuration = { config.getMillisDuration("akka.remote.resend-interval") - } requiring (_ > Duration.Zero, "resend-interval must be > 0") + }.requiring(_ > Duration.Zero, "resend-interval must be > 0") val SysResendLimit: Int = { config.getInt("akka.remote.resend-limit") - } requiring (_ > 0, "resend-limit must be > 0") + }.requiring(_ > 0, "resend-limit must be > 0") val SysMsgBufferSize: Int = { getInt("akka.remote.system-message-buffer-size") - } requiring (_ > 0, "system-message-buffer-size must be > 0") + }.requiring(_ > 0, "system-message-buffer-size must be > 0") val InitialSysMsgDeliveryTimeout: FiniteDuration = { config.getMillisDuration("akka.remote.initial-system-message-delivery-timeout") - } requiring (_ > Duration.Zero, "initial-system-message-delivery-timeout must be > 0") + }.requiring(_ > Duration.Zero, "initial-system-message-delivery-timeout must be > 0") val QuarantineSilentSystemTimeout: FiniteDuration = { config.getMillisDuration("akka.remote.quarantine-after-silence") - } requiring (_ > Duration.Zero, "quarantine-after-silence must be > 0") + }.requiring(_ > Duration.Zero, "quarantine-after-silence must be > 0") val QuarantineDuration: FiniteDuration = { - config.getMillisDuration("akka.remote.prune-quarantine-marker-after").requiring( - _ > Duration.Zero, - "prune-quarantine-marker-after must be > 0 ms") + config + .getMillisDuration("akka.remote.prune-quarantine-marker-after") + .requiring(_ > Duration.Zero, "prune-quarantine-marker-after must be > 0 ms") } val CommandAckTimeout: Timeout = { Timeout(config.getMillisDuration("akka.remote.command-ack-timeout")) - } requiring (_.duration > Duration.Zero, "command-ack-timeout must be > 0") + }.requiring(_.duration > Duration.Zero, "command-ack-timeout must be > 0") val WatchFailureDetectorConfig: Config = getConfig("akka.remote.watch-failure-detector") val WatchFailureDetectorImplementationClass: String = 
WatchFailureDetectorConfig.getString("implementation-class") val WatchHeartBeatInterval: FiniteDuration = { WatchFailureDetectorConfig.getMillisDuration("heartbeat-interval") - } requiring (_ > Duration.Zero, "watch-failure-detector.heartbeat-interval must be > 0") + }.requiring(_ > Duration.Zero, "watch-failure-detector.heartbeat-interval must be > 0") val WatchUnreachableReaperInterval: FiniteDuration = { WatchFailureDetectorConfig.getMillisDuration("unreachable-nodes-reaper-interval") - } requiring (_ > Duration.Zero, "watch-failure-detector.unreachable-nodes-reaper-interval must be > 0") + }.requiring(_ > Duration.Zero, "watch-failure-detector.unreachable-nodes-reaper-interval must be > 0") val WatchHeartbeatExpectedResponseAfter: FiniteDuration = { WatchFailureDetectorConfig.getMillisDuration("expected-response-after") - } requiring (_ > Duration.Zero, "watch-failure-detector.expected-response-after > 0") + }.requiring(_ > Duration.Zero, "watch-failure-detector.expected-response-after > 0") val Transports: immutable.Seq[(String, immutable.Seq[String], Config)] = transportNames.map { name => val transportConfig = transportConfigFor(name) - ( - transportConfig.getString("transport-class"), - immutableSeq(transportConfig.getStringList("applied-adapters")).reverse, - transportConfig) + (transportConfig.getString("transport-class"), + immutableSeq(transportConfig.getStringList("applied-adapters")).reverse, + transportConfig) } val Adapters: Map[String, String] = configToMap(getConfig("akka.remote.adapters")) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index d1502764ec..e97cefdc3a 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -12,7 +12,7 @@ import akka.event.LoggingAdapter import scala.collection.immutable import scala.concurrent.Future import scala.util.control.NoStackTrace 
-import akka.util.{ OptionVal, unused } +import akka.util.{ unused, OptionVal } /** * RemoteTransportException represents a general failure within a RemoteTransport, @@ -28,7 +28,8 @@ class RemoteTransportException(message: String, cause: Throwable) extends AkkaEx */ @SerialVersionUID(1L) class RemoteTransportExceptionNoStackTrace(message: String, cause: Throwable) - extends RemoteTransportException(message, cause) with NoStackTrace + extends RemoteTransportException(message, cause) + with NoStackTrace /** * INTERNAL API @@ -41,6 +42,7 @@ class RemoteTransportExceptionNoStackTrace(message: String, cause: Throwable) * received or when the start() method returns, whatever happens first. */ private[akka] abstract class RemoteTransport(val system: ExtendedActorSystem, val provider: RemoteActorRefProvider) { + /** * Shuts down the remoting */ diff --git a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala index d3ae8a2e72..5c49bbeb0c 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteWatcher.scala @@ -22,13 +22,15 @@ private[akka] object RemoteWatcher { /** * Factory method for `RemoteWatcher` [[akka.actor.Props]]. 
*/ - def props( - failureDetector: FailureDetectorRegistry[Address], - heartbeatInterval: FiniteDuration, - unreachableReaperInterval: FiniteDuration, - heartbeatExpectedResponseAfter: FiniteDuration): Props = - Props(classOf[RemoteWatcher], failureDetector, heartbeatInterval, unreachableReaperInterval, - heartbeatExpectedResponseAfter).withDeploy(Deploy.local) + def props(failureDetector: FailureDetectorRegistry[Address], + heartbeatInterval: FiniteDuration, + unreachableReaperInterval: FiniteDuration, + heartbeatExpectedResponseAfter: FiniteDuration): Props = + Props(classOf[RemoteWatcher], + failureDetector, + heartbeatInterval, + unreachableReaperInterval, + heartbeatExpectedResponseAfter).withDeploy(Deploy.local) final case class WatchRemote(watchee: InternalActorRef, watcher: InternalActorRef) final case class UnwatchRemote(watchee: InternalActorRef, watcher: InternalActorRef) @@ -50,9 +52,8 @@ private[akka] object RemoteWatcher { lazy val empty: Stats = counts(0, 0) def counts(watching: Int, watchingNodes: Int): Stats = Stats(watching, watchingNodes)(Set.empty, Set.empty) } - final case class Stats(watching: Int, watchingNodes: Int)( - val watchingRefs: Set[(ActorRef, ActorRef)], - val watchingAddresses: Set[Address]) { + final case class Stats(watching: Int, watchingNodes: Int)(val watchingRefs: Set[(ActorRef, ActorRef)], + val watchingAddresses: Set[Address]) { override def toString: String = { def formatWatchingRefs: String = watchingRefs.map(x => x._2.path.name + " -> " + x._1.path.name).mkString("[", ", ", "]") @@ -84,12 +85,13 @@ private[akka] object RemoteWatcher { * both directions, but independent of each other. 
* */ -private[akka] class RemoteWatcher( - failureDetector: FailureDetectorRegistry[Address], - heartbeatInterval: FiniteDuration, - unreachableReaperInterval: FiniteDuration, - heartbeatExpectedResponseAfter: FiniteDuration) - extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[akka] class RemoteWatcher(failureDetector: FailureDetectorRegistry[Address], + heartbeatInterval: FiniteDuration, + unreachableReaperInterval: FiniteDuration, + heartbeatExpectedResponseAfter: FiniteDuration) + extends Actor + with ActorLogging + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import RemoteWatcher._ import context.dispatcher @@ -103,18 +105,20 @@ private[akka] class RemoteWatcher( else (Heartbeat, HeartbeatRsp(AddressUidExtension(context.system).addressUid)) // actors that this node is watching, map of watchee -> Set(watchers) - val watching = new mutable.HashMap[InternalActorRef, mutable.Set[InternalActorRef]]() with mutable.MultiMap[InternalActorRef, InternalActorRef] + val watching = new mutable.HashMap[InternalActorRef, mutable.Set[InternalActorRef]]() + with mutable.MultiMap[InternalActorRef, InternalActorRef] // nodes that this node is watching, i.e. expecting heartbeats from these nodes. 
Map of address -> Set(watchee) on this address - val watcheeByNodes = new mutable.HashMap[Address, mutable.Set[InternalActorRef]]() with mutable.MultiMap[Address, InternalActorRef] + val watcheeByNodes = new mutable.HashMap[Address, mutable.Set[InternalActorRef]]() + with mutable.MultiMap[Address, InternalActorRef] def watchingNodes = watcheeByNodes.keySet var unreachable: Set[Address] = Set.empty var addressUids: Map[Address, Long] = Map.empty val heartbeatTask = scheduler.schedule(heartbeatInterval, heartbeatInterval, self, HeartbeatTick) - val failureDetectorReaperTask = scheduler.schedule(unreachableReaperInterval, unreachableReaperInterval, - self, ReapUnreachableTick) + val failureDetectorReaperTask = + scheduler.schedule(unreachableReaperInterval, unreachableReaperInterval, self, ReapUnreachableTick) override def postStop(): Unit = { super.postStop() @@ -135,10 +139,15 @@ private[akka] class RemoteWatcher( // test purpose case Stats => - val watchSet = watching.iterator.flatMap { case (wee, wers) => wers.map { wer => wee -> wer } }.toSet[(ActorRef, ActorRef)] - sender() ! Stats( - watching = watchSet.size, - watchingNodes = watchingNodes.size)(watchSet, watchingNodes.toSet) + val watchSet = watching.iterator + .flatMap { + case (wee, wers) => + wers.map { wer => + wee -> wer + } + } + .toSet[(ActorRef, ActorRef)] + sender() ! 
Stats(watching = watchSet.size, watchingNodes = watchingNodes.size)(watchSet, watchingNodes.toSet) } def receiveHeartbeat(): Unit = @@ -161,7 +170,7 @@ private[akka] class RemoteWatcher( } def reapUnreachable(): Unit = - watchingNodes foreach { a => + watchingNodes.foreach { a => if (!unreachable(a) && !failureDetector.isAvailable(a)) { log.warning("Detected unreachable: [{}]", a) quarantine(a, addressUids.get(a), "Deemed unreachable by remote failure detector", harmless = false) @@ -187,7 +196,7 @@ private[akka] class RemoteWatcher( watchNode(watchee) // add watch from self, this will actually send a Watch to the target when necessary - context watch watchee + context.watch(watchee) } def watchNode(watchee: InternalActorRef): Unit = { @@ -211,7 +220,7 @@ private[akka] class RemoteWatcher( if (watchers.isEmpty) { // clean up self watch when no more watchers of this watchee log.debug("Cleanup self watch of [{}]", watchee.path) - context unwatch watchee + context.unwatch(watchee) removeWatchee(watchee) } case None => @@ -256,7 +265,7 @@ private[akka] class RemoteWatcher( } def sendHeartbeat(): Unit = - watchingNodes foreach { a => + watchingNodes.foreach { a => if (!unreachable(a)) { if (failureDetector.isMonitoring(a)) { log.debug("Sending Heartbeat to [{}]", a) diff --git a/akka-remote/src/main/scala/akka/remote/Remoting.scala b/akka-remote/src/main/scala/akka/remote/Remoting.scala index d1fcb898d3..44ebfd7369 100644 --- a/akka-remote/src/main/scala/akka/remote/Remoting.scala +++ b/akka-remote/src/main/scala/akka/remote/Remoting.scala @@ -8,7 +8,7 @@ import akka.Done import akka.actor.SupervisorStrategy._ import akka.actor._ import akka.event.{ Logging, LoggingAdapter } -import akka.pattern.{ gracefulStop, pipe, ask } +import akka.pattern.{ ask, gracefulStop, pipe } import akka.remote.EndpointManager._ import akka.remote.Remoting.TransportSupervisor import akka.remote.transport.Transport.{ ActorAssociationEventListener, AssociationEventListener, InboundAssociation 
} @@ -16,9 +16,9 @@ import akka.remote.transport._ import com.typesafe.config.Config import java.net.URLEncoder import java.util.concurrent.TimeoutException -import scala.collection.immutable.{ Seq, HashMap } +import scala.collection.immutable.{ HashMap, Seq } import scala.concurrent.duration._ -import scala.concurrent.{ Promise, Await, Future } +import scala.concurrent.{ Await, Future, Promise } import scala.util.control.NonFatal import scala.util.{ Failure, Success } import akka.remote.transport.AkkaPduCodec.Message @@ -43,6 +43,7 @@ private[remote] object AddressUrlEncoder { private[akka] final case class RARP(provider: RemoteActorRefProvider) extends Extension { def configureDispatcher(props: Props): Props = provider.remoteSettings.configureDispatcher(props) } + /** * INTERNAL API */ @@ -75,7 +76,8 @@ private[remote] object Remoting { final val EndpointManagerName = "endpointManager" - def localAddressForRemote(transportMapping: Map[String, Set[(AkkaProtocolTransport, Address)]], remote: Address): Address = { + def localAddressForRemote(transportMapping: Map[String, Set[(AkkaProtocolTransport, Address)]], + remote: Address): Address = { transportMapping.get(remote.protocol) match { case Some(transports) => @@ -85,7 +87,7 @@ private[remote] object Remoting { case 0 => throw new RemoteTransportException( s"No transport is responsible for address: [$remote] although protocol [${remote.protocol}] is available." + - " Make sure at least one transport is configured to be responsible for the address.", + " Make sure at least one transport is configured to be responsible for the address.", null) case 1 => @@ -94,12 +96,15 @@ private[remote] object Remoting { case _ => throw new RemoteTransportException( s"Multiple transports are available for [$remote]: [${responsibleTransports.mkString(",")}]. " + - "Remoting cannot decide which transport to use to reach the remote system. 
Change your configuration " + - "so that only one transport is responsible for the address.", + "Remoting cannot decide which transport to use to reach the remote system. Change your configuration " + + "so that only one transport is responsible for the address.", null) } - case None => throw new RemoteTransportException( - s"No transport is loaded for protocol: [${remote.protocol}], available protocols: [${transportMapping.keys.mkString(", ")}]", null) + case None => + throw new RemoteTransportException( + s"No transport is loaded for protocol: [${remote.protocol}], available protocols: [${transportMapping.keys + .mkString(", ")}]", + null) } } @@ -112,9 +117,7 @@ private[remote] object Remoting { def receive = { case RegisterTransportActor(props, name) => - sender() ! context.actorOf( - RARP(context.system).configureDispatcher(props.withDeploy(Deploy.local)), - name) + sender() ! context.actorOf(RARP(context.system).configureDispatcher(props.withDeploy(Deploy.local)), name) } } @@ -123,7 +126,8 @@ private[remote] object Remoting { /** * INTERNAL API */ -private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider) extends RemoteTransport(_system, _provider) { +private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider) + extends RemoteTransport(_system, _provider) { @volatile private var endpointManager: Option[ActorRef] = None @volatile private var transportMapping: Map[String, Set[(AkkaProtocolTransport, Address)]] = _ @@ -136,11 +140,10 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc import provider.remoteSettings._ - val transportSupervisor = system.systemActorOf( - configureDispatcher(Props[TransportSupervisor]), - "transports") + val transportSupervisor = system.systemActorOf(configureDispatcher(Props[TransportSupervisor]), "transports") - override def localAddressForRemote(remote: Address): Address = Remoting.localAddressForRemote(transportMapping, 
remote) + override def localAddressForRemote(remote: Address): Address = + Remoting.localAddressForRemote(transportMapping, remote) val log: LoggingAdapter = Logging(system.eventStream, getClass.getName) val eventPublisher = new EventPublisher(system, log, RemoteLifecycleEventsLogLevel) @@ -159,20 +162,26 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc } import system.dispatcher - (manager ? ShutdownAndFlush).mapTo[Boolean].andThen { - case Success(flushSuccessful) => - if (!flushSuccessful) - log.warning("Shutdown finished, but flushing might not have been successful and some messages might have been dropped. " + - "Increase akka.remote.flush-wait-on-shutdown to a larger value to avoid this.") - finalize() + (manager ? ShutdownAndFlush) + .mapTo[Boolean] + .andThen { + case Success(flushSuccessful) => + if (!flushSuccessful) + log.warning( + "Shutdown finished, but flushing might not have been successful and some messages might have been dropped. " + + "Increase akka.remote.flush-wait-on-shutdown to a larger value to avoid this.") + finalize() - case Failure(e) => - notifyError("Failure during shutdown of remoting.", e) - finalize() - } map { _ => Done } // RARP needs only akka.Done, not a boolean + case Failure(e) => + notifyError("Failure during shutdown of remoting.", e) + finalize() + } + .map { _ => + Done + } // RARP needs only akka.Done, not a boolean case None => log.warning("Remoting is not running. 
Ignoring shutdown attempt.") - Future successful Done + Future.successful(Done) } } @@ -182,7 +191,8 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc case None => log.info("Starting remoting") val manager: ActorRef = system.systemActorOf( - configureDispatcher(Props(classOf[EndpointManager], provider.remoteSettings.config, log)).withDeploy(Deploy.local), + configureDispatcher(Props(classOf[EndpointManager], provider.remoteSettings.config, log)) + .withDeploy(Deploy.local), Remoting.EndpointManagerName) endpointManager = Some(manager) @@ -190,14 +200,15 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc val addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]] = Promise() manager ! Listen(addressesPromise) - val transports: Seq[(AkkaProtocolTransport, Address)] = Await.result( - addressesPromise.future, - StartupTimeout.duration) + val transports: Seq[(AkkaProtocolTransport, Address)] = + Await.result(addressesPromise.future, StartupTimeout.duration) if (transports.isEmpty) throw new RemoteTransportException("No transport drivers were loaded.", null) - transportMapping = transports.groupBy { - case (transport, _) => transport.schemeIdentifier - } map { case (k, v) => k -> v.toSet } + transportMapping = transports + .groupBy { + case (transport, _) => transport.schemeIdentifier + } + .map { case (k, v) => k -> v.toSet } defaultAddress = transports.head._2 addresses = transports.map { _._2 }.toSet @@ -209,7 +220,9 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc } catch { case e: TimeoutException => - notifyError("Startup timed out. This is usually related to actor system host setting or host name resolution misconfiguration.", e) + notifyError( + "Startup timed out. 
This is usually related to actor system host setting or host name resolution misconfiguration.", + e) throw e case NonFatal(e) => notifyError("Startup failed", e) @@ -221,24 +234,33 @@ private[remote] class Remoting(_system: ExtendedActorSystem, _provider: RemoteAc } } - override def send(message: Any, senderOption: OptionVal[ActorRef], recipient: RemoteActorRef): Unit = endpointManager match { - case Some(manager) => manager.tell(Send(message, senderOption, recipient), sender = senderOption getOrElse Actor.noSender) - case None => throw new RemoteTransportExceptionNoStackTrace("Attempted to send remote message but Remoting is not running.", null) - } + override def send(message: Any, senderOption: OptionVal[ActorRef], recipient: RemoteActorRef): Unit = + endpointManager match { + case Some(manager) => + manager.tell(Send(message, senderOption, recipient), sender = senderOption.getOrElse(Actor.noSender)) + case None => + throw new RemoteTransportExceptionNoStackTrace("Attempted to send remote message but Remoting is not running.", + null) + } override def managementCommand(cmd: Any): Future[Boolean] = endpointManager match { case Some(manager) => import system.dispatcher implicit val timeout = CommandAckTimeout - manager ? ManagementCommand(cmd) map { case ManagementCommandAck(status) => status } - case None => throw new RemoteTransportExceptionNoStackTrace("Attempted to send management command but Remoting is not running.", null) + (manager ? ManagementCommand(cmd)).map { case ManagementCommandAck(status) => status } + case None => + throw new RemoteTransportExceptionNoStackTrace( + "Attempted to send management command but Remoting is not running.", + null) } override def quarantine(remoteAddress: Address, uid: Option[Long], reason: String): Unit = endpointManager match { case Some(manager) => manager ! 
Quarantine(remoteAddress, uid.map(_.toInt)) - case _ => throw new RemoteTransportExceptionNoStackTrace( - s"Attempted to quarantine address [$remoteAddress] with UID [$uid] but Remoting is not running", null) + case _ => + throw new RemoteTransportExceptionNoStackTrace( + s"Attempted to quarantine address [$remoteAddress] with UID [$uid] but Remoting is not running", + null) } private[akka] def boundAddresses: Map[String, Set[Address]] = { @@ -262,8 +284,12 @@ private[remote] object EndpointManager { final case class Listen(addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]]) extends RemotingCommand case object StartupFinished extends RemotingCommand case object ShutdownAndFlush extends RemotingCommand - final case class Send(message: Any, senderOption: OptionVal[ActorRef], recipient: RemoteActorRef, seqOpt: Option[SeqNo] = None) - extends RemotingCommand with HasSequenceNumber { + final case class Send(message: Any, + senderOption: OptionVal[ActorRef], + recipient: RemoteActorRef, + seqOpt: Option[SeqNo] = None) + extends RemotingCommand + with HasSequenceNumber { override def toString = s"Remote message $senderOption -> $recipient" // This MUST throw an exception to indicate that we attempted to put a nonsequenced message in one of the @@ -276,12 +302,11 @@ private[remote] object EndpointManager { // Messages internal to EndpointManager case object Prune extends NoSerializationVerificationNeeded - final case class ListensResult( - addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]], - results: Seq[(AkkaProtocolTransport, Address, Promise[AssociationEventListener])]) - extends NoSerializationVerificationNeeded + final case class ListensResult(addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]], + results: Seq[(AkkaProtocolTransport, Address, Promise[AssociationEventListener])]) + extends NoSerializationVerificationNeeded final case class ListensFailure(addressesPromise: Promise[Seq[(AkkaProtocolTransport, Address)]], cause: 
Throwable) - extends NoSerializationVerificationNeeded + extends NoSerializationVerificationNeeded // Helper class to store address pairs final case class Link(localAddress: Address, remoteAddress: Address) @@ -441,15 +466,16 @@ private[remote] object EndpointManager { /** * INTERNAL API */ -private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends Actor - with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) + extends Actor + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import EndpointManager._ import context.dispatcher val settings = new RemoteSettings(conf) val extendedSystem = context.system.asInstanceOf[ExtendedActorSystem] - val endpointId: Iterator[Int] = Iterator from 0 + val endpointId: Iterator[Int] = Iterator.from(0) val eventPublisher = new EventPublisher(context.system, log, settings.RemoteLifecycleEventsLogLevel) @@ -470,12 +496,13 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends def handleStashedInbound(endpoint: ActorRef, writerIsIdle: Boolean): Unit = { val stashed = stashedInbound.getOrElse(endpoint, Vector.empty) stashedInbound -= endpoint - stashed foreach (handleInboundAssociation(_, writerIsIdle)) + stashed.foreach(handleInboundAssociation(_, writerIsIdle)) } def keepQuarantinedOr(remoteAddress: Address)(body: => Unit): Unit = endpoints.refuseUid(remoteAddress) match { case Some(uid) => - log.info("Quarantined address [{}] is still unreachable or has not been restarted. Keeping it quarantined.", remoteAddress) + log.info("Quarantined address [{}] is still unreachable or has not been restarted. Keeping it quarantined.", + remoteAddress) // Restoring Quarantine marker overwritten by a Pass(endpoint, refuseUid) pair while probing remote system. 
endpoints.markAsQuarantined(remoteAddress, uid, Deadline.now + settings.QuarantineDuration) case None => body @@ -484,8 +511,10 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends override val supervisorStrategy = { def hopeless(e: HopelessAssociation): SupervisorStrategy.Directive = e match { case HopelessAssociation(_, remoteAddress, Some(uid), reason) => - log.error(reason, "Association to [{}] with UID [{}] irrecoverably failed. Quarantining address.", - remoteAddress, uid) + log.error(reason, + "Association to [{}] with UID [{}] irrecoverably failed. Quarantining address.", + remoteAddress, + uid) settings.QuarantineDuration match { case d: FiniteDuration => endpoints.markAsQuarantined(remoteAddress, uid, Deadline.now + d) @@ -496,10 +525,10 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends case HopelessAssociation(_, remoteAddress, None, _) => keepQuarantinedOr(remoteAddress) { - log.warning( - "Association to [{}] with unknown UID is irrecoverably failed. " + - "Address cannot be quarantined without knowing the UID, gating instead for {} ms.", - remoteAddress, settings.RetryGateClosedFor.toMillis) + log.warning("Association to [{}] with unknown UID is irrecoverably failed. " + + "Address cannot be quarantined without knowing the UID, gating instead for {} ms.", + remoteAddress, + settings.RetryGateClosedFor.toMillis) endpoints.markAsFailed(sender(), Deadline.now + settings.RetryGateClosedFor) } Stop @@ -511,9 +540,12 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends val causedBy = if (reason.getCause == null) "" else s"Caused by: [${reason.getCause.getMessage}]" log.warning( "Tried to associate with unreachable remote address [{}]. " + - "Address is now gated for {} ms, all messages to this address will be delivered to dead letters. 
" + - "Reason: [{}] {}", - remoteAddress, settings.RetryGateClosedFor.toMillis, reason.getMessage, causedBy) + "Address is now gated for {} ms, all messages to this address will be delivered to dead letters. " + + "Reason: [{}] {}", + remoteAddress, + settings.RetryGateClosedFor.toMillis, + reason.getMessage, + causedBy) endpoints.markAsFailed(sender(), Deadline.now + settings.RetryGateClosedFor) } disassiciationInfo.foreach { @@ -525,10 +557,10 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends case ShutDownAssociation(_, remoteAddress, _) => keepQuarantinedOr(remoteAddress) { - log.debug( - "Remote system with address [{}] has shut down. " + - "Address is now gated for {} ms, all messages to this address will be delivered to dead letters.", - remoteAddress, settings.RetryGateClosedFor.toMillis) + log.debug("Remote system with address [{}] has shut down. " + + "Address is now gated for {} ms, all messages to this address will be delivered to dead letters.", + remoteAddress, + settings.RetryGateClosedFor.toMillis) endpoints.markAsFailed(sender(), Deadline.now + settings.RetryGateClosedFor) } Stop @@ -542,7 +574,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends case NonFatal(e) => e match { case _: EndpointDisassociatedException | _: EndpointAssociationException => // no logging - case _ => log.error(e, e.getMessage) + case _ => log.error(e, e.getMessage) } endpoints.markAsFailed(sender(), Deadline.now + settings.RetryGateClosedFor) Stop @@ -554,19 +586,25 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends def receive = { case Listen(addressesPromise) => - listens map { ListensResult(addressesPromise, _) } recover { - case NonFatal(e) => ListensFailure(addressesPromise, e) - } pipeTo self + listens + .map { ListensResult(addressesPromise, _) } + .recover { + case NonFatal(e) => ListensFailure(addressesPromise, e) + } + .pipeTo(self) case 
ListensResult(addressesPromise, results) => - transportMapping = results.groupBy { - case (_, transportAddress, _) => transportAddress - } map { - case (a, t) if t.size > 1 => - throw new RemoteTransportException(s"There are more than one transports listening on local address [$a]", null) - case (a, t) => a -> t.head._1 - } + transportMapping = results + .groupBy { + case (_, transportAddress, _) => transportAddress + } + .map { + case (a, t) if t.size > 1 => + throw new RemoteTransportException(s"There are more than one transports listening on local address [$a]", + null) + case (a, t) => a -> t.head._1 + } // Register to each transport as listener and collect mapping to addresses - val transportsAndAddresses = results map { + val transportsAndAddresses = results.map { case (transport, address, promise) => promise.success(ActorAssociationEventListener(self)) transport -> address @@ -589,17 +627,17 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends case ManagementCommand(cmd) => val allStatuses: immutable.Seq[Future[Boolean]] = transportMapping.values.iterator.map(transport => transport.managementCommand(cmd)).to(immutable.IndexedSeq) - akka.compat.Future.fold(allStatuses)(true)(_ && _) map ManagementCommandAck pipeTo sender() + akka.compat.Future.fold(allStatuses)(true)(_ && _).map(ManagementCommandAck).pipeTo(sender()) case Quarantine(address, uidToQuarantineOption) => // Stop writers (endpoints.writableEndpointWithPolicyFor(address), uidToQuarantineOption) match { case (Some(Pass(endpoint, _)), None) => context.stop(endpoint) - log.warning( - "Association to [{}] with unknown UID is reported as quarantined, but " + - "address cannot be quarantined without knowing the UID, gating instead for {} ms.", - address, settings.RetryGateClosedFor.toMillis) + log.warning("Association to [{}] with unknown UID is reported as quarantined, but " + + "address cannot be quarantined without knowing the UID, gating instead for {} ms.", + address, 
+ settings.RetryGateClosedFor.toMillis) endpoints.markAsFailed(endpoint, Deadline.now + settings.RetryGateClosedFor) case (Some(Pass(endpoint, uidOption)), Some(quarantineUid)) => uidOption match { @@ -610,11 +648,13 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends // or it does not match with the UID to be quarantined case None if !endpoints.refuseUid(address).contains(quarantineUid) => // the quarantine uid may be got fresh by cluster gossip, so update refuseUid for late handle when the writer got uid - endpoints.registerWritableEndpointRefuseUid(address, quarantineUid, Deadline.now + settings.QuarantineDuration) + endpoints.registerWritableEndpointRefuseUid(address, + quarantineUid, + Deadline.now + settings.QuarantineDuration) case _ => //the quarantine uid has lost the race with some failure, do nothing } case (Some(Quarantined(uid, _)), Some(quarantineUid)) if uid == quarantineUid => // the UID to be quarantined already exists, do nothing - case (_, Some(quarantineUid)) => + case (_, Some(quarantineUid)) => // the current state is gated or quarantined, and we know the UID, update endpoints.markAsQuarantined(address, quarantineUid, Deadline.now + settings.QuarantineDuration) eventPublisher.notifyListeners(QuarantinedEvent(address, quarantineUid)) @@ -624,13 +664,14 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends // Stop inbound read-only associations (endpoints.readOnlyEndpointFor(address), uidToQuarantineOption) match { case (Some((endpoint, _)), None) => context.stop(endpoint) - case (Some((endpoint, currentUid)), Some(quarantineUid)) if currentUid == quarantineUid => context.stop(endpoint) + case (Some((endpoint, currentUid)), Some(quarantineUid)) if currentUid == quarantineUid => + context.stop(endpoint) case _ => // nothing to stop } def matchesQuarantine(handle: AkkaProtocolHandle): Boolean = { handle.remoteAddress == address && - uidToQuarantineOption.forall(_ == 
handle.handshakeInfo.uid) + uidToQuarantineOption.forall(_ == handle.handshakeInfo.uid) } // Stop all matching pending read handoffs @@ -660,16 +701,14 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends val recipientAddress = recipientRef.path.address def createAndRegisterWritingEndpoint(): ActorRef = { - endpoints.registerWritableEndpoint( - recipientAddress, - uid = None, - createEndpoint( - recipientAddress, - recipientRef.localAddressToUse, - transportMapping(recipientRef.localAddressToUse), - settings, - handleOption = None, - writing = true)) + endpoints.registerWritableEndpoint(recipientAddress, + uid = None, + createEndpoint(recipientAddress, + recipientRef.localAddressToUse, + transportMapping(recipientRef.localAddressToUse), + settings, + handleOption = None, + writing = true)) } endpoints.writableEndpointWithPolicyFor(recipientAddress) match { @@ -717,21 +756,20 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends // Shutdown all endpoints and signal to sender() when ready (and whether all endpoints were shut down gracefully) def shutdownAll[T](resources: IterableOnce[T])(shutdown: T => Future[Boolean]): Future[Boolean] = { - Future.sequence(resources.toList.map(shutdown)) - .map(_.forall(identity)) - .recover { - case NonFatal(_) => false - } + Future.sequence(resources.toList.map(shutdown)).map(_.forall(identity)).recover { + case NonFatal(_) => false + } } (for { // The construction of the future for shutdownStatus has to happen after the flushStatus future has been finished // so that endpoints are shut down before transports. 
- flushStatus <- shutdownAll(endpoints.allEndpoints)(gracefulStop(_, settings.FlushWait, EndpointWriter.FlushAndStop)) + flushStatus <- shutdownAll(endpoints.allEndpoints)( + gracefulStop(_, settings.FlushWait, EndpointWriter.FlushAndStop)) shutdownStatus <- shutdownAll(transportMapping.values)(_.shutdown()) - } yield flushStatus && shutdownStatus) pipeTo sender() + } yield flushStatus && shutdownStatus).pipeTo(sender()) - pendingReadHandoffs.valuesIterator foreach (_.disassociate(AssociationHandle.Shutdown)) + pendingReadHandoffs.valuesIterator.foreach(_.disassociate(AssociationHandle.Shutdown)) // Ignore all other writes normalShutdown = true @@ -745,44 +783,51 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends } def handleInboundAssociation(ia: InboundAssociation, writerIsIdle: Boolean): Unit = ia match { - case ia @ InboundAssociation(handle: AkkaProtocolHandle) => endpoints.readOnlyEndpointFor(handle.remoteAddress) match { - case Some((endpoint, _)) => - pendingReadHandoffs.get(endpoint) foreach (_.disassociate("the existing readOnly association was replaced by a new incoming one", log)) - pendingReadHandoffs += endpoint -> handle - endpoint ! EndpointWriter.TakeOver(handle, self) - endpoints.writableEndpointWithPolicyFor(handle.remoteAddress) match { - case Some(Pass(ep, _)) => ep ! ReliableDeliverySupervisor.Ungate - case _ => - } - case None => - if (endpoints.isQuarantined(handle.remoteAddress, handle.handshakeInfo.uid)) - handle.disassociate(AssociationHandle.Quarantined) - else endpoints.writableEndpointWithPolicyFor(handle.remoteAddress) match { - case Some(Pass(ep, None)) => - // Idle writer will never send a GotUid or a Terminated so we need to "provoke it" - // to get an unstash event - if (!writerIsIdle) { - ep ! 
ReliableDeliverySupervisor.IsIdle - stashedInbound += ep -> (stashedInbound.getOrElse(ep, Vector.empty) :+ ia) - } else - createAndRegisterEndpoint(handle) - case Some(Pass(ep, Some(uid))) => - if (handle.handshakeInfo.uid == uid) { - pendingReadHandoffs.get(ep) foreach (_.disassociate("the existing writable association was replaced by a new incoming one", log)) - pendingReadHandoffs += ep -> handle - ep ! EndpointWriter.StopReading(ep, self) - ep ! ReliableDeliverySupervisor.Ungate - } else { - context.stop(ep) - endpoints.unregisterEndpoint(ep) - pendingReadHandoffs -= ep - endpoints.markAsQuarantined(handle.remoteAddress, uid, Deadline.now + settings.QuarantineDuration) - createAndRegisterEndpoint(handle) + case ia @ InboundAssociation(handle: AkkaProtocolHandle) => + endpoints.readOnlyEndpointFor(handle.remoteAddress) match { + case Some((endpoint, _)) => + pendingReadHandoffs + .get(endpoint) + .foreach(_.disassociate("the existing readOnly association was replaced by a new incoming one", log)) + pendingReadHandoffs += endpoint -> handle + endpoint ! EndpointWriter.TakeOver(handle, self) + endpoints.writableEndpointWithPolicyFor(handle.remoteAddress) match { + case Some(Pass(ep, _)) => ep ! ReliableDeliverySupervisor.Ungate + case _ => + } + case None => + if (endpoints.isQuarantined(handle.remoteAddress, handle.handshakeInfo.uid)) + handle.disassociate(AssociationHandle.Quarantined) + else + endpoints.writableEndpointWithPolicyFor(handle.remoteAddress) match { + case Some(Pass(ep, None)) => + // Idle writer will never send a GotUid or a Terminated so we need to "provoke it" + // to get an unstash event + if (!writerIsIdle) { + ep ! 
ReliableDeliverySupervisor.IsIdle + stashedInbound += ep -> (stashedInbound.getOrElse(ep, Vector.empty) :+ ia) + } else + createAndRegisterEndpoint(handle) + case Some(Pass(ep, Some(uid))) => + if (handle.handshakeInfo.uid == uid) { + pendingReadHandoffs + .get(ep) + .foreach( + _.disassociate("the existing writable association was replaced by a new incoming one", log)) + pendingReadHandoffs += ep -> handle + ep ! EndpointWriter.StopReading(ep, self) + ep ! ReliableDeliverySupervisor.Ungate + } else { + context.stop(ep) + endpoints.unregisterEndpoint(ep) + pendingReadHandoffs -= ep + endpoints.markAsQuarantined(handle.remoteAddress, uid, Deadline.now + settings.QuarantineDuration) + createAndRegisterEndpoint(handle) + } + case _ => + createAndRegisterEndpoint(handle) } - case _ => - createAndRegisterEndpoint(handle) - } - } + } case _ => // ignore } @@ -790,13 +835,12 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends val writing = settings.UsePassiveConnections && !endpoints.hasWritableEndpointFor(handle.remoteAddress) eventPublisher.notifyListeners(AssociatedEvent(handle.localAddress, handle.remoteAddress, inbound = true)) - val endpoint = createEndpoint( - handle.remoteAddress, - handle.localAddress, - transportMapping(handle.localAddress), - settings, - Some(handle), - writing) + val endpoint = createEndpoint(handle.remoteAddress, + handle.localAddress, + transportMapping(handle.localAddress), + settings, + Some(handle), + writing) if (writing) endpoints.registerWritableEndpoint(handle.remoteAddress, Some(handle.handshakeInfo.uid), endpoint) @@ -823,14 +867,18 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends // The chain at this point: // Driver val driver = extendedSystem.dynamicAccess - .createInstanceFor[Transport](fqn, args).recover({ + .createInstanceFor[Transport](fqn, args) + .recover({ - case exception => throw new IllegalArgumentException( - s"Cannot instantiate transport [$fqn]. 
" + + case exception => + throw new IllegalArgumentException( + s"Cannot instantiate transport [$fqn]. " + "Make sure it extends [akka.remote.transport.Transport] and has constructor with " + - "[akka.actor.ExtendedActorSystem] and [com.typesafe.config.Config] parameters", exception) + "[akka.actor.ExtendedActorSystem] and [com.typesafe.config.Config] parameters", + exception) - }).get + }) + .get // Iteratively decorates the bottom level driver with a list of adapters. // The chain at this point: @@ -850,7 +898,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends // Collect all transports, listen addresses and listener promises in one future Future.sequence(transports.map { transport => - transport.listen map { case (address, listenerPromise) => (transport, address, listenerPromise) } + transport.listen.map { case (address, listenerPromise) => (transport, address, listenerPromise) } }) } @@ -860,13 +908,12 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends pendingReadHandoffs -= takingOverFrom eventPublisher.notifyListeners(AssociatedEvent(handle.localAddress, handle.remoteAddress, inbound = true)) - val endpoint = createEndpoint( - handle.remoteAddress, - handle.localAddress, - transportMapping(handle.localAddress), - settings, - Some(handle), - writing = false) + val endpoint = createEndpoint(handle.remoteAddress, + handle.localAddress, + transportMapping(handle.localAddress), + settings, + Some(handle), + writing = false) endpoints.registerReadOnlyEndpoint(handle.remoteAddress, endpoint, handle.handshakeInfo.uid) } } @@ -876,48 +923,55 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends pendingReadHandoffs -= takingOverFrom } - private def createEndpoint( - remoteAddress: Address, - localAddress: Address, - transport: AkkaProtocolTransport, - endpointSettings: RemoteSettings, - handleOption: Option[AkkaProtocolHandle], - writing: Boolean): ActorRef = { + private 
def createEndpoint(remoteAddress: Address, + localAddress: Address, + transport: AkkaProtocolTransport, + endpointSettings: RemoteSettings, + handleOption: Option[AkkaProtocolHandle], + writing: Boolean): ActorRef = { require(transportMapping contains localAddress, "Transport mapping is not defined for the address") // refuseUid is ignored for read-only endpoints since the UID of the remote system is already known and has passed // quarantine checks val refuseUid = endpoints.refuseUid(remoteAddress) - if (writing) context.watch(context.actorOf( - RARP(extendedSystem).configureDispatcher(ReliableDeliverySupervisor.props( - handleOption, - localAddress, - remoteAddress, - refuseUid, - transport, - endpointSettings, - AkkaPduProtobufCodec, - receiveBuffers)).withDeploy(Deploy.local), - "reliableEndpointWriter-" + AddressUrlEncoder(remoteAddress) + "-" + endpointId.next())) - else context.watch(context.actorOf( - RARP(extendedSystem).configureDispatcher(EndpointWriter.props( - handleOption, - localAddress, - remoteAddress, - refuseUid, - transport, - endpointSettings, - AkkaPduProtobufCodec, - receiveBuffers, - reliableDeliverySupervisor = None)).withDeploy(Deploy.local), - "endpointWriter-" + AddressUrlEncoder(remoteAddress) + "-" + endpointId.next())) + if (writing) + context.watch( + context.actorOf( + RARP(extendedSystem) + .configureDispatcher( + ReliableDeliverySupervisor.props(handleOption, + localAddress, + remoteAddress, + refuseUid, + transport, + endpointSettings, + AkkaPduProtobufCodec, + receiveBuffers)) + .withDeploy(Deploy.local), + "reliableEndpointWriter-" + AddressUrlEncoder(remoteAddress) + "-" + endpointId.next())) + else + context.watch( + context.actorOf( + RARP(extendedSystem) + .configureDispatcher( + EndpointWriter.props(handleOption, + localAddress, + remoteAddress, + refuseUid, + transport, + endpointSettings, + AkkaPduProtobufCodec, + receiveBuffers, + reliableDeliverySupervisor = None)) + .withDeploy(Deploy.local), + "endpointWriter-" + 
AddressUrlEncoder(remoteAddress) + "-" + endpointId.next())) } private var normalShutdown = false override def postStop(): Unit = { pruneTimerCancellable.cancel() - pendingReadHandoffs.valuesIterator foreach (_.disassociate(AssociationHandle.Shutdown)) + pendingReadHandoffs.valuesIterator.foreach(_.disassociate(AssociationHandle.Shutdown)) if (!normalShutdown) { // Remaining running endpoints are children, so they will clean up themselves. @@ -926,7 +980,7 @@ private[remote] class EndpointManager(conf: Config, log: LoggingAdapter) extends // responsibility of them (because they are sitting in a mailbox). log.error("Remoting system has been terminated abrubtly. Attempting to shut down transports") // The result of this shutdown is async, should we try to Await for a short duration? - transportMapping.values map (_.shutdown()) + transportMapping.values.map(_.shutdown()) } } diff --git a/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala b/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala index f7d7431b89..c3d03def29 100644 --- a/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala +++ b/akka-remote/src/main/scala/akka/remote/RemotingLifecycleEvent.scala @@ -28,11 +28,8 @@ sealed trait AssociationEvent extends RemotingLifecycleEvent { } @SerialVersionUID(1L) -final case class AssociatedEvent( - localAddress: Address, - remoteAddress: Address, - inbound: Boolean) - extends AssociationEvent { +final case class AssociatedEvent(localAddress: Address, remoteAddress: Address, inbound: Boolean) + extends AssociationEvent { protected override def eventName: String = "Associated" override def logLevel: Logging.LogLevel = Logging.DebugLevel @@ -40,22 +37,19 @@ final case class AssociatedEvent( } @SerialVersionUID(1L) -final case class DisassociatedEvent( - localAddress: Address, - remoteAddress: Address, - inbound: Boolean) - extends AssociationEvent { +final case class DisassociatedEvent(localAddress: Address, remoteAddress: 
Address, inbound: Boolean) + extends AssociationEvent { protected override def eventName: String = "Disassociated" override def logLevel: Logging.LogLevel = Logging.DebugLevel } @SerialVersionUID(1L) -final case class AssociationErrorEvent( - cause: Throwable, - localAddress: Address, - remoteAddress: Address, - inbound: Boolean, - logLevel: Logging.LogLevel) extends AssociationEvent { +final case class AssociationErrorEvent(cause: Throwable, + localAddress: Address, + remoteAddress: Address, + inbound: Boolean, + logLevel: Logging.LogLevel) + extends AssociationEvent { protected override def eventName: String = "AssociationError" override def toString: String = s"${super.toString}: Error [${cause.getMessage}] [${Logging.stackTraceFor(cause)}]" def getCause: Throwable = cause @@ -95,8 +89,8 @@ final case class QuarantinedEvent(address: Address, longUid: Long) extends Remot override def logLevel: Logging.LogLevel = Logging.WarningLevel override val toString: String = s"Association to [$address] having UID [$longUid] is irrecoverably failed. UID is now quarantined and all " + - "messages to this UID will be delivered to dead letters. Remote ActorSystem must be restarted to recover " + - "from this situation." + "messages to this UID will be delivered to dead letters. Remote ActorSystem must be restarted to recover " + + "from this situation." // For binary compatibility @@ -114,15 +108,17 @@ final case class QuarantinedEvent(address: Address, longUid: Long) extends Remot * The `uniqueAddress` was quarantined but it was due to normal shutdown or cluster leaving/exiting. 
*/ @SerialVersionUID(1L) -final case class GracefulShutdownQuarantinedEvent(uniqueAddress: UniqueAddress, reason: String) extends RemotingLifecycleEvent { +final case class GracefulShutdownQuarantinedEvent(uniqueAddress: UniqueAddress, reason: String) + extends RemotingLifecycleEvent { override def logLevel: Logging.LogLevel = Logging.InfoLevel override val toString: String = s"Association to [${uniqueAddress.address}] having UID [${uniqueAddress.uid}] has been stopped. All " + - s"messages to this UID will be delivered to dead letters. Reason: $reason " + s"messages to this UID will be delivered to dead letters. Reason: $reason " } @SerialVersionUID(1L) -final case class ThisActorSystemQuarantinedEvent(localAddress: Address, remoteAddress: Address) extends RemotingLifecycleEvent { +final case class ThisActorSystemQuarantinedEvent(localAddress: Address, remoteAddress: Address) + extends RemotingLifecycleEvent { override def logLevel: LogLevel = Logging.WarningLevel override val toString: String = s"The remote system ${remoteAddress} has quarantined this system ${localAddress}." 
} diff --git a/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala b/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala index 2e71091bb2..38492d7dc8 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/ArterySettings.scala @@ -25,8 +25,7 @@ private[akka] final class ArterySettings private (config: Config) { import config._ def withDisabledCompression(): ArterySettings = - ArterySettings(ConfigFactory.parseString( - """|akka.remote.artery.advanced.compression { + ArterySettings(ConfigFactory.parseString("""|akka.remote.artery.advanced.compression { | actor-refs.max = 0 | manifests.max = 0 |}""".stripMargin).withFallback(config)) @@ -37,8 +36,7 @@ private[akka] final class ArterySettings private (config: Config) { val config: Config = getConfig("canonical") import config._ - val Port: Int = getInt("port").requiring(port => - 0 to 65535 contains port, "canonical.port must be 0 through 65535") + val Port: Int = getInt("port").requiring(port => 0 to 65535 contains port, "canonical.port must be 0 through 65535") val Hostname: String = getHostname("hostname", config) } @@ -55,8 +53,8 @@ private[akka] final class ArterySettings private (config: Config) { case other => other } - val BindTimeout: FiniteDuration = config.getMillisDuration("bind-timeout").requiring( - _ > Duration.Zero, "bind-timeout can not be negative") + val BindTimeout: FiniteDuration = + config.getMillisDuration("bind-timeout").requiring(_ > Duration.Zero, "bind-timeout can not be negative") } val LargeMessageDestinations: WildcardIndex[NotUsed] = @@ -78,8 +76,10 @@ private[akka] final class ArterySettings private (config: Config) { case AeronUpd.configName => AeronUpd case Tcp.configName => Tcp case TlsTcp.configName => TlsTcp - case other => throw new IllegalArgumentException(s"Unknown transport [$other], possible values: " + - s""""${AeronUpd.configName}", "${Tcp.configName}", or 
"${TlsTcp.configName}"""") + case other => + throw new IllegalArgumentException( + s"Unknown transport [$other], possible values: " + + s""""${AeronUpd.configName}", "${Tcp.configName}", or "${TlsTcp.configName}"""") } /** @@ -109,88 +109,108 @@ private[akka] final class ArterySettings private (config: Config) { } val EmbeddedMediaDriver: Boolean = getBoolean("embedded-media-driver") - val AeronDirectoryName: String = getString("aeron-dir") requiring (dir => - EmbeddedMediaDriver || dir.nonEmpty, "aeron-dir must be defined when using external media driver") + val AeronDirectoryName: String = getString("aeron-dir").requiring( + dir => EmbeddedMediaDriver || dir.nonEmpty, + "aeron-dir must be defined when using external media driver") val DeleteAeronDirectory: Boolean = getBoolean("delete-aeron-dir") - val IdleCpuLevel: Int = getInt("idle-cpu-level").requiring(level => - 1 <= level && level <= 10, "idle-cpu-level must be between 1 and 10") - val OutboundLanes: Int = getInt("outbound-lanes").requiring(n => - n > 0, "outbound-lanes must be greater than zero") - val InboundLanes: Int = getInt("inbound-lanes").requiring(n => - n > 0, "inbound-lanes must be greater than zero") - val SysMsgBufferSize: Int = getInt("system-message-buffer-size").requiring( - _ > 0, "system-message-buffer-size must be more than zero") - val OutboundMessageQueueSize: Int = getInt("outbound-message-queue-size").requiring( - _ > 0, "outbound-message-queue-size must be more than zero") - val OutboundControlQueueSize: Int = getInt("outbound-control-queue-size").requiring( - _ > 0, "outbound-control-queue-size must be more than zero") - val OutboundLargeMessageQueueSize: Int = getInt("outbound-large-message-queue-size").requiring( - _ > 0, "outbound-large-message-queue-size must be more than zero") + val IdleCpuLevel: Int = + getInt("idle-cpu-level").requiring(level => 1 <= level && level <= 10, "idle-cpu-level must be between 1 and 10") + val OutboundLanes: Int = 
getInt("outbound-lanes").requiring(n => n > 0, "outbound-lanes must be greater than zero") + val InboundLanes: Int = getInt("inbound-lanes").requiring(n => n > 0, "inbound-lanes must be greater than zero") + val SysMsgBufferSize: Int = + getInt("system-message-buffer-size").requiring(_ > 0, "system-message-buffer-size must be more than zero") + val OutboundMessageQueueSize: Int = + getInt("outbound-message-queue-size").requiring(_ > 0, "outbound-message-queue-size must be more than zero") + val OutboundControlQueueSize: Int = + getInt("outbound-control-queue-size").requiring(_ > 0, "outbound-control-queue-size must be more than zero") + val OutboundLargeMessageQueueSize: Int = getInt("outbound-large-message-queue-size") + .requiring(_ > 0, "outbound-large-message-queue-size must be more than zero") val SystemMessageResendInterval: FiniteDuration = - config.getMillisDuration("system-message-resend-interval").requiring(interval => - interval > Duration.Zero, "system-message-resend-interval must be more than zero") - val HandshakeTimeout: FiniteDuration = config.getMillisDuration("handshake-timeout").requiring(interval => - interval > Duration.Zero, "handshake-timeout must be more than zero") + config + .getMillisDuration("system-message-resend-interval") + .requiring(interval => interval > Duration.Zero, "system-message-resend-interval must be more than zero") + val HandshakeTimeout: FiniteDuration = config + .getMillisDuration("handshake-timeout") + .requiring(interval => interval > Duration.Zero, "handshake-timeout must be more than zero") val HandshakeRetryInterval: FiniteDuration = - config.getMillisDuration("handshake-retry-interval").requiring(interval => - interval > Duration.Zero, "handshake-retry-interval must be more than zero") + config + .getMillisDuration("handshake-retry-interval") + .requiring(interval => interval > Duration.Zero, "handshake-retry-interval must be more than zero") val InjectHandshakeInterval: FiniteDuration = - 
config.getMillisDuration("inject-handshake-interval").requiring(interval => - interval > Duration.Zero, "inject-handshake-interval must be more than zero") - val ConnectionTimeout: FiniteDuration = config.getMillisDuration("connection-timeout").requiring(interval => - interval > Duration.Zero, "connection-timeout must be more than zero") - val GiveUpMessageAfter: FiniteDuration = config.getMillisDuration("give-up-message-after").requiring(interval => - interval > Duration.Zero, "give-up-message-after must be more than zero") + config + .getMillisDuration("inject-handshake-interval") + .requiring(interval => interval > Duration.Zero, "inject-handshake-interval must be more than zero") + val ConnectionTimeout: FiniteDuration = config + .getMillisDuration("connection-timeout") + .requiring(interval => interval > Duration.Zero, "connection-timeout must be more than zero") + val GiveUpMessageAfter: FiniteDuration = config + .getMillisDuration("give-up-message-after") + .requiring(interval => interval > Duration.Zero, "give-up-message-after must be more than zero") val GiveUpSystemMessageAfter: FiniteDuration = - config.getMillisDuration("give-up-system-message-after").requiring(interval => - interval > Duration.Zero, "give-up-system-message-after must be more than zero") - val StopIdleOutboundAfter: FiniteDuration = config.getMillisDuration("stop-idle-outbound-after") + config + .getMillisDuration("give-up-system-message-after") + .requiring(interval => interval > Duration.Zero, "give-up-system-message-after must be more than zero") + val StopIdleOutboundAfter: FiniteDuration = config + .getMillisDuration("stop-idle-outbound-after") .requiring(interval => interval > Duration.Zero, "stop-idle-outbound-after must be more than zero") - val QuarantineIdleOutboundAfter: FiniteDuration = config.getMillisDuration("quarantine-idle-outbound-after") - .requiring( - interval => interval > StopIdleOutboundAfter, - "quarantine-idle-outbound-after must be greater than 
stop-idle-outbound-after") + val QuarantineIdleOutboundAfter: FiniteDuration = config + .getMillisDuration("quarantine-idle-outbound-after") + .requiring(interval => interval > StopIdleOutboundAfter, + "quarantine-idle-outbound-after must be greater than stop-idle-outbound-after") val StopQuarantinedAfterIdle: FiniteDuration = - config.getMillisDuration("stop-quarantined-after-idle").requiring(interval => - interval > Duration.Zero, "stop-quarantined-after-idle must be more than zero") + config + .getMillisDuration("stop-quarantined-after-idle") + .requiring(interval => interval > Duration.Zero, "stop-quarantined-after-idle must be more than zero") val RemoveQuarantinedAssociationAfter: FiniteDuration = - config.getMillisDuration("remove-quarantined-association-after").requiring(interval => - interval > Duration.Zero, "remove-quarantined-association-after must be more than zero") + config + .getMillisDuration("remove-quarantined-association-after") + .requiring(interval => interval > Duration.Zero, "remove-quarantined-association-after must be more than zero") val ShutdownFlushTimeout: FiniteDuration = - config.getMillisDuration("shutdown-flush-timeout").requiring(interval => - interval > Duration.Zero, "shutdown-flush-timeout must be more than zero") + config + .getMillisDuration("shutdown-flush-timeout") + .requiring(interval => interval > Duration.Zero, "shutdown-flush-timeout must be more than zero") val InboundRestartTimeout: FiniteDuration = - config.getMillisDuration("inbound-restart-timeout").requiring(interval => - interval > Duration.Zero, "inbound-restart-timeout must be more than zero") + config + .getMillisDuration("inbound-restart-timeout") + .requiring(interval => interval > Duration.Zero, "inbound-restart-timeout must be more than zero") val InboundMaxRestarts: Int = getInt("inbound-max-restarts") val OutboundRestartBackoff: FiniteDuration = - config.getMillisDuration("outbound-restart-backoff").requiring(interval => - interval > Duration.Zero, 
"outbound-restart-backoff must be more than zero") + config + .getMillisDuration("outbound-restart-backoff") + .requiring(interval => interval > Duration.Zero, "outbound-restart-backoff must be more than zero") val OutboundRestartTimeout: FiniteDuration = - config.getMillisDuration("outbound-restart-timeout").requiring(interval => - interval > Duration.Zero, "outbound-restart-timeout must be more than zero") + config + .getMillisDuration("outbound-restart-timeout") + .requiring(interval => interval > Duration.Zero, "outbound-restart-timeout must be more than zero") val OutboundMaxRestarts: Int = getInt("outbound-max-restarts") val ClientLivenessTimeout: FiniteDuration = - config.getMillisDuration("client-liveness-timeout").requiring(interval => - interval > Duration.Zero, "client-liveness-timeout must be more than zero") - val ImageLivenessTimeout: FiniteDuration = config.getMillisDuration("image-liveness-timeout").requiring(interval => - interval > Duration.Zero, "image-liveness-timeout must be more than zero") + config + .getMillisDuration("client-liveness-timeout") + .requiring(interval => interval > Duration.Zero, "client-liveness-timeout must be more than zero") + val ImageLivenessTimeout: FiniteDuration = config + .getMillisDuration("image-liveness-timeout") + .requiring(interval => interval > Duration.Zero, "image-liveness-timeout must be more than zero") require(ImageLivenessTimeout < HandshakeTimeout, "image-liveness-timeout must be less than handshake-timeout") - val DriverTimeout: FiniteDuration = config.getMillisDuration("driver-timeout").requiring(interval => - interval > Duration.Zero, "driver-timeout must be more than zero") + val DriverTimeout: FiniteDuration = config + .getMillisDuration("driver-timeout") + .requiring(interval => interval > Duration.Zero, "driver-timeout must be more than zero") val FlightRecorderEnabled: Boolean = getBoolean("flight-recorder.enabled") val FlightRecorderDestination: String = getString("flight-recorder.destination") 
val Compression = new Compression(getConfig("compression")) - final val MaximumFrameSize: Int = math.min(getBytes("maximum-frame-size"), Int.MaxValue).toInt + final val MaximumFrameSize: Int = math + .min(getBytes("maximum-frame-size"), Int.MaxValue) + .toInt .requiring(_ >= 32 * 1024, "maximum-frame-size must be greater than or equal to 32 KiB") - final val BufferPoolSize: Int = getInt("buffer-pool-size") - .requiring(_ > 0, "buffer-pool-size must be greater than 0") + final val BufferPoolSize: Int = + getInt("buffer-pool-size").requiring(_ > 0, "buffer-pool-size must be greater than 0") final val InboundHubBufferSize = BufferPoolSize / 2 - final val MaximumLargeFrameSize: Int = math.min(getBytes("maximum-large-frame-size"), Int.MaxValue).toInt + final val MaximumLargeFrameSize: Int = math + .min(getBytes("maximum-large-frame-size"), Int.MaxValue) + .toInt .requiring(_ >= 32 * 1024, "maximum-large-frame-size must be greater than or equal to 32 KiB") - final val LargeBufferPoolSize: Int = getInt("large-buffer-pool-size") - .requiring(_ > 0, "large-buffer-pool-size must be greater than 0") + final val LargeBufferPoolSize: Int = + getInt("large-buffer-pool-size").requiring(_ > 0, "large-buffer-pool-size must be greater than 0") } } diff --git a/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala index 2873a921b6..674e136a13 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/ArteryTransport.scala @@ -55,7 +55,7 @@ import akka.stream.SharedKillSwitch import akka.stream.scaladsl.Flow import akka.stream.scaladsl.Keep import akka.stream.scaladsl.Sink -import akka.util.{ OptionVal, WildcardIndex, unused } +import akka.util.{ unused, OptionVal, WildcardIndex } /** * INTERNAL API @@ -63,6 +63,7 @@ import akka.util.{ OptionVal, WildcardIndex, unused } * Separate trait to facilitate testing without real 
transport. */ private[remote] trait InboundContext { + /** * The local inbound address. */ @@ -97,12 +98,11 @@ private[remote] trait InboundContext { */ private[remote] object AssociationState { def apply(): AssociationState = - new AssociationState( - incarnation = 1, - uniqueRemoteAddressPromise = Promise(), - lastUsedTimestamp = new AtomicLong(System.nanoTime()), - controlIdleKillSwitch = OptionVal.None, - quarantined = ImmutableLongMap.empty[QuarantinedTimestamp]) + new AssociationState(incarnation = 1, + uniqueRemoteAddressPromise = Promise(), + lastUsedTimestamp = new AtomicLong(System.nanoTime()), + controlIdleKillSwitch = OptionVal.None, + quarantined = ImmutableLongMap.empty[QuarantinedTimestamp]) final case class QuarantinedTimestamp(nanoTime: Long) { override def toString: String = @@ -113,12 +113,11 @@ private[remote] object AssociationState { /** * INTERNAL API */ -private[remote] final class AssociationState( - val incarnation: Int, - val uniqueRemoteAddressPromise: Promise[UniqueAddress], - val lastUsedTimestamp: AtomicLong, // System.nanoTime timestamp - val controlIdleKillSwitch: OptionVal[SharedKillSwitch], - val quarantined: ImmutableLongMap[AssociationState.QuarantinedTimestamp]) { +private[remote] final class AssociationState(val incarnation: Int, + val uniqueRemoteAddressPromise: Promise[UniqueAddress], + val lastUsedTimestamp: AtomicLong, // System.nanoTime timestamp + val controlIdleKillSwitch: OptionVal[SharedKillSwitch], + val quarantined: ImmutableLongMap[AssociationState.QuarantinedTimestamp]) { import AssociationState.QuarantinedTimestamp @@ -145,18 +144,20 @@ private[remote] final class AssociationState( } def newIncarnation(remoteAddressPromise: Promise[UniqueAddress]): AssociationState = - new AssociationState(incarnation + 1, remoteAddressPromise, - lastUsedTimestamp = new AtomicLong(System.nanoTime()), controlIdleKillSwitch, quarantined) + new AssociationState(incarnation + 1, + remoteAddressPromise, + lastUsedTimestamp = new 
AtomicLong(System.nanoTime()), + controlIdleKillSwitch, + quarantined) def newQuarantined(): AssociationState = uniqueRemoteAddressPromise.future.value match { case Some(Success(a)) => - new AssociationState( - incarnation, - uniqueRemoteAddressPromise, - lastUsedTimestamp = new AtomicLong(System.nanoTime()), - controlIdleKillSwitch, - quarantined = quarantined.updated(a.uid, QuarantinedTimestamp(System.nanoTime()))) + new AssociationState(incarnation, + uniqueRemoteAddressPromise, + lastUsedTimestamp = new AtomicLong(System.nanoTime()), + controlIdleKillSwitch, + quarantined = quarantined.updated(a.uid, QuarantinedTimestamp(System.nanoTime()))) case _ => this } @@ -170,8 +171,11 @@ private[remote] final class AssociationState( def isQuarantined(uid: Long): Boolean = quarantined.contains(uid) def withControlIdleKillSwitch(killSwitch: OptionVal[SharedKillSwitch]): AssociationState = - new AssociationState(incarnation, uniqueRemoteAddressPromise, lastUsedTimestamp, - controlIdleKillSwitch = killSwitch, quarantined) + new AssociationState(incarnation, + uniqueRemoteAddressPromise, + lastUsedTimestamp, + controlIdleKillSwitch = killSwitch, + quarantined) override def toString(): String = { val a = uniqueRemoteAddressPromise.future.value match { @@ -190,6 +194,7 @@ private[remote] final class AssociationState( * Separate trait to facilitate testing without real transport. */ private[remote] trait OutboundContext { + /** * The local inbound address. 
*/ @@ -229,8 +234,10 @@ private[remote] trait OutboundContext { * INTERNAL API */ private[remote] object FlushOnShutdown { - def props(done: Promise[Done], timeout: FiniteDuration, - inboundContext: InboundContext, associations: Set[Association]): Props = { + def props(done: Promise[Done], + timeout: FiniteDuration, + inboundContext: InboundContext, + associations: Set[Association]): Props = { require(associations.nonEmpty) Props(new FlushOnShutdown(done, timeout, inboundContext, associations)) } @@ -241,11 +248,11 @@ private[remote] object FlushOnShutdown { /** * INTERNAL API */ -private[remote] class FlushOnShutdown( - done: Promise[Done], - timeout: FiniteDuration, - @unused inboundContext: InboundContext, - associations: Set[Association]) extends Actor { +private[remote] class FlushOnShutdown(done: Promise[Done], + timeout: FiniteDuration, + @unused inboundContext: InboundContext, + associations: Set[Association]) + extends Actor { var remaining = Map.empty[UniqueAddress, Int] @@ -298,7 +305,8 @@ private[remote] class FlushOnShutdown( * INTERNAL API */ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider) - extends RemoteTransport(_system, _provider) with InboundContext { + extends RemoteTransport(_system, _provider) + with InboundContext { import ArteryTransport._ import FlightRecorderEvents._ @@ -334,6 +342,7 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr } @volatile private[this] var _inboundCompressionAccess: OptionVal[InboundCompressionAccess] = OptionVal.None + /** Only access compression tables via the CompressionAccess */ def inboundCompressionAccess: OptionVal[InboundCompressionAccess] = _inboundCompressionAccess protected def setInboundCompressionAccess(a: InboundCompressionAccess): Unit = @@ -357,21 +366,23 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr val largeMessageChannelEnabled: Boolean = 
!settings.LargeMessageDestinations.wildcardTree.isEmpty || - !settings.LargeMessageDestinations.doubleWildcardTree.isEmpty + !settings.LargeMessageDestinations.doubleWildcardTree.isEmpty private val priorityMessageDestinations = WildcardIndex[NotUsed]() - // These destinations are not defined in configuration because it should not - // be possible to abuse the control channel + // These destinations are not defined in configuration because it should not + // be possible to abuse the control channel .insert(Array("system", "remote-watcher"), NotUsed) // these belongs to cluster and should come from there .insert(Array("system", "cluster", "core", "daemon", "heartbeatSender"), NotUsed) .insert(Array("system", "cluster", "core", "daemon", "crossDcHeartbeatSender"), NotUsed) .insert(Array("system", "cluster", "heartbeatReceiver"), NotUsed) - private val restartCounter = new RestartCounter(settings.Advanced.InboundMaxRestarts, settings.Advanced.InboundRestartTimeout) + private val restartCounter = + new RestartCounter(settings.Advanced.InboundMaxRestarts, settings.Advanced.InboundRestartTimeout) - protected val envelopeBufferPool = new EnvelopeBufferPool(settings.Advanced.MaximumFrameSize, settings.Advanced.BufferPoolSize) + protected val envelopeBufferPool = + new EnvelopeBufferPool(settings.Advanced.MaximumFrameSize, settings.Advanced.BufferPoolSize) protected val largeEnvelopeBufferPool = if (largeMessageChannelEnabled) new EnvelopeBufferPool(settings.Advanced.MaximumLargeFrameSize, settings.Advanced.LargeBufferPoolSize) @@ -380,8 +391,9 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr private val inboundEnvelopePool = ReusableInboundEnvelope.createObjectPool(capacity = 16) // The outboundEnvelopePool is shared among all outbound associations - private val outboundEnvelopePool = ReusableOutboundEnvelope.createObjectPool(capacity = - settings.Advanced.OutboundMessageQueueSize * settings.Advanced.OutboundLanes * 3) + private val 
outboundEnvelopePool = ReusableOutboundEnvelope.createObjectPool( + capacity = + settings.Advanced.OutboundMessageQueueSize * settings.Advanced.OutboundLanes * 3) /** * Thread-safe flight recorder for top level events. @@ -401,15 +413,15 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr } private val associationRegistry = new AssociationRegistry( - remoteAddress => new Association( - this, - materializer, - controlMaterializer, - remoteAddress, - controlSubject, - settings.LargeMessageDestinations, - priorityMessageDestinations, - outboundEnvelopePool)) + remoteAddress => + new Association(this, + materializer, + controlMaterializer, + remoteAddress, + controlSubject, + settings.LargeMessageDestinations, + priorityMessageDestinations, + outboundEnvelopePool)) def remoteAddresses: Set[Address] = associationRegistry.allAssociations.map(_.remoteAddress) @@ -434,22 +446,19 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr else ArteryTransport.autoSelectPort(settings.Bind.Hostname, udp) } else settings.Bind.Port - _localAddress = UniqueAddress( - Address(ArteryTransport.ProtocolName, system.name, settings.Canonical.Hostname, port), - AddressUidExtension(system).longAddressUid) + _localAddress = UniqueAddress(Address(ArteryTransport.ProtocolName, system.name, settings.Canonical.Hostname, port), + AddressUidExtension(system).longAddressUid) _addresses = Set(_localAddress.address) - _bindAddress = UniqueAddress( - Address(ArteryTransport.ProtocolName, system.name, settings.Bind.Hostname, bindPort), - AddressUidExtension(system).longAddressUid) + _bindAddress = UniqueAddress(Address(ArteryTransport.ProtocolName, system.name, settings.Bind.Hostname, bindPort), + AddressUidExtension(system).longAddressUid) // TODO: This probably needs to be a global value instead of an event as events might rotate out of the log topLevelFlightRecorder.loFreq(Transport_UniqueAddressSet, _localAddress.toString()) materializer = 
ActorMaterializer.systemMaterializer(settings.Advanced.MaterializerSettings, "remote", system) - controlMaterializer = ActorMaterializer.systemMaterializer( - settings.Advanced.MaterializerSettings, - "remoteControl", system) + controlMaterializer = + ActorMaterializer.systemMaterializer(settings.Advanced.MaterializerSettings, "remoteControl", system) messageDispatcher = new MessageDispatcher(system, provider) topLevelFlightRecorder.loFreq(Transport_MaterializerStarted, NoMetaData) @@ -460,13 +469,16 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr startRemoveQuarantinedAssociationTask() if (localAddress.address == bindAddress.address) - log.info( - "Remoting started with transport [Artery {}]; listening on address [{}] with UID [{}]", - settings.Transport, bindAddress.address, bindAddress.uid) + log.info("Remoting started with transport [Artery {}]; listening on address [{}] with UID [{}]", + settings.Transport, + bindAddress.address, + bindAddress.uid) else { log.info( s"Remoting started with transport [Artery ${settings.Transport}]; listening on address [{}] and bound to [{}] with UID [{}]", - localAddress.address, bindAddress.address, localAddress.uid) + localAddress.address, + bindAddress.address, + localAddress.uid) } } @@ -508,9 +520,9 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr // totalTimeout will be 0 when no tasks registered, so at least 3.seconds val totalTimeout = coord.totalTimeout().max(3.seconds) if (!coord.jvmHooksLatch.await(totalTimeout.toMillis, TimeUnit.MILLISECONDS)) - log.warning( - "CoordinatedShutdown took longer than [{}]. Shutting down [{}] via shutdownHook", - totalTimeout, localAddress) + log.warning("CoordinatedShutdown took longer than [{}]. 
Shutting down [{}] via shutdownHook", + totalTimeout, + localAddress) else log.debug("Shutting down [{}] via shutdownHook", localAddress) if (hasBeenShutdown.compareAndSet(false, true)) { @@ -542,16 +554,20 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr } } } else - log.debug( - "Discarding incoming ActorRef compression advertisement from [{}] that was " + - "prepared for another incarnation with uid [{}] than current uid [{}], table: [{}]", - from, table.originUid, localAddress.uid, table) + log.debug("Discarding incoming ActorRef compression advertisement from [{}] that was " + + "prepared for another incarnation with uid [{}] than current uid [{}], table: [{}]", + from, + table.originUid, + localAddress.uid, + table) case ack: ActorRefCompressionAdvertisementAck => inboundCompressionAccess match { case OptionVal.Some(access) => access.confirmActorRefCompressionAdvertisementAck(ack) case _ => log.debug(s"Received {} version: [{}] however no inbound compression access was present. 
" + - s"ACK will not take effect, however it will be redelivered and likely to apply then.", Logging.simpleName(ack), ack.tableVersion) + s"ACK will not take effect, however it will be redelivered and likely to apply then.", + Logging.simpleName(ack), + ack.tableVersion) } case ClassManifestCompressionAdvertisement(from, table) => @@ -567,16 +583,20 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr } } } else - log.debug( - "Discarding incoming Class Manifest compression advertisement from [{}] that was " + - "prepared for another incarnation with uid [{}] than current uid [{}], table: [{}]", - from, table.originUid, localAddress.uid, table) + log.debug("Discarding incoming Class Manifest compression advertisement from [{}] that was " + + "prepared for another incarnation with uid [{}] than current uid [{}], table: [{}]", + from, + table.originUid, + localAddress.uid, + table) case ack: ClassManifestCompressionAdvertisementAck => inboundCompressionAccess match { case OptionVal.Some(access) => access.confirmClassManifestCompressionAdvertisementAck(ack) case _ => log.debug(s"Received {} version: [{}] however no inbound compression access was present. 
" + - s"ACK will not take effect, however it will be redelivered and likely to apply then.", Logging.simpleName(ack), ack.tableVersion) + s"ACK will not take effect, however it will be redelivered and likely to apply then.", + Logging.simpleName(ack), + ack.tableVersion) } } @@ -600,11 +620,13 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr } - protected def attachInboundStreamRestart(streamName: String, streamCompleted: Future[Done], restart: () => Unit): Unit = { + protected def attachInboundStreamRestart(streamName: String, + streamCompleted: Future[Done], + restart: () => Unit): Unit = { implicit val ec = materializer.executionContext streamCompleted.failed.foreach { - case ShutdownSignal => // shutdown as expected - case _: AeronTerminated => // shutdown already in progress + case ShutdownSignal => // shutdown as expected + case _: AeronTerminated => // shutdown already in progress case cause if isShutdown => // don't restart after shutdown, but log some details so we notice log.error(cause, s"{} failed after shutdown. {}", streamName, cause.getMessage) @@ -615,8 +637,12 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr topLevelFlightRecorder.loFreq(Transport_RestartInbound, s"$localAddress - $streamName") restart() } else { - log.error(cause, "{} failed and restarted {} times within {} seconds. Terminating system. {}", - streamName, settings.Advanced.InboundMaxRestarts, settings.Advanced.InboundRestartTimeout.toSeconds, cause.getMessage) + log.error(cause, + "{} failed and restarted {} times within {} seconds. Terminating system. 
{}", + streamName, + settings.Advanced.InboundMaxRestarts, + settings.Advanced.InboundRestartTimeout.toSeconds, + cause.getMessage) system.terminate() } } @@ -632,8 +658,9 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr if (allAssociations.isEmpty) Future.successful(Done) else { val flushingPromise = Promise[Done]() - system.systemActorOf(FlushOnShutdown.props(flushingPromise, settings.Advanced.ShutdownFlushTimeout, - this, allAssociations), "remoteFlushOnShutdown") + system.systemActorOf( + FlushOnShutdown.props(flushingPromise, settings.Advanced.ShutdownFlushTimeout, this, allAssociations), + "remoteFlushOnShutdown") flushingPromise.future } implicit val ec = system.dispatcher @@ -649,7 +676,7 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr killSwitch.abort(ShutdownSignal) topLevelFlightRecorder.loFreq(Transport_KillSwitchPulled, NoMetaData) for { - _ <- streamsCompleted.recover { case _ => Done } + _ <- streamsCompleted.recover { case _ => Done } _ <- shutdownTransport().recover { case _ => Done } } yield { // no need to explicitly shut down the contained access since it's lifecycle is bound to the Decoder @@ -756,46 +783,69 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr } def outboundLarge(outboundContext: OutboundContext): Sink[OutboundEnvelope, Future[Done]] = - createOutboundSink(LargeStreamId, outboundContext, largeEnvelopeBufferPool) - .mapMaterializedValue { case (_, d) => d } + createOutboundSink(LargeStreamId, outboundContext, largeEnvelopeBufferPool).mapMaterializedValue { + case (_, d) => d + } def outbound(outboundContext: OutboundContext): Sink[OutboundEnvelope, (OutboundCompressionAccess, Future[Done])] = createOutboundSink(OrdinaryStreamId, outboundContext, envelopeBufferPool) - private def createOutboundSink(streamId: Int, outboundContext: OutboundContext, - bufferPool: EnvelopeBufferPool): Sink[OutboundEnvelope, 
(OutboundCompressionAccess, Future[Done])] = { + private def createOutboundSink( + streamId: Int, + outboundContext: OutboundContext, + bufferPool: EnvelopeBufferPool): Sink[OutboundEnvelope, (OutboundCompressionAccess, Future[Done])] = { - outboundLane(outboundContext, bufferPool, streamId) - .toMat(outboundTransportSink(outboundContext, streamId, bufferPool))(Keep.both) + outboundLane(outboundContext, bufferPool, streamId).toMat( + outboundTransportSink(outboundContext, streamId, bufferPool))(Keep.both) } def outboundTransportSink(outboundContext: OutboundContext): Sink[EnvelopeBuffer, Future[Done]] = outboundTransportSink(outboundContext, OrdinaryStreamId, envelopeBufferPool) - protected def outboundTransportSink(outboundContext: OutboundContext, streamId: Int, + protected def outboundTransportSink(outboundContext: OutboundContext, + streamId: Int, bufferPool: EnvelopeBufferPool): Sink[EnvelopeBuffer, Future[Done]] - def outboundLane(outboundContext: OutboundContext): Flow[OutboundEnvelope, EnvelopeBuffer, OutboundCompressionAccess] = + def outboundLane( + outboundContext: OutboundContext): Flow[OutboundEnvelope, EnvelopeBuffer, OutboundCompressionAccess] = outboundLane(outboundContext, envelopeBufferPool, OrdinaryStreamId) - private def outboundLane( - outboundContext: OutboundContext, - bufferPool: EnvelopeBufferPool, streamId: Int): Flow[OutboundEnvelope, EnvelopeBuffer, OutboundCompressionAccess] = { + private def outboundLane(outboundContext: OutboundContext, + bufferPool: EnvelopeBufferPool, + streamId: Int): Flow[OutboundEnvelope, EnvelopeBuffer, OutboundCompressionAccess] = { - Flow.fromGraph(killSwitch.flow[OutboundEnvelope]) - .via(new OutboundHandshake(system, outboundContext, outboundEnvelopePool, settings.Advanced.HandshakeTimeout, - settings.Advanced.HandshakeRetryInterval, settings.Advanced.InjectHandshakeInterval, Duration.Undefined)) + Flow + .fromGraph(killSwitch.flow[OutboundEnvelope]) + .via( + new OutboundHandshake(system, + outboundContext, 
+ outboundEnvelopePool, + settings.Advanced.HandshakeTimeout, + settings.Advanced.HandshakeRetryInterval, + settings.Advanced.InjectHandshakeInterval, + Duration.Undefined)) .viaMat(createEncoder(bufferPool, streamId))(Keep.right) } - def outboundControl(outboundContext: OutboundContext): Sink[OutboundEnvelope, (OutboundControlIngress, Future[Done])] = { - val livenessProbeInterval = (settings.Advanced.QuarantineIdleOutboundAfter / 10) - .max(settings.Advanced.HandshakeRetryInterval) - Flow.fromGraph(killSwitch.flow[OutboundEnvelope]) - .via(new OutboundHandshake(system, outboundContext, outboundEnvelopePool, settings.Advanced.HandshakeTimeout, - settings.Advanced.HandshakeRetryInterval, settings.Advanced.InjectHandshakeInterval, livenessProbeInterval)) - .via(new SystemMessageDelivery(outboundContext, system.deadLetters, settings.Advanced.SystemMessageResendInterval, - settings.Advanced.SysMsgBufferSize)) + def outboundControl( + outboundContext: OutboundContext): Sink[OutboundEnvelope, (OutboundControlIngress, Future[Done])] = { + val livenessProbeInterval = + (settings.Advanced.QuarantineIdleOutboundAfter / 10).max(settings.Advanced.HandshakeRetryInterval) + Flow + .fromGraph(killSwitch.flow[OutboundEnvelope]) + .via( + new OutboundHandshake(system, + outboundContext, + outboundEnvelopePool, + settings.Advanced.HandshakeTimeout, + settings.Advanced.HandshakeRetryInterval, + settings.Advanced.InjectHandshakeInterval, + livenessProbeInterval)) + .via( + new SystemMessageDelivery(outboundContext, + system.deadLetters, + settings.Advanced.SystemMessageResendInterval, + settings.Advanced.SysMsgBufferSize)) // note that System messages must not be dropped before the SystemMessageDelivery stage .via(outboundTestFlow(outboundContext)) .viaMat(new OutboundControlJunction(outboundContext, outboundEnvelopePool))(Keep.right) @@ -805,11 +855,14 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr // TODO we can also add scrubbing stage that 
would collapse sys msg acks/nacks and remove duplicate Quarantine messages } - def createEncoder(pool: EnvelopeBufferPool, streamId: Int): Flow[OutboundEnvelope, EnvelopeBuffer, OutboundCompressionAccess] = - Flow.fromGraph(new Encoder(localAddress, system, outboundEnvelopePool, pool, streamId, settings.LogSend, - settings.Version)) + def createEncoder(pool: EnvelopeBufferPool, + streamId: Int): Flow[OutboundEnvelope, EnvelopeBuffer, OutboundCompressionAccess] = + Flow.fromGraph( + new Encoder(localAddress, system, outboundEnvelopePool, pool, streamId, settings.LogSend, settings.Version)) - def createDecoder(settings: ArterySettings, compressions: InboundCompressions): Flow[EnvelopeBuffer, InboundEnvelope, InboundCompressionAccess] = + def createDecoder( + settings: ArterySettings, + compressions: InboundCompressions): Flow[EnvelopeBuffer, InboundEnvelope, InboundCompressionAccess] = Flow.fromGraph(new Decoder(this, system, localAddress, settings, compressions, inboundEnvelopePool)) def createDeserializer(bufferPool: EnvelopeBufferPool): Flow[InboundEnvelope, InboundEnvelope, NotUsed] = @@ -856,10 +909,10 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr .via(new InboundQuarantineCheck(this)) .toMat(messageDispatcherSink)(Keep.right) - def inboundFlow(settings: ArterySettings, compressions: InboundCompressions): Flow[EnvelopeBuffer, InboundEnvelope, InboundCompressionAccess] = { - Flow[EnvelopeBuffer] - .via(killSwitch.flow) - .viaMat(createDecoder(settings, compressions))(Keep.right) + def inboundFlow( + settings: ArterySettings, + compressions: InboundCompressions): Flow[EnvelopeBuffer, InboundEnvelope, InboundCompressionAccess] = { + Flow[EnvelopeBuffer].via(killSwitch.flow).viaMat(createDecoder(settings, compressions))(Keep.right) } // large messages flow does not use compressions, since the message size dominates the size anyway @@ -926,9 +979,7 @@ private[remote] object ArteryTransport { // thrown when the transport is 
shutting down and something triggers a new association object ShuttingDown extends RuntimeException with NoStackTrace - final case class InboundStreamMatValues[LifeCycle]( - lifeCycle: LifeCycle, - completed: Future[Done]) + final case class InboundStreamMatValues[LifeCycle](lifeCycle: LifeCycle, completed: Future[Done]) def autoSelectPort(hostname: String, udp: Boolean): Int = { if (udp) { diff --git a/akka-remote/src/main/scala/akka/remote/artery/Association.scala b/akka-remote/src/main/scala/akka/remote/artery/Association.scala index 9e932b128a..288e65da9f 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/Association.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/Association.scala @@ -107,10 +107,9 @@ private[remote] object Association { case object OutboundStreamStopIdleSignal extends RuntimeException("") with StopSignal with NoStackTrace case object OutboundStreamStopQuarantinedSignal extends RuntimeException("") with StopSignal with NoStackTrace - final case class OutboundStreamMatValues( - streamKillSwitch: OptionVal[SharedKillSwitch], - completed: Future[Done], - stopping: OptionVal[StopSignal]) + final case class OutboundStreamMatValues(streamKillSwitch: OptionVal[SharedKillSwitch], + completed: Future[Done], + stopping: OptionVal[StopSignal]) } /** @@ -119,16 +118,16 @@ private[remote] object Association { * Thread-safe, mutable holder for association state. Main entry point for remote destined message to a specific * remote address. 
*/ -private[remote] class Association( - val transport: ArteryTransport, - val materializer: Materializer, - val controlMaterializer: Materializer, - override val remoteAddress: Address, - override val controlSubject: ControlMessageSubject, - largeMessageDestinations: WildcardIndex[NotUsed], - priorityMessageDestinations: WildcardIndex[NotUsed], - outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope]) - extends AbstractAssociation with OutboundContext { +private[remote] class Association(val transport: ArteryTransport, + val materializer: Materializer, + val controlMaterializer: Materializer, + override val remoteAddress: Address, + override val controlSubject: ControlMessageSubject, + largeMessageDestinations: WildcardIndex[NotUsed], + priorityMessageDestinations: WildcardIndex[NotUsed], + outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope]) + extends AbstractAssociation + with OutboundContext { import Association._ import FlightRecorderEvents._ @@ -140,7 +139,8 @@ private[remote] class Association( override def settings = transport.settings private def advancedSettings = transport.settings.Advanced - private val restartCounter = new RestartCounter(advancedSettings.OutboundMaxRestarts, advancedSettings.OutboundRestartTimeout) + private val restartCounter = + new RestartCounter(advancedSettings.OutboundMaxRestarts, advancedSettings.OutboundRestartTimeout) // We start with the raw wrapped queue and then it is replaced with the materialized value of // the `SendQueue` after materialization. Using same underlying queue. 
This makes it possible to @@ -260,9 +260,8 @@ private[remote] class Association( } def completeHandshake(peer: UniqueAddress): Future[Done] = { - require( - remoteAddress == peer.address, - s"wrong remote address in completeHandshake, got ${peer.address}, expected $remoteAddress") + require(remoteAddress == peer.address, + s"wrong remote address in completeHandshake, got ${peer.address}, expected $remoteAddress") val current = associationState current.uniqueRemoteAddressValue() match { @@ -285,9 +284,11 @@ private[remote] class Association( current.uniqueRemoteAddressValue() match { case Some(old) => cancelStopQuarantinedTimer() - log.debug( - "Incarnation {} of association to [{}] with new UID [{}] (old UID [{}])", - newState.incarnation, peer.address, peer.uid, old.uid) + log.debug("Incarnation {} of association to [{}] with new UID [{}] (old UID [{}])", + newState.incarnation, + peer.address, + peer.uid, + old.uid) clearInboundCompression(old.uid) case None => // Failed, nothing to do @@ -305,8 +306,7 @@ private[remote] class Association( try { if (!transport.isShutdown && !isRemovedAfterQuarantined()) { if (associationState.isQuarantined()) { - log.debug("Send control message [{}] to quarantined [{}]", Logging.messageClassName(message), - remoteAddress) + log.debug("Send control message [{}] to quarantined [{}]", Logging.messageClassName(message), remoteAddress) setupStopQuarantinedTimer() } outboundControlIngress.sendControlMessage(message) @@ -334,9 +334,11 @@ private[remote] class Association( val reason = if (removed) "removed unused quarantined association" else s"overflow of send queue, size [$qSize]" - log.debug( - "Dropping message [{}] from [{}] to [{}] due to {}", - Logging.messageClassName(message), sender.getOrElse(deadletters), recipient.getOrElse(recipient), reason) + log.debug("Dropping message [{}] from [{}] to [{}] due to {}", + Logging.messageClassName(message), + sender.getOrElse(deadletters), + recipient.getOrElse(recipient), + reason) } 
flightRecorder.hiFreq(Transport_SendQueueOverflow, queueIndex) deadletters ! env @@ -349,7 +351,9 @@ private[remote] class Association( // allow ActorSelectionMessage to pass through quarantine, to be able to establish interaction with new system if (message.isInstanceOf[ActorSelectionMessage] || !quarantined || messageIsClearSystemMessageDelivery) { if (quarantined && !messageIsClearSystemMessageDelivery) { - log.debug("Quarantine piercing attempt with message [{}] to [{}]", Logging.messageClassName(message), recipient.getOrElse("")) + log.debug("Quarantine piercing attempt with message [{}] to [{}]", + Logging.messageClassName(message), + recipient.getOrElse("")) setupStopQuarantinedTimer() } try { @@ -383,9 +387,11 @@ private[remote] class Association( case ShuttingDown => // silence it } } else if (log.isDebugEnabled) - log.debug( - "Dropping message [{}] from [{}] to [{}] due to quarantined system [{}]", - Logging.messageClassName(message), sender.getOrElse(deadletters), recipient.getOrElse(recipient), remoteAddress) + log.debug("Dropping message [{}] from [{}] to [{}] due to quarantined system [{}]", + Logging.messageClassName(message), + sender.getOrElse(deadletters), + recipient.getOrElse(recipient), + remoteAddress) } private def selectQueue(recipient: OptionVal[RemoteActorRef]): Int = { @@ -437,8 +443,7 @@ private[remote] class Association( var sent = 0 queues.iterator.filter(q => q.isEnabled && !q.isInstanceOf[LazyQueueWrapper]).foreach { queue => try { - val envelope = outboundEnvelopePool.acquire() - .init(OptionVal.None, msg, OptionVal.Some(replyTo)) + val envelope = outboundEnvelopePool.acquire().init(OptionVal.None, msg, OptionVal.Some(replyTo)) queue.offer(envelope) sent += 1 @@ -467,17 +472,21 @@ private[remote] class Association( if (swapState(current, newState)) { // quarantine state change was performed if (harmless) { - log.info( - "Association to [{}] having UID [{}] has been stopped. 
All " + - "messages to this UID will be delivered to dead letters. Reason: {}", - remoteAddress, u, reason) - transport.system.eventStream.publish(GracefulShutdownQuarantinedEvent(UniqueAddress(remoteAddress, u), reason)) + log.info("Association to [{}] having UID [{}] has been stopped. All " + + "messages to this UID will be delivered to dead letters. Reason: {}", + remoteAddress, + u, + reason) + transport.system.eventStream + .publish(GracefulShutdownQuarantinedEvent(UniqueAddress(remoteAddress, u), reason)) } else { log.warning( "Association to [{}] with UID [{}] is irrecoverably failed. UID is now quarantined and all " + - "messages to this UID will be delivered to dead letters. " + - "Remote ActorSystem must be restarted to recover from this situation. Reason: {}", - remoteAddress, u, reason) + "messages to this UID will be delivered to dead letters. " + + "Remote ActorSystem must be restarted to recover from this situation. Reason: {}", + remoteAddress, + u, + reason) transport.system.eventStream.publish(QuarantinedEvent(remoteAddress, u)) } flightRecorder.loFreq(Transport_Quarantined, s"$remoteAddress - $u") @@ -496,12 +505,16 @@ private[remote] class Association( case Some(peer) => log.info( "Quarantine of [{}] ignored due to non-matching UID, quarantine requested for [{}] but current is [{}]. {}", - remoteAddress, u, peer.uid, reason) + remoteAddress, + u, + peer.uid, + reason) send(ClearSystemMessageDelivery(current.incarnation - 1), OptionVal.None, OptionVal.None) case None => log.info( "Quarantine of [{}] ignored because handshake not completed, quarantine request was for old incarnation. 
{}", - remoteAddress, reason) + remoteAddress, + reason) } case None => log.warning("Quarantine of [{}] ignored because unknown UID", remoteAddress) @@ -676,7 +689,8 @@ private[remote] class Association( } val (queueValue, (control, completed)) = - Source.fromGraph(new SendQueue[OutboundEnvelope](sendQueuePostStop)) + Source + .fromGraph(new SendQueue[OutboundEnvelope](sendQueuePostStop)) .via(streamKillSwitch.flow) .toMat(transport.outboundControl(this))(Keep.both) .run()(materializer) @@ -690,15 +704,18 @@ private[remote] class Association( updateStreamMatValues(ControlQueueIndex, streamKillSwitch, completed) setupIdleTimer() - attachOutboundStreamRestart("Outbound control stream", ControlQueueIndex, controlQueueSize, - completed, () => runOutboundControlStream()) + attachOutboundStreamRestart("Outbound control stream", + ControlQueueIndex, + controlQueueSize, + completed, + () => runOutboundControlStream()) } private def getOrCreateQueueWrapper(queueIndex: Int, capacity: Int): QueueWrapper = { val unused = queuesVisibility // volatile read to see latest queues array queues(queueIndex) match { case existing: QueueWrapper => existing - case _ => + case _ => // use new queue for restarts QueueWrapperImpl(createQueue(capacity, queueIndex)) } @@ -717,7 +734,8 @@ private[remote] class Association( queuesVisibility = true // volatile write for visibility of the queues array val (queueValue, _, changeCompression, completed) = - Source.fromGraph(new SendQueue[OutboundEnvelope](sendToDeadLetters)) + Source + .fromGraph(new SendQueue[OutboundEnvelope](sendToDeadLetters)) .via(streamKillSwitch.flow) .viaMat(transport.outboundTestFlow(this))(Keep.both) .toMat(transport.outbound(this))({ case ((a, b), (c, d)) => (a, b, c, d) }) // "keep all, exploded" @@ -730,8 +748,11 @@ private[remote] class Association( outboundCompressionAccess = Vector(changeCompression) updateStreamMatValues(OrdinaryQueueIndex, streamKillSwitch, completed) - attachOutboundStreamRestart("Outbound message 
stream", OrdinaryQueueIndex, queueSize, - completed, () => runOutboundOrdinaryMessagesStream()) + attachOutboundStreamRestart("Outbound message stream", + OrdinaryQueueIndex, + queueSize, + completed, + () => runOutboundOrdinaryMessagesStream()) } else { log.debug("Starting outbound message stream to [{}] with [{}] lanes", remoteAddress, outboundLanes) @@ -742,7 +763,8 @@ private[remote] class Association( wrapper }.toVector - val lane = Source.fromGraph(new SendQueue[OutboundEnvelope](sendToDeadLetters)) + val lane = Source + .fromGraph(new SendQueue[OutboundEnvelope](sendToDeadLetters)) .via(streamKillSwitch.flow) .via(transport.outboundTestFlow(this)) .viaMat(transport.outboundLane(this))(Keep.both) @@ -753,14 +775,18 @@ private[remote] class Association( case ((q, c), w) => (q, c, w) } - val (mergeHub, transportSinkCompleted) = MergeHub.source[EnvelopeBuffer] + val (mergeHub, transportSinkCompleted) = MergeHub + .source[EnvelopeBuffer] .via(streamKillSwitch.flow) - .toMat(transport.outboundTransportSink(this))(Keep.both).run()(materializer) + .toMat(transport.outboundTransportSink(this))(Keep.both) + .run()(materializer) val values: Vector[(SendQueue.QueueValue[OutboundEnvelope], Encoder.OutboundCompressionAccess, Future[Done])] = - (0 until outboundLanes).iterator.map { _ => - lane.to(mergeHub).run()(materializer) - }.to(Vector) + (0 until outboundLanes).iterator + .map { _ => + lane.to(mergeHub).run()(materializer) + } + .to(Vector) val (queueValues, compressionAccessValues, laneCompletedValues) = values.unzip3 @@ -770,7 +796,9 @@ private[remote] class Association( Future.firstCompletedOf(laneCompletedValues).failed.foreach { reason => streamKillSwitch.abort(reason) } - (laneCompletedValues :+ transportSinkCompleted).foreach(_.foreach { _ => streamKillSwitch.shutdown() }) + (laneCompletedValues :+ transportSinkCompleted).foreach(_.foreach { _ => + streamKillSwitch.shutdown() + }) val allCompleted = Future.sequence(laneCompletedValues).flatMap(_ => 
transportSinkCompleted) @@ -783,8 +811,11 @@ private[remote] class Association( outboundCompressionAccess = compressionAccessValues - attachOutboundStreamRestart("Outbound message stream", OrdinaryQueueIndex, queueSize, - allCompleted, () => runOutboundOrdinaryMessagesStream()) + attachOutboundStreamRestart("Outbound message stream", + OrdinaryQueueIndex, + queueSize, + allCompleted, + () => runOutboundOrdinaryMessagesStream()) } } @@ -797,7 +828,8 @@ private[remote] class Association( val streamKillSwitch = KillSwitches.shared("outboundLargeMessagesKillSwitch") - val (queueValue, completed) = Source.fromGraph(new SendQueue[OutboundEnvelope](sendToDeadLetters)) + val (queueValue, completed) = Source + .fromGraph(new SendQueue[OutboundEnvelope](sendToDeadLetters)) .via(streamKillSwitch.flow) .via(transport.outboundTestFlow(this)) .toMat(transport.outboundLarge(this))(Keep.both) @@ -809,12 +841,18 @@ private[remote] class Association( queuesVisibility = true // volatile write for visibility of the queues array updateStreamMatValues(LargeQueueIndex, streamKillSwitch, completed) - attachOutboundStreamRestart("Outbound large message stream", LargeQueueIndex, largeQueueSize, - completed, () => runOutboundLargeMessagesStream()) + attachOutboundStreamRestart("Outbound large message stream", + LargeQueueIndex, + largeQueueSize, + completed, + () => runOutboundLargeMessagesStream()) } - private def attachOutboundStreamRestart(streamName: String, queueIndex: Int, queueCapacity: Int, - streamCompleted: Future[Done], restart: () => Unit): Unit = { + private def attachOutboundStreamRestart(streamName: String, + queueIndex: Int, + queueCapacity: Int, + streamCompleted: Future[Done], + restart: () => Unit): Unit = { def lazyRestart(): Unit = { flightRecorder.loFreq(Transport_RestartOutbound, s"$remoteAddress - $streamName") @@ -861,7 +899,6 @@ private[remote] class Association( // ActorSystem shutdown cancelAllTimers() case cause => - // it might have been stopped as expected due 
to idle or quarantine // for the TCP transport the exception is "converted" to StreamTcpException val stoppedIdle = cause == OutboundStreamStopIdleSignal || @@ -878,7 +915,7 @@ private[remote] class Association( if (queueIndex == ControlQueueIndex && !stoppedQuarantined) { cause match { case _: HandshakeTimeoutException => // ok, quarantine not possible without UID - case _ => + case _ => // Must quarantine in case all system messages haven't been delivered. // See also comment in the stoppedIdle case below quarantine(s"Outbound control stream restarted. $cause") @@ -889,27 +926,33 @@ private[remote] class Association( log.debug("{} to [{}] was idle and stopped. It will be restarted if used again.", streamName, remoteAddress) lazyRestart() } else if (stoppedQuarantined) { - log.debug("{} to [{}] was quarantined and stopped. It will be restarted if used again.", streamName, remoteAddress) + log.debug("{} to [{}] was quarantined and stopped. It will be restarted if used again.", + streamName, + remoteAddress) lazyRestart() } else if (bypassRestartCounter || restartCounter.restart()) { log.error(cause, "{} to [{}] failed. Restarting it. {}", streamName, remoteAddress, cause.getMessage) lazyRestart() } else { - log.error(cause, s"{} to [{}] failed and restarted {} times within {} seconds. Terminating system. ${cause.getMessage}", - streamName, remoteAddress, advancedSettings.OutboundMaxRestarts, advancedSettings.OutboundRestartTimeout.toSeconds) + log.error(cause, + s"{} to [{}] failed and restarted {} times within {} seconds. Terminating system. 
${cause.getMessage}", + streamName, + remoteAddress, + advancedSettings.OutboundMaxRestarts, + advancedSettings.OutboundRestartTimeout.toSeconds) cancelAllTimers() transport.system.terminate() } } } - private def updateStreamMatValues(streamId: Int, streamKillSwitch: SharedKillSwitch, + private def updateStreamMatValues(streamId: Int, + streamKillSwitch: SharedKillSwitch, completed: Future[Done]): Unit = { implicit val ec = materializer.executionContext - updateStreamMatValues( - streamId, - OutboundStreamMatValues(OptionVal.Some(streamKillSwitch), completed.recover { case _ => Done }, - stopping = OptionVal.None)) + updateStreamMatValues(streamId, OutboundStreamMatValues(OptionVal.Some(streamKillSwitch), completed.recover { + case _ => Done + }, stopping = OptionVal.None)) } @tailrec private def updateStreamMatValues(streamId: Int, values: OutboundStreamMatValues): Unit = { @@ -932,7 +975,7 @@ private[remote] class Association( private def getStopReason(streamId: Int): OptionVal[StopSignal] = { streamMatValues.get().get(streamId) match { case Some(OutboundStreamMatValues(_, _, stopping)) => stopping - case None => OptionVal.None + case None => OptionVal.None } } @@ -956,9 +999,11 @@ private[remote] class Association( */ def streamsCompleted: Future[Done] = { implicit val ec = materializer.executionContext - Future.sequence(streamMatValues.get().values.map { - case OutboundStreamMatValues(_, done, _) => done - }).map(_ => Done) + Future + .sequence(streamMatValues.get().values.map { + case OutboundStreamMatValues(_, done, _) => done + }) + .map(_ => Done) } override def toString: String = diff --git a/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala b/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala index b5cc1ab2b7..e3287a9629 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/Codecs.scala @@ -13,7 +13,12 @@ import scala.util.control.NonFatal import akka.Done import 
akka.actor.{ EmptyLocalActorRef, _ } import akka.event.Logging -import akka.remote.artery.Decoder.{ AdvertiseActorRefsCompressionTable, AdvertiseClassManifestsCompressionTable, InboundCompressionAccess, InboundCompressionAccessImpl } +import akka.remote.artery.Decoder.{ + AdvertiseActorRefsCompressionTable, + AdvertiseClassManifestsCompressionTable, + InboundCompressionAccess, + InboundCompressionAccessImpl +} import akka.remote.artery.SystemMessageDelivery.SystemMessageEnvelope import akka.remote.artery.compress.CompressionProtocol._ import akka.remote.artery.compress._ @@ -21,7 +26,7 @@ import akka.remote.{ MessageSerializer, OversizedPayloadException, RemoteActorRe import akka.serialization.{ Serialization, SerializationExtension, Serializers } import akka.stream._ import akka.stream.stage._ -import akka.util.{ OptionVal, Unsafe, unused } +import akka.util.{ unused, OptionVal, Unsafe } import akka.remote.artery.OutboundHandshake.HandshakeReq /** @@ -38,23 +43,25 @@ private[remote] object Encoder { /** * INTERNAL API */ -private[remote] class Encoder( - uniqueLocalAddress: UniqueAddress, - system: ExtendedActorSystem, - outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope], - bufferPool: EnvelopeBufferPool, - @unused streamId: Int, - debugLogSend: Boolean, - version: Byte) - extends GraphStageWithMaterializedValue[FlowShape[OutboundEnvelope, EnvelopeBuffer], Encoder.OutboundCompressionAccess] { +private[remote] class Encoder(uniqueLocalAddress: UniqueAddress, + system: ExtendedActorSystem, + outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope], + bufferPool: EnvelopeBufferPool, + @unused streamId: Int, + debugLogSend: Boolean, + version: Byte) + extends GraphStageWithMaterializedValue[FlowShape[OutboundEnvelope, EnvelopeBuffer], + Encoder.OutboundCompressionAccess] { import Encoder._ val in: Inlet[OutboundEnvelope] = Inlet("Artery.Encoder.in") val out: Outlet[EnvelopeBuffer] = Outlet("Artery.Encoder.out") val shape: FlowShape[OutboundEnvelope, 
EnvelopeBuffer] = FlowShape(in, out) - override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, OutboundCompressionAccess) = { - val logic = new GraphStageLogic(shape) with InHandler with OutHandler with StageLogging with OutboundCompressionAccess { + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (GraphStageLogic, OutboundCompressionAccess) = { + val logic = new GraphStageLogic(shape) with InHandler with OutHandler with StageLogging + with OutboundCompressionAccess { private val headerBuilder = HeaderBuilder.out() headerBuilder.setVersion(version) @@ -134,9 +141,10 @@ private[remote] class Encoder( envelope.byteBuffer.flip() if (debugLogSendEnabled) - log.debug( - "sending remote message [{}] to [{}] from [{}]", - outboundEnvelope.message, outboundEnvelope.recipient.getOrElse(""), outboundEnvelope.sender.getOrElse("")) + log.debug("sending remote message [{}] to [{}] from [{}]", + outboundEnvelope.message, + outboundEnvelope.recipient.getOrElse(""), + outboundEnvelope.sender.getOrElse("")) push(out, envelope) @@ -145,15 +153,18 @@ private[remote] class Encoder( bufferPool.release(envelope) outboundEnvelope.message match { case _: SystemMessageEnvelope => - log.error(e, "Failed to serialize system message [{}].", - Logging.messageClassName(outboundEnvelope.message)) + log.error(e, + "Failed to serialize system message [{}].", + Logging.messageClassName(outboundEnvelope.message)) throw e case _ if e.isInstanceOf[java.nio.BufferOverflowException] => - val reason = new OversizedPayloadException("Discarding oversized payload sent to " + + val reason = new OversizedPayloadException( + "Discarding oversized payload sent to " + s"${outboundEnvelope.recipient}: max allowed size ${envelope.byteBuffer.limit()} " + s"bytes. 
Message type [${Logging.messageClassName(outboundEnvelope.message)}].") - log.error(reason, "Failed to serialize oversized message [{}].", - Logging.messageClassName(outboundEnvelope.message)) + log.error(reason, + "Failed to serialize oversized message [{}].", + Logging.messageClassName(outboundEnvelope.message)) pull(in) case _ => log.error(e, "Failed to serialize message [{}].", Logging.messageClassName(outboundEnvelope.message)) @@ -199,10 +210,9 @@ private[remote] class Encoder( * INTERNAL API */ private[remote] object Decoder { - private final case class RetryResolveRemoteDeployedRecipient( - attemptsLeft: Int, - recipientPath: String, - inboundEnvelope: InboundEnvelope) + private final case class RetryResolveRemoteDeployedRecipient(attemptsLeft: Int, + recipientPath: String, + inboundEnvelope: InboundEnvelope) private object Tick @@ -214,8 +224,10 @@ private[remote] object Decoder { /** For testing purposes, usually triggered by timer from within Decoder operator. */ def runNextActorRefAdvertisement(): Unit + /** For testing purposes, usually triggered by timer from within Decoder operator. 
*/ def runNextClassManifestAdvertisement(): Unit + /** For testing purposes */ def currentCompressionOriginUids: Future[Set[Long]] @@ -233,15 +245,16 @@ private[remote] object Decoder { case ActorRefCompressionAdvertisementAck(from, tableVersion) => compressions.confirmActorRefCompressionAdvertisement(from.uid, tableVersion) } - private val confirmClassManifestCompressionAdvertisementCb = getAsyncCallback[ClassManifestCompressionAdvertisementAck] { - case ClassManifestCompressionAdvertisementAck(from, tableVersion) => - compressions.confirmClassManifestCompressionAdvertisement(from.uid, tableVersion) + private val confirmClassManifestCompressionAdvertisementCb = + getAsyncCallback[ClassManifestCompressionAdvertisementAck] { + case ClassManifestCompressionAdvertisementAck(from, tableVersion) => + compressions.confirmClassManifestCompressionAdvertisement(from.uid, tableVersion) + } + private val runNextActorRefAdvertisementCb = getAsyncCallback[Unit] { _ => + compressions.runNextActorRefAdvertisement() } - private val runNextActorRefAdvertisementCb = getAsyncCallback[Unit] { - _ => compressions.runNextActorRefAdvertisement() - } - private val runNextClassManifestAdvertisementCb = getAsyncCallback[Unit] { - _ => compressions.runNextClassManifestAdvertisement() + private val runNextClassManifestAdvertisementCb = getAsyncCallback[Unit] { _ => + compressions.runNextClassManifestAdvertisement() } private val currentCompressionOriginUidsCb = getAsyncCallback[Promise[Set[Long]]] { p => p.success(compressions.currentOriginUids) @@ -262,7 +275,8 @@ private[remote] object Decoder { /** * External call from ChangeInboundCompression materialized value */ - override def confirmClassManifestCompressionAdvertisementAck(ack: ClassManifestCompressionAdvertisementAck): Future[Done] = + override def confirmClassManifestCompressionAdvertisementAck( + ack: ClassManifestCompressionAdvertisementAck): Future[Done] = confirmClassManifestCompressionAdvertisementCb.invokeWithFeedback(ack) /** 
@@ -296,8 +310,9 @@ private[remote] object Decoder { /** * INTERNAL API */ -private[remote] final class ActorRefResolveCacheWithAddress(provider: RemoteActorRefProvider, localAddress: UniqueAddress) - extends LruBoundedCache[String, InternalActorRef](capacity = 1024, evictAgeThreshold = 600) { +private[remote] final class ActorRefResolveCacheWithAddress(provider: RemoteActorRefProvider, + localAddress: UniqueAddress) + extends LruBoundedCache[String, InternalActorRef](capacity = 1024, evictAgeThreshold = 600) { override protected def compute(k: String): InternalActorRef = provider.resolveActorRefWithLocalAddress(k, localAddress.address) @@ -310,14 +325,13 @@ private[remote] final class ActorRefResolveCacheWithAddress(provider: RemoteActo /** * INTERNAL API */ -private[remote] class Decoder( - inboundContext: InboundContext, - system: ExtendedActorSystem, - uniqueLocalAddress: UniqueAddress, - settings: ArterySettings, - inboundCompressions: InboundCompressions, - inEnvelopePool: ObjectPool[ReusableInboundEnvelope]) - extends GraphStageWithMaterializedValue[FlowShape[EnvelopeBuffer, InboundEnvelope], InboundCompressionAccess] { +private[remote] class Decoder(inboundContext: InboundContext, + system: ExtendedActorSystem, + uniqueLocalAddress: UniqueAddress, + settings: ArterySettings, + inboundCompressions: InboundCompressions, + inEnvelopePool: ObjectPool[ReusableInboundEnvelope]) + extends GraphStageWithMaterializedValue[FlowShape[EnvelopeBuffer, InboundEnvelope], InboundCompressionAccess] { import Decoder.Tick val in: Inlet[EnvelopeBuffer] = Inlet("Artery.Decoder.in") @@ -325,7 +339,8 @@ private[remote] class Decoder( val shape: FlowShape[EnvelopeBuffer, InboundEnvelope] = FlowShape(in, out) def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, InboundCompressionAccess) = { - val logic = new TimerGraphStageLogic(shape) with InboundCompressionAccessImpl with InHandler with OutHandler with StageLogging { + val logic = new 
TimerGraphStageLogic(shape) with InboundCompressionAccessImpl with InHandler with OutHandler + with StageLogging { import Decoder.RetryResolveRemoteDeployedRecipient override val compressions = inboundCompressions @@ -356,146 +371,156 @@ private[remote] class Decoder( case _ => // not advertising actor ref compressions } settings.Advanced.Compression.Manifests.AdvertisementInterval match { - case d: FiniteDuration => schedulePeriodicallyWithInitialDelay(AdvertiseClassManifestsCompressionTable, d, d) - case _ => // not advertising class manifest compressions + case d: FiniteDuration => + schedulePeriodicallyWithInitialDelay(AdvertiseClassManifestsCompressionTable, d, d) + case _ => // not advertising class manifest compressions } } } - override def onPush(): Unit = try { - messageCount += 1 - val envelope = grab(in) - headerBuilder.resetMessageFields() - envelope.parseHeader(headerBuilder) + override def onPush(): Unit = + try { + messageCount += 1 + val envelope = grab(in) + headerBuilder.resetMessageFields() + envelope.parseHeader(headerBuilder) - val originUid = headerBuilder.uid - val association = inboundContext.association(originUid) + val originUid = headerBuilder.uid + val association = inboundContext.association(originUid) - val recipient: OptionVal[InternalActorRef] = try headerBuilder.recipientActorRef(originUid) match { - case OptionVal.Some(ref) => - OptionVal(ref.asInstanceOf[InternalActorRef]) - case OptionVal.None if headerBuilder.recipientActorRefPath.isDefined => - resolveRecipient(headerBuilder.recipientActorRefPath.get) - case _ => - OptionVal.None - } catch { - case NonFatal(e) => - // probably version mismatch due to restarted system - log.warning("Couldn't decompress sender from originUid [{}]. 
{}", originUid, e) - OptionVal.None - } - - val sender: OptionVal[InternalActorRef] = try headerBuilder.senderActorRef(originUid) match { - case OptionVal.Some(ref) => - OptionVal(ref.asInstanceOf[InternalActorRef]) - case OptionVal.None if headerBuilder.senderActorRefPath.isDefined => - OptionVal(actorRefResolver.getOrCompute(headerBuilder.senderActorRefPath.get)) - case _ => - OptionVal.None - } catch { - case NonFatal(e) => - // probably version mismatch due to restarted system - log.warning("Couldn't decompress sender from originUid [{}]. {}", originUid, e) - OptionVal.None - } - - val classManifestOpt = try headerBuilder.manifest(originUid) catch { - case NonFatal(e) => - // probably version mismatch due to restarted system - log.warning("Couldn't decompress manifest from originUid [{}]. {}", originUid, e) - OptionVal.None - } - - if ((recipient.isEmpty && headerBuilder.recipientActorRefPath.isEmpty && !headerBuilder.isNoRecipient) || - (sender.isEmpty && headerBuilder.senderActorRefPath.isEmpty && !headerBuilder.isNoSender)) { - log.debug("Dropping message for unknown recipient/sender. It was probably sent from system [{}] with compression " + - "table [{}] built for previous incarnation of the destination system, or it was compressed with a table " + - "that has already been discarded in the destination system.", originUid, - headerBuilder.inboundActorRefCompressionTableVersion) - pull(in) - } else if (classManifestOpt.isEmpty) { - log.debug("Dropping message with unknown manifest. 
It was probably sent from system [{}] with compression " + - "table [{}] built for previous incarnation of the destination system, or it was compressed with a table " + - "that has already been discarded in the destination system.", originUid, - headerBuilder.inboundActorRefCompressionTableVersion) - pull(in) - } else { - val classManifest = classManifestOpt.get - - if ((messageCount & heavyHitterMask) == 0) { - // --- hit refs and manifests for heavy-hitter counting - association match { - case OptionVal.Some(assoc) => - val remoteAddress = assoc.remoteAddress - sender match { - case OptionVal.Some(snd) => - compressions.hitActorRef(originUid, remoteAddress, snd, 1) - case OptionVal.None => - } - - recipient match { - case OptionVal.Some(rcp) => - compressions.hitActorRef(originUid, remoteAddress, rcp, 1) - case OptionVal.None => - } - - compressions.hitClassManifest(originUid, remoteAddress, classManifest, 1) - - case _ => - // we don't want to record hits for compression while handshake is still in progress. - log.debug("Decoded message but unable to record hits for compression as no remoteAddress known. No association yet?") - } - // --- end of hit refs and manifests for heavy-hitter counting + val recipient: OptionVal[InternalActorRef] = try headerBuilder.recipientActorRef(originUid) match { + case OptionVal.Some(ref) => + OptionVal(ref.asInstanceOf[InternalActorRef]) + case OptionVal.None if headerBuilder.recipientActorRefPath.isDefined => + resolveRecipient(headerBuilder.recipientActorRefPath.get) + case _ => + OptionVal.None + } catch { + case NonFatal(e) => + // probably version mismatch due to restarted system + log.warning("Couldn't decompress sender from originUid [{}]. 
{}", originUid, e) + OptionVal.None } - val decoded = inEnvelopePool.acquire().init( - recipient, - sender, - originUid, - headerBuilder.serializer, - classManifest, - headerBuilder.flags, - envelope, - association, - lane = 0) + val sender: OptionVal[InternalActorRef] = try headerBuilder.senderActorRef(originUid) match { + case OptionVal.Some(ref) => + OptionVal(ref.asInstanceOf[InternalActorRef]) + case OptionVal.None if headerBuilder.senderActorRefPath.isDefined => + OptionVal(actorRefResolver.getOrCompute(headerBuilder.senderActorRefPath.get)) + case _ => + OptionVal.None + } catch { + case NonFatal(e) => + // probably version mismatch due to restarted system + log.warning("Couldn't decompress sender from originUid [{}]. {}", originUid, e) + OptionVal.None + } - if (recipient.isEmpty && !headerBuilder.isNoRecipient) { + val classManifestOpt = try headerBuilder.manifest(originUid) + catch { + case NonFatal(e) => + // probably version mismatch due to restarted system + log.warning("Couldn't decompress manifest from originUid [{}]. {}", originUid, e) + OptionVal.None + } - // The remote deployed actor might not be created yet when resolving the - // recipient for the first message that is sent to it, best effort retry. - // However, if the retried resolve isn't successful the ref is banned and - // we will not do the delayed retry resolve again. The reason for that is - // if many messages are sent to such dead refs the resolve process will slow - // down other messages. 
- val recipientActorRefPath = headerBuilder.recipientActorRefPath.get - if (bannedRemoteDeployedActorRefs.contains(recipientActorRefPath)) { - - headerBuilder.recipientActorRefPath match { - case OptionVal.Some(path) => - val ref = actorRefResolver.getOrCompute(path) - if (ref.isInstanceOf[EmptyLocalActorRef]) log.warning( - "Message for banned (terminated, unresolved) remote deployed recipient [{}].", - recipientActorRefPath) - push(out, decoded.withRecipient(ref)) - case OptionVal.None => - log.warning( - "Dropping message for banned (terminated, unresolved) remote deployed recipient [{}].", - recipientActorRefPath) - pull(in) - } - - } else - scheduleOnce(RetryResolveRemoteDeployedRecipient( - retryResolveRemoteDeployedRecipientAttempts, - recipientActorRefPath, decoded), retryResolveRemoteDeployedRecipientInterval) + if ((recipient.isEmpty && headerBuilder.recipientActorRefPath.isEmpty && !headerBuilder.isNoRecipient) || + (sender.isEmpty && headerBuilder.senderActorRefPath.isEmpty && !headerBuilder.isNoSender)) { + log.debug( + "Dropping message for unknown recipient/sender. It was probably sent from system [{}] with compression " + + "table [{}] built for previous incarnation of the destination system, or it was compressed with a table " + + "that has already been discarded in the destination system.", + originUid, + headerBuilder.inboundActorRefCompressionTableVersion) + pull(in) + } else if (classManifestOpt.isEmpty) { + log.debug( + "Dropping message with unknown manifest. 
It was probably sent from system [{}] with compression " + + "table [{}] built for previous incarnation of the destination system, or it was compressed with a table " + + "that has already been discarded in the destination system.", + originUid, + headerBuilder.inboundActorRefCompressionTableVersion) + pull(in) } else { - push(out, decoded) + val classManifest = classManifestOpt.get + + if ((messageCount & heavyHitterMask) == 0) { + // --- hit refs and manifests for heavy-hitter counting + association match { + case OptionVal.Some(assoc) => + val remoteAddress = assoc.remoteAddress + sender match { + case OptionVal.Some(snd) => + compressions.hitActorRef(originUid, remoteAddress, snd, 1) + case OptionVal.None => + } + + recipient match { + case OptionVal.Some(rcp) => + compressions.hitActorRef(originUid, remoteAddress, rcp, 1) + case OptionVal.None => + } + + compressions.hitClassManifest(originUid, remoteAddress, classManifest, 1) + + case _ => + // we don't want to record hits for compression while handshake is still in progress. + log.debug( + "Decoded message but unable to record hits for compression as no remoteAddress known. No association yet?") + } + // --- end of hit refs and manifests for heavy-hitter counting + } + + val decoded = inEnvelopePool + .acquire() + .init(recipient, + sender, + originUid, + headerBuilder.serializer, + classManifest, + headerBuilder.flags, + envelope, + association, + lane = 0) + + if (recipient.isEmpty && !headerBuilder.isNoRecipient) { + + // The remote deployed actor might not be created yet when resolving the + // recipient for the first message that is sent to it, best effort retry. + // However, if the retried resolve isn't successful the ref is banned and + // we will not do the delayed retry resolve again. The reason for that is + // if many messages are sent to such dead refs the resolve process will slow + // down other messages. 
+ val recipientActorRefPath = headerBuilder.recipientActorRefPath.get + if (bannedRemoteDeployedActorRefs.contains(recipientActorRefPath)) { + + headerBuilder.recipientActorRefPath match { + case OptionVal.Some(path) => + val ref = actorRefResolver.getOrCompute(path) + if (ref.isInstanceOf[EmptyLocalActorRef]) + log.warning("Message for banned (terminated, unresolved) remote deployed recipient [{}].", + recipientActorRefPath) + push(out, decoded.withRecipient(ref)) + case OptionVal.None => + log.warning("Dropping message for banned (terminated, unresolved) remote deployed recipient [{}].", + recipientActorRefPath) + pull(in) + } + + } else + scheduleOnce( + RetryResolveRemoteDeployedRecipient(retryResolveRemoteDeployedRecipientAttempts, + recipientActorRefPath, + decoded), + retryResolveRemoteDeployedRecipientInterval) + } else { + push(out, decoded) + } } + } catch { + case NonFatal(e) => + log.warning("Dropping message due to: {}", e) + pull(in) } - } catch { - case NonFatal(e) => - log.warning("Dropping message due to: {}", e) - pull(in) - } private def resolveRecipient(path: String): OptionVal[InternalActorRef] = { actorRefResolver.getOrCompute(path) match { @@ -529,18 +554,19 @@ private[remote] class Decoder( tickTimestamp = now case AdvertiseActorRefsCompressionTable => - compressions.runNextActorRefAdvertisement() // TODO: optimise these operations, otherwise they stall the hotpath + compressions + .runNextActorRefAdvertisement() // TODO: optimise these operations, otherwise they stall the hotpath case AdvertiseClassManifestsCompressionTable => - compressions.runNextClassManifestAdvertisement() // TODO: optimise these operations, otherwise they stall the hotpath + compressions + .runNextClassManifestAdvertisement() // TODO: optimise these operations, otherwise they stall the hotpath case RetryResolveRemoteDeployedRecipient(attemptsLeft, recipientPath, inboundEnvelope) => resolveRecipient(recipientPath) match { case OptionVal.None => if (attemptsLeft > 0) - 
scheduleOnce(RetryResolveRemoteDeployedRecipient( - attemptsLeft - 1, - recipientPath, inboundEnvelope), retryResolveRemoteDeployedRecipientInterval) + scheduleOnce(RetryResolveRemoteDeployedRecipient(attemptsLeft - 1, recipientPath, inboundEnvelope), + retryResolveRemoteDeployedRecipientInterval) else { // No more attempts left. If the retried resolve isn't successful the ref is banned and // we will not do the delayed retry resolve again. The reason for that is @@ -572,10 +598,10 @@ private[remote] class Decoder( /** * INTERNAL API */ -private[remote] class Deserializer( - @unused inboundContext: InboundContext, - system: ExtendedActorSystem, - bufferPool: EnvelopeBufferPool) extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { +private[remote] class Deserializer(@unused inboundContext: InboundContext, + system: ExtendedActorSystem, + bufferPool: EnvelopeBufferPool) + extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { val in: Inlet[InboundEnvelope] = Inlet("Artery.Deserializer.in") val out: Outlet[InboundEnvelope] = Outlet("Artery.Deserializer.out") @@ -603,8 +629,12 @@ private[remote] class Deserializer( try { val startTime: Long = if (instruments.timeSerialization) System.nanoTime else 0 - val deserializedMessage = MessageSerializer.deserializeForArtery( - system, envelope.originUid, serialization, envelope.serializer, envelope.classManifest, envelope.envelopeBuffer) + val deserializedMessage = MessageSerializer.deserializeForArtery(system, + envelope.originUid, + serialization, + envelope.serializer, + envelope.classManifest, + envelope.envelopeBuffer) val envelopeWithMessage = envelope.withMessage(deserializedMessage) @@ -620,9 +650,11 @@ private[remote] class Deserializer( case OptionVal.Some(a) => a.remoteAddress case OptionVal.None => "unknown" } - log.warning( - "Failed to deserialize message from [{}] with serializer id [{}] and manifest [{}]. 
{}", - from, envelope.serializer, envelope.classManifest, e) + log.warning("Failed to deserialize message from [{}] with serializer id [{}] and manifest [{}]. {}", + from, + envelope.serializer, + envelope.classManifest, + e) pull(in) } finally { val buf = envelope.envelopeBuffer @@ -643,11 +675,11 @@ private[remote] class Deserializer( * that an application message arrives in the InboundHandshake operator before the * handshake is completed and then it would be dropped. */ -private[remote] class DuplicateHandshakeReq( - numberOfLanes: Int, - inboundContext: InboundContext, - system: ExtendedActorSystem, - bufferPool: EnvelopeBufferPool) extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { +private[remote] class DuplicateHandshakeReq(numberOfLanes: Int, + inboundContext: InboundContext, + system: ExtendedActorSystem, + bufferPool: EnvelopeBufferPool) + extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { val in: Inlet[InboundEnvelope] = Inlet("Artery.DuplicateHandshakeReq.in") val out: Outlet[InboundEnvelope] = Outlet("Artery.DuplicateHandshakeReq.out") @@ -671,7 +703,8 @@ private[remote] class DuplicateHandshakeReq( if (_serializerId == -1) { val serialization = SerializationExtension(system) val ser = serialization.serializerFor(classOf[HandshakeReq]) - _manifest = Serializers.manifestFor(ser, HandshakeReq(inboundContext.localAddress, inboundContext.localAddress.address)) + _manifest = + Serializers.manifestFor(ser, HandshakeReq(inboundContext.localAddress, inboundContext.localAddress.address)) _serializerId = ser.identifier } } diff --git a/akka-remote/src/main/scala/akka/remote/artery/Control.scala b/akka-remote/src/main/scala/akka/remote/artery/Control.scala index 981a17a46b..96c70d1b9f 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/Control.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/Control.scala @@ -80,7 +80,7 @@ private[remote] object InboundControlJunction { // messages for the stream callback 
private[InboundControlJunction] sealed trait CallbackMessage private[InboundControlJunction] final case class Attach(observer: ControlMessageObserver, done: Promise[Done]) - extends CallbackMessage + extends CallbackMessage private[InboundControlJunction] final case class Dettach(observer: ControlMessageObserver) extends CallbackMessage } @@ -88,7 +88,8 @@ private[remote] object InboundControlJunction { * INTERNAL API */ private[remote] class InboundControlJunction - extends GraphStageWithMaterializedValue[FlowShape[InboundEnvelope, InboundEnvelope], InboundControlJunction.ControlMessageSubject] { + extends GraphStageWithMaterializedValue[FlowShape[InboundEnvelope, InboundEnvelope], + InboundControlJunction.ControlMessageSubject] { import InboundControlJunction._ val in: Inlet[InboundEnvelope] = Inlet("InboundControlJunction.in") @@ -157,9 +158,10 @@ private[remote] object OutboundControlJunction { /** * INTERNAL API */ -private[remote] class OutboundControlJunction( - outboundContext: OutboundContext, outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope]) - extends GraphStageWithMaterializedValue[FlowShape[OutboundEnvelope, OutboundEnvelope], OutboundControlJunction.OutboundControlIngress] { +private[remote] class OutboundControlJunction(outboundContext: OutboundContext, + outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope]) + extends GraphStageWithMaterializedValue[FlowShape[OutboundEnvelope, OutboundEnvelope], + OutboundControlJunction.OutboundControlIngress] { import OutboundControlJunction._ val in: Inlet[OutboundEnvelope] = Inlet("OutboundControlJunction.in") val out: Outlet[OutboundEnvelope] = Outlet("OutboundControlJunction.out") @@ -167,7 +169,8 @@ private[remote] class OutboundControlJunction( override def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { - val logic = new GraphStageLogic(shape) with InHandler with OutHandler with StageLogging with OutboundControlIngress { + val logic = new GraphStageLogic(shape) with 
InHandler with OutHandler with StageLogging + with OutboundControlIngress { val sendControlMessageCallback = getAsyncCallback[ControlMessage](internalSendControlMessage) private val maxControlMessageBufferSize: Int = outboundContext.settings.Advanced.OutboundControlQueueSize @@ -201,8 +204,7 @@ private[remote] class OutboundControlJunction( } private def wrap(message: ControlMessage): OutboundEnvelope = - outboundEnvelopePool.acquire().init( - recipient = OptionVal.None, message = message, sender = OptionVal.None) + outboundEnvelopePool.acquire().init(recipient = OptionVal.None, message = message, sender = OptionVal.None) override def sendControlMessage(message: ControlMessage): Unit = sendControlMessageCallback.invoke(message) diff --git a/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala b/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala index 06e398bb2b..2fb31ccae9 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/EnvelopeBufferPool.scala @@ -49,6 +49,7 @@ private[remote] final class ByteFlag(val mask: Byte) extends AnyVal { def isEnabled(byteFlags: Byte): Boolean = (byteFlags.toInt & mask) != 0 override def toString = s"ByteFlag(${ByteFlag.binaryLeftPad(mask)})" } + /** * INTERNAL API */ @@ -148,11 +149,13 @@ private[remote] sealed trait HeaderBuilder { def uid: Long def setSenderActorRef(ref: ActorRef): Unit + /** * Retrive the compressed ActorRef by the compressionId carried by this header. * Returns `None` if ActorRef was not compressed, and then the literal [[senderActorRefPath]] should be used. */ def senderActorRef(originUid: Long): OptionVal[ActorRef] + /** * Retrive the raw literal actor path, instead of using the compressed value. * Returns `None` if ActorRef was compressed (!). To obtain the path in such case call [[senderActorRef]] and extract the path from it directly. 
@@ -166,11 +169,13 @@ private[remote] sealed trait HeaderBuilder { def isNoRecipient: Boolean def setRecipientActorRef(ref: ActorRef): Unit + /** * Retrive the compressed ActorRef by the compressionId carried by this header. * Returns `None` if ActorRef was not compressed, and then the literal [[recipientActorRefPath]] should be used. */ def recipientActorRef(originUid: Long): OptionVal[ActorRef] + /** * Retrive the raw literal actor path, instead of using the compressed value. * Returns `None` if ActorRef was compressed (!). To obtain the path in such case call [[recipientActorRefPath]] and extract the path from it directly. @@ -196,7 +201,7 @@ private[remote] sealed trait HeaderBuilder { * INTERNAL API */ private[remote] final class SerializationFormatCache - extends LruBoundedCache[ActorRef, String](capacity = 1024, evictAgeThreshold = 600) { + extends LruBoundedCache[ActorRef, String](capacity = 1024, evictAgeThreshold = 600) { override protected def compute(ref: ActorRef): String = Serialization.serializedActorPath(ref) @@ -210,10 +215,10 @@ private[remote] final class SerializationFormatCache /** * INTERNAL API */ -private[remote] final class HeaderBuilderImpl( - inboundCompression: InboundCompressions, - var _outboundActorRefCompression: CompressionTable[ActorRef], - var _outboundClassManifestCompression: CompressionTable[String]) extends HeaderBuilder { +private[remote] final class HeaderBuilderImpl(inboundCompression: InboundCompressions, + var _outboundActorRefCompression: CompressionTable[ActorRef], + var _outboundClassManifestCompression: CompressionTable[String]) + extends HeaderBuilder { import HeaderBuilder.DeadLettersCode private[this] val toSerializationFormat: SerializationFormatCache = new SerializationFormatCache @@ -355,9 +360,7 @@ private[remote] final class HeaderBuilderImpl( override def manifest(originUid: Long): OptionVal[String] = { if (_manifest ne null) OptionVal.Some(_manifest) else { - inboundCompression.decompressClassManifest( - 
originUid, - inboundClassManifestCompressionTableVersion, _manifestIdx) + inboundCompression.decompressClassManifest(originUid, inboundClassManifestCompressionTableVersion, _manifestIdx) } } @@ -367,16 +370,16 @@ private[remote] final class HeaderBuilderImpl( override def toString = "HeaderBuilderImpl(" + - "version:" + version + ", " + - "flags:" + ByteFlag.binaryLeftPad(flags) + ", " + - "UID:" + uid + ", " + - "_senderActorRef:" + _senderActorRef + ", " + - "_senderActorRefIdx:" + _senderActorRefIdx + ", " + - "_recipientActorRef:" + _recipientActorRef + ", " + - "_recipientActorRefIdx:" + _recipientActorRefIdx + ", " + - "_serializer:" + _serializer + ", " + - "_manifest:" + _manifest + ", " + - "_manifestIdx:" + _manifestIdx + ")" + "version:" + version + ", " + + "flags:" + ByteFlag.binaryLeftPad(flags) + ", " + + "UID:" + uid + ", " + + "_senderActorRef:" + _senderActorRef + ", " + + "_senderActorRefIdx:" + _senderActorRefIdx + ", " + + "_recipientActorRef:" + _recipientActorRef + ", " + + "_recipientActorRefIdx:" + _recipientActorRefIdx + ", " + + "_serializer:" + _serializer + ", " + + "_manifest:" + _manifest + ", " + + "_manifestIdx:" + _manifestIdx + ")" } @@ -454,7 +457,7 @@ private[remote] final class EnvelopeBuffer(val byteBuffer: ByteBuffer) { if (header.version > ArteryTransport.HighestVersion) throw new IllegalArgumentException( s"Incompatible protocol version [${header.version}], " + - s"highest known version for this node is [${ArteryTransport.HighestVersion}]") + s"highest known version for this node is [${ArteryTransport.HighestVersion}]") header.setFlags(byteBuffer.get(FlagsOffset)) // compression table versions (stored in the Tag) diff --git a/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala b/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala index 95a33b0e5c..1553ae8ee5 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala +++ 
b/akka-remote/src/main/scala/akka/remote/artery/FixedSizePartitionHub.scala @@ -11,14 +11,14 @@ import org.agrona.concurrent.OneToOneConcurrentArrayQueue /** * INTERNAL API */ -@InternalApi private[akka] class FixedSizePartitionHub[T]( - partitioner: T => Int, - lanes: Int, - bufferSize: Int) extends PartitionHub[T]( - // during tear down or restart it's possible that some streams have been removed - // and then we must drop elements (return -1) - () => (info, elem) => if (info.size < lanes) -1 else info.consumerIdByIdx(partitioner(elem)), - lanes, bufferSize - 1) { +@InternalApi private[akka] class FixedSizePartitionHub[T](partitioner: T => Int, lanes: Int, bufferSize: Int) + extends PartitionHub[T]( + // during tear down or restart it's possible that some streams have been removed + // and then we must drop elements (return -1) + () => + (info, elem) => if (info.size < lanes) -1 else info.consumerIdByIdx(partitioner(elem)), + lanes, + bufferSize - 1) { // -1 because of the Completed token override def createQueue(): PartitionHub.Internal.PartitionQueue = @@ -29,7 +29,8 @@ import org.agrona.concurrent.OneToOneConcurrentArrayQueue /** * INTERNAL API */ -@InternalApi private[akka] class FixedSizePartitionQueue(lanes: Int, capacity: Int) extends PartitionHub.Internal.PartitionQueue { +@InternalApi private[akka] class FixedSizePartitionQueue(lanes: Int, capacity: Int) + extends PartitionHub.Internal.PartitionQueue { private val queues = { val arr = new Array[OneToOneConcurrentArrayQueue[AnyRef]](lanes) diff --git a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorder.scala b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorder.scala index a4e0080c3b..4e2e3d2a6c 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorder.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorder.scala @@ -137,12 +137,11 @@ private[remote] object RollingEventLogSection { /** * INTERNAL API */ -private[remote] class RollingEventLogSection( - 
fileChannel: FileChannel, - offset: Long, - entryCount: Long, - logBufferSize: Long, - recordSize: Int) { +private[remote] class RollingEventLogSection(fileChannel: FileChannel, + offset: Long, + entryCount: Long, + logBufferSize: Long, + recordSize: Int) { import RollingEventLogSection._ require(entryCount > 0, "entryCount must be greater than 0") @@ -285,7 +284,8 @@ private[remote] final case class SnapshotInProgress(latch: CountDownLatch) exten /** * INTERNAL API */ -private[remote] class FlightRecorder(val fileChannel: FileChannel) extends AtomicReference[FlightRecorderStatus](Running) { +private[remote] class FlightRecorder(val fileChannel: FileChannel) + extends AtomicReference[FlightRecorderStatus](Running) { import FlightRecorder._ private[this] val globalSection = new MappedResizeableBuffer(fileChannel, 0, GlobalSectionSize) @@ -294,26 +294,23 @@ private[remote] class FlightRecorder(val fileChannel: FileChannel) extends Atomi require((SnapshotCount & (SnapshotCount - 1)) == 0, "SnapshotCount must be power of two") private[this] val SnapshotMask = SnapshotCount - 1 private[this] val alertLogs = - new RollingEventLogSection( - fileChannel = fileChannel, - offset = AlertSectionOffset, - entryCount = AlertWindow, - logBufferSize = AlertLogSize, - recordSize = AlertRecordSize) + new RollingEventLogSection(fileChannel = fileChannel, + offset = AlertSectionOffset, + entryCount = AlertWindow, + logBufferSize = AlertLogSize, + recordSize = AlertRecordSize) private[this] val loFreqLogs = - new RollingEventLogSection( - fileChannel = fileChannel, - offset = LoFreqSectionOffset, - entryCount = LoFreqWindow, - logBufferSize = LoFreqLogSize, - recordSize = LoFreqRecordSize) + new RollingEventLogSection(fileChannel = fileChannel, + offset = LoFreqSectionOffset, + entryCount = LoFreqWindow, + logBufferSize = LoFreqLogSize, + recordSize = LoFreqRecordSize) private[this] val hiFreqLogs = - new RollingEventLogSection( - fileChannel = fileChannel, - offset = 
HiFreqSectionOffset, - entryCount = HiFreqWindow, - logBufferSize = HiFreqLogSize, - recordSize = HiFreqRecordSize) + new RollingEventLogSection(fileChannel = fileChannel, + offset = HiFreqSectionOffset, + entryCount = HiFreqWindow, + logBufferSize = HiFreqLogSize, + recordSize = HiFreqRecordSize) // No need for volatile, guarded by atomic CAS and set @volatile private var currentLog = 0 diff --git a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala index bbc19eb1d4..f7a03c4b3d 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderEvents.scala @@ -69,63 +69,57 @@ private[remote] object FlightRecorderEvents { val TcpInbound_Received = 173 // Used for presentation of the entries in the flight recorder - lazy val eventDictionary = Map( - Transport_MediaDriverStarted -> "Transport: Media driver started", - Transport_Started -> "Transport: started", - Transport_AeronErrorLogStarted -> "Transport: Aeron error log started", - Transport_TaskRunnerStarted -> "Transport: Task runner started", - Transport_UniqueAddressSet -> "Transport: Unique address set", - Transport_MaterializerStarted -> "Transport: Materializer started", - Transport_StartupFinished -> "Transport: Startup finished", - Transport_OnAvailableImage -> "Transport: onAvailableImage", - Transport_KillSwitchPulled -> "Transport: KillSwitch pulled", - Transport_Stopped -> "Transport: Stopped", - Transport_AeronErrorLogTaskStopped -> "Transport: Aeron errorLog task stopped", - Transport_MediaFileDeleted -> "Transport: Media file deleted", - Transport_FlightRecorderClose -> "Transport: Flight recorder closed", - Transport_SendQueueOverflow -> "Transport: Send queue overflow", - Transport_StopIdleOutbound -> "Transport: Remove idle outbound", - Transport_Quarantined -> "Transport: Quarantined association", - 
Transport_RemovedQuarantined -> "Transport: Removed idle quarantined association", - Transport_RestartOutbound -> "Transport: Restart outbound", - Transport_RestartInbound -> "Transport: Restart outbound", - - // Aeron Sink events - AeronSink_Started -> "AeronSink: Started", - AeronSink_TaskRunnerRemoved -> "AeronSink: Task runner removed", - AeronSink_PublicationClosed -> "AeronSink: Publication closed", - AeronSink_Stopped -> "AeronSink: Stopped", - AeronSink_EnvelopeGrabbed -> "AeronSink: Envelope grabbed", - AeronSink_EnvelopeOffered -> "AeronSink: Envelope offered", - AeronSink_GaveUpEnvelope -> "AeronSink: Gave up envelope", - AeronSink_DelegateToTaskRunner -> "AeronSink: Delegate to task runner", - AeronSink_ReturnFromTaskRunner -> "AeronSink: Return from task runner", - - // Aeron Source events - AeronSource_Started -> "AeronSource: Started", - AeronSource_Stopped -> "AeronSource: Stopped", - AeronSource_Received -> "AeronSource: Received", - AeronSource_DelegateToTaskRunner -> "AeronSource: Delegate to task runner", - AeronSource_ReturnFromTaskRunner -> "AeronSource: Return from task runner", - - // Compression events - Compression_CompressedActorRef -> "Compression: Compressed ActorRef", - Compression_AllocatedActorRefCompressionId -> "Compression: Allocated ActorRef compression id", - Compression_CompressedManifest -> "Compression: Compressed manifest", - Compression_AllocatedManifestCompressionId -> "Compression: Allocated manifest compression id", - Compression_Inbound_RunActorRefAdvertisement -> "InboundCompression: Run class manifest compression advertisement", - Compression_Inbound_RunClassManifestAdvertisement -> "InboundCompression: Run class manifest compression advertisement", - - // TCP outbound events - TcpOutbound_Connected -> "TCP out: Connected", - TcpOutbound_Sent -> "TCP out: Sent message", - - // TCP inbound events - TcpInbound_Bound -> "TCP in: Bound", - TcpInbound_Unbound -> "TCP in: Unbound", - TcpInbound_Connected -> "TCP in: New 
connection", - TcpInbound_Received -> "TCP in: Received message" - - ).map { case (int, str) => int.toLong -> str } + lazy val eventDictionary = Map(Transport_MediaDriverStarted -> "Transport: Media driver started", + Transport_Started -> "Transport: started", + Transport_AeronErrorLogStarted -> "Transport: Aeron error log started", + Transport_TaskRunnerStarted -> "Transport: Task runner started", + Transport_UniqueAddressSet -> "Transport: Unique address set", + Transport_MaterializerStarted -> "Transport: Materializer started", + Transport_StartupFinished -> "Transport: Startup finished", + Transport_OnAvailableImage -> "Transport: onAvailableImage", + Transport_KillSwitchPulled -> "Transport: KillSwitch pulled", + Transport_Stopped -> "Transport: Stopped", + Transport_AeronErrorLogTaskStopped -> "Transport: Aeron errorLog task stopped", + Transport_MediaFileDeleted -> "Transport: Media file deleted", + Transport_FlightRecorderClose -> "Transport: Flight recorder closed", + Transport_SendQueueOverflow -> "Transport: Send queue overflow", + Transport_StopIdleOutbound -> "Transport: Remove idle outbound", + Transport_Quarantined -> "Transport: Quarantined association", + Transport_RemovedQuarantined -> "Transport: Removed idle quarantined association", + Transport_RestartOutbound -> "Transport: Restart outbound", + Transport_RestartInbound -> "Transport: Restart inbound", + // Aeron Sink events + AeronSink_Started -> "AeronSink: Started", + AeronSink_TaskRunnerRemoved -> "AeronSink: Task runner removed", + AeronSink_PublicationClosed -> "AeronSink: Publication closed", + AeronSink_Stopped -> "AeronSink: Stopped", + AeronSink_EnvelopeGrabbed -> "AeronSink: Envelope grabbed", + AeronSink_EnvelopeOffered -> "AeronSink: Envelope offered", + AeronSink_GaveUpEnvelope -> "AeronSink: Gave up envelope", + AeronSink_DelegateToTaskRunner -> "AeronSink: Delegate to task runner", + AeronSink_ReturnFromTaskRunner -> "AeronSink: Return from task runner", + // Aeron Source 
events + AeronSource_Started -> "AeronSource: Started", + AeronSource_Stopped -> "AeronSource: Stopped", + AeronSource_Received -> "AeronSource: Received", + AeronSource_DelegateToTaskRunner -> "AeronSource: Delegate to task runner", + AeronSource_ReturnFromTaskRunner -> "AeronSource: Return from task runner", + // Compression events + Compression_CompressedActorRef -> "Compression: Compressed ActorRef", + Compression_AllocatedActorRefCompressionId -> "Compression: Allocated ActorRef compression id", + Compression_CompressedManifest -> "Compression: Compressed manifest", + Compression_AllocatedManifestCompressionId -> "Compression: Allocated manifest compression id", + Compression_Inbound_RunActorRefAdvertisement -> "InboundCompression: Run actor ref compression advertisement", + Compression_Inbound_RunClassManifestAdvertisement -> "InboundCompression: Run class manifest compression advertisement", + // TCP outbound events + TcpOutbound_Connected -> "TCP out: Connected", + TcpOutbound_Sent -> "TCP out: Sent message", + // TCP inbound events + TcpInbound_Bound -> "TCP in: Bound", + TcpInbound_Unbound -> "TCP in: Unbound", + TcpInbound_Connected -> "TCP in: New connection", + TcpInbound_Received -> "TCP in: Received message").map { + case (int, str) => int.toLong -> str + } } diff --git a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderReader.scala b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderReader.scala index f4f8666984..f7a9a9fea1 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderReader.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/FlightRecorderReader.scala @@ -11,7 +11,7 @@ import java.time.Instant import org.agrona.concurrent.MappedResizeableBuffer -import scala.collection.{ SortedSet, immutable } +import scala.collection.{ immutable, SortedSet } /** * Internal API @@ -35,13 +35,12 @@ private[akka] object FlightRecorderReader { case object Live extends LogState case object Snapshot extends 
LogState - case class SectionParameters( - offset: Long, - sectionSize: Long, - logSize: Long, - window: Long, - recordSize: Long, - entriesPerRecord: Long) { + case class SectionParameters(offset: Long, + sectionSize: Long, + logSize: Long, + window: Long, + recordSize: Long, + entriesPerRecord: Long) { override def toString: String = s""" | offset = $offset @@ -54,29 +53,26 @@ private[akka] object FlightRecorderReader { """.stripMargin } - val AlertSectionParameters = SectionParameters( - offset = AlertSectionOffset, - sectionSize = AlertSectionSize, - logSize = AlertLogSize, - window = AlertWindow, - recordSize = AlertRecordSize, - entriesPerRecord = 1) + val AlertSectionParameters = SectionParameters(offset = AlertSectionOffset, + sectionSize = AlertSectionSize, + logSize = AlertLogSize, + window = AlertWindow, + recordSize = AlertRecordSize, + entriesPerRecord = 1) - val LoFreqSectionParameters = SectionParameters( - offset = LoFreqSectionOffset, - sectionSize = LoFreqSectionSize, - logSize = LoFreqLogSize, - window = LoFreqWindow, - recordSize = LoFreqRecordSize, - entriesPerRecord = 1) + val LoFreqSectionParameters = SectionParameters(offset = LoFreqSectionOffset, + sectionSize = LoFreqSectionSize, + logSize = LoFreqLogSize, + window = LoFreqWindow, + recordSize = LoFreqRecordSize, + entriesPerRecord = 1) - val HiFreqSectionParameters = SectionParameters( - offset = HiFreqSectionOffset, - sectionSize = HiFreqSectionSize, - logSize = HiFreqLogSize, - window = HiFreqWindow, - recordSize = HiFreqRecordSize, - entriesPerRecord = HiFreqBatchSize) + val HiFreqSectionParameters = SectionParameters(offset = HiFreqSectionOffset, + sectionSize = HiFreqSectionSize, + logSize = HiFreqLogSize, + window = HiFreqWindow, + recordSize = HiFreqRecordSize, + entriesPerRecord = HiFreqBatchSize) def dumpToStdout(flightRecorderFile: Path): Unit = { var raFile: RandomAccessFile = null @@ -91,7 +87,8 @@ private[akka] object FlightRecorderReader { val hiFreq: 
Seq[FlightRecorderReader#Entry] = reader.structure.hiFreqLog.logs.flatMap(_.compactEntries) val loFreq: Seq[FlightRecorderReader#Entry] = reader.structure.loFreqLog.logs.flatMap(_.richEntries) - implicit val ordering = Ordering.fromLessThan[FlightRecorderReader#Entry]((a, b) => a.timeStamp.isBefore(b.timeStamp)) + implicit val ordering = + Ordering.fromLessThan[FlightRecorderReader#Entry]((a, b) => a.timeStamp.isBefore(b.timeStamp)) val sorted = SortedSet[FlightRecorderReader#Entry](alerts: _*) ++ hiFreq ++ loFreq println("--- FLIGHT RECORDER LOG") @@ -155,7 +152,9 @@ private[akka] final class FlightRecorderReader(fileChannel: FileChannel) { fileBuffer.getBytes(recordStartOffset + 21, metadata) val entry = RichEntry( - timeStamp = Instant.ofEpochMilli(fileBuffer.getLong(recordStartOffset)).plusNanos(fileBuffer.getLong(recordStartOffset + 8)), + timeStamp = Instant + .ofEpochMilli(fileBuffer.getLong(recordStartOffset)) + .plusNanos(fileBuffer.getLong(recordStartOffset + 8)), dirty = fileBuffer.getLong(recordOffset) == RollingEventLogSection.Dirty, code = fileBuffer.getInt(recordStartOffset + 16), metadata = metadata) @@ -179,7 +178,8 @@ private[akka] final class FlightRecorderReader(fileChannel: FileChannel) { dirty = fileBuffer.getLong(recordOffset) == RollingEventLogSection.Dirty val entiresHeaderOffset = recordOffset + RollingEventLogSection.CommitEntrySize entriesLeft = fileBuffer.getLong(entiresHeaderOffset + HiFreqEntryCountFieldOffset) - timeStamp = Instant.ofEpochMilli(fileBuffer.getLong(entiresHeaderOffset)) + timeStamp = Instant + .ofEpochMilli(fileBuffer.getLong(entiresHeaderOffset)) .plusNanos(fileBuffer.getLong(entiresHeaderOffset + 8)) entryOffset = entiresHeaderOffset + 32 } @@ -189,11 +189,10 @@ private[akka] final class FlightRecorderReader(fileChannel: FileChannel) { override def next(): CompactEntry = { if (entriesLeft == -1L) readHeader() - val entry = CompactEntry( - timeStamp, - dirty, - code = fileBuffer.getLong(entryOffset), - param = 
fileBuffer.getLong(entryOffset + 8)) + val entry = CompactEntry(timeStamp, + dirty, + code = fileBuffer.getLong(entryOffset), + param = fileBuffer.getLong(entryOffset + 8)) entriesLeft -= 1 if (entriesLeft == 0) { diff --git a/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala b/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala index 14d06c0c77..25c6dcad9d 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/Handshake.scala @@ -15,7 +15,7 @@ import akka.stream.FlowShape import akka.stream.Inlet import akka.stream.Outlet import akka.stream.stage._ -import akka.util.{ OptionVal, unused } +import akka.util.{ unused, OptionVal } import akka.Done import akka.actor.Address @@ -48,15 +48,14 @@ private[remote] object OutboundHandshake { /** * INTERNAL API */ -private[remote] class OutboundHandshake( - @unused system: ActorSystem, - outboundContext: OutboundContext, - outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope], - timeout: FiniteDuration, - retryInterval: FiniteDuration, - injectHandshakeInterval: FiniteDuration, - livenessProbeInterval: Duration) - extends GraphStage[FlowShape[OutboundEnvelope, OutboundEnvelope]] { +private[remote] class OutboundHandshake(@unused system: ActorSystem, + outboundContext: OutboundContext, + outboundEnvelopePool: ObjectPool[ReusableOutboundEnvelope], + timeout: FiniteDuration, + retryInterval: FiniteDuration, + injectHandshakeInterval: FiniteDuration, + livenessProbeInterval: Duration) + extends GraphStage[FlowShape[OutboundEnvelope, OutboundEnvelope]] { val in: Inlet[OutboundEnvelope] = Inlet("OutboundHandshake.in") val out: Outlet[OutboundEnvelope] = Outlet("OutboundHandshake.out") @@ -160,17 +159,20 @@ private[remote] class OutboundHandshake( if (handshakeState == Completed && isAvailable(out) && pendingMessage.isEmpty) { val lastUsedDuration = (System.nanoTime() - outboundContext.associationState.lastUsedTimestamp.get()).nanos if 
(lastUsedDuration >= livenessProbeInterval) { - log.info( - "Association to [{}] has been idle for [{}] seconds, sending HandshakeReq to validate liveness", - outboundContext.remoteAddress, lastUsedDuration.toSeconds) + log.info("Association to [{}] has been idle for [{}] seconds, sending HandshakeReq to validate liveness", + outboundContext.remoteAddress, + lastUsedDuration.toSeconds) push(out, createHandshakeReqEnvelope()) } } } private def createHandshakeReqEnvelope(): OutboundEnvelope = { - outboundEnvelopePool.acquire().init( - recipient = OptionVal.None, message = HandshakeReq(outboundContext.localAddress, outboundContext.remoteAddress), sender = OptionVal.None) + outboundEnvelopePool + .acquire() + .init(recipient = OptionVal.None, + message = HandshakeReq(outboundContext.localAddress, outboundContext.remoteAddress), + sender = OptionVal.None) } private def handshakeCompleted(): Unit = { @@ -190,8 +192,9 @@ private[remote] class OutboundHandshake( if (isAvailable(out)) pushHandshakeReq() case HandshakeTimeout => - failStage(new HandshakeTimeoutException( - s"Handshake with [${outboundContext.remoteAddress}] did not complete within ${timeout.toMillis} ms")) + failStage( + new HandshakeTimeoutException( + s"Handshake with [${outboundContext.remoteAddress}] did not complete within ${timeout.toMillis} ms")) } setHandlers(in, out, this) @@ -202,7 +205,8 @@ private[remote] class OutboundHandshake( /** * INTERNAL API */ -private[remote] class InboundHandshake(inboundContext: InboundContext, inControlStream: Boolean) extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { +private[remote] class InboundHandshake(inboundContext: InboundContext, inControlStream: Boolean) + extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { val in: Inlet[InboundEnvelope] = Inlet("InboundHandshake.in") val out: Outlet[InboundEnvelope] = Outlet("InboundHandshake.out") override val shape: FlowShape[InboundEnvelope, InboundEnvelope] = FlowShape(in, out) @@ -213,25 
+217,30 @@ private[remote] class InboundHandshake(inboundContext: InboundContext, inControl // InHandler if (inControlStream) - setHandler(in, new InHandler { - override def onPush(): Unit = { - val env = grab(in) - env.message match { - case HandshakeReq(from, to) => onHandshakeReq(from, to) - case HandshakeRsp(from) => - // Touch the lastUsedTimestamp here also because when sending the extra low frequency HandshakeRsp - // the timestamp is not supposed to be updated when sending but when receiving reply, which confirms - // that the other system is alive. - inboundContext.association(from.address).associationState.lastUsedTimestamp.set(System.nanoTime()) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + val env = grab(in) + env.message match { + case HandshakeReq(from, to) => onHandshakeReq(from, to) + case HandshakeRsp(from) => + // Touch the lastUsedTimestamp here also because when sending the extra low frequency HandshakeRsp + // the timestamp is not supposed to be updated when sending but when receiving reply, which confirms + // that the other system is alive. + inboundContext + .association(from.address) + .associationState + .lastUsedTimestamp + .set(System.nanoTime()) - after(inboundContext.completeHandshake(from)) { - pull(in) - } - case _ => - onMessage(env) - } - } - }) + after(inboundContext.completeHandshake(from)) { + pull(in) + } + case _ => + onMessage(env) + } + } + }) else setHandler(in, new InHandler { override def onPush(): Unit = { @@ -251,13 +260,14 @@ private[remote] class InboundHandshake(inboundContext: InboundContext, inControl pull(in) } } else { - log.warning( - "Dropping Handshake Request from [{}] addressed to unknown local address [{}]. " + - "Local address is [{}]. Check that the sending system uses the same " + - "address to contact recipient system as defined in the " + - "'akka.remote.artery.canonical.hostname' of the recipient system. 
" + - "The name of the ActorSystem must also match.", - from, to, inboundContext.localAddress.address) + log.warning("Dropping Handshake Request from [{}] addressed to unknown local address [{}]. " + + "Local address is [{}]. Check that the sending system uses the same " + + "address to contact recipient system as defined in the " + + "'akka.remote.artery.canonical.hostname' of the recipient system. " + + "The name of the ActorSystem must also match.", + from, + to, + inboundContext.localAddress.address) pull(in) } @@ -284,11 +294,12 @@ private[remote] class InboundHandshake(inboundContext: InboundContext, inControl push(out, env) else { if (log.isDebugEnabled) - log.debug( - s"Dropping message [{}] from unknown system with UID [{}]. " + - "This system with UID [{}] was probably restarted. " + - "Messages will be accepted when new handshake has been completed.", - env.message.getClass.getName, env.originUid, inboundContext.localAddress.uid) + log.debug(s"Dropping message [{}] from unknown system with UID [{}]. " + + "This system with UID [{}] was probably restarted. " + + "Messages will be accepted when new handshake has been completed.", + env.message.getClass.getName, + env.originUid, + inboundContext.localAddress.uid) pull(in) } } diff --git a/akka-remote/src/main/scala/akka/remote/artery/ImmutableLongMap.scala b/akka-remote/src/main/scala/akka/remote/artery/ImmutableLongMap.scala index d0afcc0170..a71a8ccce7 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/ImmutableLongMap.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/ImmutableLongMap.scala @@ -24,8 +24,8 @@ private[akka] object ImmutableLongMap { * Keys and values are backed by arrays and lookup is performed with binary * search. It's intended for rather small (<1000) maps. 
*/ -private[akka] class ImmutableLongMap[A >: Null] private ( - private val keys: Array[Long], private val values: Array[A])(implicit t: ClassTag[A]) { +private[akka] class ImmutableLongMap[A >: Null] private (private val keys: Array[Long], private val values: Array[A])( + implicit t: ClassTag[A]) { val size: Int = keys.length diff --git a/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala b/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala index 962ecff2b6..5ed9d67c16 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/InboundEnvelope.scala @@ -13,18 +13,17 @@ import akka.actor.NoSerializationVerificationNeeded * INTERNAL API */ private[remote] object InboundEnvelope { + /** * Only used in tests */ - def apply( - recipient: OptionVal[InternalActorRef], - message: AnyRef, - sender: OptionVal[ActorRef], - originUid: Long, - association: OptionVal[OutboundContext]): InboundEnvelope = { + def apply(recipient: OptionVal[InternalActorRef], + message: AnyRef, + sender: OptionVal[ActorRef], + originUid: Long, + association: OptionVal[OutboundContext]): InboundEnvelope = { val env = new ReusableInboundEnvelope - env.init(recipient, sender, originUid, -1, "", 0, null, association, lane = 0) - .withMessage(message) + env.init(recipient, sender, originUid, -1, "", 0, null, association, lane = 0).withMessage(message) } } @@ -60,9 +59,11 @@ private[remote] trait InboundEnvelope extends NoSerializationVerificationNeeded * INTERNAL API */ private[remote] object ReusableInboundEnvelope { - def createObjectPool(capacity: Int) = new ObjectPool[ReusableInboundEnvelope]( - capacity, - create = () => new ReusableInboundEnvelope, clear = inEnvelope => inEnvelope.asInstanceOf[ReusableInboundEnvelope].clear()) + def createObjectPool(capacity: Int) = + new ObjectPool[ReusableInboundEnvelope](capacity, + create = () => new ReusableInboundEnvelope, + clear = inEnvelope => + 
inEnvelope.asInstanceOf[ReusableInboundEnvelope].clear()) } /** @@ -118,16 +119,15 @@ private[remote] final class ReusableInboundEnvelope extends InboundEnvelope { _lane = 0 } - def init( - recipient: OptionVal[InternalActorRef], - sender: OptionVal[ActorRef], - originUid: Long, - serializer: Int, - classManifest: String, - flags: Byte, - envelopeBuffer: EnvelopeBuffer, - association: OptionVal[OutboundContext], - lane: Int): InboundEnvelope = { + def init(recipient: OptionVal[InternalActorRef], + sender: OptionVal[ActorRef], + originUid: Long, + serializer: Int, + classManifest: String, + flags: Byte, + envelopeBuffer: EnvelopeBuffer, + association: OptionVal[OutboundContext], + lane: Int): InboundEnvelope = { _recipient = recipient _sender = sender _originUid = originUid @@ -148,7 +148,8 @@ private[remote] final class ReusableInboundEnvelope extends InboundEnvelope { override def copyForLane(lane: Int): InboundEnvelope = { val buf = if (envelopeBuffer eq null) null else envelopeBuffer.copy() val env = new ReusableInboundEnvelope - env.init(recipient, sender, originUid, serializer, classManifest, flags, buf, association, lane) + env + .init(recipient, sender, originUid, serializer, classManifest, flags, buf, association, lane) .withMessage(message) } diff --git a/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala b/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala index d8453d7591..cc52ebaf5e 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/InboundQuarantineCheck.scala @@ -18,7 +18,8 @@ import akka.actor.ActorSelectionMessage /** * INTERNAL API */ -private[remote] class InboundQuarantineCheck(inboundContext: InboundContext) extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { +private[remote] class InboundQuarantineCheck(inboundContext: InboundContext) + extends GraphStage[FlowShape[InboundEnvelope, 
InboundEnvelope]] { val in: Inlet[InboundEnvelope] = Inlet("InboundQuarantineCheck.in") val out: Outlet[InboundEnvelope] = Outlet("InboundQuarantineCheck.out") override val shape: FlowShape[InboundEnvelope, InboundEnvelope] = FlowShape(in, out) @@ -38,9 +39,10 @@ private[remote] class InboundQuarantineCheck(inboundContext: InboundContext) ext case OptionVal.Some(association) => if (association.associationState.isQuarantined(env.originUid)) { if (log.isDebugEnabled) - log.debug( - "Dropping message [{}] from [{}#{}] because the system is quarantined", - Logging.messageClassName(env.message), association.remoteAddress, env.originUid) + log.debug("Dropping message [{}] from [{}#{}] because the system is quarantined", + Logging.messageClassName(env.message), + association.remoteAddress, + env.originUid) // avoid starting outbound stream for heartbeats if (!env.message.isInstanceOf[Quarantined] && !isHeartbeat(env.message)) inboundContext.sendControl( @@ -53,9 +55,9 @@ private[remote] class InboundQuarantineCheck(inboundContext: InboundContext) ext } private def isHeartbeat(msg: Any): Boolean = msg match { - case _: HeartbeatMessage => true + case _: HeartbeatMessage => true case ActorSelectionMessage(_: HeartbeatMessage, _, _) => true - case _ => false + case _ => false } // OutHandler diff --git a/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala b/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala index 1fa016ccfc..2d984a7574 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/LruBoundedCache.scala @@ -23,7 +23,8 @@ private[akka] case class CacheStatistics(entries: Int, maxProbeDistance: Int, av * to kick out entires that are considered old. The implementation tries to keep the map close to full, only evicting * old entries when needed. 
*/ -private[akka] abstract class LruBoundedCache[K: ClassTag, V <: AnyRef: ClassTag](capacity: Int, evictAgeThreshold: Int) { +private[akka] abstract class LruBoundedCache[K: ClassTag, V <: AnyRef: ClassTag](capacity: Int, + evictAgeThreshold: Int) { require(capacity > 0, "Capacity must be larger than zero") require((capacity & (capacity - 1)) == 0, "Capacity must be power of two") require(evictAgeThreshold <= capacity, "Age threshold must be less than capacity.") @@ -184,9 +185,9 @@ private[akka] abstract class LruBoundedCache[K: ClassTag, V <: AnyRef: ClassTag] override def toString = s"LruBoundedCache(" + - s" values = ${values.mkString("[", ",", "]")}," + - s" hashes = ${hashes.map(_ & Mask).mkString("[", ",", "]")}," + - s" epochs = ${epochs.mkString("[", ",", "]")}," + - s" distances = ${hashes.indices.map(probeDistanceOf).mkString("[", ",", "]")}," + - s" $epoch)" + s" values = ${values.mkString("[", ",", "]")}," + + s" hashes = ${hashes.map(_ & Mask).mkString("[", ",", "]")}," + + s" epochs = ${epochs.mkString("[", ",", "]")}," + + s" distances = ${hashes.indices.map(probeDistanceOf).mkString("[", ",", "]")}," + + s" $epoch)" } diff --git a/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala b/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala index 55d1a240c9..f343a5bcd1 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/MessageDispatcher.scala @@ -20,9 +20,7 @@ import akka.util.OptionVal /** * INTERNAL API */ -private[remote] class MessageDispatcher( - system: ExtendedActorSystem, - provider: RemoteActorRefProvider) { +private[remote] class MessageDispatcher(system: ExtendedActorSystem, provider: RemoteActorRefProvider) { private val remoteDaemon = provider.remoteDaemon private val log = Logging.withMarker(system, getClass.getName) @@ -46,52 +44,57 @@ private[remote] class MessageDispatcher( case `remoteDaemon` => if (UntrustedMode) { - 
if (debugLogEnabled) log.debug( - LogMarker.Security, - "dropping daemon message [{}] in untrusted mode", - messageClassName(message)) + if (debugLogEnabled) + log.debug(LogMarker.Security, "dropping daemon message [{}] in untrusted mode", messageClassName(message)) } else { - if (LogReceive && debugLogEnabled) log.debug( - "received daemon message [{}] from [{}]", - message, senderOption.getOrElse(originAddress.getOrElse(""))) + if (LogReceive && debugLogEnabled) + log.debug("received daemon message [{}] from [{}]", + message, + senderOption.getOrElse(originAddress.getOrElse(""))) remoteDaemon ! message } case l @ (_: LocalRef | _: RepointableRef) if l.isLocal => - if (LogReceive && debugLogEnabled) log.debug( - "received message [{}] to [{}] from [{}]", - message, recipient, senderOption.getOrElse("")) + if (LogReceive && debugLogEnabled) + log.debug("received message [{}] to [{}] from [{}]", message, recipient, senderOption.getOrElse("")) message match { case sel: ActorSelectionMessage => if (UntrustedMode && (!TrustedSelectionPaths.contains(sel.elements.mkString("/", "/", "")) || - sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian)) { - if (debugLogEnabled) log.debug( - LogMarker.Security, - "operating in UntrustedMode, dropping inbound actor selection to [{}], " + - "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration", - sel.elements.mkString("/", "/", "")) + sel.msg.isInstanceOf[PossiblyHarmful] || l != provider.rootGuardian)) { + if (debugLogEnabled) + log.debug(LogMarker.Security, + "operating in UntrustedMode, dropping inbound actor selection to [{}], " + + "allow it by adding the path to 'akka.remote.trusted-selection-paths' configuration", + sel.elements.mkString("/", "/", "")) } else // run the receive logic for ActorSelectionMessage here to make sure it is not stuck on busy user actor ActorSelection.deliverSelection(l, sender, sel) case msg: PossiblyHarmful if UntrustedMode => - if (debugLogEnabled) 
log.debug( - LogMarker.Security, - "operating in UntrustedMode, dropping inbound PossiblyHarmful message of type [{}] to [{}] from [{}]", - messageClassName(msg), recipient, senderOption.getOrElse(originAddress.getOrElse(""))) + if (debugLogEnabled) + log.debug( + LogMarker.Security, + "operating in UntrustedMode, dropping inbound PossiblyHarmful message of type [{}] to [{}] from [{}]", + messageClassName(msg), + recipient, + senderOption.getOrElse(originAddress.getOrElse(""))) case msg: SystemMessage => l.sendSystemMessage(msg) case msg => l.!(msg)(sender) } case r @ (_: RemoteRef | _: RepointableRef) if !r.isLocal && !UntrustedMode => - if (LogReceive && debugLogEnabled) log.debug( - "received remote-destined message [{}] to [{}] from [{}]", - message, recipient, senderOption.getOrElse(originAddress.getOrElse(""))) + if (LogReceive && debugLogEnabled) + log.debug("received remote-destined message [{}] to [{}] from [{}]", + message, + recipient, + senderOption.getOrElse(originAddress.getOrElse(""))) // if it was originally addressed to us but is in fact remote from our point of view (i.e. 
remote-deployed) r.!(message)(sender) - case r => log.error( - "dropping message [{}] for unknown recipient [{}] from [{}]", - messageClassName(message), r, senderOption.getOrElse(originAddress.getOrElse(""))) + case r => + log.error("dropping message [{}] for unknown recipient [{}] from [{}]", + messageClassName(message), + r, + senderOption.getOrElse(originAddress.getOrElse(""))) } } diff --git a/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala b/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala index fc14ae8749..aef3d51919 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/OutboundEnvelope.scala @@ -13,10 +13,7 @@ import akka.util.OptionVal * INTERNAL API */ private[remote] object OutboundEnvelope { - def apply( - recipient: OptionVal[RemoteActorRef], - message: AnyRef, - sender: OptionVal[ActorRef]): OutboundEnvelope = { + def apply(recipient: OptionVal[RemoteActorRef], message: AnyRef, sender: OptionVal[ActorRef]): OutboundEnvelope = { val env = new ReusableOutboundEnvelope env.init(recipient, message, sender) } @@ -40,9 +37,10 @@ private[remote] trait OutboundEnvelope extends NoSerializationVerificationNeeded * INTERNAL API */ private[remote] object ReusableOutboundEnvelope { - def createObjectPool(capacity: Int) = new ObjectPool[ReusableOutboundEnvelope]( - capacity, - create = () => new ReusableOutboundEnvelope, clear = outEnvelope => outEnvelope.clear()) + def createObjectPool(capacity: Int) = + new ObjectPool[ReusableOutboundEnvelope](capacity, + create = () => new ReusableOutboundEnvelope, + clear = outEnvelope => outEnvelope.clear()) } /** @@ -71,10 +69,7 @@ private[remote] final class ReusableOutboundEnvelope extends OutboundEnvelope { _sender = OptionVal.None } - def init( - recipient: OptionVal[RemoteActorRef], - message: AnyRef, - sender: OptionVal[ActorRef]): OutboundEnvelope = { + def init(recipient: OptionVal[RemoteActorRef], 
message: AnyRef, sender: OptionVal[ActorRef]): OutboundEnvelope = { _recipient = recipient _message = message _sender = sender diff --git a/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala b/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala index 02f8d32796..254944a628 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/RemoteInstrument.scala @@ -11,7 +11,7 @@ import scala.collection.immutable import scala.util.control.NonFatal import akka.actor.{ ActorRef, ExtendedActorSystem } import akka.event.{ Logging, LoggingAdapter } -import akka.util.{ OptionVal, unused } +import akka.util.{ unused, OptionVal } import akka.util.ccompat._ /** @@ -27,6 +27,7 @@ import akka.util.ccompat._ * delegate to any shared instance it doesn't have to be thread-safe. */ abstract class RemoteInstrument { + /** * Instrument identifier. * @@ -93,10 +94,9 @@ abstract class RemoteInstrument { * }}} * */ -private[remote] final class RemoteInstruments( - private val system: ExtendedActorSystem, - private val log: LoggingAdapter, - _instruments: Vector[RemoteInstrument]) { +private[remote] final class RemoteInstruments(private val system: ExtendedActorSystem, + private val log: LoggingAdapter, + _instruments: Vector[RemoteInstrument]) { import RemoteInstruments._ def this(system: ExtendedActorSystem, log: LoggingAdapter) = this(system, log, RemoteInstruments.create(system, log)) @@ -122,9 +122,9 @@ private[remote] final class RemoteInstruments( serializeInstrument(instrument, oe, buffer) } catch { case NonFatal(t) => - log.debug( - "Skipping serialization of RemoteInstrument {} since it failed with {}", - instrument.identifier, t.getMessage) + log.debug("Skipping serialization of RemoteInstrument {} since it failed with {}", + instrument.identifier, + t.getMessage) buffer.position(rewindPos) } i += 1 @@ -145,11 +145,16 @@ private[remote] final class RemoteInstruments( } } - 
private def serializeInstrument(instrument: RemoteInstrument, outboundEnvelope: OutboundEnvelope, buffer: ByteBuffer): Unit = { + private def serializeInstrument(instrument: RemoteInstrument, + outboundEnvelope: OutboundEnvelope, + buffer: ByteBuffer): Unit = { val startPos = buffer.position() buffer.putInt(0) val dataPos = buffer.position() - instrument.remoteWriteMetadata(outboundEnvelope.recipient.orNull, outboundEnvelope.message, outboundEnvelope.sender.orNull, buffer) + instrument.remoteWriteMetadata(outboundEnvelope.recipient.orNull, + outboundEnvelope.message, + outboundEnvelope.sender.orNull, + buffer) val endPos = buffer.position() if (endPos == dataPos) { // if the instrument didn't write anything, then rewind to the start @@ -188,9 +193,9 @@ private[remote] final class RemoteInstruments( deserializeInstrument(instrument, inboundEnvelope, buffer) } catch { case NonFatal(t) => - log.debug( - "Skipping deserialization of RemoteInstrument {} since it failed with {}", - instrument.identifier, t.getMessage) + log.debug("Skipping deserialization of RemoteInstrument {} since it failed with {}", + instrument.identifier, + t.getMessage) } i += 1 } else if (key > identifier) { @@ -205,9 +210,9 @@ private[remote] final class RemoteInstruments( buffer.position(nextPos) } } else { - if (log.isDebugEnabled) log.debug( - "Skipping serialized data in message for RemoteInstrument(s) {} that has no local match", - remoteInstrumentIdIteratorRaw(buffer, endPos).mkString("[", ", ", "]")) + if (log.isDebugEnabled) + log.debug("Skipping serialized data in message for RemoteInstrument(s) {} that has no local match", + remoteInstrumentIdIteratorRaw(buffer, endPos).mkString("[", ", ", "]")) } } catch { case NonFatal(t) => @@ -217,8 +222,13 @@ private[remote] final class RemoteInstruments( } } - private def deserializeInstrument(instrument: RemoteInstrument, inboundEnvelope: InboundEnvelope, buffer: ByteBuffer): Unit = { - 
instrument.remoteReadMetadata(inboundEnvelope.recipient.orNull, inboundEnvelope.message, inboundEnvelope.sender.orNull, buffer) + private def deserializeInstrument(instrument: RemoteInstrument, + inboundEnvelope: InboundEnvelope, + buffer: ByteBuffer): Unit = { + instrument.remoteReadMetadata(inboundEnvelope.recipient.orNull, + inboundEnvelope.message, + inboundEnvelope.sender.orNull, + buffer) } def messageSent(outboundEnvelope: OutboundEnvelope, size: Int, time: Long): Unit = { @@ -237,8 +247,15 @@ private[remote] final class RemoteInstruments( messageSent(0) } - private def messageSentInstrument(instrument: RemoteInstrument, outboundEnvelope: OutboundEnvelope, size: Int, time: Long): Unit = { - instrument.remoteMessageSent(outboundEnvelope.recipient.orNull, outboundEnvelope.message, outboundEnvelope.sender.orNull, size, time) + private def messageSentInstrument(instrument: RemoteInstrument, + outboundEnvelope: OutboundEnvelope, + size: Int, + time: Long): Unit = { + instrument.remoteMessageSent(outboundEnvelope.recipient.orNull, + outboundEnvelope.message, + outboundEnvelope.sender.orNull, + size, + time) } def messageReceived(inboundEnvelope: InboundEnvelope, size: Int, time: Long): Unit = { @@ -257,8 +274,15 @@ private[remote] final class RemoteInstruments( messageRecieved(0) } - private def messageReceivedInstrument(instrument: RemoteInstrument, inboundEnvelope: InboundEnvelope, size: Int, time: Long): Unit = { - instrument.remoteMessageReceived(inboundEnvelope.recipient.orNull, inboundEnvelope.message, inboundEnvelope.sender.orNull, size, time) + private def messageReceivedInstrument(instrument: RemoteInstrument, + inboundEnvelope: InboundEnvelope, + size: Int, + time: Long): Unit = { + instrument.remoteMessageReceived(inboundEnvelope.recipient.orNull, + inboundEnvelope.message, + inboundEnvelope.sender.orNull, + size, + time) } private def remoteInstrumentIdIteratorRaw(buffer: ByteBuffer, endPos: Int): Iterator[Int] = { @@ -294,11 +318,16 @@ private[remote] 
object RemoteInstruments { val c = system.settings.config val path = "akka.remote.artery.advanced.instruments" import scala.collection.JavaConverters._ - c.getStringList(path).asScala.iterator.map { fqcn => - system - .dynamicAccess.createInstanceFor[RemoteInstrument](fqcn, Nil) - .orElse(system.dynamicAccess.createInstanceFor[RemoteInstrument](fqcn, List(classOf[ExtendedActorSystem] -> system))) - .get - }.to(immutable.Vector) + c.getStringList(path) + .asScala + .iterator + .map { fqcn => + system.dynamicAccess + .createInstanceFor[RemoteInstrument](fqcn, Nil) + .orElse(system.dynamicAccess + .createInstanceFor[RemoteInstrument](fqcn, List(classOf[ExtendedActorSystem] -> system))) + .get + } + .to(immutable.Vector) } } diff --git a/akka-remote/src/main/scala/akka/remote/artery/SendQueue.scala b/akka-remote/src/main/scala/akka/remote/artery/SendQueue.scala index 93af187d4b..a36f5dd84e 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/SendQueue.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/SendQueue.scala @@ -42,7 +42,7 @@ private[remote] object SendQueue { * INTERNAL API */ private[remote] final class SendQueue[T](postStopAction: Vector[T] => Unit) - extends GraphStageWithMaterializedValue[SourceShape[T], SendQueue.QueueValue[T]] { + extends GraphStageWithMaterializedValue[SourceShape[T], SendQueue.QueueValue[T]] { import SendQueue._ val out: Outlet[T] = Outlet("SendQueue.out") diff --git a/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala b/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala index d296c52c32..2302c286a7 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/SystemMessageDelivery.scala @@ -70,12 +70,11 @@ import akka.util.OptionVal /** * INTERNAL API */ -@InternalApi private[remote] class SystemMessageDelivery( - outboundContext: OutboundContext, - deadLetters: ActorRef, - resendInterval: 
FiniteDuration, - maxBufferSize: Int) - extends GraphStage[FlowShape[OutboundEnvelope, OutboundEnvelope]] { +@InternalApi private[remote] class SystemMessageDelivery(outboundContext: OutboundContext, + deadLetters: ActorRef, + resendInterval: FiniteDuration, + maxBufferSize: Int) + extends GraphStage[FlowShape[OutboundEnvelope, OutboundEnvelope]] { import SystemMessageDelivery._ @@ -166,9 +165,9 @@ import akka.util.OptionVal private val nackCallback = getAsyncCallback[Nack] { reply => if (reply.seqNo <= seqNo) { ack(reply.seqNo) - log.warning( - "Received negative acknowledgement of system message from [{}], highest acknowledged [{}]", - outboundContext.remoteAddress, reply.seqNo) + log.warning("Received negative acknowledgement of system message from [{}], highest acknowledged [{}]", + outboundContext.remoteAddress, + reply.seqNo) // Nack should be very rare (connection issue) so no urgency of resending, it will be resent // by the scheduled tick. } @@ -182,7 +181,7 @@ import akka.util.OptionVal @tailrec private def clearUnacknowledged(ackedSeqNo: Long): Unit = { if (!unacknowledged.isEmpty && - unacknowledged.peek().message.asInstanceOf[SystemMessageEnvelope].seqNo <= ackedSeqNo) { + unacknowledged.peek().message.asInstanceOf[SystemMessageEnvelope].seqNo <= ackedSeqNo) { unacknowledged.removeFirst() if (unacknowledged.isEmpty) cancelTimer(resendInterval) @@ -278,7 +277,7 @@ import akka.util.OptionVal if (!unacknowledged.isEmpty && (System.nanoTime() - ackTimestamp > giveUpAfterNanos)) throw new GaveUpSystemMessageException( s"Gave up sending system message to [${outboundContext.remoteAddress}] after " + - s"${outboundContext.settings.Advanced.GiveUpSystemMessageAfter.pretty}.") + s"${outboundContext.settings.Advanced.GiveUpSystemMessageAfter.pretty}.") } private def clear(): Unit = { @@ -321,7 +320,8 @@ import akka.util.OptionVal /** * INTERNAL API */ -@InternalApi private[remote] class SystemMessageAcker(inboundContext: InboundContext) extends 
GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { +@InternalApi private[remote] class SystemMessageAcker(inboundContext: InboundContext) + extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { import SystemMessageDelivery._ import SystemMessageAcker._ @@ -363,9 +363,10 @@ import akka.util.OptionVal push(out, unwrapped) } else if (n < expectedSeqNo) { if (log.isDebugEnabled) - log.debug( - "Deduplicate system message [{}] from [{}], expected [{}]", - n, fromRemoteAddressStr, expectedSeqNo) + log.debug("Deduplicate system message [{}] from [{}], expected [{}]", + n, + fromRemoteAddressStr, + expectedSeqNo) inboundContext.sendControl(ackReplyTo.address, Ack(expectedSeqNo - 1, localAddress)) pull(in) } else { @@ -377,7 +378,10 @@ import akka.util.OptionVal else "" log.warning( "Sending negative acknowledgement of system message [{}] from [{}], highest acknowledged [{}]{}", - n, fromRemoteAddressStr, expectedSeqNo - 1, maxNackReached) + n, + fromRemoteAddressStr, + expectedSeqNo - 1, + maxNackReached) } inboundContext.sendControl(ackReplyTo.address, Nack(expectedSeqNo - 1, localAddress)) pull(in) @@ -394,4 +398,3 @@ import akka.util.OptionVal setHandlers(in, out, this) } } - diff --git a/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala b/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala index 9b2acf01c0..0574e3c784 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/TestStage.scala @@ -18,6 +18,7 @@ import akka.util.OptionVal import akka.event.Logging object TestManagementCommands { + /** INTERNAL API */ @SerialVersionUID(1L) final case class FailInboundStreamOnce(ex: Throwable) @@ -56,6 +57,7 @@ private[remote] class SharedTestState { if (state.compareAndSet(current, current.copy(failInboundStream = Some(ex)))) () else failInboundStreamOnce(ex) } + /** * Get the exception to fail the inbound stream with and immediately reset the state to not-failed. 
* This is used to simulate a single failure on the stream, where a successful restart recovers operations. @@ -100,15 +102,13 @@ private[remote] class SharedTestState { /** * INTERNAL API */ -private[remote] final case class TestState( - blackholes: Map[Address, Set[Address]], - failInboundStream: Option[Throwable]) +private[remote] final case class TestState(blackholes: Map[Address, Set[Address]], failInboundStream: Option[Throwable]) /** * INTERNAL API */ private[remote] class OutboundTestStage(outboundContext: OutboundContext, state: SharedTestState) - extends GraphStage[FlowShape[OutboundEnvelope, OutboundEnvelope]] { + extends GraphStage[FlowShape[OutboundEnvelope, OutboundEnvelope]] { val in: Inlet[OutboundEnvelope] = Inlet("OutboundTestStage.in") val out: Outlet[OutboundEnvelope] = Outlet("OutboundTestStage.out") override val shape: FlowShape[OutboundEnvelope, OutboundEnvelope] = FlowShape(in, out) @@ -120,9 +120,9 @@ private[remote] class OutboundTestStage(outboundContext: OutboundContext, state: override def onPush(): Unit = { val env = grab(in) if (state.isBlackhole(outboundContext.localAddress.address, outboundContext.remoteAddress)) { - log.debug( - "dropping outbound message [{}] to [{}] because of blackhole", - Logging.messageClassName(env.message), outboundContext.remoteAddress) + log.debug("dropping outbound message [{}] to [{}] because of blackhole", + Logging.messageClassName(env.message), + outboundContext.remoteAddress) pull(in) // drop message } else push(out, env) @@ -140,7 +140,7 @@ private[remote] class OutboundTestStage(outboundContext: OutboundContext, state: * INTERNAL API */ private[remote] class InboundTestStage(inboundContext: InboundContext, state: SharedTestState) - extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { + extends GraphStage[FlowShape[InboundEnvelope, InboundEnvelope]] { val in: Inlet[InboundEnvelope] = Inlet("InboundTestStage.in") val out: Outlet[InboundEnvelope] = Outlet("InboundTestStage.out") override 
val shape: FlowShape[InboundEnvelope, InboundEnvelope] = FlowShape(in, out) @@ -162,9 +162,10 @@ private[remote] class InboundTestStage(inboundContext: InboundContext, state: Sh push(out, env) case OptionVal.Some(association) => if (state.isBlackhole(inboundContext.localAddress.address, association.remoteAddress)) { - log.debug( - "dropping inbound message [{}] from [{}] with UID [{}] because of blackhole", - Logging.messageClassName(env.message), association.remoteAddress, env.originUid) + log.debug("dropping inbound message [{}] from [{}] with UID [{}] because of blackhole", + Logging.messageClassName(env.message), + association.remoteAddress, + env.originUid) pull(in) // drop message } else push(out, env) @@ -179,4 +180,3 @@ private[remote] class InboundTestStage(inboundContext: InboundContext, state: Sh } } - diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala index c333ecadfd..4b1261b1a9 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSink.scala @@ -39,9 +39,14 @@ private[remote] object AeronSink { private val TimerCheckPeriod = 1 << 13 // 8192 private val TimerCheckMask = TimerCheckPeriod - 1 - private final class OfferTask(pub: Publication, var buffer: UnsafeBuffer, var msgSize: Int, onOfferSuccess: AsyncCallback[Unit], - giveUpAfter: Duration, onGiveUp: AsyncCallback[Unit], onPublicationClosed: AsyncCallback[Unit]) - extends (() => Boolean) { + private final class OfferTask(pub: Publication, + var buffer: UnsafeBuffer, + var msgSize: Int, + onOfferSuccess: AsyncCallback[Unit], + giveUpAfter: Duration, + onGiveUp: AsyncCallback[Unit], + onPublicationClosed: AsyncCallback[Unit]) + extends (() => Boolean) { val giveUpAfterNanos = giveUpAfter match { case f: FiniteDuration => f.toNanos case _ => -1L @@ -63,7 +68,8 @@ private[remote] object AeronSink { } else if (result 
== Publication.CLOSED) { onPublicationClosed.invoke(()) true - } else if (giveUpAfterNanos >= 0 && (n & TimerCheckMask) == 0 && (System.nanoTime() - startTime) > giveUpAfterNanos) { + } else if (giveUpAfterNanos >= 0 && (n & TimerCheckMask) == 0 && (System + .nanoTime() - startTime) > giveUpAfterNanos) { // the task is invoked by the spinning thread, only check nanoTime each 8192th invocation n = 0L onGiveUp.invoke(()) @@ -79,15 +85,14 @@ private[remote] object AeronSink { * INTERNAL API * @param channel eg. "aeron:udp?endpoint=localhost:40123" */ -private[remote] class AeronSink( - channel: String, - streamId: Int, - aeron: Aeron, - taskRunner: TaskRunner, - pool: EnvelopeBufferPool, - giveUpAfter: Duration, - flightRecorder: EventSink) - extends GraphStageWithMaterializedValue[SinkShape[EnvelopeBuffer], Future[Done]] { +private[remote] class AeronSink(channel: String, + streamId: Int, + aeron: Aeron, + taskRunner: TaskRunner, + pool: EnvelopeBufferPool, + giveUpAfter: Duration, + flightRecorder: EventSink) + extends GraphStageWithMaterializedValue[SinkShape[EnvelopeBuffer], Future[Done]] { import AeronSink._ import TaskRunner._ import FlightRecorderEvents._ @@ -108,8 +113,13 @@ private[remote] class AeronSink( private val spinning = 2 * taskRunner.idleCpuLevel private var backoffCount = spinning private var lastMsgSize = 0 - private val offerTask = new OfferTask(pub, null, lastMsgSize, getAsyncCallback(_ => taskOnOfferSuccess()), - giveUpAfter, getAsyncCallback(_ => onGiveUp()), getAsyncCallback(_ => onPublicationClosed())) + private val offerTask = new OfferTask(pub, + null, + lastMsgSize, + getAsyncCallback(_ => taskOnOfferSuccess()), + giveUpAfter, + getAsyncCallback(_ => onGiveUp()), + getAsyncCallback(_ => onPublicationClosed())) private val addOfferTask: Add = Add(offerTask) private var offerTaskInProgress = false diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala 
b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala index f5df9c6511..4764969c21 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/AeronSource.scala @@ -30,37 +30,40 @@ import scala.concurrent.{ Future, Promise } */ private[remote] object AeronSource { - private def pollTask(sub: Subscription, handler: MessageHandler, onMessage: AsyncCallback[EnvelopeBuffer]): () => Boolean = { - () => - { - handler.reset - sub.poll(handler.fragmentsHandler, 1) - val msg = handler.messageReceived - handler.reset() // for GC - if (msg ne null) { - onMessage.invoke(msg) - true - } else - false - } + private def pollTask(sub: Subscription, + handler: MessageHandler, + onMessage: AsyncCallback[EnvelopeBuffer]): () => Boolean = { () => + { + handler.reset + sub.poll(handler.fragmentsHandler, 1) + val msg = handler.messageReceived + handler.reset() // for GC + if (msg ne null) { + onMessage.invoke(msg) + true + } else + false + } } class MessageHandler(pool: EnvelopeBufferPool) { def reset(): Unit = messageReceived = null - private[remote] var messageReceived: EnvelopeBuffer = null // private to avoid scalac warning about exposing EnvelopeBuffer + private[remote] var messageReceived + : EnvelopeBuffer = null // private to avoid scalac warning about exposing EnvelopeBuffer val fragmentsHandler = new Fragments(data => messageReceived = data, pool) } - class Fragments(onMessage: EnvelopeBuffer => Unit, pool: EnvelopeBufferPool) extends FragmentAssembler(new FragmentHandler { - override def onFragment(aeronBuffer: DirectBuffer, offset: Int, length: Int, header: Header): Unit = { - val envelope = pool.acquire() - aeronBuffer.getBytes(offset, envelope.byteBuffer, length) - envelope.byteBuffer.flip() - onMessage(envelope) - } - }) + class Fragments(onMessage: EnvelopeBuffer => Unit, pool: EnvelopeBufferPool) + extends FragmentAssembler(new FragmentHandler { + override def 
onFragment(aeronBuffer: DirectBuffer, offset: Int, length: Int, header: Header): Unit = { + val envelope = pool.acquire() + aeronBuffer.getBytes(offset, envelope.byteBuffer, length) + envelope.byteBuffer.flip() + onMessage(envelope) + } + }) trait AeronLifecycle { def onUnavailableImage(sessionId: Int): Unit @@ -75,15 +78,14 @@ private[remote] object AeronSource { * @param spinning the amount of busy spinning to be done synchronously before deferring to the TaskRunner * when waiting for data */ -private[remote] class AeronSource( - channel: String, - streamId: Int, - aeron: Aeron, - taskRunner: TaskRunner, - pool: EnvelopeBufferPool, - flightRecorder: EventSink, - spinning: Int) - extends GraphStageWithMaterializedValue[SourceShape[EnvelopeBuffer], AeronSource.AeronLifecycle] { +private[remote] class AeronSource(channel: String, + streamId: Int, + aeron: Aeron, + taskRunner: TaskRunner, + pool: EnvelopeBufferPool, + flightRecorder: EventSink, + spinning: Int) + extends GraphStageWithMaterializedValue[SourceShape[EnvelopeBuffer], AeronSource.AeronLifecycle] { import AeronSource._ import TaskRunner._ @@ -125,12 +127,12 @@ private[remote] class AeronSource( override def postStop(): Unit = { taskRunner.command(Remove(addPollTask.task)) - try subscription.close() catch { + try subscription.close() + catch { case e: DriverTimeoutException => // media driver was shutdown log.debug("DriverTimeout when closing subscription. 
{}", e) - } finally - flightRecorder.loFreq(AeronSource_Stopped, channelMetadata) + } finally flightRecorder.loFreq(AeronSource_Stopped, channelMetadata) } // OutHandler diff --git a/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala index 1730ad1a61..fb1dc28fc3 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/aeron/ArteryAeronUdpTransport.scala @@ -51,7 +51,7 @@ import org.agrona.concurrent.status.CountersReader.MetaData * INTERNAL API */ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider) - extends ArteryTransport(_system, _provider) { + extends ArteryTransport(_system, _provider) { import AeronSource.AeronLifecycle import ArteryTransport._ import Decoder.InboundCompressionAccess @@ -138,7 +138,8 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro val maybeDriver = mediaDriver.getAndSet(None) maybeDriver.foreach { driver => // this is only for embedded media driver - try driver.close() catch { + try driver.close() + catch { case NonFatal(e) => // don't think driver.close will ever throw, but just in case log.warning("Couldn't close Aeron embedded media driver due to [{}]", e) @@ -151,9 +152,9 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro } } catch { case NonFatal(e) => - log.warning( - "Couldn't delete Aeron embedded media driver files in [{}] due to [{}]", - driver.aeronDirectoryName, e) + log.warning("Couldn't delete Aeron embedded media driver files in [{}] due to [{}]", + driver.aeronDirectoryName, + e) } } } @@ -199,12 +200,13 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro private def handleFatalError(cause: Throwable): Unit = { if (fatalErrorOccured.compareAndSet(false, true)) { if 
(!isShutdown) { - log.error(cause, "Fatal Aeron error {}. Have to terminate ActorSystem because it lost contact with the " + - "{} Aeron media driver. Possible configuration properties to mitigate the problem are " + - "'client-liveness-timeout' or 'driver-timeout'. {}", - Logging.simpleName(cause), - if (settings.Advanced.EmbeddedMediaDriver) "embedded" else "external", - cause) + log.error(cause, + "Fatal Aeron error {}. Have to terminate ActorSystem because it lost contact with the " + + "{} Aeron media driver. Possible configuration properties to mitigate the problem are " + + "'client-liveness-timeout' or 'driver-timeout'. {}", + Logging.simpleName(cause), + if (settings.Advanced.EmbeddedMediaDriver) "embedded" else "external", + cause) taskRunner.stop() aeronErrorLogTask.cancel() if (settings.LogAeronCounters) aeronCounterTask.cancel() @@ -273,10 +275,9 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro } } - override protected def outboundTransportSink( - outboundContext: OutboundContext, - streamId: Int, - bufferPool: EnvelopeBufferPool): Sink[EnvelopeBuffer, Future[Done]] = { + override protected def outboundTransportSink(outboundContext: OutboundContext, + streamId: Int, + bufferPool: EnvelopeBufferPool): Sink[EnvelopeBuffer, Future[Done]] = { val giveUpAfter = if (streamId == ControlStreamId) settings.Advanced.GiveUpSystemMessageAfter else settings.Advanced.GiveUpMessageAfter @@ -284,17 +285,29 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro // Aeron transport. Would be difficult to handle the Future[Done] materialized value. // If we want to stop for Aeron also it is probably easier to stop the publication inside the // AeronSink, i.e. not using a KillSwitch. 
- Sink.fromGraph(new AeronSink(outboundChannel(outboundContext.remoteAddress), streamId, aeron, taskRunner, - bufferPool, giveUpAfter, createFlightRecorderEventSink())) + Sink.fromGraph( + new AeronSink(outboundChannel(outboundContext.remoteAddress), + streamId, + aeron, + taskRunner, + bufferPool, + giveUpAfter, + createFlightRecorderEventSink())) } private def aeronSource(streamId: Int, pool: EnvelopeBufferPool): Source[EnvelopeBuffer, AeronSource.AeronLifecycle] = - Source.fromGraph(new AeronSource(inboundChannel, streamId, aeron, taskRunner, pool, - createFlightRecorderEventSink(), aeronSourceSpinningStrategy)) + Source.fromGraph( + new AeronSource(inboundChannel, + streamId, + aeron, + taskRunner, + pool, + createFlightRecorderEventSink(), + aeronSourceSpinningStrategy)) private def aeronSourceSpinningStrategy: Int = if (settings.Advanced.InboundLanes > 1 || // spinning was identified to be the cause of massive slowdowns with multiple lanes, see #21365 - settings.Advanced.IdleCpuLevel < 5) 0 // also don't spin for small IdleCpuLevels + settings.Advanced.IdleCpuLevel < 5) 0 // also don't spin for small IdleCpuLevels else 50 * settings.Advanced.IdleCpuLevel - 240 override protected def runInboundStreams(): Unit = { @@ -342,21 +355,30 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro val (resourceLife, compressionAccess, laneHub) = laneSource - .toMat(Sink.fromGraph(new FixedSizePartitionHub[InboundEnvelope](inboundLanePartitioner, inboundLanes, - settings.Advanced.InboundHubBufferSize)))({ case ((a, b), c) => (a, b, c) }) + .toMat( + Sink.fromGraph( + new FixedSizePartitionHub[InboundEnvelope](inboundLanePartitioner, + inboundLanes, + settings.Advanced.InboundHubBufferSize)))({ + case ((a, b), c) => (a, b, c) + }) .run()(materializer) val lane = inboundSink(envelopeBufferPool) val completedValues: Vector[Future[Done]] = - (0 until inboundLanes).iterator.map { _ => - laneHub.toMat(lane)(Keep.right).run()(materializer) - 
}.to(immutable.Vector) + (0 until inboundLanes).iterator + .map { _ => + laneHub.toMat(lane)(Keep.right).run()(materializer) + } + .to(immutable.Vector) import system.dispatcher // tear down the upstream hub part if downstream lane fails // lanes are not completed with success by themselves so we don't have to care about onSuccess - Future.firstCompletedOf(completedValues).failed.foreach { reason => laneKillSwitch.abort(reason) } + Future.firstCompletedOf(completedValues).failed.foreach { reason => + laneKillSwitch.abort(reason) + } val allCompleted = Future.sequence(completedValues).map(_ => Done) (resourceLife, compressionAccess, allCompleted) @@ -380,11 +402,13 @@ private[remote] class ArteryAeronUdpTransport(_system: ExtendedActorSystem, _pro attachInboundStreamRestart("Inbound large message stream", completed, () => runInboundLargeMessagesStream()) } - private def updateStreamMatValues(streamId: Int, aeronSourceLifecycle: AeronSource.AeronLifecycle, completed: Future[Done]): Unit = { + private def updateStreamMatValues(streamId: Int, + aeronSourceLifecycle: AeronSource.AeronLifecycle, + completed: Future[Done]): Unit = { implicit val ec = materializer.executionContext - updateStreamMatValues(streamId, InboundStreamMatValues[AeronLifecycle]( - aeronSourceLifecycle, - completed.recover { case _ => Done })) + updateStreamMatValues(streamId, InboundStreamMatValues[AeronLifecycle](aeronSourceLifecycle, completed.recover { + case _ => Done + })) } override protected def shutdownTransport(): Future[Done] = { diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionProtocol.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionProtocol.scala index 1de4f9d49c..6385614a4d 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionProtocol.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionProtocol.scala @@ -27,8 +27,9 @@ private[remote] object CompressionProtocol { * INTERNAL API * 
Sent by the "receiving" node after allocating a compression id to a given [[akka.actor.ActorRef]] */ - private[remote] final case class ActorRefCompressionAdvertisement(from: UniqueAddress, table: CompressionTable[ActorRef]) - extends CompressionAdvertisement[ActorRef] + private[remote] final case class ActorRefCompressionAdvertisement(from: UniqueAddress, + table: CompressionTable[ActorRef]) + extends CompressionAdvertisement[ActorRef] /** * INTERNAL API @@ -38,14 +39,16 @@ private[remote] object CompressionProtocol { * table. */ private[remote] final case class ActorRefCompressionAdvertisementAck(from: UniqueAddress, tableVersion: Byte) - extends ControlMessage with CompressionAckMessage + extends ControlMessage + with CompressionAckMessage /** * INTERNAL API * Sent by the "receiving" node after allocating a compression id to a given class manifest */ - private[remote] final case class ClassManifestCompressionAdvertisement(from: UniqueAddress, table: CompressionTable[String]) - extends CompressionAdvertisement[String] + private[remote] final case class ClassManifestCompressionAdvertisement(from: UniqueAddress, + table: CompressionTable[String]) + extends CompressionAdvertisement[String] /** * INTERNAL API @@ -55,10 +58,12 @@ private[remote] object CompressionProtocol { * table. 
*/ private[remote] final case class ClassManifestCompressionAdvertisementAck(from: UniqueAddress, tableVersion: Byte) - extends ControlMessage with CompressionAckMessage + extends ControlMessage + with CompressionAckMessage /** INTERNAL API */ private[remote] object Events { + /** INTERNAL API */ private[remote] sealed trait Event @@ -66,10 +71,12 @@ private[remote] object CompressionProtocol { final case class HeavyHitterDetected(key: Any, id: Int, count: Long) extends Event /** INTERNAL API */ - final case class ReceivedActorRefCompressionTable(from: UniqueAddress, table: CompressionTable[ActorRef]) extends Event + final case class ReceivedActorRefCompressionTable(from: UniqueAddress, table: CompressionTable[ActorRef]) + extends Event /** INTERNAL API */ - final case class ReceivedClassManifestCompressionTable(from: UniqueAddress, table: CompressionTable[String]) extends Event + final case class ReceivedClassManifestCompressionTable(from: UniqueAddress, table: CompressionTable[String]) + extends Event } diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala index c7fa31397f..fd8870f3c8 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/CompressionTable.scala @@ -27,9 +27,11 @@ private[remote] final case class CompressionTable[T](originUid: Long, version: B // TODO: these are some expensive sanity checks, about the numbers being consecutive, without gaps // TODO: we can remove them, make them re-map (not needed I believe though) val expectedGaplessSum = Integer.valueOf((dictionary.size * (dictionary.size + 1)) / 2) /* Dirichlet */ - require(dictionary.values.min == 0, "Compression table should start allocating from 0, yet lowest allocated id was " + dictionary.values.min) - require(dictionary.values.sum + dictionary.size == expectedGaplessSum, "Given 
compression map does not seem to be gap-less and starting from zero, " + - "which makes compressing it into an Array difficult, bailing out! Map was: " + dictionary) + require(dictionary.values.min == 0, + "Compression table should start allocating from 0, yet lowest allocated id was " + dictionary.values.min) + require(dictionary.values.sum + dictionary.size == expectedGaplessSum, + "Given compression map does not seem to be gap-less and starting from zero, " + + "which makes compressing it into an Array difficult, bailing out! Map was: " + dictionary) val tups = new Array[(Object, Int)](dictionary.size).asInstanceOf[Array[(T, Int)]] val ts = new Array[Object](dictionary.size).asInstanceOf[Array[T]] @@ -51,13 +53,14 @@ private[remote] final case class CompressionTable[T](originUid: Long, version: B DecompressionTable[T](originUid, version, ts) } } + /** INTERNAL API */ private[remote] object CompressionTable { final val NotCompressedId = -1 final val CompareBy2ndValue: Comparator[(Object, Int)] = new Comparator[(Object, Int)] { override def compare(o1: (Object, Int), o2: (Object, Int)): Int = - o1._2 compare o2._2 + o1._2.compare(o2._2) } def compareBy2ndValue[T]: Comparator[Tuple2[T, Int]] = CompareBy2ndValue.asInstanceOf[Comparator[(T, Int)]] diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala index cd1364d284..de5031e84e 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/DecompressionTable.scala @@ -15,7 +15,8 @@ private[remote] final case class DecompressionTable[T](originUid: Long, version: def get(idx: Int): T = { if (idx >= length) - throw new IllegalArgumentException(s"Attempted decompression of unknown id: [$idx]! " + + throw new IllegalArgumentException( + s"Attempted decompression of unknown id: [$idx]! 
" + s"Only $length ids allocated in table version [$version] for origin [$originUid].") table(idx) } @@ -26,7 +27,7 @@ private[remote] final case class DecompressionTable[T](originUid: Long, version: /** Writes complete table as String (heavy operation) */ override def toString = s"DecompressionTable($originUid, $version, " + - s"Map(${table.zipWithIndex.map({ case (t, i) => s"$i -> $t" }).mkString(",")}))" + s"Map(${table.zipWithIndex.map({ case (t, i) => s"$i -> $t" }).mkString(",")}))" } /** INTERNAL API */ diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala index 0d5e9c65b9..98bfab66ab 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/InboundCompressions.scala @@ -14,7 +14,7 @@ import akka.actor.Address import akka.event.Logging import akka.event.LoggingAdapter import akka.remote.artery._ -import akka.util.{ OptionVal, unused } +import akka.util.{ unused, OptionVal } import org.agrona.collections.Long2ObjectHashMap /** @@ -27,12 +27,14 @@ private[remote] trait InboundCompressions { def hitActorRef(originUid: Long, remote: Address, ref: ActorRef, n: Int): Unit def decompressActorRef(originUid: Long, tableVersion: Byte, idx: Int): OptionVal[ActorRef] def confirmActorRefCompressionAdvertisement(originUid: Long, tableVersion: Byte): Unit + /** Triggers compression advertisement via control message. */ def runNextActorRefAdvertisement(): Unit def hitClassManifest(originUid: Long, remote: Address, manifest: String, n: Int): Unit def decompressClassManifest(originUid: Long, tableVersion: Byte, idx: Int): OptionVal[String] def confirmClassManifestCompressionAdvertisement(originUid: Long, tableVersion: Byte): Unit + /** Triggers compression advertisement via control message. 
*/ def runNextClassManifestAdvertisement(): Unit @@ -51,11 +53,11 @@ private[remote] trait InboundCompressions { * One per incoming Aeron stream, actual compression tables are kept per-originUid and created on demand. * All access is via the Decoder stage. */ -private[remote] final class InboundCompressionsImpl( - system: ActorSystem, - inboundContext: InboundContext, - settings: ArterySettings.Compression, - eventSink: EventSink = IgnoreEventSink) extends InboundCompressions { +private[remote] final class InboundCompressionsImpl(system: ActorSystem, + inboundContext: InboundContext, + settings: ArterySettings.Compression, + eventSink: EventSink = IgnoreEventSink) + extends InboundCompressions { private[this] val _actorRefsIns = new Long2ObjectHashMap[InboundActorRefCompression]() private[this] val _inboundActorRefsLog = Logging(system, classOf[InboundActorRefCompression]) @@ -96,6 +98,7 @@ private[remote] final class InboundCompressionsImpl( case a => a.confirmAdvertisement(tableVersion, gaveUp = false) } } + /** Send compression table advertisement over control stream. Should be called from Decoder. */ override def runNextActorRefAdvertisement(): Unit = { val vs = _actorRefsIns.values.iterator() @@ -127,6 +130,7 @@ private[remote] final class InboundCompressionsImpl( case a => a.confirmAdvertisement(tableVersion, gaveUp = false) } } + /** Send compression table advertisement over control stream. Should be called from Decoder. */ override def runNextClassManifestAdvertisement(): Unit = { val vs = _classManifestsIns.values.iterator() @@ -166,37 +170,45 @@ private[remote] final class InboundCompressionsImpl( * It can be used to advertise a compression table. * If the association is not complete - we simply dont advertise the table, which is fine (handshake not yet complete). 
*/ -private[remote] final class InboundActorRefCompression( - log: LoggingAdapter, - settings: ArterySettings.Compression, - originUid: Long, - inboundContext: InboundContext, - heavyHitters: TopHeavyHitters[ActorRef]) - extends InboundCompression[ActorRef](log, settings, originUid, inboundContext, heavyHitters) { +private[remote] final class InboundActorRefCompression(log: LoggingAdapter, + settings: ArterySettings.Compression, + originUid: Long, + inboundContext: InboundContext, + heavyHitters: TopHeavyHitters[ActorRef]) + extends InboundCompression[ActorRef](log, settings, originUid, inboundContext, heavyHitters) { override def decompress(tableVersion: Byte, idx: Int): OptionVal[ActorRef] = super.decompressInternal(tableVersion, idx, 0) override def advertiseCompressionTable(outboundContext: OutboundContext, table: CompressionTable[ActorRef]): Unit = { - log.debug(s"Advertise {} compression [{}] to [{}#{}]", Logging.simpleName(getClass), table, outboundContext.remoteAddress, originUid) - outboundContext.sendControl(CompressionProtocol.ActorRefCompressionAdvertisement(inboundContext.localAddress, table)) + log.debug(s"Advertise {} compression [{}] to [{}#{}]", + Logging.simpleName(getClass), + table, + outboundContext.remoteAddress, + originUid) + outboundContext.sendControl( + CompressionProtocol.ActorRefCompressionAdvertisement(inboundContext.localAddress, table)) } } /** * INTERNAL API */ -private[remote] final class InboundManifestCompression( - log: LoggingAdapter, - settings: ArterySettings.Compression, - originUid: Long, - inboundContext: InboundContext, - heavyHitters: TopHeavyHitters[String]) - extends InboundCompression[String](log, settings, originUid, inboundContext, heavyHitters) { +private[remote] final class InboundManifestCompression(log: LoggingAdapter, + settings: ArterySettings.Compression, + originUid: Long, + inboundContext: InboundContext, + heavyHitters: TopHeavyHitters[String]) + extends InboundCompression[String](log, settings, originUid, 
inboundContext, heavyHitters) { override def advertiseCompressionTable(outboundContext: OutboundContext, table: CompressionTable[String]): Unit = { - log.debug(s"Advertise {} compression [{}] to [{}#{}]", Logging.simpleName(getClass), table, outboundContext.remoteAddress, originUid) - outboundContext.sendControl(CompressionProtocol.ClassManifestCompressionAdvertisement(inboundContext.localAddress, table)) + log.debug(s"Advertise {} compression [{}] to [{}#{}]", + Logging.simpleName(getClass), + table, + outboundContext.remoteAddress, + originUid) + outboundContext.sendControl( + CompressionProtocol.ClassManifestCompressionAdvertisement(inboundContext.localAddress, table)) } override def increment(remoteAddress: Address, value: String, n: Long): Unit = @@ -205,6 +217,7 @@ private[remote] final class InboundManifestCompression( override def decompress(incomingTableVersion: Byte, idx: Int): OptionVal[String] = decompressInternal(incomingTableVersion, idx, 0) } + /** * INTERNAL API */ @@ -213,12 +226,12 @@ private[remote] object InboundCompression { final val KeepOldTablesNumber = 3 // TODO could be configurable object Tables { - def empty[T] = Tables( - oldTables = List(DecompressionTable.disabled[T]), - activeTable = DecompressionTable.empty[T], - nextTable = DecompressionTable.empty[T].copy(version = 1), - advertisementInProgress = None, - keepOldTables = KeepOldTablesNumber) + def empty[T] = + Tables(oldTables = List(DecompressionTable.disabled[T]), + activeTable = DecompressionTable.empty[T], + nextTable = DecompressionTable.empty[T].copy(version = 1), + advertisementInProgress = None, + keepOldTables = KeepOldTablesNumber) } /** @@ -228,16 +241,16 @@ private[remote] object InboundCompression { * It starts with containing only a single "disabled" table (versioned as `DecompressionTable.DisabledVersion`), * and from there on continuously accumulates at most [[keepOldTables]] recently used tables. 
*/ - final case class Tables[T]( - oldTables: List[DecompressionTable[T]], - activeTable: DecompressionTable[T], - nextTable: DecompressionTable[T], - advertisementInProgress: Option[CompressionTable[T]], - keepOldTables: Int) { + final case class Tables[T](oldTables: List[DecompressionTable[T]], + activeTable: DecompressionTable[T], + nextTable: DecompressionTable[T], + advertisementInProgress: Option[CompressionTable[T]], + keepOldTables: Int) { def selectTable(version: Int): OptionVal[DecompressionTable[T]] = { if (activeTable.version == version) { - if (ArterySettings.Compression.Debug) println(s"[compress] Found table [version: ${version}], was [ACTIVE]${activeTable}") + if (ArterySettings.Compression.Debug) + println(s"[compress] Found table [version: ${version}], was [ACTIVE]${activeTable}") OptionVal.Some(activeTable) } else { @tailrec def find(tables: List[DecompressionTable[T]]): OptionVal[DecompressionTable[T]] = { @@ -253,9 +266,11 @@ private[remote] object InboundCompression { if (ArterySettings.Compression.Debug) { found match { case OptionVal.Some(t) => - println(s"[compress] Found table [version: ${version}], was [OLD][${t}], old tables: [${oldTables.map(_.version)}]") + println( + s"[compress] Found table [version: ${version}], was [OLD][${t}], old tables: [${oldTables.map(_.version)}]") case OptionVal.None => - println(s"[compress] Did not find table [version: ${version}], old tables: [${oldTables.map(_.version)}], activeTable: ${activeTable}, nextTable: ${nextTable}") + println(s"[compress] Did not find table [version: ${version}], old tables: [${oldTables + .map(_.version)}], activeTable: ${activeTable}, nextTable: ${nextTable}") } } found @@ -267,12 +282,11 @@ private[remote] object InboundCompression { if (version == 127) 0 else (version + 1).toByte - Tables( - oldTables = (activeTable :: oldTables).take(keepOldTables), - activeTable = nextTable, - nextTable = DecompressionTable.empty[T].copy(version = 
incrementTableVersion(nextTable.version)), - advertisementInProgress = None, - keepOldTables = keepOldTables) + Tables(oldTables = (activeTable :: oldTables).take(keepOldTables), + activeTable = nextTable, + nextTable = DecompressionTable.empty[T].copy(version = incrementTableVersion(nextTable.version)), + advertisementInProgress = None, + keepOldTables = keepOldTables) } } @@ -284,12 +298,11 @@ private[remote] object InboundCompression { * * Access to this class must be externally synchronised (e.g. by accessing it from only Actors or a GraphStage etc). */ -private[remote] abstract class InboundCompression[T >: Null]( - val log: LoggingAdapter, - val settings: ArterySettings.Compression, - val originUid: Long, - inboundContext: InboundContext, - val heavyHitters: TopHeavyHitters[T]) { +private[remote] abstract class InboundCompression[T >: Null](val log: LoggingAdapter, + val settings: ArterySettings.Compression, + val originUid: Long, + inboundContext: InboundContext, + val heavyHitters: TopHeavyHitters[T]) { private[this] var tables: InboundCompression.Tables[T] = InboundCompression.Tables.empty @@ -316,13 +329,15 @@ private[remote] abstract class InboundCompression[T >: Null]( */ @tailrec final def decompressInternal(incomingTableVersion: Byte, idx: Int, attemptCounter: Int): OptionVal[T] = { // effectively should never loop more than once, to avoid infinite recursion blow up eagerly - if (attemptCounter > 2) throw new IllegalStateException(s"Unable to decompress $idx from table $incomingTableVersion. Internal tables: $tables") + if (attemptCounter > 2) + throw new IllegalStateException( + s"Unable to decompress $idx from table $incomingTableVersion. 
Internal tables: $tables") val current = tables val activeVersion = current.activeTable.version def incomingVersionIsAdvertisementInProgress(incomingTableVersion: Byte): Boolean = current.advertisementInProgress.isDefined && - incomingTableVersion == current.advertisementInProgress.get.version + incomingTableVersion == current.advertisementInProgress.get.version if (incomingTableVersion == DecompressionTable.DisabledVersion) { // no compression, bail out early @@ -335,21 +350,23 @@ private[remote] abstract class InboundCompression[T >: Null]( else throw new UnknownCompressedIdException(idx) case _ if incomingVersionIsAdvertisementInProgress(incomingTableVersion) => - log.debug( - "Received first value from originUid [{}] compressed using the advertised compression table, " + - "flipping to it (version: {})", - originUid, current.nextTable.version) + log.debug("Received first value from originUid [{}] compressed using the advertised compression table, " + + "flipping to it (version: {})", + originUid, + current.nextTable.version) confirmAdvertisement(incomingTableVersion, gaveUp = false) decompressInternal(incomingTableVersion, idx, attemptCounter + 1) // recurse case _ => // which means that incoming version was > nextTable.version, which likely that // it is using a table that was built for previous incarnation of this system - log.warning( - "Inbound message from originUid [{}] is using unknown compression table version. " + - "It may have been sent with compression table built for previous incarnation of this system. " + - "Versions activeTable: {}, nextTable: {}, incomingTable: {}", - originUid, activeVersion, current.nextTable.version, incomingTableVersion) + log.warning("Inbound message from originUid [{}] is using unknown compression table version. " + + "It may have been sent with compression table built for previous incarnation of this system. 
" + + "Versions activeTable: {}, nextTable: {}, incomingTable: {}", + originUid, + activeVersion, + current.nextTable.version, + incomingTableVersion) OptionVal.None } } @@ -359,13 +376,16 @@ private[remote] abstract class InboundCompression[T >: Null]( tables.advertisementInProgress match { case Some(inProgress) if tableVersion == inProgress.version => tables = tables.startUsingNextTable() - log.debug( - "{} compression table version [{}] for originUid [{}]", - if (gaveUp) "Gave up" else "Confirmed", tableVersion, originUid) + log.debug("{} compression table version [{}] for originUid [{}]", + if (gaveUp) "Gave up" else "Confirmed", + tableVersion, + originUid) case Some(inProgress) if tableVersion != inProgress.version => - log.debug( - "{} compression table version [{}] for originUid [{}] but other version in progress [{}]", - if (gaveUp) "Gave up" else "Confirmed", tableVersion, originUid, inProgress.version) + log.debug("{} compression table version [{}] for originUid [{}] but other version in progress [{}]", + if (gaveUp) "Gave up" else "Confirmed", + tableVersion, + originUid, + inProgress.version) case None => // already confirmed } @@ -414,7 +434,9 @@ private[remote] abstract class InboundCompression[T >: Null]( resendCount = 0 advertiseCompressionTable(association, table) } else if (association.isOrdinaryMessageStreamActive()) { - log.debug("{} for originUid [{}] not changed, no need to advertise same.", Logging.simpleName(tables.activeTable), originUid) + log.debug("{} for originUid [{}] not changed, no need to advertise same.", + Logging.simpleName(tables.activeTable), + originUid) } case OptionVal.None => @@ -430,17 +452,19 @@ private[remote] abstract class InboundCompression[T >: Null]( inboundContext.association(originUid) match { case OptionVal.Some(association) => - log.debug( - "Advertisement in progress for originUid [{}] version [{}], resending [{}:{}]", - originUid, inProgress.version, resendCount, maxResendCount) + log.debug("Advertisement in 
progress for originUid [{}] version [{}], resending [{}:{}]", + originUid, + inProgress.version, + resendCount, + maxResendCount) advertiseCompressionTable(association, inProgress) // resend case OptionVal.None => } } else { // give up, it might be dead - log.debug( - "Advertisement in progress for originUid [{}] version [{}] but no confirmation after retries.", - originUid, inProgress.version) + log.debug("Advertisement in progress for originUid [{}] version [{}] but no confirmation after retries.", + originUid, + inProgress.version) confirmAdvertisement(inProgress.version, gaveUp = true) } } @@ -471,8 +495,8 @@ private[remote] abstract class InboundCompression[T >: Null]( * INTERNAL API */ private[akka] final class UnknownCompressedIdException(id: Long) - extends RuntimeException( - s"Attempted de-compress unknown id [$id]! " + + extends RuntimeException( + s"Attempted de-compress unknown id [$id]! " + s"This could happen if this node has started a new ActorSystem bound to the same address as previously, " + s"and previous messages from a remote system were still in flight (using an old compression table). " + s"The remote system is expected to drop the compression table and this system will advertise a new one.") diff --git a/akka-remote/src/main/scala/akka/remote/artery/compress/TopHeavyHitters.scala b/akka-remote/src/main/scala/akka/remote/artery/compress/TopHeavyHitters.scala index 6c7bd95777..5a28d91c98 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/compress/TopHeavyHitters.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/compress/TopHeavyHitters.scala @@ -222,7 +222,8 @@ private[remote] final class TopHeavyHitters[T >: Null](val max: Int)(implicit cl */ private def updateExistingHeavyHitter(foundHashIndex: Int, count: Long): Unit = { if (weights(foundHashIndex) > count) - throw new IllegalArgumentException(s"Weights can be only incremented or kept the same, not decremented. 
" + + throw new IllegalArgumentException( + s"Weights can be only incremented or kept the same, not decremented. " + s"Previous weight was [${weights(foundHashIndex)}], attempted to modify it to [$count].") weights(foundHashIndex) = count // we don't need to change `hashCode`, `heapIndex` or `item`, those remain the same // Position in the heap might have changed as count was incremented diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala index a1dae590f6..358940a05e 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/ArteryTcpTransport.scala @@ -62,9 +62,10 @@ private[remote] object ArteryTcpTransport { /** * INTERNAL API */ -private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider: RemoteActorRefProvider, +private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, + _provider: RemoteActorRefProvider, tlsEnabled: Boolean) - extends ArteryTransport(_system, _provider) { + extends ArteryTransport(_system, _provider) { import ArteryTransport._ import ArteryTcpTransport._ import FlightRecorderEvents._ @@ -84,12 +85,17 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider OptionVal.Some(p.sslEngineProvider(system)) case None => // load from config - OptionVal.Some(system.dynamicAccess.createInstanceFor[SSLEngineProvider]( - settings.SSLEngineProviderClassName, - List((classOf[ActorSystem], system))).recover { - case e => throw new ConfigurationException( - s"Could not create SSLEngineProvider [${settings.SSLEngineProviderClassName}]", e) - }.get) + OptionVal.Some( + system.dynamicAccess + .createInstanceFor[SSLEngineProvider](settings.SSLEngineProviderClassName, + List((classOf[ActorSystem], system))) + .recover { + case e => + throw new ConfigurationException( + s"Could not create SSLEngineProvider 
[${settings.SSLEngineProviderClassName}]", + e) + } + .get) } } else OptionVal.None @@ -97,10 +103,9 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider // nothing specific here } - override protected def outboundTransportSink( - outboundContext: OutboundContext, - streamId: Int, - bufferPool: EnvelopeBufferPool): Sink[EnvelopeBuffer, Future[Done]] = { + override protected def outboundTransportSink(outboundContext: OutboundContext, + streamId: Int, + bufferPool: EnvelopeBufferPool): Sink[EnvelopeBuffer, Future[Done]] = { implicit val sys: ActorSystem = system val afr = createFlightRecorderEventSink() @@ -112,17 +117,15 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider def connectionFlow: Flow[ByteString, ByteString, Future[Tcp.OutgoingConnection]] = if (tlsEnabled) { val sslProvider = sslEngineProvider.get - Tcp().outgoingTlsConnectionWithSSLEngine( - remoteAddress, - createSSLEngine = () => sslProvider.createClientSSLEngine(host, port), - connectTimeout = settings.Advanced.ConnectionTimeout, - verifySession = session => optionToTry(sslProvider.verifyClientSession(host, session))) + Tcp().outgoingTlsConnectionWithSSLEngine(remoteAddress, + createSSLEngine = () => sslProvider.createClientSSLEngine(host, port), + connectTimeout = settings.Advanced.ConnectionTimeout, + verifySession = session => + optionToTry(sslProvider.verifyClientSession(host, session))) } else { - Tcp() - .outgoingConnection( - remoteAddress, - halfClose = true, // issue https://github.com/akka/akka/issues/24392 if set to false - connectTimeout = settings.Advanced.ConnectionTimeout) + Tcp().outgoingConnection(remoteAddress, + halfClose = true, // issue https://github.com/akka/akka/issues/24392 if set to false + connectTimeout = settings.Advanced.ConnectionTimeout) } def connectionFlowWithRestart: Flow[ByteString, ByteString, NotUsed] = { @@ -132,10 +135,9 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, 
_provider Flow[ByteString] .via(Flow.lazyInitAsync(() => { // only open the actual connection if any new messages are sent - afr.loFreq( - TcpOutbound_Connected, - s"${outboundContext.remoteAddress.host.get}:${outboundContext.remoteAddress.port.get} " + - s"/ ${streamName(streamId)}") + afr.loFreq(TcpOutbound_Connected, + s"${outboundContext.remoteAddress.host.get}:${outboundContext.remoteAddress.port.get} " + + s"/ ${streamName(streamId)}") if (controlIdleKillSwitch.isDefined) outboundContext.asInstanceOf[Association].setControlIdleKillSwitch(controlIdleKillSwitch) Future.successful( @@ -150,9 +152,7 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider if (streamId == ControlStreamId) { // must replace the KillSwitch when restarted val controlIdleKillSwitch = KillSwitches.shared("outboundControlStreamIdleKillSwitch") - Flow[ByteString] - .via(controlIdleKillSwitch.flow) - .via(flow(OptionVal.Some(controlIdleKillSwitch))) + Flow[ByteString].via(controlIdleKillSwitch.flow).via(flow(OptionVal.Some(controlIdleKillSwitch))) } else { flow(OptionVal.None) } @@ -162,9 +162,10 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider // Restart of inner connection part important in control stream, since system messages // are buffered and resent from the outer SystemMessageDelivery stage. No maxRestarts limit for control // stream. For message stream it's best effort retry a few times. 
- RestartFlow.withBackoff[ByteString, ByteString]( - settings.Advanced.OutboundRestartBackoff, - settings.Advanced.OutboundRestartBackoff * 5, 0.1, maxRestarts)(flowFactory) + RestartFlow.withBackoff[ByteString, ByteString](settings.Advanced.OutboundRestartBackoff, + settings.Advanced.OutboundRestartBackoff * 5, + 0.1, + maxRestarts)(flowFactory) } Flow[EnvelopeBuffer] @@ -179,7 +180,9 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider TcpFraming.encodeFrameHeader(size) ++ bytes } .via(connectionFlowWithRestart) - .map(_ => throw new IllegalStateException(s"Unexpected incoming bytes in outbound connection to [${outboundContext.remoteAddress}]")) + .map(_ => + throw new IllegalStateException( + s"Unexpected incoming bytes in outbound connection to [${outboundContext.remoteAddress}]")) .toMat(Sink.ignore)(Keep.right) } @@ -210,11 +213,10 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider if (largeMessageChannelEnabled) runInboundLargeMessagesStream() else - ( - Flow[EnvelopeBuffer] - .map(_ => log.warning("Dropping large message, missing large-message-destinations configuration.")) - .to(Sink.ignore), - Promise[Done]().future) // never completed, not enabled + (Flow[EnvelopeBuffer] + .map(_ => log.warning("Dropping large message, missing large-message-destinations configuration.")) + .to(Sink.ignore), + Promise[Done]().future) // never completed, not enabled } // An inbound connection will only use one of the control, ordinary or large streams, but we have to @@ -223,22 +225,21 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider // decide where to attach it based on that byte. Then the streamId wouldn't have to be sent in each // frame. That was not chosen because it is more complicated to implement and might have more runtime // overhead. 
- inboundStream = - OptionVal.Some(Sink.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ - val partition = b.add(Partition[EnvelopeBuffer](3, env => { - env.streamId match { - case OrdinaryStreamId => 1 - case ControlStreamId => 0 - case LargeStreamId => 2 - case other => throw new IllegalArgumentException(s"Unexpected streamId [$other]") - } - })) - partition.out(0) ~> controlStream - partition.out(1) ~> ordinaryMessagesStream - partition.out(2) ~> largeMessagesStream - SinkShape(partition.in) + inboundStream = OptionVal.Some(Sink.fromGraph(GraphDSL.create() { implicit b => + import GraphDSL.Implicits._ + val partition = b.add(Partition[EnvelopeBuffer](3, env => { + env.streamId match { + case OrdinaryStreamId => 1 + case ControlStreamId => 0 + case LargeStreamId => 2 + case other => throw new IllegalArgumentException(s"Unexpected streamId [$other]") + } })) + partition.out(0) ~> controlStream + partition.out(1) ~> ordinaryMessagesStream + partition.out(2) ~> largeMessagesStream + SinkShape(partition.in) + })) // If something in the inboundConnectionFlow fails, e.g. framing, the connection will be teared down, // but other parts of the inbound streams don't have to restarted. 
@@ -259,16 +260,13 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider val connectionSource: Source[Tcp.IncomingConnection, Future[ServerBinding]] = if (tlsEnabled) { val sslProvider = sslEngineProvider.get - Tcp().bindTlsWithSSLEngine( - interface = bindHost, - port = bindPort, - createSSLEngine = () => sslProvider.createServerSSLEngine(bindHost, bindPort), - verifySession = session => optionToTry(sslProvider.verifyServerSession(bindHost, session))) + Tcp().bindTlsWithSSLEngine(interface = bindHost, + port = bindPort, + createSSLEngine = () => sslProvider.createServerSSLEngine(bindHost, bindPort), + verifySession = + session => optionToTry(sslProvider.verifyServerSession(bindHost, session))) } else { - Tcp().bind( - interface = bindHost, - port = bindPort, - halfClose = false) + Tcp().bind(interface = bindHost, port = bindPort, halfClose = false) } serverBinding = serverBinding match { @@ -276,16 +274,17 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider val afr = createFlightRecorderEventSink() val binding = connectionSource .to(Sink.foreach { connection => - afr.loFreq( - TcpInbound_Connected, - s"${connection.remoteAddress.getHostString}:${connection.remoteAddress.getPort}") + afr.loFreq(TcpInbound_Connected, + s"${connection.remoteAddress.getHostString}:${connection.remoteAddress.getPort}") connection.handleWith(inboundConnectionFlow) }) .run() .recoverWith { - case e => Future.failed(new RemoteTransportException( - s"Failed to bind TCP to [${localAddress.address.host.get}:${localAddress.address.port.get}] due to: " + - e.getMessage, e)) + case e => + Future.failed(new RemoteTransportException( + s"Failed to bind TCP to [${localAddress.address.host.get}:${localAddress.address.port.get}] due to: " + + e.getMessage, + e)) }(ExecutionContexts.sameThreadExecutionContext) // only on initial startup, when ActorSystem is starting @@ -308,7 +307,7 @@ private[remote] class ArteryTcpTransport(_system: 
ExtendedActorSystem, _provider inboundKillSwitch = KillSwitches.shared("inboundKillSwitch") val allStopped: Future[Done] = for { - _ <- controlStreamCompleted.recover { case _ => Done } + _ <- controlStreamCompleted.recover { case _ => Done } _ <- ordinaryMessagesStreamCompleted.recover { case _ => Done } _ <- if (largeMessageChannelEnabled) largeMessagesStreamCompleted.recover { case _ => Done } else Future.successful(Done) @@ -323,7 +322,9 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider if (isShutdown) throw ArteryTransport.ShuttingDown val (hub, ctrl, completed) = - MergeHub.source[EnvelopeBuffer].addAttributes(Attributes.logLevels(onFailure = LogLevels.Off)) + MergeHub + .source[EnvelopeBuffer] + .addAttributes(Attributes.logLevels(onFailure = LogLevels.Off)) .via(inboundKillSwitch.flow) .via(inboundFlow(settings, NoInboundCompressions)) .toMat(inboundControlSink)({ case (a, (c, d)) => (a, c, d) }) @@ -339,7 +340,9 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider val (inboundHub: Sink[EnvelopeBuffer, NotUsed], inboundCompressionAccess, completed) = if (inboundLanes == 1) { - MergeHub.source[EnvelopeBuffer].addAttributes(Attributes.logLevels(onFailure = LogLevels.Off)) + MergeHub + .source[EnvelopeBuffer] + .addAttributes(Attributes.logLevels(onFailure = LogLevels.Off)) .via(inboundKillSwitch.flow) .viaMat(inboundFlow(settings, _inboundCompressions))(Keep.both) .toMat(inboundSink(envelopeBufferPool))({ case ((a, b), c) => (a, b, c) }) @@ -349,7 +352,9 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider // TODO perhaps a few more things can be extracted and DRY with AeronUpdTransport.runInboundOrdinaryMessagesStream val laneKillSwitch = KillSwitches.shared("laneKillSwitch") val laneSource: Source[InboundEnvelope, (Sink[EnvelopeBuffer, NotUsed], InboundCompressionAccess)] = - MergeHub.source[EnvelopeBuffer].addAttributes(Attributes.logLevels(onFailure = 
LogLevels.Off)) + MergeHub + .source[EnvelopeBuffer] + .addAttributes(Attributes.logLevels(onFailure = LogLevels.Off)) .via(inboundKillSwitch.flow) .via(laneKillSwitch.flow) .viaMat(inboundFlow(settings, _inboundCompressions))(Keep.both) @@ -357,21 +362,30 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider val (inboundHub, compressionAccess, laneHub) = laneSource - .toMat(Sink.fromGraph(new FixedSizePartitionHub[InboundEnvelope](inboundLanePartitioner, inboundLanes, - settings.Advanced.InboundHubBufferSize)))({ case ((a, b), c) => (a, b, c) }) + .toMat( + Sink.fromGraph( + new FixedSizePartitionHub[InboundEnvelope](inboundLanePartitioner, + inboundLanes, + settings.Advanced.InboundHubBufferSize)))({ + case ((a, b), c) => (a, b, c) + }) .run()(materializer) val lane = inboundSink(envelopeBufferPool) val completedValues: Vector[Future[Done]] = - (0 until inboundLanes).iterator.map { _ => - laneHub.toMat(lane)(Keep.right).run()(materializer) - }.to(immutable.Vector) + (0 until inboundLanes).iterator + .map { _ => + laneHub.toMat(lane)(Keep.right).run()(materializer) + } + .to(immutable.Vector) import system.dispatcher // tear down the upstream hub part if downstream lane fails // lanes are not completed with success by themselves so we don't have to care about onSuccess - Future.firstCompletedOf(completedValues).failed.foreach { reason => laneKillSwitch.abort(reason) } + Future.firstCompletedOf(completedValues).failed.foreach { reason => + laneKillSwitch.abort(reason) + } val allCompleted = Future.sequence(completedValues).map(_ => Done) (inboundHub, compressionAccess, allCompleted) @@ -388,7 +402,9 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider if (isShutdown) throw ArteryTransport.ShuttingDown val (hub, completed) = - MergeHub.source[EnvelopeBuffer].addAttributes(Attributes.logLevels(onFailure = LogLevels.Off)) + MergeHub + .source[EnvelopeBuffer] + .addAttributes(Attributes.logLevels(onFailure = 
LogLevels.Off)) .via(inboundKillSwitch.flow) .via(inboundLargeFlow(settings)) .toMat(inboundSink(largeEnvelopeBufferPool))(Keep.both) @@ -401,9 +417,8 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider private def updateStreamMatValues(completed: Future[Done]): Unit = { implicit val ec: ExecutionContext = materializer.executionContext - updateStreamMatValues(ControlStreamId, InboundStreamMatValues[NotUsed]( - NotUsed, - completed.recover { case _ => Done })) + updateStreamMatValues(ControlStreamId, + InboundStreamMatValues[NotUsed](NotUsed, completed.recover { case _ => Done })) } override protected def shutdownTransport(): Future[Done] = { @@ -423,7 +438,8 @@ private[remote] class ArteryTcpTransport(_system: ExtendedActorSystem, _provider b <- binding _ <- b.unbind() } yield { - topLevelFlightRecorder.loFreq(TcpInbound_Bound, s"${localAddress.address.host.get}:${localAddress.address.port}") + topLevelFlightRecorder.loFreq(TcpInbound_Bound, + s"${localAddress.address.host.get}:${localAddress.address.port}") Done } case None => diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/SSLEngineProvider.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/SSLEngineProvider.scala index b35e35b547..b39ce64c6a 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/SSLEngineProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/SSLEngineProvider.scala @@ -64,13 +64,12 @@ class SslTransportException(message: String, cause: Throwable) extends RuntimeEx * * Subclass may override protected methods to replace certain parts, such as key and trust manager. 
*/ -@ApiMayChange class ConfigSSLEngineProvider( - protected val config: Config, - protected val log: MarkerLoggingAdapter) extends SSLEngineProvider { +@ApiMayChange class ConfigSSLEngineProvider(protected val config: Config, protected val log: MarkerLoggingAdapter) + extends SSLEngineProvider { - def this(system: ActorSystem) = this( - system.settings.config.getConfig("akka.remote.artery.ssl.config-ssl-engine"), - Logging.withMarker(system, classOf[ConfigSSLEngineProvider].getName)) + def this(system: ActorSystem) = + this(system.settings.config.getConfig("akka.remote.artery.ssl.config-ssl-engine"), + Logging.withMarker(system, classOf[ConfigSSLEngineProvider].getName)) val SSLKeyStore: String = config.getString("key-store") val SSLTrustStore: String = config.getString("trust-store") @@ -88,10 +87,11 @@ class SslTransportException(message: String, cause: Throwable) extends RuntimeEx if (HostnameVerification) log.debug("TLS/SSL hostname verification is enabled.") else - log.warning(LogMarker.Security, "TLS/SSL hostname verification is disabled. " + - "Please configure akka.remote.artery.ssl.config-ssl-engine.hostname-verification=on " + - "and ensure the X.509 certificate on the host is correct to remove this warning. " + - "See Akka reference documentation for more information.") + log.warning(LogMarker.Security, + "TLS/SSL hostname verification is disabled. " + + "Please configure akka.remote.artery.ssl.config-ssl-engine.hostname-verification=on " + + "and ensure the X.509 certificate on the host is correct to remove this warning. 
" + + "See Akka reference documentation for more information.") constructContext() } @@ -104,11 +104,15 @@ class SslTransportException(message: String, cause: Throwable) extends RuntimeEx ctx } catch { case e: FileNotFoundException => - throw new SslTransportException("Server SSL connection could not be established because key store could not be loaded", e) + throw new SslTransportException( + "Server SSL connection could not be established because key store could not be loaded", + e) case e: IOException => throw new SslTransportException("Server SSL connection could not be established because: " + e.getMessage, e) case e: GeneralSecurityException => - throw new SslTransportException("Server SSL connection could not be established because SSL context could not be constructed", e) + throw new SslTransportException( + "Server SSL connection could not be established because SSL context could not be constructed", + e) } } @@ -118,7 +122,8 @@ class SslTransportException(message: String, cause: Throwable) extends RuntimeEx protected def loadKeystore(filename: String, password: String): KeyStore = { val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) val fin = Files.newInputStream(Paths.get(filename)) - try keyStore.load(fin, password.toCharArray) finally Try(fin.close()) + try keyStore.load(fin, password.toCharArray) + finally Try(fin.close()) keyStore } @@ -153,11 +158,7 @@ class SslTransportException(message: String, cause: Throwable) extends RuntimeEx createSSLEngine(sslContext, role, hostname, port) } - private def createSSLEngine( - sslContext: SSLContext, - role: TLSRole, - hostname: String, - port: Int): SSLEngine = { + private def createSSLEngine(sslContext: SSLContext, role: TLSRole, hostname: String, port: Int): SSLEngine = { val engine = sslContext.createSSLEngine(hostname, port) @@ -198,7 +199,8 @@ object SSLEngineProviderSetup { * Java API: factory for defining a `SSLEngineProvider` that is passed in when ActorSystem * is created rather than 
creating one from configured class name. */ - def create(sslEngineProvider: java.util.function.Function[ExtendedActorSystem, SSLEngineProvider]): SSLEngineProviderSetup = + def create( + sslEngineProvider: java.util.function.Function[ExtendedActorSystem, SSLEngineProvider]): SSLEngineProviderSetup = apply(sys => sslEngineProvider(sys)) } @@ -211,8 +213,8 @@ object SSLEngineProviderSetup { * * Constructor is *Internal API*, use factories in [[SSLEngineProviderSetup()]] */ -@ApiMayChange class SSLEngineProviderSetup private ( - val sslEngineProvider: ExtendedActorSystem => SSLEngineProvider) extends Setup +@ApiMayChange class SSLEngineProviderSetup private (val sslEngineProvider: ExtendedActorSystem => SSLEngineProvider) + extends Setup /** * INTERNAL API @@ -231,7 +233,9 @@ object SSLEngineProviderSetup { new SecureRandom case unknown => - log.warning(LogMarker.Security, "Unknown SSL random number generator [{}] falling back to SecureRandom", unknown) + log.warning(LogMarker.Security, + "Unknown SSL random number generator [{}] falling back to SecureRandom", + unknown) new SecureRandom } rng.nextInt() // prevent stall on first access diff --git a/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala b/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala index bdd1500338..574a76a37a 100644 --- a/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala +++ b/akka-remote/src/main/scala/akka/remote/artery/tcp/TcpFraming.scala @@ -49,12 +49,10 @@ import akka.util.ByteString * of the frame. The `frameLength` is encoded as 4 bytes (little endian). 
*/ def encodeFrameHeader(frameLength: Int): ByteString = - ByteString( - (frameLength & 0xff).toByte, - ((frameLength & 0xff00) >> 8).toByte, - ((frameLength & 0xff0000) >> 16).toByte, - ((frameLength & 0xff000000) >> 24).toByte - ) + ByteString((frameLength & 0xff).toByte, + ((frameLength & 0xff00) >> 8).toByte, + ((frameLength & 0xff0000) >> 16).toByte, + ((frameLength & 0xff000000) >> 24).toByte) } /** @@ -71,8 +69,9 @@ import akka.util.ByteString if (magic == TcpFraming.Magic) ParseResult(None, ReadStreamId) else - throw new FramingException("Stream didn't start with expected magic bytes, " + - s"got [${(magic ++ reader.remainingData).take(10).map(_ formatted "%02x").mkString(" ")}] " + + throw new FramingException( + "Stream didn't start with expected magic bytes, " + + s"got [${(magic ++ reader.remainingData).take(10).map(_.formatted("%02x")).mkString(" ")}] " + "Connection is rejected. Probably invalid accidental access.") } } diff --git a/akka-remote/src/main/scala/akka/remote/routing/RemoteRouterConfig.scala b/akka-remote/src/main/scala/akka/remote/routing/RemoteRouterConfig.scala index 6b265d3d51..c6ce8a9748 100644 --- a/akka-remote/src/main/scala/akka/remote/routing/RemoteRouterConfig.scala +++ b/akka-remote/src/main/scala/akka/remote/routing/RemoteRouterConfig.scala @@ -49,14 +49,16 @@ final case class RemoteRouterConfig(local: Pool, nodes: Iterable[Address]) exten override def newRoutee(routeeProps: Props, context: ActorContext): Routee = { val name = "c" + childNameCounter.incrementAndGet - val deploy = Deploy(config = ConfigFactory.empty(), routerConfig = routeeProps.routerConfig, - scope = RemoteScope(nodeAddressIter.next)) + val deploy = Deploy(config = ConfigFactory.empty(), + routerConfig = routeeProps.routerConfig, + scope = RemoteScope(nodeAddressIter.next)) // attachChild means that the provider will treat this call as if possibly done out of the wrong // context and use RepointableActorRef instead of LocalActorRef. 
Seems like a slightly sub-optimal // choice in a corner case (and hence not worth fixing). - val ref = context.asInstanceOf[ActorCell].attachChild( - local.enrichWithPoolDispatcher(routeeProps, context).withDeploy(deploy), name, systemService = false) + val ref = context + .asInstanceOf[ActorCell] + .attachChild(local.enrichWithPoolDispatcher(routeeProps, context).withDeploy(deploy), name, systemService = false) ActorRefRoutee(ref) } @@ -69,8 +71,8 @@ final case class RemoteRouterConfig(local: Pool, nodes: Iterable[Address]) exten override def resizer: Option[Resizer] = local.resizer override def withFallback(other: RouterConfig): RouterConfig = other match { - case RemoteRouterConfig(_: RemoteRouterConfig, _) => throw new IllegalStateException( - "RemoteRouterConfig is not allowed to wrap a RemoteRouterConfig") + case RemoteRouterConfig(_: RemoteRouterConfig, _) => + throw new IllegalStateException("RemoteRouterConfig is not allowed to wrap a RemoteRouterConfig") case RemoteRouterConfig(local: Pool, _) => copy(local = this.local.withFallback(local).asInstanceOf[Pool]) case _ => copy(local = this.local.withFallback(other).asInstanceOf[Pool]) diff --git a/akka-remote/src/main/scala/akka/remote/security/provider/SeedSize.scala b/akka-remote/src/main/scala/akka/remote/security/provider/SeedSize.scala index f18e00ade5..23d7a50bc1 100644 --- a/akka-remote/src/main/scala/akka/remote/security/provider/SeedSize.scala +++ b/akka-remote/src/main/scala/akka/remote/security/provider/SeedSize.scala @@ -17,4 +17,3 @@ private[provider] object SeedSize { val Seed192 = 24 val Seed256 = 32 } - diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ActorRefResolveCache.scala b/akka-remote/src/main/scala/akka/remote/serialization/ActorRefResolveCache.scala index 49d0a149f9..732bc80422 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/ActorRefResolveCache.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/ActorRefResolveCache.scala @@ -13,13 
+13,14 @@ import akka.actor.ExtensionId import akka.actor.ExtensionIdProvider import akka.remote.RemoteActorRefProvider import akka.remote.artery.LruBoundedCache -import akka.util.{ Unsafe, unused } +import akka.util.{ unused, Unsafe } /** * INTERNAL API: Thread local cache per actor system */ private[akka] object ActorRefResolveThreadLocalCache - extends ExtensionId[ActorRefResolveThreadLocalCache] with ExtensionIdProvider { + extends ExtensionId[ActorRefResolveThreadLocalCache] + with ExtensionIdProvider { override def get(system: ActorSystem): ActorRefResolveThreadLocalCache = super.get(system) @@ -36,8 +37,9 @@ private[akka] class ActorRefResolveThreadLocalCache(val system: ExtendedActorSys private val provider = system.provider match { case r: RemoteActorRefProvider => r - case _ => throw new IllegalArgumentException( - "ActorRefResolveThreadLocalCache can only be used with RemoteActorRefProvider, " + + case _ => + throw new IllegalArgumentException( + "ActorRefResolveThreadLocalCache can only be used with RemoteActorRefProvider, " + s"not with ${system.provider.getClass}") } @@ -54,7 +56,7 @@ private[akka] class ActorRefResolveThreadLocalCache(val system: ExtendedActorSys * INTERNAL API */ private[akka] final class ActorRefResolveCache(provider: RemoteActorRefProvider) - extends LruBoundedCache[String, ActorRef](capacity = 1024, evictAgeThreshold = 600) { + extends LruBoundedCache[String, ActorRef](capacity = 1024, evictAgeThreshold = 600) { override protected def compute(k: String): ActorRef = provider.internalResolveActorRef(k) diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala index 8f7dc80784..d631f28f26 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/ArteryMessageSerializer.scala @@ -38,69 +38,81 @@ private[akka] object 
ArteryMessageSerializer { } /** INTERNAL API */ -private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest with BaseSerializer { +private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSystem) + extends SerializerWithStringManifest + with BaseSerializer { import ArteryMessageSerializer._ private lazy val serialization = SerializationExtension(system) override def manifest(o: AnyRef): String = o match { // most frequent ones first - case _: SystemMessageDelivery.SystemMessageEnvelope => SystemMessageEnvelopeManifest - case _: SystemMessageDelivery.Ack => SystemMessageDeliveryAckManifest - case _: HandshakeReq => HandshakeReqManifest - case _: HandshakeRsp => HandshakeRspManifest - case _: RemoteWatcher.ArteryHeartbeat.type => ArteryHeartbeatManifest - case _: RemoteWatcher.ArteryHeartbeatRsp => ArteryHeartbeatRspManifest - case _: SystemMessageDelivery.Nack => SystemMessageDeliveryNackManifest - case _: Quarantined => QuarantinedManifest - case _: ActorSystemTerminating => ActorSystemTerminatingManifest - case _: ActorSystemTerminatingAck => ActorSystemTerminatingAckManifest - case _: CompressionProtocol.ActorRefCompressionAdvertisement => ActorRefCompressionAdvertisementManifest - case _: CompressionProtocol.ActorRefCompressionAdvertisementAck => ActorRefCompressionAdvertisementAckManifest + case _: SystemMessageDelivery.SystemMessageEnvelope => SystemMessageEnvelopeManifest + case _: SystemMessageDelivery.Ack => SystemMessageDeliveryAckManifest + case _: HandshakeReq => HandshakeReqManifest + case _: HandshakeRsp => HandshakeRspManifest + case _: RemoteWatcher.ArteryHeartbeat.type => ArteryHeartbeatManifest + case _: RemoteWatcher.ArteryHeartbeatRsp => ArteryHeartbeatRspManifest + case _: SystemMessageDelivery.Nack => SystemMessageDeliveryNackManifest + case _: Quarantined => QuarantinedManifest + case _: ActorSystemTerminating => ActorSystemTerminatingManifest + case _: 
ActorSystemTerminatingAck => ActorSystemTerminatingAckManifest + case _: CompressionProtocol.ActorRefCompressionAdvertisement => ActorRefCompressionAdvertisementManifest + case _: CompressionProtocol.ActorRefCompressionAdvertisementAck => ActorRefCompressionAdvertisementAckManifest case _: CompressionProtocol.ClassManifestCompressionAdvertisement => ClassManifestCompressionAdvertisementManifest - case _: CompressionProtocol.ClassManifestCompressionAdvertisementAck => ClassManifestCompressionAdvertisementAckManifest + case _: CompressionProtocol.ClassManifestCompressionAdvertisementAck => + ClassManifestCompressionAdvertisementAckManifest case _ => throw new IllegalArgumentException(s"Can't serialize object of type ${o.getClass} in [${getClass.getName}]") } override def toBinary(o: AnyRef): Array[Byte] = o match { // most frequent ones first - case env: SystemMessageDelivery.SystemMessageEnvelope => serializeSystemMessageEnvelope(env).toByteArray - case SystemMessageDelivery.Ack(seqNo, from) => serializeSystemMessageDeliveryAck(seqNo, from).toByteArray - case HandshakeReq(from, to) => serializeHandshakeReq(from, to).toByteArray - case HandshakeRsp(from) => serializeWithAddress(from).toByteArray - case RemoteWatcher.ArteryHeartbeat => Array.emptyByteArray - case RemoteWatcher.ArteryHeartbeatRsp(from) => serializeArteryHeartbeatRsp(from).toByteArray - case SystemMessageDelivery.Nack(seqNo, from) => serializeSystemMessageDeliveryAck(seqNo, from).toByteArray - case q: Quarantined => serializeQuarantined(q).toByteArray - case ActorSystemTerminating(from) => serializeWithAddress(from).toByteArray - case ActorSystemTerminatingAck(from) => serializeWithAddress(from).toByteArray - case adv: ActorRefCompressionAdvertisement => serializeActorRefCompressionAdvertisement(adv).toByteArray - case ActorRefCompressionAdvertisementAck(from, id) => serializeCompressionTableAdvertisementAck(from, id).toByteArray - case adv: ClassManifestCompressionAdvertisement => 
serializeCompressionAdvertisement(adv)(identity).toByteArray - case ClassManifestCompressionAdvertisementAck(from, id) => serializeCompressionTableAdvertisementAck(from, id).toByteArray + case env: SystemMessageDelivery.SystemMessageEnvelope => serializeSystemMessageEnvelope(env).toByteArray + case SystemMessageDelivery.Ack(seqNo, from) => serializeSystemMessageDeliveryAck(seqNo, from).toByteArray + case HandshakeReq(from, to) => serializeHandshakeReq(from, to).toByteArray + case HandshakeRsp(from) => serializeWithAddress(from).toByteArray + case RemoteWatcher.ArteryHeartbeat => Array.emptyByteArray + case RemoteWatcher.ArteryHeartbeatRsp(from) => serializeArteryHeartbeatRsp(from).toByteArray + case SystemMessageDelivery.Nack(seqNo, from) => serializeSystemMessageDeliveryAck(seqNo, from).toByteArray + case q: Quarantined => serializeQuarantined(q).toByteArray + case ActorSystemTerminating(from) => serializeWithAddress(from).toByteArray + case ActorSystemTerminatingAck(from) => serializeWithAddress(from).toByteArray + case adv: ActorRefCompressionAdvertisement => serializeActorRefCompressionAdvertisement(adv).toByteArray + case ActorRefCompressionAdvertisementAck(from, id) => + serializeCompressionTableAdvertisementAck(from, id).toByteArray + case adv: ClassManifestCompressionAdvertisement => serializeCompressionAdvertisement(adv)(identity).toByteArray + case ClassManifestCompressionAdvertisementAck(from, id) => + serializeCompressionTableAdvertisementAck(from, id).toByteArray } - override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = manifest match { // most frequent ones first (could be made a HashMap in the future) - case SystemMessageEnvelopeManifest => deserializeSystemMessageEnvelope(bytes) - case SystemMessageDeliveryAckManifest => deserializeSystemMessageDeliveryAck(bytes, SystemMessageDelivery.Ack) - case HandshakeReqManifest => deserializeHandshakeReq(bytes, HandshakeReq) - case HandshakeRspManifest => deserializeWithFromAddress(bytes, 
HandshakeRsp) - case SystemMessageDeliveryNackManifest => deserializeSystemMessageDeliveryAck(bytes, SystemMessageDelivery.Nack) - case QuarantinedManifest => deserializeQuarantined(ArteryControlFormats.Quarantined.parseFrom(bytes)) - case ActorSystemTerminatingManifest => deserializeWithFromAddress(bytes, ActorSystemTerminating) - case ActorSystemTerminatingAckManifest => deserializeWithFromAddress(bytes, ActorSystemTerminatingAck) - case ActorRefCompressionAdvertisementManifest => deserializeActorRefCompressionAdvertisement(bytes) - case ActorRefCompressionAdvertisementAckManifest => deserializeCompressionTableAdvertisementAck(bytes, ActorRefCompressionAdvertisementAck) - case ClassManifestCompressionAdvertisementManifest => deserializeCompressionAdvertisement(bytes, identity, ClassManifestCompressionAdvertisement) - case ClassManifestCompressionAdvertisementAckManifest => deserializeCompressionTableAdvertisementAck(bytes, ClassManifestCompressionAdvertisementAck) - case ArteryHeartbeatManifest => RemoteWatcher.ArteryHeartbeat - case ArteryHeartbeatRspManifest => deserializeArteryHeartbeatRsp(bytes, ArteryHeartbeatRsp) - case _ => throw new NotSerializableException(s"Manifest '$manifest' not defined for ArteryControlMessageSerializer (serializer id $identifier)") - } + override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = + manifest match { // most frequent ones first (could be made a HashMap in the future) + case SystemMessageEnvelopeManifest => deserializeSystemMessageEnvelope(bytes) + case SystemMessageDeliveryAckManifest => deserializeSystemMessageDeliveryAck(bytes, SystemMessageDelivery.Ack) + case HandshakeReqManifest => deserializeHandshakeReq(bytes, HandshakeReq) + case HandshakeRspManifest => deserializeWithFromAddress(bytes, HandshakeRsp) + case SystemMessageDeliveryNackManifest => deserializeSystemMessageDeliveryAck(bytes, SystemMessageDelivery.Nack) + case QuarantinedManifest => 
deserializeQuarantined(ArteryControlFormats.Quarantined.parseFrom(bytes)) + case ActorSystemTerminatingManifest => deserializeWithFromAddress(bytes, ActorSystemTerminating) + case ActorSystemTerminatingAckManifest => deserializeWithFromAddress(bytes, ActorSystemTerminatingAck) + case ActorRefCompressionAdvertisementManifest => deserializeActorRefCompressionAdvertisement(bytes) + case ActorRefCompressionAdvertisementAckManifest => + deserializeCompressionTableAdvertisementAck(bytes, ActorRefCompressionAdvertisementAck) + case ClassManifestCompressionAdvertisementManifest => + deserializeCompressionAdvertisement(bytes, identity, ClassManifestCompressionAdvertisement) + case ClassManifestCompressionAdvertisementAckManifest => + deserializeCompressionTableAdvertisementAck(bytes, ClassManifestCompressionAdvertisementAck) + case ArteryHeartbeatManifest => RemoteWatcher.ArteryHeartbeat + case ArteryHeartbeatRspManifest => deserializeArteryHeartbeatRsp(bytes, ArteryHeartbeatRsp) + case _ => + throw new NotSerializableException( + s"Manifest '$manifest' not defined for ArteryControlMessageSerializer (serializer id $identifier)") + } import scala.collection.JavaConverters._ def serializeQuarantined(quarantined: Quarantined): ArteryControlFormats.Quarantined = - ArteryControlFormats.Quarantined.newBuilder() + ArteryControlFormats.Quarantined + .newBuilder() .setFrom(serializeUniqueAddress(quarantined.from)) .setTo(serializeUniqueAddress(quarantined.to)) .build @@ -116,13 +128,15 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste if (str == DeadLettersRepresentation) system.deadLetters else system.provider.resolveActorRef(str) - def serializeActorRefCompressionAdvertisement(adv: ActorRefCompressionAdvertisement): ArteryControlFormats.CompressionTableAdvertisement = + def serializeActorRefCompressionAdvertisement( + adv: ActorRefCompressionAdvertisement): ArteryControlFormats.CompressionTableAdvertisement = 
serializeCompressionAdvertisement(adv)(serializeActorRef) def deserializeActorRefCompressionAdvertisement(bytes: Array[Byte]): ActorRefCompressionAdvertisement = deserializeCompressionAdvertisement(bytes, deserializeActorRef, ActorRefCompressionAdvertisement) - def serializeCompressionAdvertisement[T](adv: CompressionAdvertisement[T])(keySerializer: T => String): ArteryControlFormats.CompressionTableAdvertisement = { + def serializeCompressionAdvertisement[T](adv: CompressionAdvertisement[T])( + keySerializer: T => String): ArteryControlFormats.CompressionTableAdvertisement = { val builder = ArteryControlFormats.CompressionTableAdvertisement.newBuilder .setFrom(serializeUniqueAddress(adv.from)) @@ -131,20 +145,21 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste adv.table.dictionary.foreach { case (key, value) => - builder - .addKeys(keySerializer(key)) - .addValues(value) + builder.addKeys(keySerializer(key)).addValues(value) } builder.build } - def deserializeCompressionAdvertisement[T, U](bytes: Array[Byte], keyDeserializer: String => T, create: (UniqueAddress, CompressionTable[T]) => U): U = { + def deserializeCompressionAdvertisement[T, U](bytes: Array[Byte], + keyDeserializer: String => T, + create: (UniqueAddress, CompressionTable[T]) => U): U = { val protoAdv = ArteryControlFormats.CompressionTableAdvertisement.parseFrom(bytes) val kvs = - protoAdv.getKeysList.asScala.map(keyDeserializer).zip( - protoAdv.getValuesList.asScala.asInstanceOf[Iterable[Int]] /* to avoid having to call toInt explicitly */ ) + protoAdv.getKeysList.asScala + .map(keyDeserializer) + .zip(protoAdv.getValuesList.asScala.asInstanceOf[Iterable[Int]] /* to avoid having to call toInt explicitly */ ) val table = CompressionTable(protoAdv.getOriginUid, protoAdv.getTableVersion.byteValue, kvs.toMap) create(deserializeUniqueAddress(protoAdv.getFrom), table) @@ -156,12 +171,14 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste 
.setVersion(version) .build() - def deserializeCompressionTableAdvertisementAck(bytes: Array[Byte], create: (UniqueAddress, Byte) => AnyRef): AnyRef = { + def deserializeCompressionTableAdvertisementAck(bytes: Array[Byte], + create: (UniqueAddress, Byte) => AnyRef): AnyRef = { val msg = ArteryControlFormats.CompressionTableAdvertisementAck.parseFrom(bytes) create(deserializeUniqueAddress(msg.getFrom), msg.getVersion.toByte) } - def serializeSystemMessageEnvelope(env: SystemMessageDelivery.SystemMessageEnvelope): ArteryControlFormats.SystemMessageEnvelope = { + def serializeSystemMessageEnvelope( + env: SystemMessageDelivery.SystemMessageEnvelope): ArteryControlFormats.SystemMessageEnvelope = { val msg = MessageSerializer.serialize(system, env.message) val builder = @@ -179,19 +196,18 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste val protoEnv = ArteryControlFormats.SystemMessageEnvelope.parseFrom(bytes) SystemMessageDelivery.SystemMessageEnvelope( - serialization.deserialize( - protoEnv.getMessage.toByteArray, - protoEnv.getSerializerId, - if (protoEnv.hasMessageManifest) protoEnv.getMessageManifest.toStringUtf8 else "").get, + serialization + .deserialize(protoEnv.getMessage.toByteArray, + protoEnv.getSerializerId, + if (protoEnv.hasMessageManifest) protoEnv.getMessageManifest.toStringUtf8 else "") + .get, protoEnv.getSeqNo, deserializeUniqueAddress(protoEnv.getAckReplyTo)) } - def serializeSystemMessageDeliveryAck(seqNo: Long, from: UniqueAddress): ArteryControlFormats.SystemMessageDeliveryAck = - ArteryControlFormats.SystemMessageDeliveryAck.newBuilder - .setSeqNo(seqNo) - .setFrom(serializeUniqueAddress(from)) - .build + def serializeSystemMessageDeliveryAck(seqNo: Long, + from: UniqueAddress): ArteryControlFormats.SystemMessageDeliveryAck = + ArteryControlFormats.SystemMessageDeliveryAck.newBuilder.setSeqNo(seqNo).setFrom(serializeUniqueAddress(from)).build def deserializeSystemMessageDeliveryAck(bytes: Array[Byte], create: 
(Long, UniqueAddress) => AnyRef): AnyRef = { val protoAck = ArteryControlFormats.SystemMessageDeliveryAck.parseFrom(bytes) @@ -217,7 +233,8 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste } def serializeUniqueAddress(address: UniqueAddress): ArteryControlFormats.UniqueAddress = - ArteryControlFormats.UniqueAddress.newBuilder() + ArteryControlFormats.UniqueAddress + .newBuilder() .setAddress(serializeAddress(address.address)) .setUid(address.uid) .build() @@ -228,7 +245,8 @@ private[akka] final class ArteryMessageSerializer(val system: ExtendedActorSyste def serializeAddress(address: Address): ArteryControlFormats.Address = address match { case Address(protocol, system, Some(host), Some(port)) => - ArteryControlFormats.Address.newBuilder() + ArteryControlFormats.Address + .newBuilder() .setProtocol(protocol) .setSystem(system) .setHostname(host) diff --git a/akka-remote/src/main/scala/akka/remote/serialization/DaemonMsgCreateSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/DaemonMsgCreateSerializer.scala index 40857541db..8276b5e6bc 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/DaemonMsgCreateSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/DaemonMsgCreateSerializer.scala @@ -40,7 +40,6 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys def toBinary(obj: AnyRef): Array[Byte] = obj match { case DaemonMsgCreate(props, deploy, path, supervisor) => - def deployProto(d: Deploy): DeployData = { val builder = DeployData.newBuilder.setPath(d.path) @@ -72,9 +71,7 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys } def propsProto = { - val builder = PropsData.newBuilder - .setClazz(props.clazz.getName) - .setDeploy(deployProto(props.deploy)) + val builder = PropsData.newBuilder.setClazz(props.clazz.getName).setDeploy(deployProto(props.deploy)) props.args.foreach { arg => val (serializerId, hasManifest, 
manifest, bytes) = serialize(arg) builder.addArgs(ByteString.copyFrom(bytes)) @@ -85,12 +82,13 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys builder.build } - DaemonMsgCreateData.newBuilder. - setProps(propsProto). - setDeploy(deployProto(deploy)). - setPath(path). - setSupervisor(serializeActorRef(supervisor)). - build.toByteArray + DaemonMsgCreateData.newBuilder + .setProps(propsProto) + .setDeploy(deployProto(deploy)) + .setPath(path) + .setSupervisor(serializeActorRef(supervisor)) + .build + .toByteArray case _ => throw new IllegalArgumentException( @@ -105,10 +103,12 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys val config = if (protoDeploy.hasConfig) { if (protoDeploy.hasConfigSerializerId) { - serialization.deserialize( - protoDeploy.getConfig.toByteArray, - protoDeploy.getConfigSerializerId, - protoDeploy.getConfigManifest).get.asInstanceOf[Config] + serialization + .deserialize(protoDeploy.getConfig.toByteArray, + protoDeploy.getConfigSerializerId, + protoDeploy.getConfigManifest) + .get + .asInstanceOf[Config] } else { // old wire format oldDeserialize(protoDeploy.getConfig, classOf[Config]) @@ -118,10 +118,12 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys val routerConfig = if (protoDeploy.hasRouterConfig) { if (protoDeploy.hasRouterConfigSerializerId) { - serialization.deserialize( - protoDeploy.getRouterConfig.toByteArray, - protoDeploy.getRouterConfigSerializerId, - protoDeploy.getRouterConfigManifest).get.asInstanceOf[RouterConfig] + serialization + .deserialize(protoDeploy.getRouterConfig.toByteArray, + protoDeploy.getRouterConfigSerializerId, + protoDeploy.getRouterConfigManifest) + .get + .asInstanceOf[RouterConfig] } else { // old wire format oldDeserialize(protoDeploy.getRouterConfig, classOf[RouterConfig]) @@ -131,10 +133,12 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys val scope = if 
(protoDeploy.hasScope) { if (protoDeploy.hasScopeSerializerId) { - serialization.deserialize( - protoDeploy.getScope.toByteArray, - protoDeploy.getScopeSerializerId, - protoDeploy.getScopeManifest).get.asInstanceOf[Scope] + serialization + .deserialize(protoDeploy.getScope.toByteArray, + protoDeploy.getScopeSerializerId, + protoDeploy.getScopeManifest) + .get + .asInstanceOf[Scope] } else { // old wire format oldDeserialize(protoDeploy.getScope, classOf[Scope]) @@ -159,25 +163,26 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys val manifest = if (protoProps.getHasManifest(idx)) protoProps.getManifests(idx) else "" - serialization.deserialize( - protoProps.getArgs(idx).toByteArray(), - protoProps.getSerializerIds(idx), - manifest).get + serialization + .deserialize(protoProps.getArgs(idx).toByteArray(), protoProps.getSerializerIds(idx), manifest) + .get } } else { // message from an older node, which only provides data and class name // and never any serializer ids - (proto.getProps.getArgsList.asScala zip proto.getProps.getManifestsList.asScala).iterator - .map(oldDeserialize).to(immutable.Vector) + proto.getProps.getArgsList.asScala + .zip(proto.getProps.getManifestsList.asScala) + .iterator + .map(oldDeserialize) + .to(immutable.Vector) } Props(deploy(proto.getProps.getDeploy), actorClass, args) } - DaemonMsgCreate( - props = props, - deploy = deploy(proto.getDeploy), - path = proto.getPath, - supervisor = deserializeActorRef(system, proto.getSupervisor)) + DaemonMsgCreate(props = props, + deploy = deploy(proto.getDeploy), + path = proto.getPath, + supervisor = deserializeActorRef(system, proto.getSupervisor)) } private def serialize(any: Any): (Int, Boolean, String, Array[Byte]) = { @@ -197,7 +202,8 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys "null" } else { val className = m.getClass.getName - if (scala212OrLater && m.isInstanceOf[java.io.Serializable] && m.getClass.isSynthetic && 
className.contains("$Lambda$")) { + if (scala212OrLater && m.isInstanceOf[java.io.Serializable] && m.getClass.isSynthetic && className.contains( + "$Lambda$")) { // When the additional-protobuf serializers are not enabled // the serialization of the parameters is based on passing class name instead of // serializerId and manifest as we usually do. With Scala 2.12 the functions are generated as @@ -222,8 +228,9 @@ private[akka] final class DaemonMsgCreateSerializer(val system: ExtendedActorSys private def oldDeserialize[T: ClassTag](data: ByteString, clazz: Class[T]): T = { val bytes = data.toByteArray serialization.deserialize(bytes, clazz) match { - case Success(x: T) => x - case Success(other) => throw new IllegalArgumentException("Can't deserialize to [%s], got [%s]".format(clazz.getName, other)) + case Success(x: T) => x + case Success(other) => + throw new IllegalArgumentException("Can't deserialize to [%s], got [%s]".format(clazz.getName, other)) case Failure(e) => // Fallback to the java serializer, because some interfaces don't implement java.io.Serializable, // but the impl instance does. This could be optimized by adding java serializers in reference.conf: diff --git a/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala index d39d970273..77cf973cc2 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/MessageContainerSerializer.scala @@ -33,10 +33,10 @@ class MessageContainerSerializer(val system: ExtendedActorSystem) extends BaseSe val builder = ContainerFormats.SelectionEnvelope.newBuilder() val message = sel.msg.asInstanceOf[AnyRef] val serializer = serialization.findSerializerFor(message) - builder. - setEnclosedMessage(ByteString.copyFrom(serializer.toBinary(message))). - setSerializerId(serializer.identifier). 
- setWildcardFanOut(sel.wildcardFanOut) + builder + .setEnclosedMessage(ByteString.copyFrom(serializer.toBinary(message))) + .setSerializerId(serializer.identifier) + .setWildcardFanOut(sel.wildcardFanOut) val ms = Serializers.manifestFor(serializer, message) if (ms.nonEmpty) builder.setMessageManifest(ByteString.copyFromUtf8(ms)) @@ -53,29 +53,31 @@ class MessageContainerSerializer(val system: ExtendedActorSystem) extends BaseSe builder.build().toByteArray } - private def buildPattern(matcher: Option[String], tpe: ContainerFormats.PatternType): ContainerFormats.Selection.Builder = { + private def buildPattern(matcher: Option[String], + tpe: ContainerFormats.PatternType): ContainerFormats.Selection.Builder = { val builder = ContainerFormats.Selection.newBuilder().setType(tpe) - matcher foreach builder.setMatcher + matcher.foreach(builder.setMatcher) builder } def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef = { val selectionEnvelope = ContainerFormats.SelectionEnvelope.parseFrom(bytes) val manifest = if (selectionEnvelope.hasMessageManifest) selectionEnvelope.getMessageManifest.toStringUtf8 else "" - val msg = serialization.deserialize( - selectionEnvelope.getEnclosedMessage.toByteArray, - selectionEnvelope.getSerializerId, - manifest).get + val msg = serialization + .deserialize(selectionEnvelope.getEnclosedMessage.toByteArray, selectionEnvelope.getSerializerId, manifest) + .get import scala.collection.JavaConverters._ - val elements: immutable.Iterable[SelectionPathElement] = selectionEnvelope.getPatternList.asScala.iterator.map { x => - x.getType match { - case CHILD_NAME => SelectChildName(x.getMatcher) - case CHILD_PATTERN => SelectChildPattern(x.getMatcher) - case PARENT => SelectParent - } + val elements: immutable.Iterable[SelectionPathElement] = selectionEnvelope.getPatternList.asScala.iterator + .map { x => + x.getType match { + case CHILD_NAME => SelectChildName(x.getMatcher) + case CHILD_PATTERN => SelectChildPattern(x.getMatcher) 
+ case PARENT => SelectParent + } - }.to(immutable.IndexedSeq) + } + .to(immutable.IndexedSeq) val wildcardFanOut = if (selectionEnvelope.hasWildcardFanOut) selectionEnvelope.getWildcardFanOut else false ActorSelectionMessage(msg, elements, wildcardFanOut) } diff --git a/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala index 3ddb5b2ed3..19ee710231 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/MiscMessageSerializer.scala @@ -67,37 +67,31 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW } private def serializeIdentify(identify: Identify): Array[Byte] = - ContainerFormats.Identify.newBuilder() + ContainerFormats.Identify + .newBuilder() .setMessageId(payloadSupport.payloadBuilder(identify.messageId)) .build() .toByteArray private def serializeActorIdentity(actorIdentity: ActorIdentity): Array[Byte] = { val builder = - ContainerFormats.ActorIdentity.newBuilder() + ContainerFormats.ActorIdentity + .newBuilder() .setCorrelationId(payloadSupport.payloadBuilder(actorIdentity.correlationId)) actorIdentity.ref.foreach { actorRef => builder.setRef(actorRefBuilder(actorRef)) } - builder - .build() - .toByteArray + builder.build().toByteArray } private def serializeSome(someValue: Any): Array[Byte] = - ContainerFormats.Option.newBuilder() - .setValue(payloadSupport.payloadBuilder(someValue)) - .build() - .toByteArray + ContainerFormats.Option.newBuilder().setValue(payloadSupport.payloadBuilder(someValue)).build().toByteArray private def serializeOptional(opt: Optional[_]): Array[Byte] = { if (opt.isPresent) - ContainerFormats.Option.newBuilder() - .setValue(payloadSupport.payloadBuilder(opt.get)) - .build() - .toByteArray + 
ContainerFormats.Option.newBuilder().setValue(payloadSupport.payloadBuilder(opt.get)).build().toByteArray else ParameterlessSerializedMessage } @@ -116,8 +110,7 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW } private def actorRefBuilder(actorRef: ActorRef): ContainerFormats.ActorRef.Builder = - ContainerFormats.ActorRef.newBuilder() - .setPath(Serialization.serializedActorPath(actorRef)) + ContainerFormats.ActorRef.newBuilder().setPath(Serialization.serializedActorPath(actorRef)) private def serializeStatusSuccess(success: Status.Success): Array[Byte] = payloadSupport.payloadBuilder(success.status).build().toByteArray @@ -130,10 +123,7 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW if (ex.getActor ne null) builder.setActor(actorRefBuilder(ex.getActor)) - builder - .setMessage(ex.getMessage) - .setCause(payloadSupport.payloadBuilder(ex.getCause)) - .build().toByteArray + builder.setMessage(ex.getMessage).setCause(payloadSupport.payloadBuilder(ex.getCause)).build().toByteArray } private def serializeConfig(c: Config): Array[Byte] = { @@ -143,7 +133,8 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW private def protoForAddressData(address: Address): AddressData.Builder = address match { case Address(protocol, actorSystem, Some(host), Some(port)) => - WireFormats.AddressData.newBuilder() + WireFormats.AddressData + .newBuilder() .setSystem(actorSystem) .setHostname(host) .setPort(port) @@ -153,7 +144,8 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW private def protoForAddress(address: Address): ArteryControlFormats.Address.Builder = address match { case Address(protocol, actorSystem, Some(host), Some(port)) => - ArteryControlFormats.Address.newBuilder() + ArteryControlFormats.Address + .newBuilder() .setSystem(actorSystem) .setHostname(host) .setPort(port) @@ -164,10 +156,12 @@ class MiscMessageSerializer(val system: 
ExtendedActorSystem) extends SerializerW protoForAddressData(address).build().toByteArray private def serializeClassicUniqueAddress(uniqueAddress: UniqueAddress): Array[Byte] = - ArteryControlFormats.UniqueAddress.newBuilder() + ArteryControlFormats.UniqueAddress + .newBuilder() .setUid(uniqueAddress.uid) .setAddress(protoForAddress(uniqueAddress.address)) - .build().toByteArray + .build() + .toByteArray private def serializeDefaultResizer(dr: DefaultResizer): Array[Byte] = { val builder = WireFormats.DefaultResizer.newBuilder() @@ -210,7 +204,8 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW private def serializeScatterGatherFirstCompletedPool(sgp: ScatterGatherFirstCompletedPool): Array[Byte] = { val builder = WireFormats.ScatterGatherPool.newBuilder() - builder.setGeneric(buildGenericRoutingPool(sgp.nrOfInstances, sgp.routerDispatcher, sgp.usePoolDispatcher, sgp.resizer)) + builder.setGeneric( + buildGenericRoutingPool(sgp.nrOfInstances, sgp.routerDispatcher, sgp.usePoolDispatcher, sgp.resizer)) builder.setWithin(buildFiniteDuration(sgp.within)) builder.build().toByteArray } @@ -230,11 +225,10 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW builder.build().toByteArray } - private def buildGenericRoutingPool( - nrOfInstances: Int, - routerDispatcher: String, - usePoolDispatcher: Boolean, - resizer: Option[Resizer]): WireFormats.GenericRoutingPool = { + private def buildGenericRoutingPool(nrOfInstances: Int, + routerDispatcher: String, + usePoolDispatcher: Boolean, + resizer: Option[Resizer]): WireFormats.GenericRoutingPool = { val builder = WireFormats.GenericRoutingPool.newBuilder() builder.setNrOfInstances(nrOfInstances) if (routerDispatcher != Dispatchers.DefaultDispatcherId) { @@ -258,10 +252,7 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW } private def buildFiniteDuration(duration: FiniteDuration): WireFormats.FiniteDuration = { - 
WireFormats.FiniteDuration.newBuilder() - .setValue(duration.length) - .setUnit(timeUnitToWire(duration.unit)) - .build() + WireFormats.FiniteDuration.newBuilder().setValue(duration.length).setUnit(timeUnitToWire(duration.unit)).build() } private def buildAddressData(address: Address): WireFormats.AddressData = { @@ -308,37 +299,36 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW private val TailChoppingPoolManifest = "ROTCP" private val RemoteRouterConfigManifest = "RORRC" - private val fromBinaryMap = Map[String, Array[Byte] => AnyRef]( - IdentifyManifest -> deserializeIdentify, - ActorIdentityManifest -> deserializeActorIdentity, - StatusSuccessManifest -> deserializeStatusSuccess, - StatusFailureManifest -> deserializeStatusFailure, - ThrowableManifest -> throwableSupport.deserializeThrowable, - ActorRefManifest -> deserializeActorRefBytes, - OptionManifest -> deserializeOption, - OptionalManifest -> deserializeOptional, - PoisonPillManifest -> ((_) => PoisonPill), - KillManifest -> ((_) => Kill), - RemoteWatcherHBManifest -> ((_) => RemoteWatcher.Heartbeat), - DoneManifest -> ((_) => Done), - NotUsedManifest -> ((_) => NotUsed), - AddressManifest -> deserializeAddressData, - UniqueAddressManifest -> deserializeUniqueAddress, - RemoteWatcherHBRespManifest -> deserializeHeartbeatRsp, - ActorInitializationExceptionManifest -> deserializeActorInitializationException, - LocalScopeManifest -> ((_) => LocalScope), - RemoteScopeManifest -> deserializeRemoteScope, - ConfigManifest -> deserializeConfig, - FromConfigManifest -> deserializeFromConfig, - DefaultResizerManifest -> deserializeDefaultResizer, - BalancingPoolManifest -> deserializeBalancingPool, - BroadcastPoolManifest -> deserializeBroadcastPool, - RandomPoolManifest -> deserializeRandomPool, - RoundRobinPoolManifest -> deserializeRoundRobinPool, - ScatterGatherPoolManifest -> deserializeScatterGatherPool, - TailChoppingPoolManifest -> deserializeTailChoppingPool, - 
RemoteRouterConfigManifest -> deserializeRemoteRouterConfig - ) + private val fromBinaryMap = Map[String, Array[Byte] => AnyRef](IdentifyManifest -> deserializeIdentify, + ActorIdentityManifest -> deserializeActorIdentity, + StatusSuccessManifest -> deserializeStatusSuccess, + StatusFailureManifest -> deserializeStatusFailure, + ThrowableManifest -> throwableSupport.deserializeThrowable, + ActorRefManifest -> deserializeActorRefBytes, + OptionManifest -> deserializeOption, + OptionalManifest -> deserializeOptional, + PoisonPillManifest -> ((_) => PoisonPill), + KillManifest -> ((_) => Kill), + RemoteWatcherHBManifest -> ((_) => + RemoteWatcher.Heartbeat), + DoneManifest -> ((_) => Done), + NotUsedManifest -> ((_) => NotUsed), + AddressManifest -> deserializeAddressData, + UniqueAddressManifest -> deserializeUniqueAddress, + RemoteWatcherHBRespManifest -> deserializeHeartbeatRsp, + ActorInitializationExceptionManifest -> deserializeActorInitializationException, + LocalScopeManifest -> ((_) => LocalScope), + RemoteScopeManifest -> deserializeRemoteScope, + ConfigManifest -> deserializeConfig, + FromConfigManifest -> deserializeFromConfig, + DefaultResizerManifest -> deserializeDefaultResizer, + BalancingPoolManifest -> deserializeBalancingPool, + BroadcastPoolManifest -> deserializeBroadcastPool, + RandomPoolManifest -> deserializeRandomPool, + RoundRobinPoolManifest -> deserializeRoundRobinPool, + ScatterGatherPoolManifest -> deserializeScatterGatherPool, + TailChoppingPoolManifest -> deserializeTailChoppingPool, + RemoteRouterConfigManifest -> deserializeRemoteRouterConfig) override def manifest(o: AnyRef): String = o match { @@ -378,8 +368,9 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = fromBinaryMap.get(manifest) match { case Some(deserializer) => deserializer(bytes) - case None => throw new NotSerializableException( - s"Unimplemented deserialization of 
message with manifest [$manifest] in [${getClass.getName}]") + case None => + throw new NotSerializableException( + s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]") } private def deserializeIdentify(bytes: Array[Byte]): Identify = { @@ -433,30 +424,23 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW addressFromDataProto(WireFormats.AddressData.parseFrom(bytes)) private def addressFromDataProto(a: WireFormats.AddressData): Address = { - Address( - a.getProtocol, - a.getSystem, - // technically the presence of hostname and port are guaranteed, see our serializeAddressData - if (a.hasHostname) Some(a.getHostname) else None, - if (a.hasPort) Some(a.getPort) else None - ) + Address(a.getProtocol, + a.getSystem, + // technically the presence of hostname and port are guaranteed, see our serializeAddressData + if (a.hasHostname) Some(a.getHostname) else None, + if (a.hasPort) Some(a.getPort) else None) } private def addressFromProto(a: ArteryControlFormats.Address): Address = { - Address( - a.getProtocol, - a.getSystem, - // technically the presence of hostname and port are guaranteed, see our serializeAddressData - if (a.hasHostname) Some(a.getHostname) else None, - if (a.hasPort) Some(a.getPort) else None - ) + Address(a.getProtocol, + a.getSystem, + // technically the presence of hostname and port are guaranteed, see our serializeAddressData + if (a.hasHostname) Some(a.getHostname) else None, + if (a.hasPort) Some(a.getPort) else None) } private def deserializeUniqueAddress(bytes: Array[Byte]): UniqueAddress = { val u = ArteryControlFormats.UniqueAddress.parseFrom(bytes) - UniqueAddress( - addressFromProto(u.getAddress), - u.getUid - ) + UniqueAddress(addressFromProto(u.getAddress), u.getUid) } private def deserializeHeartbeatRsp(bytes: Array[Byte]): RemoteWatcher.HeartbeatRsp = { @@ -473,17 +457,14 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW if 
(message.startsWith(refString)) message.drop(refString.length + 2) else message - ActorInitializationException( - if (serializedEx.hasActor) ref else null, - reconstructedMessage, - payloadSupport.deserializePayload(serializedEx.getCause).asInstanceOf[Throwable]) + ActorInitializationException(if (serializedEx.hasActor) ref else null, + reconstructedMessage, + payloadSupport.deserializePayload(serializedEx.getCause).asInstanceOf[Throwable]) } private def deserializeRemoteScope(bytes: Array[Byte]): RemoteScope = { val rs = WireFormats.RemoteScope.parseFrom(bytes) - RemoteScope( - deserializeAddressData(rs.getNode) - ) + RemoteScope(deserializeAddressData(rs.getNode)) } private def deserializeConfig(bytes: Array[Byte]): Config = { @@ -496,101 +477,97 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW else { val fc = WireFormats.FromConfig.parseFrom(bytes) FromConfig( - resizer = if (fc.hasResizer) Some(payloadSupport.deserializePayload(fc.getResizer).asInstanceOf[Resizer]) else None, - routerDispatcher = if (fc.hasRouterDispatcher) fc.getRouterDispatcher else Dispatchers.DefaultDispatcherId - ) + resizer = + if (fc.hasResizer) Some(payloadSupport.deserializePayload(fc.getResizer).asInstanceOf[Resizer]) else None, + routerDispatcher = if (fc.hasRouterDispatcher) fc.getRouterDispatcher else Dispatchers.DefaultDispatcherId) } private def deserializeBalancingPool(bytes: Array[Byte]): BalancingPool = { val bp = WireFormats.GenericRoutingPool.parseFrom(bytes) - BalancingPool( - nrOfInstances = bp.getNrOfInstances, - routerDispatcher = if (bp.hasRouterDispatcher) bp.getRouterDispatcher else Dispatchers.DefaultDispatcherId) + BalancingPool(nrOfInstances = bp.getNrOfInstances, + routerDispatcher = + if (bp.hasRouterDispatcher) bp.getRouterDispatcher else Dispatchers.DefaultDispatcherId) } private def deserializeBroadcastPool(bytes: Array[Byte]): BroadcastPool = { val bp = WireFormats.GenericRoutingPool.parseFrom(bytes) - BroadcastPool( - 
nrOfInstances = bp.getNrOfInstances, - resizer = - if (bp.hasResizer) Some(payloadSupport.deserializePayload(bp.getResizer).asInstanceOf[Resizer]) - else None, - routerDispatcher = if (bp.hasRouterDispatcher) bp.getRouterDispatcher else Dispatchers.DefaultDispatcherId, - usePoolDispatcher = bp.getUsePoolDispatcher - ) + BroadcastPool(nrOfInstances = bp.getNrOfInstances, + resizer = + if (bp.hasResizer) Some(payloadSupport.deserializePayload(bp.getResizer).asInstanceOf[Resizer]) + else None, + routerDispatcher = + if (bp.hasRouterDispatcher) bp.getRouterDispatcher else Dispatchers.DefaultDispatcherId, + usePoolDispatcher = bp.getUsePoolDispatcher) } private def deserializeRandomPool(bytes: Array[Byte]): RandomPool = { val rp = WireFormats.GenericRoutingPool.parseFrom(bytes) - RandomPool( - nrOfInstances = rp.getNrOfInstances, - resizer = - if (rp.hasResizer) Some(payloadSupport.deserializePayload(rp.getResizer).asInstanceOf[Resizer]) - else None, - routerDispatcher = if (rp.hasRouterDispatcher) rp.getRouterDispatcher else Dispatchers.DefaultDispatcherId, - usePoolDispatcher = rp.getUsePoolDispatcher - ) + RandomPool(nrOfInstances = rp.getNrOfInstances, + resizer = + if (rp.hasResizer) Some(payloadSupport.deserializePayload(rp.getResizer).asInstanceOf[Resizer]) + else None, + routerDispatcher = + if (rp.hasRouterDispatcher) rp.getRouterDispatcher else Dispatchers.DefaultDispatcherId, + usePoolDispatcher = rp.getUsePoolDispatcher) } private def deserializeRoundRobinPool(bytes: Array[Byte]): RoundRobinPool = { val rp = WireFormats.GenericRoutingPool.parseFrom(bytes) - RoundRobinPool( - nrOfInstances = rp.getNrOfInstances, - resizer = - if (rp.hasResizer) Some(payloadSupport.deserializePayload(rp.getResizer).asInstanceOf[Resizer]) - else None, - routerDispatcher = if (rp.hasRouterDispatcher) rp.getRouterDispatcher else Dispatchers.DefaultDispatcherId, - usePoolDispatcher = rp.getUsePoolDispatcher - ) + RoundRobinPool(nrOfInstances = rp.getNrOfInstances, + resizer = + if 
(rp.hasResizer) Some(payloadSupport.deserializePayload(rp.getResizer).asInstanceOf[Resizer]) + else None, + routerDispatcher = + if (rp.hasRouterDispatcher) rp.getRouterDispatcher else Dispatchers.DefaultDispatcherId, + usePoolDispatcher = rp.getUsePoolDispatcher) } private def deserializeScatterGatherPool(bytes: Array[Byte]): ScatterGatherFirstCompletedPool = { val sgp = WireFormats.ScatterGatherPool.parseFrom(bytes) - ScatterGatherFirstCompletedPool( - nrOfInstances = sgp.getGeneric.getNrOfInstances, - resizer = - if (sgp.getGeneric.hasResizer) Some(payloadSupport.deserializePayload(sgp.getGeneric.getResizer).asInstanceOf[Resizer]) - else None, - within = deserializeFiniteDuration(sgp.getWithin), - routerDispatcher = - if (sgp.getGeneric.hasRouterDispatcher) sgp.getGeneric.getRouterDispatcher - else Dispatchers.DefaultDispatcherId - ) + ScatterGatherFirstCompletedPool(nrOfInstances = sgp.getGeneric.getNrOfInstances, + resizer = + if (sgp.getGeneric.hasResizer) + Some( + payloadSupport + .deserializePayload(sgp.getGeneric.getResizer) + .asInstanceOf[Resizer]) + else None, + within = deserializeFiniteDuration(sgp.getWithin), + routerDispatcher = + if (sgp.getGeneric.hasRouterDispatcher) sgp.getGeneric.getRouterDispatcher + else Dispatchers.DefaultDispatcherId) } private def deserializeTailChoppingPool(bytes: Array[Byte]): TailChoppingPool = { val tcp = WireFormats.TailChoppingPool.parseFrom(bytes) - TailChoppingPool( - nrOfInstances = tcp.getGeneric.getNrOfInstances, - resizer = - if (tcp.getGeneric.hasResizer) Some(payloadSupport.deserializePayload(tcp.getGeneric.getResizer).asInstanceOf[Resizer]) - else None, - routerDispatcher = if (tcp.getGeneric.hasRouterDispatcher) tcp.getGeneric.getRouterDispatcher else Dispatchers.DefaultDispatcherId, - usePoolDispatcher = tcp.getGeneric.getUsePoolDispatcher, - within = deserializeFiniteDuration(tcp.getWithin), - interval = deserializeFiniteDuration(tcp.getInterval) - ) + TailChoppingPool(nrOfInstances = 
tcp.getGeneric.getNrOfInstances, + resizer = + if (tcp.getGeneric.hasResizer) + Some(payloadSupport.deserializePayload(tcp.getGeneric.getResizer).asInstanceOf[Resizer]) + else None, + routerDispatcher = + if (tcp.getGeneric.hasRouterDispatcher) tcp.getGeneric.getRouterDispatcher + else Dispatchers.DefaultDispatcherId, + usePoolDispatcher = tcp.getGeneric.getUsePoolDispatcher, + within = deserializeFiniteDuration(tcp.getWithin), + interval = deserializeFiniteDuration(tcp.getInterval)) } private def deserializeRemoteRouterConfig(bytes: Array[Byte]): RemoteRouterConfig = { val rrc = WireFormats.RemoteRouterConfig.parseFrom(bytes) - RemoteRouterConfig( - local = payloadSupport.deserializePayload(rrc.getLocal).asInstanceOf[Pool], - nodes = rrc.getNodesList.asScala.map(deserializeAddressData) - ) + RemoteRouterConfig(local = payloadSupport.deserializePayload(rrc.getLocal).asInstanceOf[Pool], + nodes = rrc.getNodesList.asScala.map(deserializeAddressData)) } private def deserializeDefaultResizer(bytes: Array[Byte]): DefaultResizer = { val dr = WireFormats.DefaultResizer.parseFrom(bytes) - DefaultResizer( - lowerBound = dr.getLowerBound, - upperBound = dr.getUpperBound, - pressureThreshold = dr.getPressureThreshold, - rampupRate = dr.getRampupRate, - backoffThreshold = dr.getBackoffThreshold, - backoffRate = dr.getBackoffRate, - messagesPerResize = dr.getMessagesPerResize - ) + DefaultResizer(lowerBound = dr.getLowerBound, + upperBound = dr.getUpperBound, + pressureThreshold = dr.getPressureThreshold, + rampupRate = dr.getRampupRate, + backoffThreshold = dr.getBackoffThreshold, + backoffRate = dr.getBackoffRate, + messagesPerResize = dr.getMessagesPerResize) } private def deserializeTimeUnit(unit: WireFormats.TimeUnit): TimeUnit = unit match { @@ -604,17 +581,9 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW } private def deserializeFiniteDuration(duration: WireFormats.FiniteDuration): FiniteDuration = - FiniteDuration( - 
duration.getValue, - deserializeTimeUnit(duration.getUnit) - ) + FiniteDuration(duration.getValue, deserializeTimeUnit(duration.getUnit)) private def deserializeAddressData(address: WireFormats.AddressData): Address = { - Address( - address.getProtocol, - address.getSystem, - address.getHostname, - address.getPort - ) + Address(address.getProtocol, address.getSystem, address.getHostname, address.getPort) } } diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala index e7a9dad764..80bae8cafa 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/ProtobufSerializer.scala @@ -9,7 +9,7 @@ import java.util.concurrent.atomic.AtomicReference import akka.actor.{ ActorRef, ExtendedActorSystem } import akka.remote.WireFormats.ActorRefData -import akka.serialization.{ Serialization, BaseSerializer } +import akka.serialization.{ BaseSerializer, Serialization } import scala.annotation.tailrec @@ -57,7 +57,8 @@ class ProtobufSerializer(val system: ExtendedActorSystem) extends BaseSerializer val unCachedParsingMethod = if (method eq null) clazz.getDeclaredMethod("parseFrom", ProtobufSerializer.ARRAY_OF_BYTE_ARRAY: _*) else method - if (parsingMethodBindingRef.compareAndSet(parsingMethodBinding, parsingMethodBinding.updated(clazz, unCachedParsingMethod))) + if (parsingMethodBindingRef.compareAndSet(parsingMethodBinding, + parsingMethodBinding.updated(clazz, unCachedParsingMethod))) unCachedParsingMethod else parsingMethod(unCachedParsingMethod) @@ -65,7 +66,8 @@ class ProtobufSerializer(val system: ExtendedActorSystem) extends BaseSerializer } parsingMethod().invoke(null, bytes) - case None => throw new IllegalArgumentException("Need a protobuf message class to be able to serialize bytes using protobuf") + case None => + throw new IllegalArgumentException("Need a protobuf 
message class to be able to serialize bytes using protobuf") } } @@ -80,7 +82,9 @@ class ProtobufSerializer(val system: ExtendedActorSystem) extends BaseSerializer val unCachedtoByteArrayMethod = if (method eq null) clazz.getMethod("toByteArray") else method - if (toByteArrayMethodBindingRef.compareAndSet(toByteArrayMethodBinding, toByteArrayMethodBinding.updated(clazz, unCachedtoByteArrayMethod))) + if (toByteArrayMethodBindingRef.compareAndSet( + toByteArrayMethodBinding, + toByteArrayMethodBinding.updated(clazz, unCachedtoByteArrayMethod))) unCachedtoByteArrayMethod else toByteArrayMethod(unCachedtoByteArrayMethod) diff --git a/akka-remote/src/main/scala/akka/remote/serialization/SystemMessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/serialization/SystemMessageSerializer.scala index e10bfb21b3..71bc494db2 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/SystemMessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/SystemMessageSerializer.scala @@ -45,36 +45,36 @@ class SystemMessageSerializer(val system: ExtendedActorSystem) extends BaseSeria case Supervise(child, async) => builder.setType(SUPERVISE) - val superviseData = SystemMessageFormats.SuperviseData.newBuilder() - .setChild(serializeActorRef(child)) - .setAsync(async) + val superviseData = + SystemMessageFormats.SuperviseData.newBuilder().setChild(serializeActorRef(child)).setAsync(async) builder.setSuperviseData(superviseData) case Watch(watchee, watcher) => builder.setType(WATCH) - val watchData = SystemMessageFormats.WatchData.newBuilder() + val watchData = SystemMessageFormats.WatchData + .newBuilder() .setWatchee(serializeActorRef(watchee)) .setWatcher(serializeActorRef(watcher)) builder.setWatchData(watchData) case Unwatch(watchee, watcher) => builder.setType(UNWATCH) - val watchData = SystemMessageFormats.WatchData.newBuilder() + val watchData = SystemMessageFormats.WatchData + .newBuilder() .setWatchee(serializeActorRef(watchee)) 
.setWatcher(serializeActorRef(watcher)) builder.setWatchData(watchData) case Failed(child, cause, uid) => builder.setType(FAILED) - val failedData = SystemMessageFormats.FailedData.newBuilder() - .setChild(serializeActorRef(child)) - .setUid(uid) + val failedData = SystemMessageFormats.FailedData.newBuilder().setChild(serializeActorRef(child)).setUid(uid) builder.setCauseData(serializeThrowable(cause)) builder.setFailedData(failedData) case DeathWatchNotification(actor, existenceConfirmed, addressTerminated) => builder.setType(DEATHWATCH_NOTIFICATION) - val deathWatchNotificationData = SystemMessageFormats.DeathWatchNotificationData.newBuilder() + val deathWatchNotificationData = SystemMessageFormats.DeathWatchNotificationData + .newBuilder() .setActor(serializeActorRef(actor)) .setExistenceConfirmed(existenceConfirmed) .setAddressTerminated(addressTerminated) @@ -120,26 +120,22 @@ class SystemMessageSerializer(val system: ExtendedActorSystem) extends BaseSeria Supervise(deserializeActorRef(sysmsg.getSuperviseData.getChild), sysmsg.getSuperviseData.getAsync) case WATCH => - Watch( - deserializeActorRef(sysmsg.getWatchData.getWatchee).asInstanceOf[InternalActorRef], - deserializeActorRef(sysmsg.getWatchData.getWatcher).asInstanceOf[InternalActorRef]) + Watch(deserializeActorRef(sysmsg.getWatchData.getWatchee).asInstanceOf[InternalActorRef], + deserializeActorRef(sysmsg.getWatchData.getWatcher).asInstanceOf[InternalActorRef]) case UNWATCH => - Unwatch( - deserializeActorRef(sysmsg.getWatchData.getWatchee).asInstanceOf[InternalActorRef], - deserializeActorRef(sysmsg.getWatchData.getWatcher).asInstanceOf[InternalActorRef]) + Unwatch(deserializeActorRef(sysmsg.getWatchData.getWatchee).asInstanceOf[InternalActorRef], + deserializeActorRef(sysmsg.getWatchData.getWatcher).asInstanceOf[InternalActorRef]) case FAILED => - Failed( - deserializeActorRef(sysmsg.getFailedData.getChild), - getCauseThrowable(sysmsg), - sysmsg.getFailedData.getUid.toInt) + 
Failed(deserializeActorRef(sysmsg.getFailedData.getChild), + getCauseThrowable(sysmsg), + sysmsg.getFailedData.getUid.toInt) case DEATHWATCH_NOTIFICATION => - DeathWatchNotification( - deserializeActorRef(sysmsg.getDwNotificationData.getActor), - sysmsg.getDwNotificationData.getExistenceConfirmed, - sysmsg.getDwNotificationData.getAddressTerminated) + DeathWatchNotification(deserializeActorRef(sysmsg.getDwNotificationData.getActor), + sysmsg.getDwNotificationData.getExistenceConfirmed, + sysmsg.getDwNotificationData.getAddressTerminated) } private def serializeThrowable(throwable: Throwable): ContainerFormats.Payload.Builder = { @@ -151,8 +147,7 @@ class SystemMessageSerializer(val system: ExtendedActorSystem) extends BaseSeria } private def serializeActorRef(actorRef: ActorRef): ContainerFormats.ActorRef.Builder = { - ContainerFormats.ActorRef.newBuilder() - .setPath(Serialization.serializedActorPath(actorRef)) + ContainerFormats.ActorRef.newBuilder().setPath(Serialization.serializedActorPath(actorRef)) } private def deserializeActorRef(serializedRef: ContainerFormats.ActorRef): ActorRef = { diff --git a/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala b/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala index a8c23c13f2..4899047bbc 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/ThrowableSupport.scala @@ -21,8 +21,7 @@ private[akka] class ThrowableSupport(system: ExtendedActorSystem) { } def toProtobufThrowable(t: Throwable): ContainerFormats.Throwable.Builder = { - val b = ContainerFormats.Throwable.newBuilder() - .setClassName(t.getClass.getName) + val b = ContainerFormats.Throwable.newBuilder().setClassName(t.getClass.getName) if (t.getMessage != null) b.setMessage(t.getMessage) if (t.getCause != null) @@ -40,7 +39,8 @@ private[akka] class ThrowableSupport(system: ExtendedActorSystem) { } def 
stackTraceElementBuilder(elem: StackTraceElement): ContainerFormats.StackTraceElement.Builder = { - val builder = ContainerFormats.StackTraceElement.newBuilder() + val builder = ContainerFormats.StackTraceElement + .newBuilder() .setClassName(elem.getClassName) .setMethodName(elem.getMethodName) .setLineNumber(elem.getLineNumber) @@ -56,26 +56,27 @@ private[akka] class ThrowableSupport(system: ExtendedActorSystem) { val t: Throwable = if (protoT.hasCause) { val cause = payloadSupport.deserializePayload(protoT.getCause).asInstanceOf[Throwable] - system.dynamicAccess.createInstanceFor[Throwable]( - protoT.getClassName, - List(classOf[String] -> protoT.getMessage, classOf[Throwable] -> cause)).get + system.dynamicAccess + .createInstanceFor[Throwable](protoT.getClassName, + List(classOf[String] -> protoT.getMessage, classOf[Throwable] -> cause)) + .get } else { // Important security note: before creating an instance of from the class name we // check that the class is a Throwable and that it has a configured serializer. 
val clazz = system.dynamicAccess.getClassFor[Throwable](protoT.getClassName).get serialization.serializerFor(clazz) // this will throw NotSerializableException if no serializer configured - system.dynamicAccess.createInstanceFor[Throwable]( - clazz, - List(classOf[String] -> protoT.getMessage)).get + system.dynamicAccess.createInstanceFor[Throwable](clazz, List(classOf[String] -> protoT.getMessage)).get } import scala.collection.JavaConverters._ val stackTrace = protoT.getStackTraceList.asScala.map { elem => val fileName = elem.getFileName - new StackTraceElement(elem.getClassName, elem.getMethodName, - if (fileName.length > 0) fileName else null, elem.getLineNumber) + new StackTraceElement(elem.getClassName, + elem.getMethodName, + if (fileName.length > 0) fileName else null, + elem.getLineNumber) }.toArray t.setStackTrace(stackTrace) t diff --git a/akka-remote/src/main/scala/akka/remote/serialization/WrappedPayloadSupport.scala b/akka-remote/src/main/scala/akka/remote/serialization/WrappedPayloadSupport.scala index 96ab5eba48..e4d55a2e44 100644 --- a/akka-remote/src/main/scala/akka/remote/serialization/WrappedPayloadSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/serialization/WrappedPayloadSupport.scala @@ -21,9 +21,7 @@ private[akka] class WrappedPayloadSupport(system: ExtendedActorSystem) { val builder = ContainerFormats.Payload.newBuilder() val serializer = serialization.findSerializerFor(payload) - builder - .setEnclosedMessage(ByteString.copyFrom(serializer.toBinary(payload))) - .setSerializerId(serializer.identifier) + builder.setEnclosedMessage(ByteString.copyFrom(serializer.toBinary(payload))).setSerializerId(serializer.identifier) val ms = Serializers.manifestFor(serializer, payload) if (ms.nonEmpty) builder.setMessageManifest(ByteString.copyFromUtf8(ms)) @@ -33,10 +31,7 @@ private[akka] class WrappedPayloadSupport(system: ExtendedActorSystem) { def deserializePayload(payload: ContainerFormats.Payload): Any = { val manifest = if 
(payload.hasMessageManifest) payload.getMessageManifest.toStringUtf8 else "" - serialization.deserialize( - payload.getEnclosedMessage.toByteArray, - payload.getSerializerId, - manifest).get + serialization.deserialize(payload.getEnclosedMessage.toByteArray, payload.getSerializerId, manifest).get } } diff --git a/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala index a66dc16fba..25b3e29c34 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/AbstractTransportAdapter.scala @@ -5,19 +5,20 @@ package akka.remote.transport import akka.actor._ -import akka.pattern.{ ask, pipe, gracefulStop } +import akka.pattern.{ ask, gracefulStop, pipe } import akka.remote.Remoting.RegisterTransportActor import akka.remote.transport.Transport._ import akka.remote.RARP import akka.util.Timeout import scala.collection.immutable import scala.concurrent.duration._ -import scala.concurrent.{ ExecutionContext, Promise, Future } -import akka.dispatch.{ UnboundedMessageQueueSemantics, RequiresMessageQueue } +import scala.concurrent.{ ExecutionContext, Future, Promise } +import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } import akka.remote.transport.AssociationHandle.DisassociateInfo import akka.actor.DeadLetterSuppression trait TransportAdapterProvider { + /** * Create the transport adapter that wraps an underlying transport. 
*/ @@ -28,14 +29,18 @@ class TransportAdapters(system: ExtendedActorSystem) extends Extension { val settings = RARP(system).provider.remoteSettings private val adaptersTable: Map[String, TransportAdapterProvider] = for ((name, fqn) <- settings.Adapters) yield { - name -> system.dynamicAccess.createInstanceFor[TransportAdapterProvider](fqn, immutable.Seq.empty).recover({ - case e => throw new IllegalArgumentException(s"Cannot instantiate transport adapter [${fqn}]", e) - }).get + name -> system.dynamicAccess + .createInstanceFor[TransportAdapterProvider](fqn, immutable.Seq.empty) + .recover({ + case e => throw new IllegalArgumentException(s"Cannot instantiate transport adapter [${fqn}]", e) + }) + .get } def getAdapterProvider(name: String): TransportAdapterProvider = adaptersTable.get(name) match { case Some(provider) => provider - case None => throw new IllegalArgumentException(s"There is no registered transport adapter provider with name: [${name}]") + case None => + throw new IllegalArgumentException(s"There is no registered transport adapter provider with name: [${name}]") } } @@ -65,13 +70,13 @@ trait SchemeAugmenter { * An adapter that wraps a transport and provides interception */ abstract class AbstractTransportAdapter(protected val wrappedTransport: Transport)(implicit val ec: ExecutionContext) - extends Transport with SchemeAugmenter { + extends Transport + with SchemeAugmenter { protected def maximumOverhead: Int - protected def interceptListen( - listenAddress: Address, - listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener] + protected def interceptListen(listenAddress: Address, + listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener] protected def interceptAssociate(remoteAddress: Address, statusPromise: Promise[AssociationHandle]): Unit @@ -118,19 +123,15 @@ abstract class AbstractTransportAdapter(protected val wrappedTransport: Transpor } -abstract class AbstractTransportAdapterHandle( - val 
originalLocalAddress: Address, - val originalRemoteAddress: Address, - val wrappedHandle: AssociationHandle, - val addedSchemeIdentifier: String) extends AssociationHandle - with SchemeAugmenter { +abstract class AbstractTransportAdapterHandle(val originalLocalAddress: Address, + val originalRemoteAddress: Address, + val wrappedHandle: AssociationHandle, + val addedSchemeIdentifier: String) + extends AssociationHandle + with SchemeAugmenter { def this(wrappedHandle: AssociationHandle, addedSchemeIdentifier: String) = - this( - wrappedHandle.localAddress, - wrappedHandle.remoteAddress, - wrappedHandle, - addedSchemeIdentifier) + this(wrappedHandle.localAddress, wrappedHandle.remoteAddress, wrappedHandle, addedSchemeIdentifier) override val localAddress = augmentScheme(originalLocalAddress) override val remoteAddress = augmentScheme(originalRemoteAddress) @@ -141,18 +142,19 @@ object ActorTransportAdapter { sealed trait TransportOperation extends NoSerializationVerificationNeeded final case class ListenerRegistered(listener: AssociationEventListener) extends TransportOperation - final case class AssociateUnderlying(remoteAddress: Address, statusPromise: Promise[AssociationHandle]) extends TransportOperation - final case class ListenUnderlying( - listenAddress: Address, - upstreamListener: Future[AssociationEventListener]) extends TransportOperation + final case class AssociateUnderlying(remoteAddress: Address, statusPromise: Promise[AssociationHandle]) + extends TransportOperation + final case class ListenUnderlying(listenAddress: Address, upstreamListener: Future[AssociationEventListener]) + extends TransportOperation final case class DisassociateUnderlying(info: DisassociateInfo = AssociationHandle.Unknown) - extends TransportOperation with DeadLetterSuppression + extends TransportOperation + with DeadLetterSuppression implicit val AskTimeout = Timeout(5.seconds) } abstract class ActorTransportAdapter(wrappedTransport: Transport, system: ActorSystem) - extends 
AbstractTransportAdapter(wrappedTransport)(system.dispatcher) { + extends AbstractTransportAdapter(wrappedTransport)(system.dispatcher) { import ActorTransportAdapter._ @@ -164,9 +166,8 @@ abstract class ActorTransportAdapter(wrappedTransport: Transport, system: ActorS private def registerManager(): Future[ActorRef] = (system.actorSelection("/system/transports") ? RegisterTransportActor(managerProps, managerName)).mapTo[ActorRef] - override def interceptListen( - listenAddress: Address, - listenerPromise: Future[AssociationEventListener]): Future[AssociationEventListener] = { + override def interceptListen(listenAddress: Address, + listenerPromise: Future[AssociationEventListener]): Future[AssociationEventListener] = { registerManager().map { mgr => // Side effecting: storing the manager instance in volatile var // This is done only once: during the initialization of the protocol stack. The variable manager is not read @@ -187,8 +188,7 @@ abstract class ActorTransportAdapter(wrappedTransport: Transport, system: ActorS } yield stopResult && wrappedStopResult } -abstract class ActorTransportAdapterManager extends Actor - with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +abstract class ActorTransportAdapterManager extends Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import ActorTransportAdapter.{ ListenUnderlying, ListenerRegistered } private var delayedEvents = immutable.Queue.empty[Any] @@ -207,11 +207,11 @@ abstract class ActorTransportAdapterManager extends Actor def receive: Receive = { case ListenUnderlying(listenAddress, upstreamListenerFuture) => localAddress = listenAddress - upstreamListenerFuture.future.map { ListenerRegistered(_) } pipeTo self + upstreamListenerFuture.future.map { ListenerRegistered(_) }.pipeTo(self) case ListenerRegistered(listener) => associationListener = listener - delayedEvents foreach { self.tell(_, Actor.noSender) } + delayedEvents.foreach { self.tell(_, Actor.noSender) } delayedEvents = 
immutable.Queue.empty[Any] context.become(ready) @@ -219,7 +219,7 @@ abstract class ActorTransportAdapterManager extends Actor * queue. The difference is that these messages will not survive a restart -- which is not needed here. * These messages will be processed in the ready state. */ - case otherEvent => delayedEvents = delayedEvents enqueue otherEvent + case otherEvent => delayedEvents = delayedEvents.enqueue(otherEvent) } diff --git a/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala b/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala index 697444d7dd..5643f9b4c0 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/AkkaPduCodec.scala @@ -5,7 +5,7 @@ package akka.remote.transport import akka.AkkaException -import akka.actor.{ AddressFromURIString, InternalActorRef, Address, ActorRef } +import akka.actor.{ ActorRef, Address, AddressFromURIString, InternalActorRef } import akka.remote.WireFormats._ import akka.remote._ import akka.util.ByteString @@ -36,12 +36,12 @@ private[remote] object AkkaPduCodec { case object Heartbeat extends AkkaPdu final case class Payload(bytes: ByteString) extends AkkaPdu - final case class Message( - recipient: InternalActorRef, - recipientAddress: Address, - serializedMessage: SerializedMessage, - senderOption: OptionVal[ActorRef], - seqOption: Option[SeqNo]) extends HasSequenceNumber { + final case class Message(recipient: InternalActorRef, + recipientAddress: Address, + serializedMessage: SerializedMessage, + senderOption: OptionVal[ActorRef], + seqOption: Option[SeqNo]) + extends HasSequenceNumber { def reliableDeliveryEnabled = seqOption.isDefined @@ -56,6 +56,7 @@ private[remote] object AkkaPduCodec { */ private[remote] trait AkkaPduCodec { import AkkaPduCodec._ + /** * Returns an [[akka.remote.transport.AkkaPduCodec.AkkaPdu]] instance that represents the PDU contained in the raw * ByteString. 
@@ -93,15 +94,16 @@ private[remote] trait AkkaPduCodec { def constructHeartbeat: ByteString - def decodeMessage(raw: ByteString, provider: RemoteActorRefProvider, localAddress: Address): (Option[Ack], Option[Message]) + def decodeMessage(raw: ByteString, + provider: RemoteActorRefProvider, + localAddress: Address): (Option[Ack], Option[Message]) - def constructMessage( - localAddress: Address, - recipient: ActorRef, - serializedMessage: SerializedMessage, - senderOption: OptionVal[ActorRef], - seqOption: Option[SeqNo] = None, - ackOption: Option[Ack] = None): ByteString + def constructMessage(localAddress: Address, + recipient: ActorRef, + serializedMessage: SerializedMessage, + senderOption: OptionVal[ActorRef], + seqOption: Option[SeqNo] = None, + ackOption: Option[Ack] = None): ByteString def constructPureAck(ack: Ack): ByteString } @@ -115,17 +117,18 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec { private def ackBuilder(ack: Ack): AcknowledgementInfo.Builder = { val ackBuilder = AcknowledgementInfo.newBuilder() ackBuilder.setCumulativeAck(ack.cumulativeAck.rawValue) - ack.nacks foreach { nack => ackBuilder.addNacks(nack.rawValue) } + ack.nacks.foreach { nack => + ackBuilder.addNacks(nack.rawValue) + } ackBuilder } - override def constructMessage( - localAddress: Address, - recipient: ActorRef, - serializedMessage: SerializedMessage, - senderOption: OptionVal[ActorRef], - seqOption: Option[SeqNo] = None, - ackOption: Option[Ack] = None): ByteString = { + override def constructMessage(localAddress: Address, + recipient: ActorRef, + serializedMessage: SerializedMessage, + senderOption: OptionVal[ActorRef], + seqOption: Option[SeqNo] = None, + ackOption: Option[Ack] = None): ByteString = { val ackAndEnvelopeBuilder = AckAndEnvelopeContainer.newBuilder @@ -137,8 +140,12 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec { case OptionVal.None => } - seqOption foreach { seq => envelopeBuilder.setSeq(seq.rawValue) } - ackOption 
foreach { ack => ackAndEnvelopeBuilder.setAck(ackBuilder(ack)) } + seqOption.foreach { seq => + envelopeBuilder.setSeq(seq.rawValue) + } + ackOption.foreach { ack => + ackAndEnvelopeBuilder.setAck(ackBuilder(ack)) + } envelopeBuilder.setMessage(serializedMessage) ackAndEnvelopeBuilder.setEnvelope(envelopeBuilder) @@ -149,17 +156,24 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec { ByteString.ByteString1C(AckAndEnvelopeContainer.newBuilder.setAck(ackBuilder(ack)).build().toByteArray) //Reuse Byte Array (naughty!) override def constructPayload(payload: ByteString): ByteString = - ByteString.ByteString1C(AkkaProtocolMessage.newBuilder().setPayload(PByteString.copyFrom(payload.asByteBuffer)).build.toByteArray) //Reuse Byte Array (naughty!) + ByteString.ByteString1C( + AkkaProtocolMessage + .newBuilder() + .setPayload(PByteString.copyFrom(payload.asByteBuffer)) + .build + .toByteArray) //Reuse Byte Array (naughty!) override def constructAssociate(info: HandshakeInfo): ByteString = { val handshakeInfo = AkkaHandshakeInfo.newBuilder.setOrigin(serializeAddress(info.origin)).setUid(info.uid.toLong) - info.cookie foreach handshakeInfo.setCookie + info.cookie.foreach(handshakeInfo.setCookie) constructControlMessagePdu(WireFormats.CommandType.ASSOCIATE, Some(handshakeInfo)) } private val DISASSOCIATE = constructControlMessagePdu(WireFormats.CommandType.DISASSOCIATE, None) - private val DISASSOCIATE_SHUTTING_DOWN = constructControlMessagePdu(WireFormats.CommandType.DISASSOCIATE_SHUTTING_DOWN, None) - private val DISASSOCIATE_QUARANTINED = constructControlMessagePdu(WireFormats.CommandType.DISASSOCIATE_QUARANTINED, None) + private val DISASSOCIATE_SHUTTING_DOWN = + constructControlMessagePdu(WireFormats.CommandType.DISASSOCIATE_SHUTTING_DOWN, None) + private val DISASSOCIATE_QUARANTINED = + constructControlMessagePdu(WireFormats.CommandType.DISASSOCIATE_QUARANTINED, None) override def constructDisassociate(info: AssociationHandle.DisassociateInfo): ByteString 
= info match { case AssociationHandle.Unknown => DISASSOCIATE @@ -175,34 +189,36 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec { val pdu = AkkaProtocolMessage.parseFrom(raw.toArray) if (pdu.hasPayload) Payload(ByteString(pdu.getPayload.asReadOnlyByteBuffer())) else if (pdu.hasInstruction) decodeControlPdu(pdu.getInstruction) - else throw new PduCodecException("Error decoding Akka PDU: Neither message nor control message were contained", null) + else + throw new PduCodecException("Error decoding Akka PDU: Neither message nor control message were contained", null) } catch { case e: InvalidProtocolBufferException => throw new PduCodecException("Decoding PDU failed.", e) } } - override def decodeMessage( - raw: ByteString, - provider: RemoteActorRefProvider, - localAddress: Address): (Option[Ack], Option[Message]) = { + override def decodeMessage(raw: ByteString, + provider: RemoteActorRefProvider, + localAddress: Address): (Option[Ack], Option[Message]) = { val ackAndEnvelope = AckAndEnvelopeContainer.parseFrom(raw.toArray) val ackOption = if (ackAndEnvelope.hasAck) { import scala.collection.JavaConverters._ - Some(Ack(SeqNo(ackAndEnvelope.getAck.getCumulativeAck), ackAndEnvelope.getAck.getNacksList.asScala.map(SeqNo(_)).toSet)) + Some( + Ack(SeqNo(ackAndEnvelope.getAck.getCumulativeAck), + ackAndEnvelope.getAck.getNacksList.asScala.map(SeqNo(_)).toSet)) } else None val messageOption = if (ackAndEnvelope.hasEnvelope) { val msgPdu = ackAndEnvelope.getEnvelope - Some(Message( - recipient = provider.resolveActorRefWithLocalAddress(msgPdu.getRecipient.getPath, localAddress), - recipientAddress = AddressFromURIString(msgPdu.getRecipient.getPath), - serializedMessage = msgPdu.getMessage, - senderOption = - if (msgPdu.hasSender) OptionVal(provider.resolveActorRefWithLocalAddress(msgPdu.getSender.getPath, localAddress)) - else OptionVal.None, - seqOption = - if (msgPdu.hasSeq) Some(SeqNo(msgPdu.getSeq)) else None)) + Some( + Message(recipient = 
provider.resolveActorRefWithLocalAddress(msgPdu.getRecipient.getPath, localAddress), + recipientAddress = AddressFromURIString(msgPdu.getRecipient.getPath), + serializedMessage = msgPdu.getMessage, + senderOption = + if (msgPdu.hasSender) + OptionVal(provider.resolveActorRefWithLocalAddress(msgPdu.getSender.getPath, localAddress)) + else OptionVal.None, + seqOption = if (msgPdu.hasSeq) Some(SeqNo(msgPdu.getSeq)) else None)) } else None (ackOption, messageOption) @@ -231,30 +247,32 @@ private[remote] object AkkaPduProtobufCodec extends AkkaPduCodec { private def decodeAddress(encodedAddress: AddressData): Address = Address(encodedAddress.getProtocol, encodedAddress.getSystem, encodedAddress.getHostname, encodedAddress.getPort) - private def constructControlMessagePdu( - code: WireFormats.CommandType, - handshakeInfo: Option[AkkaHandshakeInfo.Builder]): ByteString = { + private def constructControlMessagePdu(code: WireFormats.CommandType, + handshakeInfo: Option[AkkaHandshakeInfo.Builder]): ByteString = { val controlMessageBuilder = AkkaControlMessage.newBuilder() controlMessageBuilder.setCommandType(code) - handshakeInfo foreach controlMessageBuilder.setHandshakeInfo + handshakeInfo.foreach(controlMessageBuilder.setHandshakeInfo) - ByteString.ByteString1C(AkkaProtocolMessage.newBuilder().setInstruction(controlMessageBuilder.build).build.toByteArray) //Reuse Byte Array (naughty!) + ByteString.ByteString1C( + AkkaProtocolMessage + .newBuilder() + .setInstruction(controlMessageBuilder.build) + .build + .toByteArray) //Reuse Byte Array (naughty!) 
} private def serializeActorRef(defaultAddress: Address, ref: ActorRef): ActorRefData = { - ActorRefData.newBuilder.setPath( - if (ref.path.address.host.isDefined) ref.path.toSerializationFormat else ref.path.toSerializationFormatWithAddress(defaultAddress)).build() + ActorRefData.newBuilder + .setPath( + if (ref.path.address.host.isDefined) ref.path.toSerializationFormat + else ref.path.toSerializationFormatWithAddress(defaultAddress)) + .build() } private def serializeAddress(address: Address): AddressData = address match { case Address(protocol, system, Some(host), Some(port)) => - AddressData.newBuilder - .setHostname(host) - .setPort(port) - .setSystem(system) - .setProtocol(protocol) - .build() + AddressData.newBuilder.setHostname(host).setPort(port).setSystem(system).setProtocol(protocol).build() case _ => throw new IllegalArgumentException(s"Address [${address}] could not be serialized: host or port missing.") } diff --git a/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala index 5fcc0f9659..2c1ffc4002 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/AkkaProtocolTransport.scala @@ -39,10 +39,11 @@ private[remote] class AkkaProtocolSettings(config: Config) { import config._ val TransportFailureDetectorConfig: Config = getConfig("akka.remote.transport-failure-detector") - val TransportFailureDetectorImplementationClass: String = TransportFailureDetectorConfig.getString("implementation-class") + val TransportFailureDetectorImplementationClass: String = + TransportFailureDetectorConfig.getString("implementation-class") val TransportHeartBeatInterval: FiniteDuration = { TransportFailureDetectorConfig.getMillisDuration("heartbeat-interval") - } requiring (_ > Duration.Zero, "transport-failure-detector.heartbeat-interval must be > 0") + }.requiring(_ > 
Duration.Zero, "transport-failure-detector.heartbeat-interval must be > 0") val RequireCookie: Boolean = getBoolean("akka.remote.require-cookie") @@ -55,9 +56,9 @@ private[remote] class AkkaProtocolSettings(config: Config) { else if (enabledTransports.contains("akka.remote.netty.ssl")) config.getMillisDuration("akka.remote.netty.ssl.connection-timeout") else - config.getMillisDuration("akka.remote.handshake-timeout").requiring( - _ > Duration.Zero, - "handshake-timeout must be > 0") + config + .getMillisDuration("akka.remote.handshake-timeout") + .requiring(_ > Duration.Zero, "handshake-timeout must be > 0") } } @@ -66,10 +67,10 @@ private[remote] object AkkaProtocolTransport { //Couldn't these go into the Remo val AkkaOverhead: Int = 0 //Don't know yet val UniqueId = new java.util.concurrent.atomic.AtomicInteger(0) - final case class AssociateUnderlyingRefuseUid( - remoteAddress: Address, - statusPromise: Promise[AssociationHandle], - refuseUid: Option[Int]) extends NoSerializationVerificationNeeded + final case class AssociateUnderlyingRefuseUid(remoteAddress: Address, + statusPromise: Promise[AssociationHandle], + refuseUid: Option[Int]) + extends NoSerializationVerificationNeeded } final case class HandshakeInfo(origin: Address, uid: Int, cookie: Option[String]) @@ -97,11 +98,11 @@ final case class HandshakeInfo(origin: Address, uid: Int, cookie: Option[String] * @param codec * the codec that will be used to encode/decode Akka PDUs */ -private[remote] class AkkaProtocolTransport( - wrappedTransport: Transport, - private val system: ActorSystem, - private val settings: AkkaProtocolSettings, - private val codec: AkkaPduCodec) extends ActorTransportAdapter(wrappedTransport, system) { +private[remote] class AkkaProtocolTransport(wrappedTransport: Transport, + private val system: ActorSystem, + private val settings: AkkaProtocolSettings, + private val codec: AkkaPduCodec) + extends ActorTransportAdapter(wrappedTransport, system) { override val addedSchemeIdentifier: 
String = AkkaScheme @@ -125,10 +126,9 @@ private[remote] class AkkaProtocolTransport( } } -private[transport] class AkkaProtocolManager( - private val wrappedTransport: Transport, - private val settings: AkkaProtocolSettings) - extends ActorTransportAdapterManager { +private[transport] class AkkaProtocolManager(private val wrappedTransport: Transport, + private val settings: AkkaProtocolSettings) + extends ActorTransportAdapterManager { // The AkkaProtocolTransport does not handle the recovery of associations, this task is implemented in the // remoting itself. Hence the strategy Stop. @@ -145,13 +145,18 @@ private[transport] class AkkaProtocolManager( val stateActorAssociationHandler = associationListener val stateActorSettings = settings val failureDetector = createTransportFailureDetector() - context.actorOf(RARP(context.system).configureDispatcher(ProtocolStateActor.inboundProps( - HandshakeInfo(stateActorLocalAddress, AddressUidExtension(context.system).addressUid, stateActorSettings.SecureCookie), - handle, - stateActorAssociationHandler, - stateActorSettings, - AkkaPduProtobufCodec, - failureDetector)), actorNameFor(handle.remoteAddress)) + context.actorOf( + RARP(context.system).configureDispatcher( + ProtocolStateActor.inboundProps( + HandshakeInfo(stateActorLocalAddress, + AddressUidExtension(context.system).addressUid, + stateActorSettings.SecureCookie), + handle, + stateActorAssociationHandler, + stateActorSettings, + AkkaPduProtobufCodec, + failureDetector)), + actorNameFor(handle.remoteAddress)) case AssociateUnderlying(remoteAddress, statusPromise) => createOutboundStateActor(remoteAddress, statusPromise, None) @@ -160,24 +165,28 @@ private[transport] class AkkaProtocolManager( } - private def createOutboundStateActor( - remoteAddress: Address, - statusPromise: Promise[AssociationHandle], - refuseUid: Option[Int]): Unit = { + private def createOutboundStateActor(remoteAddress: Address, + statusPromise: Promise[AssociationHandle], + refuseUid: 
Option[Int]): Unit = { val stateActorLocalAddress = localAddress val stateActorSettings = settings val stateActorWrappedTransport = wrappedTransport val failureDetector = createTransportFailureDetector() - context.actorOf(RARP(context.system).configureDispatcher(ProtocolStateActor.outboundProps( - HandshakeInfo(stateActorLocalAddress, AddressUidExtension(context.system).addressUid, stateActorSettings.SecureCookie), - remoteAddress, - statusPromise, - stateActorWrappedTransport, - stateActorSettings, - AkkaPduProtobufCodec, - failureDetector, - refuseUid)), actorNameFor(remoteAddress)) + context.actorOf( + RARP(context.system).configureDispatcher( + ProtocolStateActor.outboundProps( + HandshakeInfo(stateActorLocalAddress, + AddressUidExtension(context.system).addressUid, + stateActorSettings.SecureCookie), + remoteAddress, + statusPromise, + stateActorWrappedTransport, + stateActorSettings, + AkkaPduProtobufCodec, + failureDetector, + refuseUid)), + actorNameFor(remoteAddress)) } private def createTransportFailureDetector(): FailureDetector = @@ -185,15 +194,14 @@ private[transport] class AkkaProtocolManager( } -private[remote] class AkkaProtocolHandle( - _localAddress: Address, - _remoteAddress: Address, - val readHandlerPromise: Promise[HandleEventListener], - _wrappedHandle: AssociationHandle, - val handshakeInfo: HandshakeInfo, - private val stateActor: ActorRef, - private val codec: AkkaPduCodec) - extends AbstractTransportAdapterHandle(_localAddress, _remoteAddress, _wrappedHandle, AkkaScheme) { +private[remote] class AkkaProtocolHandle(_localAddress: Address, + _remoteAddress: Address, + val readHandlerPromise: Promise[HandleEventListener], + _wrappedHandle: AssociationHandle, + val handshakeInfo: HandshakeInfo, + private val stateActor: ActorRef, + private val codec: AkkaPduCodec) + extends AbstractTransportAdapterHandle(_localAddress, _remoteAddress, _wrappedHandle, AkkaScheme) { override def write(payload: ByteString): Boolean = 
wrappedHandle.write(codec.constructPayload(payload)) @@ -238,60 +246,74 @@ private[transport] object ProtocolStateActor { trait InitialProtocolStateData extends ProtocolStateData // Neither the underlying, nor the provided transport is associated - final case class OutboundUnassociated(remoteAddress: Address, statusPromise: Promise[AssociationHandle], transport: Transport) - extends InitialProtocolStateData + final case class OutboundUnassociated(remoteAddress: Address, + statusPromise: Promise[AssociationHandle], + transport: Transport) + extends InitialProtocolStateData // The underlying transport is associated, but the handshake of the akka protocol is not yet finished - final case class OutboundUnderlyingAssociated(statusPromise: Promise[AssociationHandle], wrappedHandle: AssociationHandle) - extends ProtocolStateData + final case class OutboundUnderlyingAssociated(statusPromise: Promise[AssociationHandle], + wrappedHandle: AssociationHandle) + extends ProtocolStateData // The underlying transport is associated, but the handshake of the akka protocol is not yet finished final case class InboundUnassociated(associationListener: AssociationEventListener, wrappedHandle: AssociationHandle) - extends InitialProtocolStateData + extends InitialProtocolStateData // Both transports are associated, but the handler for the handle has not yet been provided - final case class AssociatedWaitHandler(handleListener: Future[HandleEventListener], wrappedHandle: AssociationHandle, + final case class AssociatedWaitHandler(handleListener: Future[HandleEventListener], + wrappedHandle: AssociationHandle, queue: immutable.Queue[ByteString]) - extends ProtocolStateData + extends ProtocolStateData final case class ListenerReady(listener: HandleEventListener, wrappedHandle: AssociationHandle) - extends ProtocolStateData + extends ProtocolStateData case class TimeoutReason(errorMessage: String) case object ForbiddenUidReason - private[remote] def outboundProps( - handshakeInfo: 
HandshakeInfo, - remoteAddress: Address, - statusPromise: Promise[AssociationHandle], - transport: Transport, - settings: AkkaProtocolSettings, - codec: AkkaPduCodec, - failureDetector: FailureDetector, - refuseUid: Option[Int]): Props = - Props(classOf[ProtocolStateActor], handshakeInfo, remoteAddress, statusPromise, transport, settings, codec, - failureDetector, refuseUid).withDeploy(Deploy.local) + private[remote] def outboundProps(handshakeInfo: HandshakeInfo, + remoteAddress: Address, + statusPromise: Promise[AssociationHandle], + transport: Transport, + settings: AkkaProtocolSettings, + codec: AkkaPduCodec, + failureDetector: FailureDetector, + refuseUid: Option[Int]): Props = + Props(classOf[ProtocolStateActor], + handshakeInfo, + remoteAddress, + statusPromise, + transport, + settings, + codec, + failureDetector, + refuseUid).withDeploy(Deploy.local) - private[remote] def inboundProps( - handshakeInfo: HandshakeInfo, - wrappedHandle: AssociationHandle, - associationListener: AssociationEventListener, - settings: AkkaProtocolSettings, - codec: AkkaPduCodec, - failureDetector: FailureDetector): Props = - Props(classOf[ProtocolStateActor], handshakeInfo, wrappedHandle, associationListener, settings, codec, - failureDetector).withDeploy(Deploy.local) + private[remote] def inboundProps(handshakeInfo: HandshakeInfo, + wrappedHandle: AssociationHandle, + associationListener: AssociationEventListener, + settings: AkkaProtocolSettings, + codec: AkkaPduCodec, + failureDetector: FailureDetector): Props = + Props(classOf[ProtocolStateActor], + handshakeInfo, + wrappedHandle, + associationListener, + settings, + codec, + failureDetector).withDeploy(Deploy.local) } -private[transport] class ProtocolStateActor( - initialData: InitialProtocolStateData, - private val localHandshakeInfo: HandshakeInfo, - private val refuseUid: Option[Int], - private val settings: AkkaProtocolSettings, - private val codec: AkkaPduCodec, - private val failureDetector: FailureDetector) - 
extends Actor with FSM[AssociationState, ProtocolStateData] - with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[transport] class ProtocolStateActor(initialData: InitialProtocolStateData, + private val localHandshakeInfo: HandshakeInfo, + private val refuseUid: Option[Int], + private val settings: AkkaProtocolSettings, + private val codec: AkkaPduCodec, + private val failureDetector: FailureDetector) + extends Actor + with FSM[AssociationState, ProtocolStateData] + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { private val markerLog = Logging.withMarker(this) @@ -299,27 +321,35 @@ private[transport] class ProtocolStateActor( import context.dispatcher // Outbound case - def this( - handshakeInfo: HandshakeInfo, - remoteAddress: Address, - statusPromise: Promise[AssociationHandle], - transport: Transport, - settings: AkkaProtocolSettings, - codec: AkkaPduCodec, - failureDetector: FailureDetector, - refuseUid: Option[Int]) = { - this(OutboundUnassociated(remoteAddress, statusPromise, transport), handshakeInfo, refuseUid, settings, codec, failureDetector) + def this(handshakeInfo: HandshakeInfo, + remoteAddress: Address, + statusPromise: Promise[AssociationHandle], + transport: Transport, + settings: AkkaProtocolSettings, + codec: AkkaPduCodec, + failureDetector: FailureDetector, + refuseUid: Option[Int]) = { + this(OutboundUnassociated(remoteAddress, statusPromise, transport), + handshakeInfo, + refuseUid, + settings, + codec, + failureDetector) } // Inbound case - def this( - handshakeInfo: HandshakeInfo, - wrappedHandle: AssociationHandle, - associationListener: AssociationEventListener, - settings: AkkaProtocolSettings, - codec: AkkaPduCodec, - failureDetector: FailureDetector) = { - this(InboundUnassociated(associationListener, wrappedHandle), handshakeInfo, refuseUid = None, settings, codec, failureDetector) + def this(handshakeInfo: HandshakeInfo, + wrappedHandle: AssociationHandle, + associationListener: AssociationEventListener, 
+ settings: AkkaProtocolSettings, + codec: AkkaPduCodec, + failureDetector: FailureDetector) = { + this(InboundUnassociated(associationListener, wrappedHandle), + handshakeInfo, + refuseUid = None, + settings, + codec, + failureDetector) } val localAddress = localHandshakeInfo.origin @@ -327,7 +357,7 @@ private[transport] class ProtocolStateActor( initialData match { case d: OutboundUnassociated => - d.transport.associate(d.remoteAddress).map(Handle(_)) pipeTo self + d.transport.associate(d.remoteAddress).map(Handle(_)).pipeTo(self) startWith(Closed, d) case d: InboundUnassociated => @@ -350,11 +380,14 @@ private[transport] class ProtocolStateActor( if (sendAssociate(wrappedHandle, localHandshakeInfo)) { failureDetector.heartbeat() initHeartbeatTimer() - goto(WaitHandshake) using OutboundUnderlyingAssociated(statusPromise, wrappedHandle) + goto(WaitHandshake).using(OutboundUnderlyingAssociated(statusPromise, wrappedHandle)) } else { // Underlying transport was busy -- Associate could not be sent - setTimer("associate-retry", Handle(wrappedHandle), RARP(context.system).provider.remoteSettings.BackoffPeriod, repeat = false) + setTimer("associate-retry", + Handle(wrappedHandle), + RARP(context.system).provider.remoteSettings.BackoffPeriod, + repeat = false) stay() } @@ -385,10 +418,10 @@ private[transport] class ProtocolStateActor( case Associate(handshakeInfo) => failureDetector.heartbeat() cancelTimer(handshakeTimerKey) - goto(Open) using AssociatedWaitHandler( - notifyOutboundHandler(wrappedHandle, handshakeInfo, statusPromise), - wrappedHandle, - immutable.Queue.empty) + goto(Open).using( + AssociatedWaitHandler(notifyOutboundHandler(wrappedHandle, handshakeInfo, statusPromise), + wrappedHandle, + immutable.Queue.empty)) case Disassociate(info) => // After receiving Disassociate we MUST NOT send back a Disassociate (loop) @@ -399,7 +432,8 @@ private[transport] class ProtocolStateActor( if (log.isDebugEnabled) log.debug( "Sending disassociate to [{}] because 
unexpected message of type [{}] was received during handshake", - wrappedHandle, msg.getClass.getName) + wrappedHandle, + msg.getClass.getName) sendDisassociate(wrappedHandle, Unknown) stop() @@ -420,17 +454,20 @@ private[transport] class ProtocolStateActor( failureDetector.heartbeat() initHeartbeatTimer() cancelTimer(handshakeTimerKey) - goto(Open) using AssociatedWaitHandler( - notifyInboundHandler(wrappedHandle, info, associationHandler), - wrappedHandle, - immutable.Queue.empty) + goto(Open).using( + AssociatedWaitHandler(notifyInboundHandler(wrappedHandle, info, associationHandler), + wrappedHandle, + immutable.Queue.empty)) } else { if (log.isDebugEnabled) - log.warning( - s"Association attempt with mismatching cookie from [{}]. Expected [{}] but received [{}].", - info.origin, localHandshakeInfo.cookie.getOrElse(""), info.cookie.getOrElse("")) + log.warning(s"Association attempt with mismatching cookie from [{}]. Expected [{}] but received [{}].", + info.origin, + localHandshakeInfo.cookie.getOrElse(""), + info.cookie.getOrElse("")) else - markerLog.warning(LogMarker.Security, s"Association attempt with mismatching cookie from [{}].", info.origin) + markerLog.warning(LogMarker.Security, + s"Association attempt with mismatching cookie from [{}].", + info.origin) stop() } @@ -439,7 +476,8 @@ private[transport] class ProtocolStateActor( if (log.isDebugEnabled) log.debug( "Sending disassociate to [{}] because unexpected message of type [{}] was received while unassociated", - wrappedHandle, msg.getClass.getName) + wrappedHandle, + msg.getClass.getName) sendDisassociate(wrappedHandle, Unknown) stop() @@ -447,22 +485,24 @@ private[transport] class ProtocolStateActor( case Event(HandshakeTimer, OutboundUnderlyingAssociated(_, wrappedHandle)) => if (log.isDebugEnabled) - log.debug( - "Sending disassociate to [{}] because handshake timed out for outbound association after [{}] ms.", - wrappedHandle, settings.HandshakeTimeout.toMillis) + log.debug("Sending 
disassociate to [{}] because handshake timed out for outbound association after [{}] ms.", + wrappedHandle, + settings.HandshakeTimeout.toMillis) sendDisassociate(wrappedHandle, Unknown) - stop(FSM.Failure(TimeoutReason("No response from remote for outbound association. Handshake timed out after " + + stop( + FSM.Failure(TimeoutReason("No response from remote for outbound association. Handshake timed out after " + s"[${settings.HandshakeTimeout.toMillis} ms]."))) case Event(HandshakeTimer, InboundUnassociated(_, wrappedHandle)) => if (log.isDebugEnabled) - log.debug( - "Sending disassociate to [{}] because handshake timed out for inbound association after [{}] ms.", - wrappedHandle, settings.HandshakeTimeout.toMillis) + log.debug("Sending disassociate to [{}] because handshake timed out for inbound association after [{}] ms.", + wrappedHandle, + settings.HandshakeTimeout.toMillis) sendDisassociate(wrappedHandle, Unknown) - stop(FSM.Failure(TimeoutReason("No response from remote for inbound association. Handshake timed out after " + + stop( + FSM.Failure(TimeoutReason("No response from remote for inbound association. 
Handshake timed out after " + s"[${settings.HandshakeTimeout.toMillis} ms]."))) } @@ -486,12 +526,13 @@ private[transport] class ProtocolStateActor( stateData match { case AssociatedWaitHandler(handlerFuture, wrappedHandle, queue) => // Queue message until handler is registered - stay() using AssociatedWaitHandler(handlerFuture, wrappedHandle, queue :+ payload) + stay().using(AssociatedWaitHandler(handlerFuture, wrappedHandle, queue :+ payload)) case ListenerReady(listener, _) => - listener notify InboundPayload(payload) + listener.notify(InboundPayload(payload)) stay() case msg => - throw new AkkaProtocolException(s"unhandled message in state Open(InboundPayload) with type [${safeClassName(msg)}]") + throw new AkkaProtocolException( + s"unhandled message in state Open(InboundPayload) with type [${safeClassName(msg)}]") } case _ => stay() @@ -505,7 +546,8 @@ private[transport] class ProtocolStateActor( case ListenerReady(_, wrappedHandle) => wrappedHandle case AssociatedWaitHandler(_, wrappedHandle, _) => wrappedHandle case msg => - throw new AkkaProtocolException(s"unhandled message in state Open(DisassociateUnderlying) with type [${safeClassName(msg)}]") + throw new AkkaProtocolException( + s"unhandled message in state Open(DisassociateUnderlying) with type [${safeClassName(msg)}]") } // No debug logging here as sending DisassociateUnderlying(Unknown) should have been logged from where // it was sent @@ -514,8 +556,10 @@ private[transport] class ProtocolStateActor( stop() case Event(HandleListenerRegistered(listener), AssociatedWaitHandler(_, wrappedHandle, queue)) => - queue.foreach {p => listener notify InboundPayload(p) } - stay() using ListenerReady(listener, wrappedHandle) + queue.foreach { p => + listener.notify(InboundPayload(p)) + } + stay().using(ListenerReady(listener, wrappedHandle)) } private def initHeartbeatTimer(): Unit = { @@ -532,13 +576,14 @@ private[transport] class ProtocolStateActor( stay() } else { if (log.isDebugEnabled) - log.debug( - 
"Sending disassociate to [{}] because failure detector triggered in state [{}]", - wrappedHandle, stateName) + log.debug("Sending disassociate to [{}] because failure detector triggered in state [{}]", + wrappedHandle, + stateName) // send disassociate just to be sure sendDisassociate(wrappedHandle, Unknown) - stop(FSM.Failure(TimeoutReason(s"No response from remote. " + + stop( + FSM.Failure(TimeoutReason(s"No response from remote. " + s"Transport failure detector triggered. (internal state was $stateName)"))) } } @@ -580,7 +625,7 @@ private[transport] class ProtocolStateActor( case FSM.Failure(info: DisassociateInfo) => Disassociated(info) case _ => Disassociated(Unknown) } - handlerFuture foreach { _ notify disassociateNotification } + handlerFuture.foreach { _.notify(disassociateNotification) } wrappedHandle.disassociate(disassociationReason(reason), log) case StopEvent(reason, _, ListenerReady(handler, wrappedHandle)) => @@ -588,7 +633,7 @@ private[transport] class ProtocolStateActor( case FSM.Failure(info: DisassociateInfo) => Disassociated(info) case _ => Disassociated(Unknown) } - handler notify disassociateNotification + handler.notify(disassociateNotification) wrappedHandle.disassociate(disassociationReason(reason), log) case StopEvent(reason, _, InboundUnassociated(_, wrappedHandle)) => @@ -602,7 +647,8 @@ private[transport] class ProtocolStateActor( case Shutdown => InvalidAssociationException("The remote system refused the association because it is shutting down.") case Quarantined => - InvalidAssociationException("The remote system has quarantined this system. No further associations to the remote " + + InvalidAssociationException( + "The remote system has quarantined this system. 
No further associations to the remote " + "system are possible until this system is restarted.") } @@ -615,67 +661,71 @@ private[transport] class ProtocolStateActor( } private def listenForListenerRegistration(readHandlerPromise: Promise[HandleEventListener]): Unit = - readHandlerPromise.future.map { HandleListenerRegistered(_) } pipeTo self + readHandlerPromise.future.map { HandleListenerRegistered(_) }.pipeTo(self) - private def notifyOutboundHandler( - wrappedHandle: AssociationHandle, - handshakeInfo: HandshakeInfo, - statusPromise: Promise[AssociationHandle]): Future[HandleEventListener] = { + private def notifyOutboundHandler(wrappedHandle: AssociationHandle, + handshakeInfo: HandshakeInfo, + statusPromise: Promise[AssociationHandle]): Future[HandleEventListener] = { val readHandlerPromise = Promise[HandleEventListener]() listenForListenerRegistration(readHandlerPromise) statusPromise.success( - new AkkaProtocolHandle( - localAddress, - wrappedHandle.remoteAddress, - readHandlerPromise, - wrappedHandle, - handshakeInfo, - self, - codec)) + new AkkaProtocolHandle(localAddress, + wrappedHandle.remoteAddress, + readHandlerPromise, + wrappedHandle, + handshakeInfo, + self, + codec)) readHandlerPromise.future } - private def notifyInboundHandler( - wrappedHandle: AssociationHandle, - handshakeInfo: HandshakeInfo, - associationListener: AssociationEventListener): Future[HandleEventListener] = { + private def notifyInboundHandler(wrappedHandle: AssociationHandle, + handshakeInfo: HandshakeInfo, + associationListener: AssociationEventListener): Future[HandleEventListener] = { val readHandlerPromise = Promise[HandleEventListener]() listenForListenerRegistration(readHandlerPromise) - associationListener notify InboundAssociation( - new AkkaProtocolHandle( - localAddress, - handshakeInfo.origin, - readHandlerPromise, - wrappedHandle, - handshakeInfo, - self, - codec)) + associationListener.notify( + InboundAssociation( + new AkkaProtocolHandle(localAddress, + 
handshakeInfo.origin, + readHandlerPromise, + wrappedHandle, + handshakeInfo, + self, + codec))) readHandlerPromise.future } - private def decodePdu(pdu: ByteString): AkkaPdu = try codec.decodePdu(pdu) catch { - case NonFatal(e) => throw new AkkaProtocolException("Error while decoding incoming Akka PDU of length: " + pdu.length, e) - } + private def decodePdu(pdu: ByteString): AkkaPdu = + try codec.decodePdu(pdu) + catch { + case NonFatal(e) => + throw new AkkaProtocolException("Error while decoding incoming Akka PDU of length: " + pdu.length, e) + } // Neither heartbeats neither disassociate cares about backing off if write fails: // - Missing heartbeats are not critical // - Disassociate messages are not guaranteed anyway - private def sendHeartbeat(wrappedHandle: AssociationHandle): Boolean = try wrappedHandle.write(codec.constructHeartbeat) catch { - case NonFatal(e) => throw new AkkaProtocolException("Error writing HEARTBEAT to transport", e) - } + private def sendHeartbeat(wrappedHandle: AssociationHandle): Boolean = + try wrappedHandle.write(codec.constructHeartbeat) + catch { + case NonFatal(e) => throw new AkkaProtocolException("Error writing HEARTBEAT to transport", e) + } private def sendDisassociate(wrappedHandle: AssociationHandle, info: DisassociateInfo): Unit = - try wrappedHandle.write(codec.constructDisassociate(info)) catch { + try wrappedHandle.write(codec.constructDisassociate(info)) + catch { case NonFatal(e) => throw new AkkaProtocolException("Error writing DISASSOCIATE to transport", e) } - private def sendAssociate(wrappedHandle: AssociationHandle, info: HandshakeInfo): Boolean = try { - wrappedHandle.write(codec.constructAssociate(info)) - } catch { - case NonFatal(e) => throw new AkkaProtocolException("Error writing ASSOCIATE to transport", e) - } + private def sendAssociate(wrappedHandle: AssociationHandle, info: HandshakeInfo): Boolean = + try { + wrappedHandle.write(codec.constructAssociate(info)) + } catch { + case NonFatal(e) => throw 
new AkkaProtocolException("Error writing ASSOCIATE to transport", e) + } private def disassociationReason(reason: FSM.Reason): String = reason match { case FSM.Normal => "the ProtocolStateActor was stopped normally" diff --git a/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala index 39f2c9f886..1ee57e5f54 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/FailureInjectorTransportAdapter.scala @@ -42,6 +42,7 @@ private[remote] object FailureInjectorTransportAdapter { sealed trait GremlinMode @SerialVersionUID(1L) case object PassThru extends GremlinMode { + /** * Java API: get the singleton instance */ @@ -54,8 +55,10 @@ private[remote] object FailureInjectorTransportAdapter { /** * INTERNAL API */ -private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transport, val extendedSystem: ExtendedActorSystem) - extends AbstractTransportAdapter(wrappedTransport)(extendedSystem.dispatcher) with AssociationEventListener { +private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transport, + val extendedSystem: ExtendedActorSystem) + extends AbstractTransportAdapter(wrappedTransport)(extendedSystem.dispatcher) + with AssociationEventListener { private def rng = ThreadLocalRandom.current() private val log = Logging(extendedSystem, getClass.getName) @@ -79,22 +82,24 @@ private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transpor case _ => wrappedTransport.managementCommand(cmd) } - protected def interceptListen( - listenAddress: Address, - listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener] = { + protected def interceptListen(listenAddress: Address, + listenerFuture: Future[AssociationEventListener]): Future[AssociationEventListener] = { 
log.warning("FailureInjectorTransport is active on this system. Gremlins might munch your packets.") listenerFuture.foreach { // Side effecting: As this class is not an actor, the only way to safely modify state is through volatile vars. // Listen is called only during the initialization of the stack, and upstreamListener is not read before this // finishes. - listener => upstreamListener = Some(listener) + listener => + upstreamListener = Some(listener) } Future.successful(this) } protected def interceptAssociate(remoteAddress: Address, statusPromise: Promise[AssociationHandle]): Unit = { // Association is simulated to be failed if there was either an inbound or outbound message drop - if (shouldDropInbound(remoteAddress, Unit, "interceptAssociate") || shouldDropOutbound(remoteAddress, Unit, "interceptAssociate")) + if (shouldDropInbound(remoteAddress, Unit, "interceptAssociate") || shouldDropOutbound(remoteAddress, + Unit, + "interceptAssociate")) statusPromise.failure(new FailureInjectorException("Simulated failure of association to " + remoteAddress)) else statusPromise.completeWith(wrappedTransport.associate(remoteAddress).map { handle => @@ -105,10 +110,11 @@ private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transpor def notify(ev: AssociationEvent): Unit = ev match { case InboundAssociation(handle) if shouldDropInbound(handle.remoteAddress, ev, "notify") => //Ignore - case _ => upstreamListener match { - case Some(listener) => listener notify interceptInboundAssociation(ev) - case None => - } + case _ => + upstreamListener match { + case Some(listener) => listener.notify(interceptInboundAssociation(ev)) + case None => + } } def interceptInboundAssociation(ev: AssociationEvent): AssociationEvent = ev match { @@ -116,23 +122,27 @@ private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transpor case _ => ev } - def shouldDropInbound(remoteAddress: Address, instance: Any, debugMessage: String): Boolean = 
chaosMode(remoteAddress) match { - case PassThru => false - case Drop(_, inboundDropP) => - if (rng.nextDouble() <= inboundDropP) { - if (shouldDebugLog) log.debug("Dropping inbound [{}] for [{}] {}", instance.getClass, remoteAddress, debugMessage) - true - } else false - } + def shouldDropInbound(remoteAddress: Address, instance: Any, debugMessage: String): Boolean = + chaosMode(remoteAddress) match { + case PassThru => false + case Drop(_, inboundDropP) => + if (rng.nextDouble() <= inboundDropP) { + if (shouldDebugLog) + log.debug("Dropping inbound [{}] for [{}] {}", instance.getClass, remoteAddress, debugMessage) + true + } else false + } - def shouldDropOutbound(remoteAddress: Address, instance: Any, debugMessage: String): Boolean = chaosMode(remoteAddress) match { - case PassThru => false - case Drop(outboundDropP, _) => - if (rng.nextDouble() <= outboundDropP) { - if (shouldDebugLog) log.debug("Dropping outbound [{}] for [{}] {}", instance.getClass, remoteAddress, debugMessage) - true - } else false - } + def shouldDropOutbound(remoteAddress: Address, instance: Any, debugMessage: String): Boolean = + chaosMode(remoteAddress) match { + case PassThru => false + case Drop(outboundDropP, _) => + if (rng.nextDouble() <= outboundDropP) { + if (shouldDebugLog) + log.debug("Dropping outbound [{}] for [{}] {}", instance.getClass, remoteAddress, debugMessage) + true + } else false + } def chaosMode(remoteAddress: Address): GremlinMode = { val mode = addressChaosTable.get(remoteAddress.copy(protocol = "", system = "")) @@ -143,24 +153,23 @@ private[remote] class FailureInjectorTransportAdapter(wrappedTransport: Transpor /** * INTERNAL API */ -private[remote] final case class FailureInjectorHandle( - _wrappedHandle: AssociationHandle, - private val gremlinAdapter: FailureInjectorTransportAdapter) - extends AbstractTransportAdapterHandle(_wrappedHandle, FailureInjectorSchemeIdentifier) - with HandleEventListener { +private[remote] final case class 
FailureInjectorHandle(_wrappedHandle: AssociationHandle, + private val gremlinAdapter: FailureInjectorTransportAdapter) + extends AbstractTransportAdapterHandle(_wrappedHandle, FailureInjectorSchemeIdentifier) + with HandleEventListener { import gremlinAdapter.extendedSystem.dispatcher @volatile private var upstreamListener: HandleEventListener = null override val readHandlerPromise: Promise[HandleEventListener] = Promise() - readHandlerPromise.future.foreach { - listener => - upstreamListener = listener - wrappedHandle.readHandlerPromise.success(this) + readHandlerPromise.future.foreach { listener => + upstreamListener = listener + wrappedHandle.readHandlerPromise.success(this) } override def write(payload: ByteString): Boolean = - if (!gremlinAdapter.shouldDropOutbound(wrappedHandle.remoteAddress, payload, "handler.write")) wrappedHandle.write(payload) + if (!gremlinAdapter.shouldDropOutbound(wrappedHandle.remoteAddress, payload, "handler.write")) + wrappedHandle.write(payload) else true override def disassociate(reason: String, log: LoggingAdapter): Unit = @@ -171,6 +180,6 @@ private[remote] final case class FailureInjectorHandle( override def notify(ev: HandleEvent): Unit = if (!gremlinAdapter.shouldDropInbound(wrappedHandle.remoteAddress, ev, "handler.notify")) - upstreamListener notify ev + upstreamListener.notify(ev) } diff --git a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala index fb34301ecf..66f9ee04e7 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/TestTransport.scala @@ -4,7 +4,7 @@ package akka.remote.transport -import java.util.concurrent.{ CopyOnWriteArrayList, ConcurrentHashMap } +import java.util.concurrent.{ ConcurrentHashMap, CopyOnWriteArrayList } import akka.actor._ import akka.remote.transport.AssociationHandle._ @@ -25,18 +25,17 @@ import 
scala.concurrent.ExecutionContext.Implicits.global * requested to do. This class is not optimized for performance and MUST not be used as an in-memory transport in * production systems. */ -class TestTransport( - val localAddress: Address, - final val registry: AssociationRegistry, - val maximumPayloadBytes: Int = 32000, - val schemeIdentifier: String = "test") extends Transport { +class TestTransport(val localAddress: Address, + final val registry: AssociationRegistry, + val maximumPayloadBytes: Int = 32000, + val schemeIdentifier: String = "test") + extends Transport { def this(system: ExtendedActorSystem, conf: Config) = { - this( - AddressFromURIString(conf.getString("local-address")), - AssociationRegistry.get(conf.getString("registry-key")), - conf.getBytes("maximum-payload-bytes").toInt, - conf.getString("scheme-identifier")) + this(AddressFromURIString(conf.getString("local-address")), + AssociationRegistry.get(conf.getString("registry-key")), + conf.getBytes("maximum-payload-bytes").toInt, + conf.getString("scheme-identifier")) } override def isResponsibleFor(address: Address): Boolean = true @@ -57,9 +56,9 @@ class TestTransport( remoteHandle.writable = false // Pass a non-writable handle to remote first - remoteListenerFuture flatMap { + remoteListenerFuture.flatMap { case listener => - listener notify InboundAssociation(remoteHandle) + listener.notify(InboundAssociation(remoteHandle)) val remoteHandlerFuture = remoteHandle.readHandlerPromise.future // Registration of reader at local finishes the registration and enables communication @@ -72,7 +71,9 @@ class TestTransport( remoteHandle.writable = true } - remoteHandlerFuture.map { _ => localHandle } + remoteHandlerFuture.map { _ => + localHandle + } } case None => @@ -80,7 +81,8 @@ class TestTransport( } } - private def createHandlePair(remoteTransport: TestTransport, remoteAddress: Address): (TestAssociationHandle, TestAssociationHandle) = { + private def createHandlePair(remoteTransport: TestTransport, 
+ remoteAddress: Address): (TestAssociationHandle, TestAssociationHandle) = { val localHandle = new TestAssociationHandle(localAddress, remoteAddress, this, inbound = false) val remoteHandle = new TestAssociationHandle(remoteAddress, localAddress, remoteTransport, inbound = true) @@ -119,7 +121,7 @@ class TestTransport( private def defaultWrite(params: (TestAssociationHandle, ByteString)): Future[Boolean] = { registry.getRemoteReadHandlerFor(params._1) match { case Some(listener) => - listener notify InboundPayload(params._2) + listener.notify(InboundPayload(params._2)) Future.successful(true) case None => Future.failed(new IllegalStateException("No association present")) @@ -128,7 +130,7 @@ class TestTransport( private def defaultDisassociate(handle: TestAssociationHandle): Future[Unit] = { registry.deregisterAssociation(handle.key).foreach { - registry.remoteListenerRelativeTo(handle, _) notify Disassociated(AssociationHandle.Unknown) + registry.remoteListenerRelativeTo(handle, _).notify(Disassociated(AssociationHandle.Unknown)) } Future.successful(()) } @@ -139,27 +141,22 @@ class TestTransport( * altering the behavior via pushDelayed will turn write to a blocking operation -- use of pushDelayed therefore * is not recommended. */ - val writeBehavior = new SwitchableLoggedBehavior[(TestAssociationHandle, ByteString), Boolean]( - defaultBehavior = { - defaultWrite _ - }, - logCallback = { - case (handle, payload) => - registry.logActivity(WriteAttempt(handle.localAddress, handle.remoteAddress, payload)) - }) + val writeBehavior = new SwitchableLoggedBehavior[(TestAssociationHandle, ByteString), Boolean](defaultBehavior = { + defaultWrite _ + }, logCallback = { + case (handle, payload) => + registry.logActivity(WriteAttempt(handle.localAddress, handle.remoteAddress, payload)) + }) /** * The [[akka.remote.transport.TestTransport.SwitchableLoggedBehavior]] for the disassociate() method on handles. All * handle calls pass through this call. 
*/ - val disassociateBehavior = new SwitchableLoggedBehavior[TestAssociationHandle, Unit]( - defaultBehavior = { - defaultDisassociate _ - }, - logCallback = { - (handle) => - registry.logActivity(DisassociateAttempt(handle.localAddress, handle.remoteAddress)) - }) + val disassociateBehavior = new SwitchableLoggedBehavior[TestAssociationHandle, Unit](defaultBehavior = { + defaultDisassociate _ + }, logCallback = { (handle) => + registry.logActivity(DisassociateAttempt(handle.localAddress, handle.remoteAddress)) + }) private[akka] def write(handle: TestAssociationHandle, payload: ByteString): Boolean = Await.result(writeBehavior((handle, payload)), 3.seconds) @@ -195,7 +192,8 @@ object TestTransport { * type parameter B: * - Type parameter of the future that the original function returns. */ - class SwitchableLoggedBehavior[A, B](defaultBehavior: Behavior[A, B], logCallback: (A) => Unit) extends Behavior[A, B] { + class SwitchableLoggedBehavior[A, B](defaultBehavior: Behavior[A, B], logCallback: (A) => Unit) + extends Behavior[A, B] { private val behaviorStack = new CopyOnWriteArrayList[Behavior[A, B]]() behaviorStack.add(0, defaultBehavior) @@ -216,8 +214,8 @@ object TestTransport { * @param c * The constant the future will be completed with. */ - def pushConstant(c: B): Unit = push { - _ => Future.successful(c) + def pushConstant(c: B): Unit = push { _ => + Future.successful(c) } /** @@ -226,8 +224,8 @@ object TestTransport { * @param e * The throwable the failed future will contain. 
*/ - def pushError(e: Throwable): Unit = push { - _ => Future.failed(e) + def pushError(e: Throwable): Unit = push { _ => + Future.failed(e) } /** @@ -241,8 +239,7 @@ object TestTransport { val controlPromise: Promise[Unit] = Promise() val originalBehavior = currentBehavior - push( - (params: A) => controlPromise.future.flatMap(_ => originalBehavior(params))) + push((params: A) => controlPromise.future.flatMap(_ => originalBehavior(params))) controlPromise } @@ -299,9 +296,8 @@ object TestTransport { * @param listenerPair pair of listeners in initiator, receiver order. * @return */ - def remoteListenerRelativeTo( - handle: TestAssociationHandle, - listenerPair: (HandleEventListener, HandleEventListener)): HandleEventListener = { + def remoteListenerRelativeTo(handle: TestAssociationHandle, + listenerPair: (HandleEventListener, HandleEventListener)): HandleEventListener = { listenerPair match { case (initiator, receiver) => if (handle.inbound) initiator else receiver } @@ -345,7 +341,8 @@ object TestTransport { * @param associationEventListenerFuture * The future that will be completed with the listener that will handle the events for the given transport. */ - def registerTransport(transport: TestTransport, associationEventListenerFuture: Future[AssociationEventListener]): Unit = { + def registerTransport(transport: TestTransport, + associationEventListenerFuture: Future[AssociationEventListener]): Unit = { transportTable.put(transport.localAddress, (transport, associationEventListenerFuture)) } @@ -359,7 +356,7 @@ object TestTransport { * True if all transports are successfully registered. */ def transportsReady(addresses: Address*): Boolean = { - addresses forall { + addresses.forall { transportTable.containsKey(_) } } @@ -407,7 +404,7 @@ object TestTransport { * @return The option that contains the Future for the listener if exists. 
*/ def getRemoteReadHandlerFor(localHandle: TestAssociationHandle): Option[HandleEventListener] = { - Option(listenersTable.get(localHandle.key)) map { remoteListenerRelativeTo(localHandle, _) } + Option(listenersTable.get(localHandle.key)).map { remoteListenerRelativeTo(localHandle, _) } } /** @@ -437,7 +434,7 @@ object TestTransport { of the shared instance must happen during the startup time of the actor system. Association registries are looked up via a string key. Until we find a better way to inject an AssociationRegistry to multiple actor systems it is strongly recommended to use long, randomly generated strings to key the registry to avoid interference between tests. -*/ + */ object AssociationRegistry { private final val registries = scala.collection.mutable.Map[String, AssociationRegistry]() @@ -448,11 +445,11 @@ object AssociationRegistry { def clear(): Unit = this.synchronized { registries.clear() } } -final case class TestAssociationHandle( - localAddress: Address, - remoteAddress: Address, - transport: TestTransport, - inbound: Boolean) extends AssociationHandle { +final case class TestAssociationHandle(localAddress: Address, + remoteAddress: Address, + transport: TestTransport, + inbound: Boolean) + extends AssociationHandle { @volatile var writable = true diff --git a/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala b/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala index 458e9f420c..1d9d01e427 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/ThrottlerTransportAdapter.scala @@ -5,10 +5,16 @@ package akka.remote.transport import akka.actor._ -import akka.pattern.{ PromiseActorRef, ask, pipe } +import akka.pattern.{ ask, pipe, PromiseActorRef } import akka.remote.transport.ActorTransportAdapter.AssociateUnderlying import akka.remote.transport.AkkaPduCodec.Associate -import 
akka.remote.transport.AssociationHandle.{ ActorHandleEventListener, DisassociateInfo, Disassociated, HandleEventListener, InboundPayload } +import akka.remote.transport.AssociationHandle.{ + ActorHandleEventListener, + DisassociateInfo, + Disassociated, + HandleEventListener, + InboundPayload +} import akka.remote.transport.ThrottlerManager.{ Checkin, Handle, Listener, ListenerAndMode } import akka.remote.transport.ThrottlerTransportAdapter._ import akka.remote.transport.Transport._ @@ -86,6 +92,7 @@ object ThrottlerTransportAdapter { @SerialVersionUID(1L) case object SetThrottleAck { + /** * Java API: get the singleton instance */ @@ -99,7 +106,7 @@ object ThrottlerTransportAdapter { @SerialVersionUID(1L) final case class TokenBucket(capacity: Int, tokensPerSecond: Double, nanoTimeOfLastSend: Long, availableTokens: Int) - extends ThrottleMode { + extends ThrottleMode { private def isAvailable(nanoTimeOfSend: Long, tokens: Int): Boolean = if ((tokens > capacity && availableTokens > 0)) { @@ -108,9 +115,9 @@ object ThrottlerTransportAdapter { override def tryConsumeTokens(nanoTimeOfSend: Long, tokens: Int): (ThrottleMode, Boolean) = { if (isAvailable(nanoTimeOfSend, tokens)) - (this.copy( - nanoTimeOfLastSend = nanoTimeOfSend, - availableTokens = min(availableTokens - tokens + tokensGenerated(nanoTimeOfSend), capacity)), true) + (this.copy(nanoTimeOfLastSend = nanoTimeOfSend, + availableTokens = min(availableTokens - tokens + tokensGenerated(nanoTimeOfSend), capacity)), + true) else (this, false) } @@ -160,6 +167,7 @@ object ThrottlerTransportAdapter { @SerialVersionUID(1L) case object ForceDisassociateAck { + /** * Java API: get the singleton instance */ @@ -192,11 +200,13 @@ object ThrottlerTransportAdapter { def unthrottledThrottleMode(): ThrottleMode = Unthrottled } -class ThrottlerTransportAdapter(_wrappedTransport: Transport, _system: ExtendedActorSystem) extends ActorTransportAdapter(_wrappedTransport, _system) { +class 
ThrottlerTransportAdapter(_wrappedTransport: Transport, _system: ExtendedActorSystem) + extends ActorTransportAdapter(_wrappedTransport, _system) { override protected def addedSchemeIdentifier = SchemeIdentifier override protected def maximumOverhead = 0 - protected def managerName: String = s"throttlermanager.${wrappedTransport.schemeIdentifier}${UniqueId.getAndIncrement}" + protected def managerName: String = + s"throttlermanager.${wrappedTransport.schemeIdentifier}${UniqueId.getAndIncrement}" protected def managerProps: Props = { val wt = wrappedTransport Props(classOf[ThrottlerManager], wt) @@ -205,9 +215,9 @@ class ThrottlerTransportAdapter(_wrappedTransport: Transport, _system: ExtendedA override def managementCommand(cmd: Any): Future[Boolean] = { import ActorTransportAdapter.AskTimeout cmd match { - case s: SetThrottle => manager ? s map { case SetThrottleAck => true } - case f: ForceDisassociate => manager ? f map { case ForceDisassociateAck => true } - case f: ForceDisassociateExplicitly => manager ? f map { case ForceDisassociateAck => true } + case s: SetThrottle => (manager ? s).map { case SetThrottleAck => true } + case f: ForceDisassociate => (manager ? f).map { case ForceDisassociateAck => true } + case f: ForceDisassociateExplicitly => (manager ? 
f).map { case ForceDisassociateAck => true } case _ => wrappedTransport.managementCommand(cmd) } } @@ -220,9 +230,10 @@ private[transport] object ThrottlerManager { final case class Checkin(origin: Address, handle: ThrottlerHandle) extends NoSerializationVerificationNeeded final case class AssociateResult(handle: AssociationHandle, statusPromise: Promise[AssociationHandle]) - extends NoSerializationVerificationNeeded + extends NoSerializationVerificationNeeded - final case class ListenerAndMode(listener: HandleEventListener, mode: ThrottleMode) extends NoSerializationVerificationNeeded + final case class ListenerAndMode(listener: HandleEventListener, mode: ThrottleMode) + extends NoSerializationVerificationNeeded final case class Handle(handle: ThrottlerHandle) extends NoSerializationVerificationNeeded @@ -233,7 +244,8 @@ private[transport] object ThrottlerManager { * INTERNAL API */ private[transport] class ThrottlerManager(wrappedTransport: Transport) - extends ActorTransportAdapterManager with ActorLogging { + extends ActorTransportAdapterManager + with ActorLogging { import ThrottlerManager._ import context.dispatcher @@ -248,7 +260,7 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport) val wrappedHandle = wrapHandle(handle, associationListener, inbound = true) wrappedHandle.throttlerActor ! Handle(wrappedHandle) case AssociateUnderlying(remoteAddress, statusPromise) => - wrappedTransport.associate(remoteAddress) onComplete { + wrappedTransport.associate(remoteAddress).onComplete { // Slight modification of pipe, only success is sent, failure is propagated to a separate future case Success(handle) => self ! 
AssociateResult(handle, statusPromise) case Failure(e) => statusPromise.failure(e) @@ -259,27 +271,30 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport) val naked = nakedAddress(handle.remoteAddress) val inMode = getInboundMode(naked) wrappedHandle.outboundThrottleMode.set(getOutboundMode(naked)) - wrappedHandle.readHandlerPromise.future map { ListenerAndMode(_, inMode) } pipeTo wrappedHandle.throttlerActor + wrappedHandle.readHandlerPromise.future.map { ListenerAndMode(_, inMode) }.pipeTo(wrappedHandle.throttlerActor) handleTable ::= naked -> wrappedHandle statusPromise.success(wrappedHandle) case SetThrottle(address, direction, mode) => val naked = nakedAddress(address) throttlingModes = throttlingModes.updated(naked, (mode, direction)) val ok = Future.successful(SetThrottleAck) - Future.sequence(handleTable map { - case (`naked`, handle) => setMode(handle, mode, direction) - case _ => ok - }).map(_ => SetThrottleAck) pipeTo sender() + Future + .sequence(handleTable.map { + case (`naked`, handle) => setMode(handle, mode, direction) + case _ => ok + }) + .map(_ => SetThrottleAck) + .pipeTo(sender()) case ForceDisassociate(address) => val naked = nakedAddress(address) - handleTable foreach { + handleTable.foreach { case (`naked`, handle) => handle.disassociate(s"the disassociation was forced by ${sender()}", log) case _ => } sender() ! 
ForceDisassociateAck case ForceDisassociateExplicitly(address, reason) => val naked = nakedAddress(address) - handleTable foreach { + handleTable.foreach { case (`naked`, handle) => handle.disassociateWithFailure(reason) case _ => } @@ -295,14 +310,14 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport) private def getInboundMode(nakedAddress: Address): ThrottleMode = { throttlingModes.get(nakedAddress) match { case Some((mode, direction)) if direction.includes(Direction.Receive) => mode - case _ => Unthrottled + case _ => Unthrottled } } private def getOutboundMode(nakedAddress: Address): ThrottleMode = { throttlingModes.get(nakedAddress) match { case Some((mode, direction)) if direction.includes(Direction.Send) => mode - case _ => Unthrottled + case _ => Unthrottled } } @@ -313,7 +328,9 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport) } } - private def setMode(handle: ThrottlerHandle, mode: ThrottleMode, direction: Direction): Future[SetThrottleAck.type] = { + private def setMode(handle: ThrottlerHandle, + mode: ThrottleMode, + direction: Direction): Future[SetThrottleAck.type] = { if (direction.includes(Direction.Send)) handle.outboundThrottleMode.set(mode) if (direction.includes(Direction.Receive)) @@ -322,8 +339,9 @@ private[transport] class ThrottlerManager(wrappedTransport: Transport) Future.successful(SetThrottleAck) } - private def askModeWithDeathCompletion(target: ActorRef, mode: ThrottleMode)(implicit timeout: Timeout): Future[SetThrottleAck.type] = { - if (target.isTerminated) Future successful SetThrottleAck + private def askModeWithDeathCompletion(target: ActorRef, mode: ThrottleMode)( + implicit timeout: Timeout): Future[SetThrottleAck.type] = { + if (target.isTerminated) Future.successful(SetThrottleAck) else { val internalTarget = target.asInstanceOf[InternalActorRef] val ref = PromiseActorRef(internalTarget.provider, timeout, target, mode.getClass.getName) @@ -336,14 +354,17 @@ private[transport] 
class ThrottlerManager(wrappedTransport: Transport) } } - private def wrapHandle(originalHandle: AssociationHandle, listener: AssociationEventListener, inbound: Boolean): ThrottlerHandle = { + private def wrapHandle(originalHandle: AssociationHandle, + listener: AssociationEventListener, + inbound: Boolean): ThrottlerHandle = { val managerRef = self - ThrottlerHandle( - originalHandle, - context.actorOf( - RARP(context.system).configureDispatcher( - Props(classOf[ThrottledAssociation], managerRef, listener, originalHandle, inbound)).withDeploy(Deploy.local), - "throttler" + nextId())) + ThrottlerHandle(originalHandle, + context.actorOf( + RARP(context.system) + .configureDispatcher( + Props(classOf[ThrottledAssociation], managerRef, listener, originalHandle, inbound)) + .withDeploy(Deploy.local), + "throttler" + nextId())) } } @@ -387,13 +408,13 @@ private[transport] object ThrottledAssociation { /** * INTERNAL API */ -private[transport] class ThrottledAssociation( - val manager: ActorRef, - val associationHandler: AssociationEventListener, - val originalHandle: AssociationHandle, - val inbound: Boolean) - extends Actor with LoggingFSM[ThrottledAssociation.ThrottlerState, ThrottledAssociation.ThrottlerData] - with RequiresMessageQueue[UnboundedMessageQueueSemantics] { +private[transport] class ThrottledAssociation(val manager: ActorRef, + val associationHandler: AssociationEventListener, + val originalHandle: AssociationHandle, + val inbound: Boolean) + extends Actor + with LoggingFSM[ThrottledAssociation.ThrottlerState, ThrottledAssociation.ThrottlerData] + with RequiresMessageQueue[UnboundedMessageQueueSemantics] { import ThrottledAssociation._ import context.dispatcher @@ -403,7 +424,8 @@ private[transport] class ThrottledAssociation( override def postStop(): Unit = originalHandle.disassociate("the owning ThrottledAssociation stopped", log) - if (inbound) startWith(WaitExposedHandle, Uninitialized) else { + if (inbound) startWith(WaitExposedHandle, 
Uninitialized) + else { originalHandle.readHandlerPromise.success(ActorHandleEventListener(self)) startWith(WaitModeAndUpstreamListener, Uninitialized) } @@ -412,12 +434,12 @@ private[transport] class ThrottledAssociation( case Event(Handle(handle), Uninitialized) => // register to downstream layer and wait for origin originalHandle.readHandlerPromise.success(ActorHandleEventListener(self)) - goto(WaitOrigin) using ExposedHandle(handle) + goto(WaitOrigin).using(ExposedHandle(handle)) } when(WaitOrigin) { case Event(InboundPayload(p), ExposedHandle(exposedHandle)) => - throttledMessages = throttledMessages enqueue p + throttledMessages = throttledMessages.enqueue(p) peekOrigin(p) match { case Some(origin) => manager ! Checkin(origin, exposedHandle) @@ -428,7 +450,7 @@ private[transport] class ThrottledAssociation( when(WaitMode) { case Event(InboundPayload(p), _) => - throttledMessages = throttledMessages enqueue p + throttledMessages = throttledMessages.enqueue(p) stay() case Event(mode: ThrottleMode, ExposedHandle(exposedHandle)) => inboundThrottleMode = mode @@ -437,15 +459,15 @@ private[transport] class ThrottledAssociation( exposedHandle.disassociate("the association was blackholed", log) stop() } else { - associationHandler notify InboundAssociation(exposedHandle) - exposedHandle.readHandlerPromise.future.map(Listener(_)) pipeTo self + associationHandler.notify(InboundAssociation(exposedHandle)) + exposedHandle.readHandlerPromise.future.map(Listener(_)).pipeTo(self) goto(WaitUpstreamListener) } finally sender() ! SetThrottleAck } when(WaitUpstreamListener) { case Event(InboundPayload(p), _) => - throttledMessages = throttledMessages enqueue p + throttledMessages = throttledMessages.enqueue(p) stay() case Event(Listener(listener), _) => upstreamListener = listener @@ -460,7 +482,7 @@ private[transport] class ThrottledAssociation( self ! 
Dequeue goto(Throttling) case Event(InboundPayload(p), _) => - throttledMessages = throttledMessages enqueue p + throttledMessages = throttledMessages.enqueue(p) stay() } @@ -480,7 +502,7 @@ private[transport] class ThrottledAssociation( case Event(Dequeue, _) => if (throttledMessages.nonEmpty) { val (payload, newqueue) = throttledMessages.dequeue - upstreamListener notify InboundPayload(payload) + upstreamListener.notify(InboundPayload(payload)) throttledMessages = newqueue inboundThrottleMode = inboundThrottleMode.tryConsumeTokens(System.nanoTime(), payload.length)._1 if (throttledMessages.nonEmpty) @@ -499,7 +521,7 @@ private[transport] class ThrottledAssociation( case Event(Disassociated(_), _) => stop() // not notifying the upstream handler is intentional: we are relying on heartbeating case Event(FailWith(reason), _) => - if (upstreamListener ne null) upstreamListener notify Disassociated(reason) + if (upstreamListener ne null) upstreamListener.notify(Disassociated(reason)) stop() } @@ -526,7 +548,7 @@ private[transport] class ThrottledAssociation( val (newbucket, success) = inboundThrottleMode.tryConsumeTokens(System.nanoTime(), tokens) if (success) { inboundThrottleMode = newbucket - upstreamListener notify InboundPayload(payload) + upstreamListener.notify(InboundPayload(payload)) } else { throttledMessages = throttledMessages.enqueue(payload) scheduleDequeue(inboundThrottleMode.timeToAvailable(System.nanoTime(), tokens)) @@ -549,7 +571,7 @@ private[transport] class ThrottledAssociation( * INTERNAL API */ private[transport] final case class ThrottlerHandle(_wrappedHandle: AssociationHandle, throttlerActor: ActorRef) - extends AbstractTransportAdapterHandle(_wrappedHandle, SchemeIdentifier) { + extends AbstractTransportAdapterHandle(_wrappedHandle, SchemeIdentifier) { private[transport] val outboundThrottleMode = new AtomicReference[ThrottleMode](Unthrottled) diff --git a/akka-remote/src/main/scala/akka/remote/transport/Transport.scala 
b/akka-remote/src/main/scala/akka/remote/transport/Transport.scala index b232052f65..6b69a1c07d 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/Transport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/Transport.scala @@ -8,7 +8,7 @@ import scala.concurrent.{ Future, Promise } import scala.util.control.NoStackTrace import akka.actor.{ ActorRef, Address, NoSerializationVerificationNeeded } -import akka.util.{ ByteString, unused } +import akka.util.{ unused, ByteString } import akka.remote.transport.AssociationHandle.HandleEventListener import akka.AkkaException import akka.actor.DeadLetterSuppression @@ -23,7 +23,9 @@ object Transport { * hostname, etc.). */ @SerialVersionUID(1L) - final case class InvalidAssociationException(msg: String, cause: Throwable = null) extends AkkaException(msg, cause) with NoStackTrace + final case class InvalidAssociationException(msg: String, cause: Throwable = null) + extends AkkaException(msg, cause) + with NoStackTrace /** * Message sent to a [[akka.remote.transport.Transport.AssociationEventListener]] registered to a transport @@ -187,6 +189,7 @@ object AssociationHandle { * to listen to association events. */ trait HandleEventListener { + /** * Called by the transport to notify the listener about a HandleEvent * @param ev The HandleEvent of the handle @@ -264,7 +267,8 @@ trait AssociationHandle { * could be called arbitrarily many times. 
* */ - @deprecated(message = "Use method that states reasons to make sure disassociation reasons are logged.", since = "2.5.3") + @deprecated(message = "Use method that states reasons to make sure disassociation reasons are logged.", + since = "2.5.3") def disassociate(): Unit /** @@ -275,11 +279,11 @@ trait AssociationHandle { */ def disassociate(reason: String, log: LoggingAdapter): Unit = { if (log.isDebugEnabled) - log.debug( - "Association between local [{}] and remote [{}] was disassociated because {}", - localAddress, remoteAddress, reason) + log.debug("Association between local [{}] and remote [{}] was disassociated because {}", + localAddress, + remoteAddress, + reason) disassociate() } } - diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyHelpers.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyHelpers.scala index 283e487ed2..5789a97769 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyHelpers.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyHelpers.scala @@ -91,4 +91,3 @@ private[netty] trait NettyClientHelpers extends SimpleChannelHandler with NettyH onDisconnect(ctx, e) } } - diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala index acbe273e64..3004780358 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/netty/NettyTransport.scala @@ -70,10 +70,12 @@ object NettyFutureBridge { def apply(nettyFuture: ChannelFuture): Future[Channel] = { val p = Promise[Channel]() nettyFuture.addListener(new ChannelFutureListener { - def operationComplete(future: ChannelFuture): Unit = p complete Try( - if (future.isSuccess) future.getChannel - else if (future.isCancelled) throw new CancellationException - else throw future.getCause) + def operationComplete(future: ChannelFuture): 
Unit = + p.complete( + Try( + if (future.isSuccess) future.getChannel + else if (future.isCancelled) throw new CancellationException + else throw future.getCause)) }) p.future } @@ -82,24 +84,34 @@ object NettyFutureBridge { import scala.collection.JavaConverters._ val p = Promise[ChannelGroup] nettyFuture.addListener(new ChannelGroupFutureListener { - def operationComplete(future: ChannelGroupFuture): Unit = p complete Try( - if (future.isCompleteSuccess) future.getGroup - else throw future.iterator.asScala.collectFirst { - case f if f.isCancelled => new CancellationException - case f if !f.isSuccess => f.getCause - } getOrElse new IllegalStateException("Error reported in ChannelGroupFuture, but no error found in individual futures.")) + def operationComplete(future: ChannelGroupFuture): Unit = + p.complete( + Try( + if (future.isCompleteSuccess) future.getGroup + else + throw future.iterator.asScala + .collectFirst { + case f if f.isCancelled => new CancellationException + case f if !f.isSuccess => f.getCause + } + .getOrElse(new IllegalStateException( + "Error reported in ChannelGroupFuture, but no error found in individual futures.")))) }) p.future } } @SerialVersionUID(1L) -class NettyTransportException(msg: String, cause: Throwable) extends RuntimeException(msg, cause) with OnlyCauseStackTrace { +class NettyTransportException(msg: String, cause: Throwable) + extends RuntimeException(msg, cause) + with OnlyCauseStackTrace { def this(msg: String) = this(msg, null) } @SerialVersionUID(1L) -class NettyTransportExceptionNoStack(msg: String, cause: Throwable) extends NettyTransportException(msg, cause) with NoStackTrace { +class NettyTransportExceptionNoStack(msg: String, cause: Throwable) + extends NettyTransportException(msg, cause) + with NoStackTrace { def this(msg: String) = this(msg, null) } @@ -114,7 +126,8 @@ class NettyTransportSettings(config: Config) { case unknown => throw new ConfigurationException(s"Unknown transport: [$unknown]") } - val EnableSsl: 
Boolean = getBoolean("enable-ssl") requiring (!_ || TransportMode == Tcp, s"$TransportMode does not support SSL") + val EnableSsl: Boolean = + getBoolean("enable-ssl").requiring(!_ || TransportMode == Tcp, s"$TransportMode does not support SSL") val SSLEngineProviderClassName: String = if (EnableSsl) getString("ssl-engine-provider") else "" @@ -137,12 +150,11 @@ class NettyTransportSettings(config: Config) { val SendBufferSize: Option[Int] = optionSize("send-buffer-size") - val ReceiveBufferSize: Option[Int] = optionSize("receive-buffer-size") requiring (s => - s.isDefined || TransportMode != Udp, "receive-buffer-size must be specified for UDP") + val ReceiveBufferSize: Option[Int] = optionSize("receive-buffer-size") + .requiring(s => s.isDefined || TransportMode != Udp, "receive-buffer-size must be specified for UDP") - val MaxFrameSize: Int = getBytes("maximum-frame-size").toInt requiring ( - _ >= 32000, - s"Setting 'maximum-frame-size' must be at least 32000 bytes") + val MaxFrameSize: Int = getBytes("maximum-frame-size").toInt + .requiring(_ >= 32000, s"Setting 'maximum-frame-size' must be at least 32000 bytes") val Backlog: Int = getInt("backlog") @@ -181,16 +193,16 @@ class NettyTransportSettings(config: Config) { val ClientSocketWorkerPoolSize: Int = computeWPS(config.getConfig("client-socket-worker-pool")) private def computeWPS(config: Config): Int = - ThreadPoolConfig.scaledPoolSize( - config.getInt("pool-size-min"), - config.getDouble("pool-size-factor"), - config.getInt("pool-size-max")) + ThreadPoolConfig.scaledPoolSize(config.getInt("pool-size-min"), + config.getDouble("pool-size-factor"), + config.getInt("pool-size-max")) // Check Netty version >= 3.10.6 { val nettyVersion = org.jboss.netty.util.Version.ID def throwInvalidNettyVersion(): Nothing = { - throw new IllegalArgumentException("akka-remote with the Netty transport requires Netty version 3.10.6 or " + + throw new IllegalArgumentException( + "akka-remote with the Netty transport requires Netty 
version 3.10.6 or " + s"later. Version [$nettyVersion] is on the class path. Issue https://github.com/netty/netty/pull/4739 " + "may cause messages to not be delivered.") } @@ -213,26 +225,31 @@ class NettyTransportSettings(config: Config) { private[netty] trait CommonHandlers extends NettyHelpers { protected val transport: NettyTransport - final override def onOpen(ctx: ChannelHandlerContext, e: ChannelStateEvent): Unit = transport.channelGroup.add(e.getChannel) + final override def onOpen(ctx: ChannelHandlerContext, e: ChannelStateEvent): Unit = + transport.channelGroup.add(e.getChannel) protected def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle - protected def registerListener( - channel: Channel, - listener: HandleEventListener, - msg: ChannelBuffer, - remoteSocketAddress: InetSocketAddress): Unit + protected def registerListener(channel: Channel, + listener: HandleEventListener, + msg: ChannelBuffer, + remoteSocketAddress: InetSocketAddress): Unit - final protected def init(channel: Channel, remoteSocketAddress: SocketAddress, remoteAddress: Address, msg: ChannelBuffer)( - op: (AssociationHandle => Any)): Unit = { + final protected def init(channel: Channel, + remoteSocketAddress: SocketAddress, + remoteAddress: Address, + msg: ChannelBuffer)(op: (AssociationHandle => Any)): Unit = { import transport._ - NettyTransport.addressFromSocketAddress(channel.getLocalAddress, schemeIdentifier, system.name, Some(settings.Hostname), None) match { + NettyTransport.addressFromSocketAddress(channel.getLocalAddress, + schemeIdentifier, + system.name, + Some(settings.Hostname), + None) match { case Some(localAddress) => val handle = createHandle(channel, localAddress, remoteAddress) - handle.readHandlerPromise.future.foreach { - listener => - registerListener(channel, listener, msg, remoteSocketAddress.asInstanceOf[InetSocketAddress]) - channel.setReadable(true) + handle.readHandlerPromise.future.foreach { listener => + 
registerListener(channel, listener, msg, remoteSocketAddress.asInstanceOf[InetSocketAddress]) + channel.setReadable(true) } op(handle) @@ -245,20 +262,27 @@ private[netty] trait CommonHandlers extends NettyHelpers { * INTERNAL API */ private[netty] abstract class ServerHandler( - protected final val transport: NettyTransport, - private final val associationListenerFuture: Future[AssociationEventListener]) - extends NettyServerHelpers with CommonHandlers { + protected final val transport: NettyTransport, + private final val associationListenerFuture: Future[AssociationEventListener]) + extends NettyServerHelpers + with CommonHandlers { import transport.executionContext final protected def initInbound(channel: Channel, remoteSocketAddress: SocketAddress, msg: ChannelBuffer): Unit = { channel.setReadable(false) - associationListenerFuture.foreach { - listener => - val remoteAddress = NettyTransport.addressFromSocketAddress(remoteSocketAddress, transport.schemeIdentifier, - transport.system.name, hostName = None, port = None).getOrElse( - throw new NettyTransportException(s"Unknown inbound remote address type [${remoteSocketAddress.getClass.getName}]")) - init(channel, remoteSocketAddress, remoteAddress, msg) {a => listener notify InboundAssociation(a) } + associationListenerFuture.foreach { listener => + val remoteAddress = NettyTransport + .addressFromSocketAddress(remoteSocketAddress, + transport.schemeIdentifier, + transport.system.name, + hostName = None, + port = None) + .getOrElse(throw new NettyTransportException( + s"Unknown inbound remote address type [${remoteSocketAddress.getClass.getName}]")) + init(channel, remoteSocketAddress, remoteAddress, msg) { a => + listener.notify(InboundAssociation(a)) + } } } @@ -268,7 +292,8 @@ private[netty] abstract class ServerHandler( * INTERNAL API */ private[netty] abstract class ClientHandler(protected final val transport: NettyTransport, remoteAddress: Address) - extends NettyClientHelpers with CommonHandlers { + 
extends NettyClientHelpers + with CommonHandlers { final protected val statusPromise = Promise[AssociationHandle]() def statusFuture = statusPromise.future @@ -285,7 +310,7 @@ private[transport] object NettyTransport { // 4 bytes will be used to represent the frame length. Used by netty LengthFieldPrepender downstream handler. val FrameLengthFieldLength = 4 def gracefulClose(channel: Channel)(implicit ec: ExecutionContext): Unit = { - def always(c: ChannelFuture) = NettyFutureBridge(c) recover { case _ => c.getChannel } + def always(c: ChannelFuture) = NettyFutureBridge(c).recover { case _ => c.getChannel } for { _ <- always { channel.write(ChannelBuffers.buffer(0)) } // Force flush by waiting on a final dummy write _ <- always { channel.disconnect() } @@ -294,15 +319,20 @@ private[transport] object NettyTransport { val uniqueIdCounter = new AtomicInteger(0) - def addressFromSocketAddress(addr: SocketAddress, schemeIdentifier: String, systemName: String, - hostName: Option[String], port: Option[Int]): Option[Address] = addr match { - case sa: InetSocketAddress => Some(Address(schemeIdentifier, systemName, - hostName.getOrElse(sa.getHostString), port.getOrElse(sa.getPort))) + def addressFromSocketAddress(addr: SocketAddress, + schemeIdentifier: String, + systemName: String, + hostName: Option[String], + port: Option[Int]): Option[Address] = addr match { + case sa: InetSocketAddress => + Some(Address(schemeIdentifier, systemName, hostName.getOrElse(sa.getHostString), port.getOrElse(sa.getPort))) case _ => None } // Need to do like this for binary compatibility reasons - def addressFromSocketAddress(addr: SocketAddress, schemeIdentifier: String, systemName: String, + def addressFromSocketAddress(addr: SocketAddress, + schemeIdentifier: String, + systemName: String, hostName: Option[String]): Option[Address] = addressFromSocketAddress(addr, schemeIdentifier, systemName, hostName, port = None) } @@ -316,10 +346,13 @@ class NettyTransport(val settings: 
NettyTransportSettings, val system: ExtendedA import settings._ implicit val executionContext: ExecutionContext = - settings.UseDispatcherForIo.orElse(RARP(system).provider.remoteSettings.Dispatcher match { - case "" => None - case dispatcherName => Some(dispatcherName) - }).map(system.dispatchers.lookup).getOrElse(system.dispatcher) + settings.UseDispatcherForIo + .orElse(RARP(system).provider.remoteSettings.Dispatcher match { + case "" => None + case dispatcherName => Some(dispatcherName) + }) + .map(system.dispatchers.lookup) + .getOrElse(system.dispatcher) override val schemeIdentifier: String = (if (EnableSsl) "ssl." else "") + TransportMode override def maximumPayloadBytes: Int = settings.MaxFrameSize @@ -338,7 +371,7 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA private[netty] final val udpConnectionTable = new ConcurrentHashMap[SocketAddress, HandleEventListener]() private def createExecutorService() = - UseDispatcherForIo.map(system.dispatchers.lookup) getOrElse Executors.newCachedThreadPool(system.threadFactory) + UseDispatcherForIo.map(system.dispatchers.lookup).getOrElse(Executors.newCachedThreadPool(system.threadFactory)) /* * Be aware, that the close() method of DefaultChannelGroup is racy, because it uses an iterator over a ConcurrentHashMap. @@ -346,7 +379,8 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA * The usage of this class is safe in the new remoting, as close() is called after unbind() is finished, and no * outbound connections are initiated in the shutdown phase. 
*/ - val channelGroup = new DefaultChannelGroup("akka-netty-transport-driver-channelgroup-" + + val channelGroup = new DefaultChannelGroup( + "akka-netty-transport-driver-channelgroup-" + uniqueIdCounter.getAndIncrement) private val clientChannelFactory: ChannelFactory = TransportMode match { @@ -354,8 +388,10 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA val boss, worker = createExecutorService() // We need to create a HashedWheelTimer here since Netty creates one with a thread that // doesn't respect the akka.daemonic setting - new NioClientSocketChannelFactory(boss, 1, new NioWorkerPool(worker, ClientSocketWorkerPoolSize), - new HashedWheelTimer(system.threadFactory)) + new NioClientSocketChannelFactory(boss, + 1, + new NioWorkerPool(worker, ClientSocketWorkerPoolSize), + new HashedWheelTimer(system.threadFactory)) case Udp => // This does not create a HashedWheelTimer internally new NioDatagramChannelFactory(createExecutorService(), ClientSocketWorkerPoolSize) @@ -375,13 +411,13 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA val pipeline = new DefaultChannelPipeline if (!isDatagram) { - pipeline.addLast("FrameDecoder", new LengthFieldBasedFrameDecoder( - maximumPayloadBytes, - 0, - FrameLengthFieldLength, - 0, - FrameLengthFieldLength, // Strip the header - true)) + pipeline.addLast("FrameDecoder", + new LengthFieldBasedFrameDecoder(maximumPayloadBytes, + 0, + FrameLengthFieldLength, + 0, + FrameLengthFieldLength, // Strip the header + true)) pipeline.addLast("FrameEncoder", new LengthFieldPrepender(FrameLengthFieldLength)) } @@ -392,12 +428,15 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA private val sslEngineProvider: OptionVal[SSLEngineProvider] = if (settings.EnableSsl) { - OptionVal.Some(system.dynamicAccess.createInstanceFor[SSLEngineProvider]( - settings.SSLEngineProviderClassName, - List((classOf[ActorSystem], system))).recover { - case e => 
throw new ConfigurationException( - s"Could not create SSLEngineProvider [${settings.SSLEngineProviderClassName}]", e) - }.get) + OptionVal.Some(system.dynamicAccess + .createInstanceFor[SSLEngineProvider](settings.SSLEngineProviderClassName, List((classOf[ActorSystem], system))) + .recover { + case e => + throw new ConfigurationException( + s"Could not create SSLEngineProvider [${settings.SSLEngineProviderClassName}]", + e) + } + .get) } else OptionVal.None private def sslHandler(isClient: Boolean): SslHandler = { @@ -416,8 +455,9 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA override def getPipeline: ChannelPipeline = { val pipeline = newPipeline if (EnableSsl) pipeline.addFirst("SslHandler", sslHandler(isClient = false)) - val handler = if (isDatagram) new UdpServerHandler(NettyTransport.this, associationListenerPromise.future) - else new TcpServerHandler(NettyTransport.this, associationListenerPromise.future, log) + val handler = + if (isDatagram) new UdpServerHandler(NettyTransport.this, associationListenerPromise.future) + else new TcpServerHandler(NettyTransport.this, associationListenerPromise.future, log) pipeline.addLast("ServerHandler", handler) pipeline } @@ -428,8 +468,9 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA override def getPipeline: ChannelPipeline = { val pipeline = newPipeline if (EnableSsl) pipeline.addFirst("SslHandler", sslHandler(isClient = true)) - val handler = if (isDatagram) new UdpClientHandler(NettyTransport.this, remoteAddress) - else new TcpClientHandler(NettyTransport.this, remoteAddress, log) + val handler = + if (isDatagram) new UdpClientHandler(NettyTransport.this, remoteAddress) + else new TcpClientHandler(NettyTransport.this, remoteAddress, log) pipeline.addLast("clienthandler", handler) pipeline } @@ -441,7 +482,9 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA bootstrap.setOption("child.tcpNoDelay", 
settings.TcpNodelay) bootstrap.setOption("child.keepAlive", settings.TcpKeepalive) bootstrap.setOption("reuseAddress", settings.TcpReuseAddr) - if (isDatagram) bootstrap.setOption("receiveBufferSizePredictorFactory", new FixedReceiveBufferSizePredictorFactory(ReceiveBufferSize.get)) + if (isDatagram) + bootstrap.setOption("receiveBufferSizePredictorFactory", + new FixedReceiveBufferSizePredictorFactory(ReceiveBufferSize.get)) settings.ReceiveBufferSize.foreach(sz => bootstrap.setOption("receiveBufferSize", sz)) settings.SendBufferSize.foreach(sz => bootstrap.setOption("sendBufferSize", sz)) settings.WriteBufferHighWaterMark.foreach(sz => bootstrap.setOption("writeBufferHighWaterMark", sz)) @@ -470,8 +513,9 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA // TODO: This should be factored out to an async (or thread-isolated) name lookup service #2960 def addressToSocketAddress(addr: Address): Future[InetSocketAddress] = addr match { - case Address(_, _, Some(host), Some(port)) => Future { blocking { new InetSocketAddress(InetAddress.getByName(host), port) } } - case _ => Future.failed(new IllegalArgumentException(s"Address [$addr] does not contain host or port information.")) + case Address(_, _, Some(host), Some(port)) => + Future { blocking { new InetSocketAddress(InetAddress.getByName(host), port) } } + case _ => Future.failed(new IllegalArgumentException(s"Address [$addr] does not contain host or port information.")) } override def listen: Future[(Address, Promise[AssociationEventListener])] = { @@ -490,22 +534,33 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA serverChannel = newServerChannel - addressFromSocketAddress(newServerChannel.getLocalAddress, schemeIdentifier, system.name, Some(settings.Hostname), - if (settings.PortSelector == 0) None else Some(settings.PortSelector)) match { - case Some(address) => - addressFromSocketAddress(newServerChannel.getLocalAddress, schemeIdentifier, 
system.name, None, None) match { - case Some(address) => boundTo = address - case None => throw new NettyTransportException(s"Unknown local address type [${newServerChannel.getLocalAddress.getClass.getName}]") - } - localAddress = address - associationListenerPromise.future.foreach { _ => newServerChannel.setReadable(true) } - (address, associationListenerPromise) - case None => throw new NettyTransportException(s"Unknown local address type [${newServerChannel.getLocalAddress.getClass.getName}]") - } + addressFromSocketAddress(newServerChannel.getLocalAddress, + schemeIdentifier, + system.name, + Some(settings.Hostname), + if (settings.PortSelector == 0) None else Some(settings.PortSelector)) match { + case Some(address) => + addressFromSocketAddress(newServerChannel.getLocalAddress, schemeIdentifier, system.name, None, None) match { + case Some(address) => boundTo = address + case None => + throw new NettyTransportException( + s"Unknown local address type [${newServerChannel.getLocalAddress.getClass.getName}]") + } + localAddress = address + associationListenerPromise.future.foreach { _ => + newServerChannel.setReadable(true) + } + (address, associationListenerPromise) + case None => + throw new NettyTransportException( + s"Unknown local address type [${newServerChannel.getLocalAddress.getClass.getName}]") + } } catch { case NonFatal(e) => { log.error("failed to bind to {}, shutting down Netty transport", address) - try { shutdown() } catch { case NonFatal(_) => } // ignore possible exception during shutdown + try { + shutdown() + } catch { case NonFatal(_) => } // ignore possible exception during shutdown throw e } } @@ -522,30 +577,29 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA (for { socketAddress <- addressToSocketAddress(remoteAddress) - readyChannel <- NettyFutureBridge(bootstrap.connect(socketAddress)) map { - channel => - if (EnableSsl) - blocking { - 
channel.getPipeline.get(classOf[SslHandler]).handshake().awaitUninterruptibly() - } - if (!isDatagram) channel.setReadable(false) - channel + readyChannel <- NettyFutureBridge(bootstrap.connect(socketAddress)).map { channel => + if (EnableSsl) + blocking { + channel.getPipeline.get(classOf[SslHandler]).handshake().awaitUninterruptibly() + } + if (!isDatagram) channel.setReadable(false) + channel } handle <- if (isDatagram) Future { readyChannel.getRemoteAddress match { case address: InetSocketAddress => val handle = new UdpAssociationHandle(localAddress, remoteAddress, readyChannel, NettyTransport.this) - handle.readHandlerPromise.future.foreach { - listener => udpConnectionTable.put(address, listener) + handle.readHandlerPromise.future.foreach { listener => + udpConnectionTable.put(address, listener) } handle - case unknown => throw new NettyTransportException(s"Unknown outbound remote address type [${unknown.getClass.getName}]") + case unknown => + throw new NettyTransportException(s"Unknown outbound remote address type [${unknown.getClass.getName}]") } - } - else + } else readyChannel.getPipeline.get(classOf[ClientHandler]).statusFuture - } yield handle) recover { + } yield handle).recover { case _: CancellationException => throw new NettyTransportExceptionNoStack("Connection was cancelled") case NonFatal(t) => val msg = @@ -561,7 +615,7 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA } override def shutdown(): Future[Boolean] = { - def always(c: ChannelGroupFuture) = NettyFutureBridge(c).map(_ => true) recover { case _ => false } + def always(c: ChannelGroupFuture) = NettyFutureBridge(c).map(_ => true).recover { case _ => false } for { // Force flush by trying to write an empty buffer and wait for success unbindStatus <- always(channelGroup.unbind()) @@ -583,4 +637,3 @@ class NettyTransport(val settings: NettyTransportSettings, val system: ExtendedA } } - diff --git 
a/akka-remote/src/main/scala/akka/remote/transport/netty/SSLEngineProvider.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/SSLEngineProvider.scala index d68de8b26a..3f3d30911c 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/netty/SSLEngineProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/netty/SSLEngineProvider.scala @@ -41,14 +41,12 @@ import javax.net.ssl.TrustManagerFactory * * Subclass may override protected methods to replace certain parts, such as key and trust manager. */ -@ApiMayChange class ConfigSSLEngineProvider( - protected val log: MarkerLoggingAdapter, - private val settings: SSLSettings) extends SSLEngineProvider { +@ApiMayChange class ConfigSSLEngineProvider(protected val log: MarkerLoggingAdapter, private val settings: SSLSettings) + extends SSLEngineProvider { - def this(system: ActorSystem) = this( - Logging.withMarker(system, classOf[ConfigSSLEngineProvider].getName), - new SSLSettings(system.settings.config.getConfig("akka.remote.netty.ssl.security")) - ) + def this(system: ActorSystem) = + this(Logging.withMarker(system, classOf[ConfigSSLEngineProvider].getName), + new SSLSettings(system.settings.config.getConfig("akka.remote.netty.ssl.security"))) import settings._ @@ -59,9 +57,16 @@ import javax.net.ssl.TrustManagerFactory ctx.init(keyManagers, trustManagers, rng) ctx } catch { - case e: FileNotFoundException => throw new RemoteTransportException("Server SSL connection could not be established because key store could not be loaded", e) - case e: IOException => throw new RemoteTransportException("Server SSL connection could not be established because: " + e.getMessage, e) - case e: GeneralSecurityException => throw new RemoteTransportException("Server SSL connection could not be established because SSL context could not be constructed", e) + case e: FileNotFoundException => + throw new RemoteTransportException( + "Server SSL connection could not be established because key store could not be 
loaded", + e) + case e: IOException => + throw new RemoteTransportException("Server SSL connection could not be established because: " + e.getMessage, e) + case e: GeneralSecurityException => + throw new RemoteTransportException( + "Server SSL connection could not be established because SSL context could not be constructed", + e) } } @@ -71,7 +76,8 @@ import javax.net.ssl.TrustManagerFactory protected def loadKeystore(filename: String, password: String): KeyStore = { val keyStore = KeyStore.getInstance(KeyStore.getDefaultType) val fin = Files.newInputStream(Paths.get(filename)) - try keyStore.load(fin, password.toCharArray) finally Try(fin.close()) + try keyStore.load(fin, password.toCharArray) + finally Try(fin.close()) keyStore } @@ -121,4 +127,3 @@ import javax.net.ssl.TrustManagerFactory } } - diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala index e9d06b945c..369d8132d1 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/netty/TcpSupport.scala @@ -22,7 +22,7 @@ import scala.concurrent.{ Future, Promise } */ private[remote] object ChannelLocalActor extends ChannelLocal[Option[HandleEventListener]] { override def initialValue(channel: Channel): Option[HandleEventListener] = None - def notifyListener(channel: Channel, msg: HandleEvent): Unit = get(channel) foreach { _ notify msg } + def notifyListener(channel: Channel, msg: HandleEvent): Unit = get(channel).foreach { _.notify(msg) } } /** @@ -33,11 +33,11 @@ private[remote] trait TcpHandlers extends CommonHandlers { import ChannelLocalActor._ - override def registerListener( - channel: Channel, - listener: HandleEventListener, - msg: ChannelBuffer, - remoteSocketAddress: InetSocketAddress): Unit = ChannelLocalActor.set(channel, Some(listener)) + override def registerListener(channel: Channel, + listener: 
HandleEventListener, + msg: ChannelBuffer, + remoteSocketAddress: InetSocketAddress): Unit = + ChannelLocalActor.set(channel, Some(listener)) override def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle = new TcpAssociationHandle(localAddress, remoteAddress, transport, channel) @@ -62,8 +62,11 @@ private[remote] trait TcpHandlers extends CommonHandlers { /** * INTERNAL API */ -private[remote] class TcpServerHandler(_transport: NettyTransport, _associationListenerFuture: Future[AssociationEventListener], val log: LoggingAdapter) - extends ServerHandler(_transport, _associationListenerFuture) with TcpHandlers { +private[remote] class TcpServerHandler(_transport: NettyTransport, + _associationListenerFuture: Future[AssociationEventListener], + val log: LoggingAdapter) + extends ServerHandler(_transport, _associationListenerFuture) + with TcpHandlers { override def onConnect(ctx: ChannelHandlerContext, e: ChannelStateEvent): Unit = initInbound(e.getChannel, e.getChannel.getRemoteAddress, null) @@ -74,7 +77,8 @@ private[remote] class TcpServerHandler(_transport: NettyTransport, _associationL * INTERNAL API */ private[remote] class TcpClientHandler(_transport: NettyTransport, remoteAddress: Address, val log: LoggingAdapter) - extends ClientHandler(_transport, remoteAddress) with TcpHandlers { + extends ClientHandler(_transport, remoteAddress) + with TcpHandlers { override def onConnect(ctx: ChannelHandlerContext, e: ChannelStateEvent): Unit = initOutbound(e.getChannel, e.getChannel.getRemoteAddress, null) @@ -84,12 +88,11 @@ private[remote] class TcpClientHandler(_transport: NettyTransport, remoteAddress /** * INTERNAL API */ -private[remote] class TcpAssociationHandle( - val localAddress: Address, - val remoteAddress: Address, - val transport: NettyTransport, - private val channel: Channel) - extends AssociationHandle { +private[remote] class TcpAssociationHandle(val localAddress: Address, + val remoteAddress: Address, + 
val transport: NettyTransport, + private val channel: Channel) + extends AssociationHandle { import transport.executionContext override val readHandlerPromise: Promise[HandleEventListener] = Promise() diff --git a/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala b/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala index aef73390c1..d689e7ecd2 100644 --- a/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/transport/netty/UdpSupport.scala @@ -9,7 +9,7 @@ import akka.remote.transport.AssociationHandle import akka.remote.transport.AssociationHandle.{ HandleEventListener, InboundPayload } import akka.remote.transport.Transport.AssociationEventListener import akka.util.ByteString -import java.net.{ SocketAddress, InetAddress, InetSocketAddress } +import java.net.{ InetAddress, InetSocketAddress, SocketAddress } import org.jboss.netty.buffer.{ ChannelBuffer, ChannelBuffers } import org.jboss.netty.channel._ import scala.concurrent.{ Future, Promise } @@ -23,13 +23,12 @@ private[remote] trait UdpHandlers extends CommonHandlers { override def createHandle(channel: Channel, localAddress: Address, remoteAddress: Address): AssociationHandle = new UdpAssociationHandle(localAddress, remoteAddress, channel, transport) - override def registerListener( - channel: Channel, - listener: HandleEventListener, - msg: ChannelBuffer, - remoteSocketAddress: InetSocketAddress): Unit = { + override def registerListener(channel: Channel, + listener: HandleEventListener, + msg: ChannelBuffer, + remoteSocketAddress: InetSocketAddress): Unit = { transport.udpConnectionTable.putIfAbsent(remoteSocketAddress, listener) match { - case null => listener notify InboundPayload(ByteString(msg.array())) + case null => listener.notify(InboundPayload(ByteString(msg.array()))) case oldReader => throw new NettyTransportException( s"Listener $listener attempted to register for remote address 
$remoteSocketAddress but $oldReader was already registered.") @@ -44,7 +43,7 @@ private[remote] trait UdpHandlers extends CommonHandlers { } else { val listener = transport.udpConnectionTable.get(inetSocketAddress) val bytes: Array[Byte] = e.getMessage.asInstanceOf[ChannelBuffer].array() - if (bytes.length > 0) listener notify InboundPayload(ByteString(bytes)) + if (bytes.length > 0) listener.notify(InboundPayload(ByteString(bytes))) } case _ => } @@ -56,10 +55,13 @@ private[remote] trait UdpHandlers extends CommonHandlers { * INTERNAL API */ @deprecated("Deprecated in favour of Artery (the new Aeron/UDP based remoting implementation).", since = "2.5.0") -private[remote] class UdpServerHandler(_transport: NettyTransport, _associationListenerFuture: Future[AssociationEventListener]) - extends ServerHandler(_transport, _associationListenerFuture) with UdpHandlers { +private[remote] class UdpServerHandler(_transport: NettyTransport, + _associationListenerFuture: Future[AssociationEventListener]) + extends ServerHandler(_transport, _associationListenerFuture) + with UdpHandlers { - transport.system.log.warning("The netty.udp transport is deprecated, please use Artery instead. See: http://doc.akka.io/docs/akka/2.4/scala/remoting-artery.html") + transport.system.log.warning( + "The netty.udp transport is deprecated, please use Artery instead. 
See: http://doc.akka.io/docs/akka/2.4/scala/remoting-artery.html") override def initUdp(channel: Channel, remoteSocketAddress: SocketAddress, msg: ChannelBuffer): Unit = initInbound(channel, remoteSocketAddress, msg) @@ -70,9 +72,11 @@ private[remote] class UdpServerHandler(_transport: NettyTransport, _associationL */ @deprecated("Deprecated in favour of Artery (the new Aeron/UDP based remoting implementation).", since = "2.5.0") private[remote] class UdpClientHandler(_transport: NettyTransport, remoteAddress: Address) - extends ClientHandler(_transport, remoteAddress) with UdpHandlers { + extends ClientHandler(_transport, remoteAddress) + with UdpHandlers { - transport.system.log.warning("The netty.udp transport is deprecated, please use Artery instead. See: http://doc.akka.io/docs/akka/2.4/scala/remoting-artery.html") + transport.system.log.warning( + "The netty.udp transport is deprecated, please use Artery instead. See: http://doc.akka.io/docs/akka/2.4/scala/remoting-artery.html") override def initUdp(channel: Channel, remoteSocketAddress: SocketAddress, msg: ChannelBuffer): Unit = initOutbound(channel, remoteSocketAddress, msg) @@ -81,11 +85,11 @@ private[remote] class UdpClientHandler(_transport: NettyTransport, remoteAddress /** * INTERNAL API */ -private[remote] class UdpAssociationHandle( - val localAddress: Address, - val remoteAddress: Address, - private val channel: Channel, - private val transport: NettyTransport) extends AssociationHandle { +private[remote] class UdpAssociationHandle(val localAddress: Address, + val remoteAddress: Address, + private val channel: Channel, + private val transport: NettyTransport) + extends AssociationHandle { override val readHandlerPromise: Promise[HandleEventListener] = Promise() @@ -99,7 +103,8 @@ private[remote] class UdpAssociationHandle( } else false } - override def disassociate(): Unit = try channel.close() - finally transport.udpConnectionTable.remove(transport.addressToSocketAddress(remoteAddress)) + override 
def disassociate(): Unit = + try channel.close() + finally transport.udpConnectionTable.remove(transport.addressToSocketAddress(remoteAddress)) } diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala index 850f682284..5d99951317 100644 --- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala @@ -14,7 +14,8 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { "An AccrualFailureDetector" must { def fakeTimeGenerator(timeIntervals: Seq[Long]): Clock = new Clock { - @volatile var times = timeIntervals.tail.foldLeft(List[Long](timeIntervals.head))((acc, c) => acc ::: List[Long](acc.last + c)) + @volatile var times = + timeIntervals.tail.foldLeft(List[Long](timeIntervals.head))((acc, c) => acc ::: List[Long](acc.last + c)) override def apply(): Long = { val currentTime = times.head times = times.tail @@ -22,19 +23,17 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { } } - def createFailureDetector( - threshold: Double = 8.0, - maxSampleSize: Int = 1000, - minStdDeviation: FiniteDuration = 100.millis, - acceptableLostDuration: FiniteDuration = Duration.Zero, - firstHeartbeatEstimate: FiniteDuration = 1.second, - clock: Clock = FailureDetector.defaultClock) = - new PhiAccrualFailureDetector( - threshold, - maxSampleSize, - minStdDeviation, - acceptableLostDuration, - firstHeartbeatEstimate = firstHeartbeatEstimate)(clock = clock) + def createFailureDetector(threshold: Double = 8.0, + maxSampleSize: Int = 1000, + minStdDeviation: FiniteDuration = 100.millis, + acceptableLostDuration: FiniteDuration = Duration.Zero, + firstHeartbeatEstimate: FiniteDuration = 1.second, + clock: Clock = FailureDetector.defaultClock) = + new PhiAccrualFailureDetector(threshold, + maxSampleSize, + minStdDeviation, + acceptableLostDuration, + 
firstHeartbeatEstimate = firstHeartbeatEstimate)(clock = clock) def cdf(phi: Double) = 1.0 - math.pow(10, -phi) @@ -69,8 +68,9 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { } // larger stdDeviation results => lower phi - fd.phi(timeDiff = 1100, mean = 1000.0, stdDeviation = 500.0) should be < ( - fd.phi(timeDiff = 1100, mean = 1000.0, stdDeviation = 100.0)) + fd.phi(timeDiff = 1100, mean = 1000.0, stdDeviation = 500.0) should be < (fd.phi(timeDiff = 1100, + mean = 1000.0, + stdDeviation = 100.0)) } "return phi value of 0.0 on startup for each address, when no heartbeats" in { @@ -81,9 +81,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { "return phi based on guess when only one heartbeat" in { val timeInterval = List[Long](0, 1000, 1000, 1000, 1000) - val fd = createFailureDetector( - firstHeartbeatEstimate = 1.seconds, - clock = fakeTimeGenerator(timeInterval)) + val fd = createFailureDetector(firstHeartbeatEstimate = 1.seconds, clock = fakeTimeGenerator(timeInterval)) fd.heartbeat() fd.phi should ===(0.3 +- 0.2) @@ -130,7 +128,9 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { // 1000 regular intervals, 5 minute pause, and then a short pause again that should trigger unreachable again val regularIntervals = 0L +: Vector.fill(999)(1000L) val timeIntervals = regularIntervals :+ (5 * 60 * 1000L) :+ 100L :+ 900L :+ 100L :+ 7000L :+ 100L :+ 900L :+ 100L :+ 900L - val fd = createFailureDetector(threshold = 8, acceptableLostDuration = 3.seconds, clock = fakeTimeGenerator(timeIntervals)) + val fd = createFailureDetector(threshold = 8, + acceptableLostDuration = 3.seconds, + clock = fakeTimeGenerator(timeIntervals)) for (_ <- 0 until 1000) fd.heartbeat() fd.isAvailable should ===(false) // after the long pause @@ -197,8 +197,8 @@ class AccrualFailureDetectorSpec extends AkkaSpec("akka.loglevel = INFO") { "calculate correct mean and variance" in { val samples = Seq(100, 200, 125, 
340, 130) - val stats = samples.foldLeft(HeartbeatHistory(maxSampleSize = 20)) { - (stats, value) => stats :+ value + val stats = samples.foldLeft(HeartbeatHistory(maxSampleSize = 20)) { (stats, value) => + stats :+ value } stats.mean should ===(179.0 +- 0.00001) stats.variance should ===(7584.0 +- 0.00001) diff --git a/akka-remote/src/test/scala/akka/remote/AckedDeliverySpec.scala b/akka-remote/src/test/scala/akka/remote/AckedDeliverySpec.scala index 8e6d751a47..d2d6891b19 100644 --- a/akka-remote/src/test/scala/akka/remote/AckedDeliverySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/AckedDeliverySpec.scala @@ -95,7 +95,7 @@ class AckedDeliverySpec extends AkkaSpec { val buffer = new AckedSendBuffer[Sequenced](4).buffer(msg(0)).buffer(msg(1)).buffer(msg(2)).buffer(msg(3)) intercept[ResendBufferCapacityReachedException] { - buffer buffer msg(4) + buffer.buffer(msg(4)) } } @@ -226,15 +226,9 @@ class AckedDeliverySpec extends AkkaSpec { val msg1 = msg(1) val msg2 = msg(2) - val (buf2, _, _) = buf - .receive(msg0) - .receive(msg1) - .receive(msg2) - .extractDeliverable + val (buf2, _, _) = buf.receive(msg0).receive(msg1).receive(msg2).extractDeliverable - val buf3 = buf2.receive(msg0) - .receive(msg1) - .receive(msg2) + val buf3 = buf2.receive(msg0).receive(msg1).receive(msg2) val (_, deliver, ack) = buf3.extractDeliverable @@ -251,8 +245,7 @@ class AckedDeliverySpec extends AkkaSpec { val msg2 = msg(2) val msg3 = msg(3) - val buf = buf1.receive(msg1a).receive(msg2).mergeFrom( - buf2.receive(msg1b).receive(msg3)) + val buf = buf1.receive(msg1a).receive(msg2).mergeFrom(buf2.receive(msg1b).receive(msg3)) val (_, deliver, ack) = buf.receive(msg0).extractDeliverable deliver should ===(Vector(msg0, msg1a, msg2, msg3)) @@ -272,7 +265,9 @@ class AckedDeliverySpec extends AkkaSpec { "correctly cooperate with each other" in { val MsgCount = 1000 val DeliveryProbability = 0.5 - val referenceList: Seq[Sequenced] = (0 until MsgCount).toSeq map { i => msg(i.toLong) } + val 
referenceList: Seq[Sequenced] = (0 until MsgCount).toSeq.map { i => + msg(i.toLong) + } var toSend = referenceList var received = Seq.empty[Sequenced] @@ -292,7 +287,7 @@ class AckedDeliverySpec extends AkkaSpec { tmp } else Seq.empty[Sequenced] - (resends ++ sends) foreach { msg => + (resends ++ sends).foreach { msg => if (sends.contains(msg)) sndBuf = sndBuf.buffer(msg) if (happened(p)) { val (updatedRcvBuf, delivers, ack) = rcvBuf.receive(msg).extractDeliverable diff --git a/akka-remote/src/test/scala/akka/remote/ActorsLeakSpec.scala b/akka-remote/src/test/scala/akka/remote/ActorsLeakSpec.scala index ad24c2780f..e72c6e59c4 100644 --- a/akka-remote/src/test/scala/akka/remote/ActorsLeakSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/ActorsLeakSpec.scala @@ -19,8 +19,7 @@ import scala.concurrent.duration._ object ActorsLeakSpec { - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" | akka.actor.provider = remote | akka.remote.netty.tcp.applied-adapters = ["trttl"] | #akka.remote.log-lifecycle-events = on @@ -39,10 +38,10 @@ object ActorsLeakSpec { val cell = wc.underlying cell.childrenRefs match { - case ChildrenContainer.TerminatingChildrenContainer(_, toDie, reason) => Nil + case ChildrenContainer.TerminatingChildrenContainer(_, toDie, reason) => Nil case x @ (ChildrenContainer.TerminatedChildrenContainer | ChildrenContainer.EmptyChildrenContainer) => Nil - case n: ChildrenContainer.NormalChildrenContainer => cell.childrenRefs.children.toList - case x => Nil + case n: ChildrenContainer.NormalChildrenContainer => cell.childrenRefs.children.toList + case x => Nil } case _ => Nil } @@ -80,10 +79,8 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender //Clean shutdown case for (_ <- 1 to 3) { - val remoteSystem = ActorSystem( - "remote", - ConfigFactory.parseString("akka.remote.netty.tcp.port = 0") - .withFallback(config)) + val remoteSystem = + ActorSystem("remote", 
ConfigFactory.parseString("akka.remote.netty.tcp.port = 0").withFallback(config)) try { val probe = TestProbe()(remoteSystem) @@ -101,10 +98,8 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender // Quarantine an old incarnation case for (_ <- 1 to 3) { //always use the same address - val remoteSystem = ActorSystem( - "remote", - ConfigFactory.parseString("akka.remote.netty.tcp.port = 2553") - .withFallback(config)) + val remoteSystem = + ActorSystem("remote", ConfigFactory.parseString("akka.remote.netty.tcp.port = 2553").withFallback(config)) try { val remoteAddress = RARP(remoteSystem).provider.getDefaultAddress @@ -119,7 +114,8 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender val beforeQuarantineActors = targets.flatMap(collectLiveActors).toSet // it must not quarantine the current connection - RARP(system).provider.transport.quarantine(remoteAddress, Some(AddressUidExtension(remoteSystem).addressUid + 1), "test") + RARP(system).provider.transport + .quarantine(remoteAddress, Some(AddressUidExtension(remoteSystem).addressUid + 1), "test") // the message from local to remote should reuse passive inbound connection system.actorSelection(RootActorPath(remoteAddress) / "user" / "stoppable") ! 
Identify(1) @@ -140,10 +136,8 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender // Missing SHUTDOWN case for (_ <- 1 to 3) { - val remoteSystem = ActorSystem( - "remote", - ConfigFactory.parseString("akka.remote.netty.tcp.port = 0") - .withFallback(config)) + val remoteSystem = + ActorSystem("remote", ConfigFactory.parseString("akka.remote.netty.tcp.port = 0").withFallback(config)) val remoteAddress = RARP(remoteSystem).provider.getDefaultAddress try { @@ -153,9 +147,7 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender probe.expectMsgType[ActorIdentity].ref.nonEmpty should be(true) // This will make sure that no SHUTDOWN message gets through - Await.ready( - RARP(system).provider.transport.managementCommand(ForceDisassociate(remoteAddress)), - 3.seconds) + Await.ready(RARP(system).provider.transport.managementCommand(ForceDisassociate(remoteAddress)), 3.seconds) } finally { remoteSystem.terminate() @@ -167,10 +159,8 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender } // Remote idle for too long case - val remoteSystem = ActorSystem( - "remote", - ConfigFactory.parseString("akka.remote.netty.tcp.port = 0") - .withFallback(config)) + val remoteSystem = + ActorSystem("remote", ConfigFactory.parseString("akka.remote.netty.tcp.port = 0").withFallback(config)) val remoteAddress = RARP(remoteSystem).provider.getDefaultAddress remoteSystem.actorOf(Props[StoppableActor], "stoppable") @@ -190,9 +180,7 @@ class ActorsLeakSpec extends AkkaSpec(ActorsLeakSpec.config) with ImplicitSender // All system messages has been acked now on this side // This will make sure that no SHUTDOWN message gets through - Await.ready( - RARP(system).provider.transport.managementCommand(ForceDisassociate(remoteAddress)), - 3.seconds) + Await.ready(RARP(system).provider.transport.managementCommand(ForceDisassociate(remoteAddress)), 3.seconds) } finally { remoteSystem.terminate() diff --git 
a/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala b/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala index af18bcdbb6..b99cb4cb99 100644 --- a/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/DaemonicSpec.scala @@ -6,7 +6,7 @@ package akka.remote import akka.testkit._ import scala.concurrent.duration._ -import akka.actor.{ Address, ActorSystem } +import akka.actor.{ ActorSystem, Address } import akka.util.ccompat._ import com.typesafe.config.ConfigFactory import scala.collection.JavaConverters._ @@ -19,7 +19,9 @@ class DaemonicSpec extends AkkaSpec { // get all threads running before actor system is started val origThreads: Set[Thread] = Thread.getAllStackTraces.keySet().asScala.to(Set) // create a separate actor system that we can check the threads for - val daemonicSystem = ActorSystem("daemonic", ConfigFactory.parseString(""" + val daemonicSystem = ActorSystem("daemonic", + ConfigFactory.parseString( + """ akka.daemonic = on akka.actor.provider = remote akka.remote.netty.tcp.transport-class = "akka.remote.transport.netty.NettyTransport" @@ -30,14 +32,15 @@ class DaemonicSpec extends AkkaSpec { try { val unusedPort = 86 // very unlikely to ever be used, "system port" range reserved for Micro Focus Cobol - val unusedAddress = RARP(daemonicSystem).provider.getExternalAddressFor(Address(s"akka.tcp", "", "", unusedPort)).get + val unusedAddress = + RARP(daemonicSystem).provider.getExternalAddressFor(Address(s"akka.tcp", "", "", unusedPort)).get val selection = daemonicSystem.actorSelection(s"$unusedAddress/user/SomeActor") selection ! "whatever" // get new non daemonic threads running awaitAssert({ - val newNonDaemons: Set[Thread] = Thread.getAllStackTraces.keySet().asScala.seq. 
- filter(t => !origThreads(t) && !t.isDaemon).to(Set) + val newNonDaemons: Set[Thread] = + Thread.getAllStackTraces.keySet().asScala.seq.filter(t => !origThreads(t) && !t.isDaemon).to(Set) newNonDaemons should ===(Set.empty[Thread]) }, 4.seconds) diff --git a/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala index 838708c666..a958268663 100644 --- a/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/DeadlineFailureDetectorSpec.scala @@ -13,7 +13,8 @@ class DeadlineFailureDetectorSpec extends AkkaSpec { "A DeadlineFailureDetector" must { def fakeTimeGenerator(timeIntervals: Seq[Long]): Clock = new Clock { - @volatile var times = timeIntervals.tail.foldLeft(List[Long](timeIntervals.head))((acc, c) => acc ::: List[Long](acc.last + c)) + @volatile var times = + timeIntervals.tail.foldLeft(List[Long](timeIntervals.head))((acc, c) => acc ::: List[Long](acc.last + c)) override def apply(): Long = { val currentTime = times.head times = times.tail @@ -21,9 +22,7 @@ class DeadlineFailureDetectorSpec extends AkkaSpec { } } - def createFailureDetector( - acceptableLostDuration: FiniteDuration, - clock: Clock = FailureDetector.defaultClock) = + def createFailureDetector(acceptableLostDuration: FiniteDuration, clock: Clock = FailureDetector.defaultClock) = new DeadlineFailureDetector(acceptableLostDuration, heartbeatInterval = 1.second)(clock = clock) "mark node as monitored after a series of successful heartbeats" in { diff --git a/akka-remote/src/test/scala/akka/remote/EndpointRegistrySpec.scala b/akka-remote/src/test/scala/akka/remote/EndpointRegistrySpec.scala index 2c29bfb6c5..b803b98931 100644 --- a/akka-remote/src/test/scala/akka/remote/EndpointRegistrySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/EndpointRegistrySpec.scala @@ -5,7 +5,7 @@ package akka.remote import akka.testkit.AkkaSpec -import 
akka.actor.{ Props, Address } +import akka.actor.{ Address, Props } import akka.remote.EndpointManager._ import scala.concurrent.duration._ diff --git a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala index e80656d702..e14eb5d6ba 100644 --- a/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/FailureDetectorRegistrySpec.scala @@ -11,7 +11,8 @@ import akka.testkit.AkkaSpec class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") { def fakeTimeGenerator(timeIntervals: Seq[Long]): Clock = new Clock { - @volatile var times = timeIntervals.tail.foldLeft(List[Long](timeIntervals.head))((acc, c) => acc ::: List[Long](acc.last + c)) + @volatile var times = + timeIntervals.tail.foldLeft(List[Long](timeIntervals.head))((acc, c) => acc ::: List[Long](acc.last + c)) override def apply(): Long = { val currentTime = times.head times = times.tail @@ -19,34 +20,32 @@ class FailureDetectorRegistrySpec extends AkkaSpec("akka.loglevel = INFO") { } } - def createFailureDetector( - threshold: Double = 8.0, - maxSampleSize: Int = 1000, - minStdDeviation: FiniteDuration = 10.millis, - acceptableLostDuration: FiniteDuration = Duration.Zero, - firstHeartbeatEstimate: FiniteDuration = 1.second, - clock: Clock = FailureDetector.defaultClock) = - new PhiAccrualFailureDetector( - threshold, - maxSampleSize, - minStdDeviation, - acceptableLostDuration, - firstHeartbeatEstimate = firstHeartbeatEstimate)(clock = clock) + def createFailureDetector(threshold: Double = 8.0, + maxSampleSize: Int = 1000, + minStdDeviation: FiniteDuration = 10.millis, + acceptableLostDuration: FiniteDuration = Duration.Zero, + firstHeartbeatEstimate: FiniteDuration = 1.second, + clock: Clock = FailureDetector.defaultClock) = + new PhiAccrualFailureDetector(threshold, + maxSampleSize, + minStdDeviation, + acceptableLostDuration, + 
firstHeartbeatEstimate = firstHeartbeatEstimate)(clock = clock) - def createFailureDetectorRegistry( - threshold: Double = 8.0, - maxSampleSize: Int = 1000, - minStdDeviation: FiniteDuration = 10.millis, - acceptableLostDuration: FiniteDuration = Duration.Zero, - firstHeartbeatEstimate: FiniteDuration = 1.second, - clock: Clock = FailureDetector.defaultClock): FailureDetectorRegistry[String] = { - new DefaultFailureDetectorRegistry[String](() => createFailureDetector( - threshold, - maxSampleSize, - minStdDeviation, - acceptableLostDuration, - firstHeartbeatEstimate, - clock)) + def createFailureDetectorRegistry(threshold: Double = 8.0, + maxSampleSize: Int = 1000, + minStdDeviation: FiniteDuration = 10.millis, + acceptableLostDuration: FiniteDuration = Duration.Zero, + firstHeartbeatEstimate: FiniteDuration = 1.second, + clock: Clock = FailureDetector.defaultClock): FailureDetectorRegistry[String] = { + new DefaultFailureDetectorRegistry[String]( + () => + createFailureDetector(threshold, + maxSampleSize, + minStdDeviation, + acceptableLostDuration, + firstHeartbeatEstimate, + clock)) } "mark node as available after a series of successful heartbeats" in { diff --git a/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala b/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala index ea18a491ef..0a1104a1bf 100644 --- a/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/LogSourceSpec.scala @@ -23,8 +23,7 @@ object LogSourceSpec { } } -class LogSourceSpec extends AkkaSpec( - """ +class LogSourceSpec extends AkkaSpec(""" akka.loglevel = INFO akka.actor.provider = remote akka.remote.netty.tcp.port = 0 @@ -37,7 +36,7 @@ class LogSourceSpec extends AkkaSpec( system.eventStream.subscribe(system.actorOf(Props(new Actor { def receive = { case i @ Info(_, _, msg: String) if msg contains "hello" => logProbe.ref ! 
i - case _ => + case _ => } }).withDeploy(Deploy.local), "logSniffer"), classOf[Logging.Info]) diff --git a/akka-remote/src/test/scala/akka/remote/MessageLoggingSpec.scala b/akka-remote/src/test/scala/akka/remote/MessageLoggingSpec.scala index 28e7bbd2a9..8f45c73496 100644 --- a/akka-remote/src/test/scala/akka/remote/MessageLoggingSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/MessageLoggingSpec.scala @@ -10,8 +10,7 @@ import com.typesafe.config.{ Config, ConfigFactory } import MessageLoggingSpec._ object MessageLoggingSpec { - def config(artery: Boolean) = ConfigFactory.parseString( - s""" + def config(artery: Boolean) = ConfigFactory.parseString(s""" akka.loglevel = info // debug makes this test fail intentionally akka.actor.provider = remote akka.remote { @@ -70,4 +69,3 @@ abstract class MessageLoggingSpec(config: Config) extends AkkaSpec(config) with TestKit.shutdownActorSystem(remoteSystem) } } - diff --git a/akka-remote/src/test/scala/akka/remote/NetworkFailureSpec.scala b/akka-remote/src/test/scala/akka/remote/NetworkFailureSpec.scala index 0783955529..b9f384db40 100644 --- a/akka-remote/src/test/scala/akka/remote/NetworkFailureSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/NetworkFailureSpec.scala @@ -66,25 +66,29 @@ trait NetworkFailureSpec extends DefaultTimeout { self: AkkaSpec => def sleepFor(duration: Duration) = { println("===>>> Sleeping for [" + duration + "]") - Thread sleep (duration.toMillis) + Thread.sleep(duration.toMillis) } def enableNetworkThrottling() = { restoreIP() assert(new ProcessBuilder("ipfw", "add", "pipe", "1", "ip", "from", "any", "to", "any").start.waitFor == 0) assert(new ProcessBuilder("ipfw", "add", "pipe", "2", "ip", "from", "any", "to", "any").start.waitFor == 0) - assert(new ProcessBuilder("ipfw", "pipe", "1", "config", "bw", BytesPerSecond, "delay", DelayMillis).start.waitFor == 0) - assert(new ProcessBuilder("ipfw", "pipe", "2", "config", "bw", BytesPerSecond, "delay", DelayMillis).start.waitFor == 0) + 
assert( + new ProcessBuilder("ipfw", "pipe", "1", "config", "bw", BytesPerSecond, "delay", DelayMillis).start.waitFor == 0) + assert( + new ProcessBuilder("ipfw", "pipe", "2", "config", "bw", BytesPerSecond, "delay", DelayMillis).start.waitFor == 0) } def enableNetworkDrop() = { restoreIP() - assert(new ProcessBuilder("ipfw", "add", "1", "deny", "tcp", "from", "any", "to", "any", PortRange).start.waitFor == 0) + assert( + new ProcessBuilder("ipfw", "add", "1", "deny", "tcp", "from", "any", "to", "any", PortRange).start.waitFor == 0) } def enableTcpReset() = { restoreIP() - assert(new ProcessBuilder("ipfw", "add", "1", "reset", "tcp", "from", "any", "to", "any", PortRange).start.waitFor == 0) + assert( + new ProcessBuilder("ipfw", "add", "1", "reset", "tcp", "from", "any", "to", "any", PortRange).start.waitFor == 0) } def restoreIP() = { diff --git a/akka-remote/src/test/scala/akka/remote/RemoteActorMailboxSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteActorMailboxSpec.scala index 6eccc05f1c..6108ebc38a 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteActorMailboxSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteActorMailboxSpec.scala @@ -7,8 +7,6 @@ package akka.remote import akka.actor.ActorMailboxSpec import com.typesafe.config.ConfigFactory -class RemoteActorMailboxSpec extends ActorMailboxSpec( - ConfigFactory.parseString("""akka.actor.provider = remote"""). 
- withFallback(ActorMailboxSpec.mailboxConf)) { - -} +class RemoteActorMailboxSpec + extends ActorMailboxSpec( + ConfigFactory.parseString("""akka.actor.provider = remote""").withFallback(ActorMailboxSpec.mailboxConf)) {} diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index 40064d9cd2..d841d72708 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -12,8 +12,7 @@ import akka.util.{ Helpers } import akka.util.Helpers.ConfigOps import akka.remote.transport.netty.{ NettyTransportSettings, SSLSettings } -class RemoteConfigSpec extends AkkaSpec( - """ +class RemoteConfigSpec extends AkkaSpec(""" akka.actor.provider = remote akka.remote.netty.tcp.port = 0 """) { @@ -46,9 +45,9 @@ class RemoteConfigSpec extends AkkaSpec( Transports.size should ===(1) Transports.head._1 should ===(classOf[akka.remote.transport.netty.NettyTransport].getName) Transports.head._2 should ===(Nil) - Adapters should ===(Map( - "gremlin" -> classOf[akka.remote.transport.FailureInjectorProvider].getName, - "trttl" -> classOf[akka.remote.transport.ThrottlerProvider].getName)) + Adapters should ===( + Map("gremlin" -> classOf[akka.remote.transport.FailureInjectorProvider].getName, + "trttl" -> classOf[akka.remote.transport.ThrottlerProvider].getName)) WatchFailureDetectorImplementationClass should ===(classOf[PhiAccrualFailureDetector].getName) WatchHeartBeatInterval should ===(1 seconds) @@ -81,8 +80,8 @@ class RemoteConfigSpec extends AkkaSpec( import s._ ConnectionTimeout should ===(15.seconds) - ConnectionTimeout should ===(new AkkaProtocolSettings(RARP(system).provider.remoteSettings.config) - .HandshakeTimeout) + ConnectionTimeout should ===( + new AkkaProtocolSettings(RARP(system).provider.remoteSettings.config).HandshakeTimeout) WriteBufferHighWaterMark should ===(None) WriteBufferLowWaterMark should ===(None) 
SendBufferSize should ===(Some(256000)) diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala index f20fcdca69..e0afdd4dd0 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConsistentHashingRouterSpec.scala @@ -27,8 +27,8 @@ class RemoteConsistentHashingRouterSpec extends AkkaSpec(""" val consistentHash1 = ConsistentHash(nodes1, 10) val consistentHash2 = ConsistentHash(nodes2, 10) val keys = List("A", "B", "C", "D", "E", "F", "G") - val result1 = keys collect { case k => consistentHash1.nodeFor(k).routee } - val result2 = keys collect { case k => consistentHash2.nodeFor(k).routee } + val result1 = keys.collect { case k => consistentHash1.nodeFor(k).routee } + val result2 = keys.collect { case k => consistentHash2.nodeFor(k).routee } result1 should ===(result2) } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala index ab3559300f..67a24028e0 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteDeathWatchSpec.scala @@ -12,7 +12,8 @@ import scala.concurrent.duration._ import akka.testkit.SocketUtil import akka.event.Logging.Warning -class RemoteDeathWatchSpec extends AkkaSpec(ConfigFactory.parseString(""" +class RemoteDeathWatchSpec + extends AkkaSpec(ConfigFactory.parseString(""" akka { actor { provider = remote @@ -27,18 +28,21 @@ akka { port = 0 } } -""")) with ImplicitSender with DefaultTimeout with DeathWatchSpec { +""")) + with ImplicitSender + with DefaultTimeout + with DeathWatchSpec { val protocol = if (RARP(system).provider.remoteSettings.Artery.Enabled) "akka" else "akka.tcp" - val other = ActorSystem("other", ConfigFactory.parseString("akka.remote.netty.tcp.port=2666") - 
.withFallback(system.settings.config)) + val other = ActorSystem( + "other", + ConfigFactory.parseString("akka.remote.netty.tcp.port=2666").withFallback(system.settings.config)) override def beforeTermination(): Unit = { - system.eventStream.publish(TestEvent.Mute( - EventFilter.warning(pattern = "received dead letter.*Disassociate"))) + system.eventStream.publish(TestEvent.Mute(EventFilter.warning(pattern = "received dead letter.*Disassociate"))) } override def afterTermination(): Unit = { @@ -94,8 +98,12 @@ akka { // This forces ReliableDeliverySupervisor to start with unknown remote system UID. val extinctPath = RootActorPath(Address(protocol, "extinct-system", "localhost", SocketUtil.temporaryLocalPort())) / "user" / "noone" val transport = RARP(system).provider.transport - val extinctRef = new RemoteActorRef(transport, transport.localAddressForRemote(extinctPath.address), - extinctPath, Nobody, props = None, deploy = None) + val extinctRef = new RemoteActorRef(transport, + transport.localAddressForRemote(extinctPath.address), + extinctPath, + Nobody, + props = None, + deploy = None) val probe = TestProbe() probe.watch(extinctRef) diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala index c7a3462d63..f35a7d862d 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala @@ -22,7 +22,8 @@ object RemoteDeployerSpec { } } akka.remote.netty.tcp.port = 0 - """, ConfigParseOptions.defaults) + """, + ConfigParseOptions.defaults) class RecipeActor extends Actor { def receive = { case _ => } @@ -38,19 +39,20 @@ class RemoteDeployerSpec extends AkkaSpec(RemoteDeployerSpec.deployerConf) { val service = "/service2" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) - deployment should ===(Some( - Deploy( - service, - deployment.get.config, - 
RoundRobinPool(3), - RemoteScope(Address("akka", "sys", "wallace", 2552)), - "mydispatcher"))) + deployment should ===( + Some( + Deploy(service, + deployment.get.config, + RoundRobinPool(3), + RemoteScope(Address("akka", "sys", "wallace", 2552)), + "mydispatcher"))) } "reject remote deployment when the source requires LocalScope" in { intercept[ConfigurationException] { system.actorOf(Props.empty.withDeploy(Deploy.local), "service2") - }.getMessage should ===("configuration requested remote deployment for local-only Props at [akka://RemoteDeployerSpec/user/service2]") + }.getMessage should ===( + "configuration requested remote deployment for local-only Props at [akka://RemoteDeployerSpec/user/service2]") } } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala index 1f0dcfcc1d..1eabc54637 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteDeploymentWhitelistSpec.scala @@ -49,7 +49,7 @@ object RemoteDeploymentWhitelistSpec { } } - val cfg: Config = ConfigFactory parseString (s""" + val cfg: Config = ConfigFactory.parseString(s""" akka { actor.provider = remote @@ -82,19 +82,22 @@ object RemoteDeploymentWhitelistSpec { """) def muteSystem(system: ActorSystem): Unit = { - system.eventStream.publish(TestEvent.Mute( - EventFilter.error(start = "AssociationError"), - EventFilter.warning(start = "AssociationError"), - EventFilter.warning(pattern = "received dead letter.*"))) + system.eventStream.publish( + TestEvent.Mute(EventFilter.error(start = "AssociationError"), + EventFilter.warning(start = "AssociationError"), + EventFilter.warning(pattern = "received dead letter.*"))) } } -class RemoteDeploymentWhitelistSpec extends AkkaSpec(RemoteDeploymentWhitelistSpec.cfg) with ImplicitSender with DefaultTimeout { +class RemoteDeploymentWhitelistSpec + extends 
AkkaSpec(RemoteDeploymentWhitelistSpec.cfg) + with ImplicitSender + with DefaultTimeout { import RemoteDeploymentWhitelistSpec._ - val conf = ConfigFactory.parseString( - """ + val conf = + ConfigFactory.parseString(""" akka.remote.test { local-address = "test://remote-sys@localhost:12346" maximum-payload-bytes = 48000 bytes @@ -115,10 +118,11 @@ class RemoteDeploymentWhitelistSpec extends AkkaSpec(RemoteDeploymentWhitelistSp override def atStartup() = { muteSystem(system) - remoteSystem.eventStream.publish(TestEvent.Mute( - EventFilter[EndpointException](), - EventFilter.error(start = "AssociationError"), - EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate|HandleListener)"))) + remoteSystem.eventStream.publish( + TestEvent.Mute(EventFilter[EndpointException](), + EventFilter.error(start = "AssociationError"), + EventFilter.warning( + pattern = "received dead letter.*(InboundPayload|Disassociate|HandleListener)"))) } override def afterTermination(): Unit = { @@ -130,7 +134,8 @@ class RemoteDeploymentWhitelistSpec extends AkkaSpec(RemoteDeploymentWhitelistSp "allow deploying Echo actor (included in whitelist)" in { val r = system.actorOf(Props[EchoWhitelisted], "blub") - r.path.toString should ===(s"akka.test://remote-sys@localhost:12346/remote/akka.test/${getClass.getSimpleName}@localhost:12345/user/blub") + r.path.toString should ===( + s"akka.test://remote-sys@localhost:12346/remote/akka.test/${getClass.getSimpleName}@localhost:12345/user/blub") r ! 
42 expectMsg(42) EventFilter[Exception]("crash", occurrences = 1).intercept { @@ -145,7 +150,8 @@ class RemoteDeploymentWhitelistSpec extends AkkaSpec(RemoteDeploymentWhitelistSp "not deploy actor not listed in whitelist" in { val r = system.actorOf(Props[EchoNotWhitelisted], "danger-mouse") - r.path.toString should ===(s"akka.test://remote-sys@localhost:12346/remote/akka.test/${getClass.getSimpleName}@localhost:12345/user/danger-mouse") + r.path.toString should ===( + s"akka.test://remote-sys@localhost:12346/remote/akka.test/${getClass.getSimpleName}@localhost:12345/user/danger-mouse") r ! 42 expectNoMsg(1.second) system.stop(r) diff --git a/akka-remote/src/test/scala/akka/remote/RemoteInitErrorSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteInitErrorSpec.scala index 3a1cd31854..7fc39fccd3 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteInitErrorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteInitErrorSpec.scala @@ -21,8 +21,7 @@ import scala.util.control.NonFatal * the ActorSystem with the use of remoting will intentionally fail. 
*/ class RemoteInitErrorSpec extends WordSpec with Matchers { - val conf = ConfigFactory.parseString( - """ + val conf = ConfigFactory.parseString(""" akka { actor { provider = remote @@ -53,7 +52,7 @@ class RemoteInitErrorSpec extends WordSpec with Matchers { eventually(timeout(30 seconds), interval(800 milliseconds)) { val current = currentThreadIds() // no new threads should remain compared to the start state - (current diff start) should be(empty) + (current.diff(start)) should be(empty) } } } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala index 64c0055fac..0562b578e3 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteRouterSpec.scala @@ -52,8 +52,7 @@ class RemoteRouterSpec extends AkkaSpec(s""" val protocol = if (RARP(system).provider.remoteSettings.Artery.Enabled) "akka" else "akka.tcp" - val conf = ConfigFactory.parseString( - s""" + val conf = ConfigFactory.parseString(s""" akka { actor.deployment { /blub { @@ -111,20 +110,21 @@ class RemoteRouterSpec extends AkkaSpec(s""" val children = replies.toSet children should have size 2 children.map(_.parent) should have size 1 - children foreach (_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) masterSystem.stop(router) } "deploy its children on remote host driven by programatic definition" in { val probe = TestProbe()(masterSystem) val router = masterSystem.actorOf(new RemoteRouterConfig( - RoundRobinPool(2), - Seq(Address(protocol, sysName, "localhost", port))).props(echoActorProps), "blub2") + RoundRobinPool(2), + Seq(Address(protocol, sysName, "localhost", port))).props(echoActorProps), + "blub2") val replies = collectRouteePaths(probe, router, 5) val children = replies.toSet children should have size 2 children.map(_.parent) 
should have size 1 - children foreach (_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) masterSystem.stop(router) } @@ -135,7 +135,7 @@ class RemoteRouterSpec extends AkkaSpec(s""" val children = replies.toSet children.size should be >= 2 children.map(_.parent) should have size 1 - children foreach (_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) masterSystem.stop(router) } @@ -149,14 +149,17 @@ class RemoteRouterSpec extends AkkaSpec(s""" val parents = children.map(_.parent) parents should have size 1 parents.head should ===(router.path) - children foreach (_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) masterSystem.stop(router) } "deploy remote routers based on explicit deployment" in { val probe = TestProbe()(masterSystem) - val router = masterSystem.actorOf(RoundRobinPool(2).props(echoActorProps) - .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"$protocol://${sysName}@localhost:${port}")))), "remote-blub2") + val router = masterSystem.actorOf( + RoundRobinPool(2) + .props(echoActorProps) + .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"$protocol://${sysName}@localhost:${port}")))), + "remote-blub2") router.path.address.toString should ===(s"$protocol://${sysName}@localhost:${port}") val replies = collectRouteePaths(probe, router, 5) val children = replies.toSet @@ -164,14 +167,17 @@ class RemoteRouterSpec extends AkkaSpec(s""" val parents = children.map(_.parent) parents should have size 1 parents.head should ===(router.path) - children foreach (_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should 
===(s"$protocol://${sysName}@localhost:${port}")) masterSystem.stop(router) } "let remote deployment be overridden by local configuration" in { val probe = TestProbe()(masterSystem) - val router = masterSystem.actorOf(RoundRobinPool(2).props(echoActorProps) - .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"$protocol://${sysName}@localhost:${port}")))), "local-blub") + val router = masterSystem.actorOf( + RoundRobinPool(2) + .props(echoActorProps) + .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"$protocol://${sysName}@localhost:${port}")))), + "local-blub") router.path.address.toString should ===(s"akka://$masterSystemName") val replies = collectRouteePaths(probe, router, 5) val children = replies.toSet @@ -179,14 +185,17 @@ class RemoteRouterSpec extends AkkaSpec(s""" val parents = children.map(_.parent) parents should have size 1 parents.head.address should ===(Address(protocol, sysName, "localhost", port)) - children foreach (_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) masterSystem.stop(router) } "let remote deployment router be overridden by local configuration" in { val probe = TestProbe()(masterSystem) - val router = masterSystem.actorOf(RoundRobinPool(2).props(echoActorProps) - .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"$protocol://${sysName}@localhost:${port}")))), "local-blub2") + val router = masterSystem.actorOf( + RoundRobinPool(2) + .props(echoActorProps) + .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"$protocol://${sysName}@localhost:${port}")))), + "local-blub2") router.path.address.toString should ===(s"$protocol://${sysName}@localhost:${port}") val replies = collectRouteePaths(probe, router, 5) val children = replies.toSet @@ -194,14 +203,17 @@ class RemoteRouterSpec extends AkkaSpec(s""" val parents = children.map(_.parent) parents should have size 1 
parents.head should ===(router.path) - children foreach (_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) masterSystem.stop(router) } "let remote deployment be overridden by remote configuration" in { val probe = TestProbe()(masterSystem) - val router = masterSystem.actorOf(RoundRobinPool(2).props(echoActorProps) - .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"$protocol://${sysName}@localhost:${port}")))), "remote-override") + val router = masterSystem.actorOf( + RoundRobinPool(2) + .props(echoActorProps) + .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"$protocol://${sysName}@localhost:${port}")))), + "remote-override") router.path.address.toString should ===(s"$protocol://${sysName}@localhost:${port}") val replies = collectRouteePaths(probe, router, 5) val children = replies.toSet @@ -209,7 +221,7 @@ class RemoteRouterSpec extends AkkaSpec(s""" val parents = children.map(_.parent) parents should have size 1 parents.head should ===(router.path) - children foreach (_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"$protocol://${sysName}@localhost:${port}")) masterSystem.stop(router) } @@ -218,9 +230,10 @@ class RemoteRouterSpec extends AkkaSpec(s""" val escalator = OneForOneStrategy() { case e => probe.ref ! 
e; SupervisorStrategy.Escalate } - val router = masterSystem.actorOf(new RemoteRouterConfig( - RoundRobinPool(1, supervisorStrategy = escalator), - Seq(Address(protocol, sysName, "localhost", port))).props(Props.empty), "blub3") + val router = masterSystem.actorOf( + new RemoteRouterConfig(RoundRobinPool(1, supervisorStrategy = escalator), + Seq(Address(protocol, sysName, "localhost", port))).props(Props.empty), + "blub3") router.tell(GetRoutees, probe.ref) EventFilter[ActorKilledException](occurrences = 1).intercept { diff --git a/akka-remote/src/test/scala/akka/remote/RemoteSettingsSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteSettingsSpec.scala index 55b1e5dad3..fbab6cd005 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteSettingsSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteSettingsSpec.scala @@ -15,7 +15,10 @@ class RemoteSettingsSpec extends WordSpec with Matchers { } "parse akka.remote.log-frame-size-exceeding value as bytes" in { - new RemoteSettings(ConfigFactory.parseString("akka.remote.log-frame-size-exceeding = 100b").withFallback(ConfigFactory.load())).LogFrameSizeExceeding shouldEqual Some(100) + new RemoteSettings( + ConfigFactory + .parseString("akka.remote.log-frame-size-exceeding = 100b") + .withFallback(ConfigFactory.load())).LogFrameSizeExceeding shouldEqual Some(100) } } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala index a6bc82b8b9..9ef0b68911 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteWatcherSpec.scala @@ -13,7 +13,7 @@ object RemoteWatcherSpec { class TestActorProxy(testActor: ActorRef) extends Actor { def receive = { - case msg => testActor forward msg + case msg => testActor.forward(msg) } } @@ -26,12 +26,11 @@ object RemoteWatcherSpec { def createFailureDetector(): FailureDetectorRegistry[Address] = { def 
createFailureDetector(): FailureDetector = - new PhiAccrualFailureDetector( - threshold = 8.0, - maxSampleSize = 200, - minStdDeviation = 100.millis, - acceptableHeartbeatPause = 3.seconds, - firstHeartbeatEstimate = 1.second) + new PhiAccrualFailureDetector(threshold = 8.0, + maxSampleSize = 200, + minStdDeviation = 100.millis, + acceptableHeartbeatPause = 3.seconds, + firstHeartbeatEstimate = 1.second) new DefaultFailureDetectorRegistry(() => createFailureDetector()) } @@ -41,11 +40,11 @@ object RemoteWatcherSpec { final case class Quarantined(address: Address, uid: Option[Long]) } - class TestRemoteWatcher(heartbeatExpectedResponseAfter: FiniteDuration) extends RemoteWatcher( - createFailureDetector, - heartbeatInterval = TurnOff, - unreachableReaperInterval = TurnOff, - heartbeatExpectedResponseAfter = heartbeatExpectedResponseAfter) { + class TestRemoteWatcher(heartbeatExpectedResponseAfter: FiniteDuration) + extends RemoteWatcher(createFailureDetector, + heartbeatInterval = TurnOff, + unreachableReaperInterval = TurnOff, + heartbeatExpectedResponseAfter = heartbeatExpectedResponseAfter) { def this() = this(heartbeatExpectedResponseAfter = TurnOff) @@ -63,8 +62,7 @@ object RemoteWatcherSpec { } -class RemoteWatcherSpec extends AkkaSpec( - """akka { +class RemoteWatcherSpec extends AkkaSpec("""akka { loglevel = INFO log-dead-letters-during-shutdown = false actor.provider = remote @@ -83,9 +81,9 @@ class RemoteWatcherSpec extends AkkaSpec( val remoteAddress = remoteSystem.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress def remoteAddressUid = AddressUidExtension(remoteSystem).addressUid - Seq(system, remoteSystem).foreach(muteDeadLetters( - akka.remote.transport.AssociationHandle.Disassociated.getClass, - akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass)(_)) + Seq(system, remoteSystem).foreach( + muteDeadLetters(akka.remote.transport.AssociationHandle.Disassociated.getClass, + 
akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass)(_)) override def afterTermination(): Unit = { shutdown(remoteSystem) diff --git a/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala b/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala index 95e96743d0..a292e50241 100644 --- a/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemotingSpec.scala @@ -64,7 +64,7 @@ object RemotingSpec { } } - val cfg: Config = ConfigFactory parseString (s""" + val cfg: Config = ConfigFactory.parseString(s""" common-ssl-settings { key-store = "${getClass.getClassLoader.getResource("keystore").getPath}" trust-store = "${getClass.getClassLoader.getResource("truststore").getPath}" @@ -121,10 +121,10 @@ object RemotingSpec { """) def muteSystem(system: ActorSystem): Unit = { - system.eventStream.publish(TestEvent.Mute( - EventFilter.error(start = "AssociationError"), - EventFilter.warning(start = "AssociationError"), - EventFilter.warning(pattern = "received dead letter.*"))) + system.eventStream.publish( + TestEvent.Mute(EventFilter.error(start = "AssociationError"), + EventFilter.warning(start = "AssociationError"), + EventFilter.warning(pattern = "received dead letter.*"))) } } @@ -132,8 +132,7 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D import RemotingSpec._ - val conf = ConfigFactory.parseString( - """ + val conf = ConfigFactory.parseString(""" akka.remote.test { local-address = "test://remote-sys@localhost:12346" maximum-payload-bytes = 48000 bytes @@ -141,12 +140,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D """).withFallback(system.settings.config).resolve() val remoteSystem = ActorSystem("remote-sys", conf) - for ( - (name, proto) <- Seq( - "/gonk" -> "tcp", - "/zagzag" -> "udp", - "/roghtaar" -> "ssl.tcp") - ) deploy(system, Deploy(name, scope = RemoteScope(getOtherAddress(remoteSystem, proto)))) + for 
((name, proto) <- Seq("/gonk" -> "tcp", "/zagzag" -> "udp", "/roghtaar" -> "ssl.tcp")) + deploy(system, Deploy(name, scope = RemoteScope(getOtherAddress(remoteSystem, proto)))) def getOtherAddress(sys: ActorSystem, proto: String) = sys.asInstanceOf[ExtendedActorSystem].provider.getExternalAddressFor(Address(s"akka.$proto", "", "", 0)).get @@ -190,10 +185,11 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D override def atStartup() = { muteSystem(system) - remoteSystem.eventStream.publish(TestEvent.Mute( - EventFilter[EndpointException](), - EventFilter.error(start = "AssociationError"), - EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate|HandleListener)"))) + remoteSystem.eventStream.publish( + TestEvent.Mute(EventFilter[EndpointException](), + EventFilter.error(start = "AssociationError"), + EventFilter.warning( + pattern = "received dead letter.*(InboundPayload|Disassociate|HandleListener)"))) } private def byteStringOfSize(size: Int) = ByteString.fromArray(Array.fill(size)(42: Byte)) @@ -226,54 +222,63 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D } "send dead letters on remote if actor does not exist" in { - EventFilter.warning(pattern = "dead.*buh", occurrences = 1).intercept { - system.actorFor("akka.test://remote-sys@localhost:12346/does/not/exist") ! "buh" - }(remoteSystem) + EventFilter + .warning(pattern = "dead.*buh", occurrences = 1) + .intercept { + system.actorFor("akka.test://remote-sys@localhost:12346/does/not/exist") ! "buh" + }(remoteSystem) } "not be exhausted by sending to broken connections" in { - val tcpOnlyConfig = ConfigFactory.parseString("""akka.remote.enabled-transports = ["akka.remote.netty.tcp"]"""). 
- withFallback(remoteSystem.settings.config) + val tcpOnlyConfig = ConfigFactory + .parseString("""akka.remote.enabled-transports = ["akka.remote.netty.tcp"]""") + .withFallback(remoteSystem.settings.config) val moreSystems = Vector.fill(5)(ActorSystem(remoteSystem.name, tcpOnlyConfig)) - moreSystems foreach { sys => - sys.eventStream.publish(TestEvent.Mute( - EventFilter[EndpointDisassociatedException](), - EventFilter.warning(pattern = "received dead letter.*"))) + moreSystems.foreach { sys => + sys.eventStream.publish( + TestEvent.Mute(EventFilter[EndpointDisassociatedException](), + EventFilter.warning(pattern = "received dead letter.*"))) sys.actorOf(Props[Echo2], name = "echo") } - val moreRefs = moreSystems map (sys => system.actorSelection(RootActorPath(getOtherAddress(sys, "tcp")) / "user" / "echo")) + val moreRefs = + moreSystems.map(sys => system.actorSelection(RootActorPath(getOtherAddress(sys, "tcp")) / "user" / "echo")) val aliveEcho = system.actorSelection(RootActorPath(getOtherAddress(remoteSystem, "tcp")) / "user" / "echo") val n = 100 // first everything is up and running - 1 to n foreach { x => + (1 to n).foreach { x => aliveEcho ! "ping" moreRefs(x % moreSystems.size) ! "ping" } within(5.seconds) { - receiveN(n * 2) foreach { reply => reply should ===(("pong", testActor)) } + receiveN(n * 2).foreach { reply => + reply should ===(("pong", testActor)) + } } // then we shutdown all but one system to simulate broken connections - moreSystems foreach { sys => + moreSystems.foreach { sys => shutdown(sys) } - 1 to n foreach { x => + (1 to n).foreach { x => aliveEcho ! "ping" moreRefs(x % moreSystems.size) ! 
"ping" } // ping messages to aliveEcho should go through even though we use many different broken connections within(5.seconds) { - receiveN(n) foreach { reply => reply should ===(("pong", testActor)) } + receiveN(n).foreach { reply => + reply should ===(("pong", testActor)) + } } } "create and supervise children on remote node" in { val r = system.actorOf(Props[Echo1], "blub") - r.path.toString should ===("akka.test://remote-sys@localhost:12346/remote/akka.test/RemotingSpec@localhost:12345/user/blub") + r.path.toString should ===( + "akka.test://remote-sys@localhost:12346/remote/akka.test/RemotingSpec@localhost:12345/user/blub") r ! 42 expectMsg(42) EventFilter[Exception]("crash", occurrences = 1).intercept { @@ -337,11 +342,13 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D myref ! 44 expectMsg(44) lastSender should ===(grandchild) - lastSender should be theSameInstanceAs grandchild + (lastSender should be).theSameInstanceAs(grandchild) child.asInstanceOf[RemoteActorRef].getParent should ===(l) - system.actorFor("/user/looker1/child") should be theSameInstanceAs child - Await.result(l ? ActorForReq("child/.."), timeout.duration).asInstanceOf[AnyRef] should be theSameInstanceAs l - Await.result(system.actorFor(system / "looker1" / "child") ? ActorForReq(".."), timeout.duration).asInstanceOf[AnyRef] should be theSameInstanceAs l + (system.actorFor("/user/looker1/child") should be).theSameInstanceAs(child) + (Await.result(l ? ActorForReq("child/.."), timeout.duration).asInstanceOf[AnyRef] should be).theSameInstanceAs(l) + (Await + .result(system.actorFor(system / "looker1" / "child") ? ActorForReq(".."), timeout.duration) + .asInstanceOf[AnyRef] should be).theSameInstanceAs(l) watch(child) child ! PoisonPill @@ -379,7 +386,7 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D mysel ! 
54 expectMsg(54) lastSender should ===(grandchild) - lastSender should be theSameInstanceAs grandchild + (lastSender should be).theSameInstanceAs(grandchild) mysel ! Identify(mysel) val grandchild2 = expectMsgType[ActorIdentity].ref grandchild2 should ===(Some(grandchild)) @@ -387,10 +394,10 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D expectMsgType[ActorIdentity].ref should ===(Some(child)) l ! ActorSelReq("child/..") expectMsgType[ActorSelection] ! Identify(None) - expectMsgType[ActorIdentity].ref.get should be theSameInstanceAs l + (expectMsgType[ActorIdentity].ref.get should be).theSameInstanceAs(l) system.actorSelection(system / "looker2" / "child") ! ActorSelReq("..") expectMsgType[ActorSelection] ! Identify(None) - expectMsgType[ActorIdentity].ref.get should be theSameInstanceAs l + (expectMsgType[ActorIdentity].ref.get should be).theSameInstanceAs(l) grandchild ! ((Props[Echo1], "grandgrandchild")) val grandgrandchild = expectMsgType[ActorRef] @@ -455,13 +462,14 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D "not fail ask across node boundaries" in within(5.seconds) { import system.dispatcher - val f = for (_ <- 1 to 1000) yield here ? "ping" mapTo manifest[(String, ActorRef)] + val f = for (_ <- 1 to 1000) yield (here ? "ping").mapTo(manifest[(String, ActorRef)]) Await.result(Future.sequence(f), timeout.duration).map(_._1).toSet should ===(Set("pong")) } "be able to use multiple transports and use the appropriate one (TCP)" in { val r = system.actorOf(Props[Echo1], "gonk") - r.path.toString should ===(s"akka.tcp://remote-sys@localhost:${port(remoteSystem, "tcp")}/remote/akka.tcp/RemotingSpec@localhost:${port(system, "tcp")}/user/gonk") + r.path.toString should ===( + s"akka.tcp://remote-sys@localhost:${port(remoteSystem, "tcp")}/remote/akka.tcp/RemotingSpec@localhost:${port(system, "tcp")}/user/gonk") r ! 
42 expectMsg(42) EventFilter[Exception]("crash", occurrences = 1).intercept { @@ -476,7 +484,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D "be able to use multiple transports and use the appropriate one (UDP)" in { val r = system.actorOf(Props[Echo1], "zagzag") - r.path.toString should ===(s"akka.udp://remote-sys@localhost:${port(remoteSystem, "udp")}/remote/akka.udp/RemotingSpec@localhost:${port(system, "udp")}/user/zagzag") + r.path.toString should ===( + s"akka.udp://remote-sys@localhost:${port(remoteSystem, "udp")}/remote/akka.udp/RemotingSpec@localhost:${port(system, "udp")}/user/zagzag") r ! 42 expectMsg(10.seconds, 42) EventFilter[Exception]("crash", occurrences = 1).intercept { @@ -491,7 +500,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D "be able to use multiple transports and use the appropriate one (SSL)" in { val r = system.actorOf(Props[Echo1], "roghtaar") - r.path.toString should ===(s"akka.ssl.tcp://remote-sys@localhost:${port(remoteSystem, "ssl.tcp")}/remote/akka.ssl.tcp/RemotingSpec@localhost:${port(system, "ssl.tcp")}/user/roghtaar") + r.path.toString should ===( + s"akka.ssl.tcp://remote-sys@localhost:${port(remoteSystem, "ssl.tcp")}/remote/akka.ssl.tcp/RemotingSpec@localhost:${port(system, "ssl.tcp")}/user/roghtaar") r ! 
42 expectMsg(10.seconds, 42) EventFilter[Exception]("crash", occurrences = 1).intercept { @@ -523,47 +533,56 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D "drop sent messages over payload size" in { val oversized = byteStringOfSize(maxPayloadBytes + 1) - EventFilter[OversizedPayloadException](pattern = ".*Discarding oversized payload sent.*", occurrences = 1).intercept { - verifySend(oversized) { - expectNoMsg(1.second) // No AssocitionErrorEvent should be published + EventFilter[OversizedPayloadException](pattern = ".*Discarding oversized payload sent.*", occurrences = 1) + .intercept { + verifySend(oversized) { + expectNoMsg(1.second) // No AssocitionErrorEvent should be published + } } - } } "drop received messages over payload size" in { // Receiver should reply with a message of size maxPayload + 1, which will be dropped and an error logged - EventFilter[OversizedPayloadException](pattern = ".*Discarding oversized payload received.*", occurrences = 1).intercept { - verifySend(maxPayloadBytes + 1) { - expectNoMsg(1.second) // No AssocitionErrorEvent should be published + EventFilter[OversizedPayloadException](pattern = ".*Discarding oversized payload received.*", occurrences = 1) + .intercept { + verifySend(maxPayloadBytes + 1) { + expectNoMsg(1.second) // No AssocitionErrorEvent should be published + } } - } } "be able to serialize a local actor ref from another actor system" in { - val config = ConfigFactory.parseString(""" + val config = ConfigFactory + .parseString( + """ # Additional internal serialization verification need so be off, otherwise it triggers two error messages # instead of one: one for the internal check, and one for the actual remote send -- tripping off this test akka.actor.serialize-messages = off akka.remote.enabled-transports = ["akka.remote.test", "akka.remote.netty.tcp"] akka.remote.test.local-address = "test://other-system@localhost:12347" - """).withFallback(remoteSystem.settings.config) + """) 
+ .withFallback(remoteSystem.settings.config) val otherSystem = ActorSystem("other-system", config) try { val otherGuy = otherSystem.actorOf(Props[Echo2], "other-guy") // check that we use the specified transport address instead of the default val otherGuyRemoteTcp = otherGuy.path.toSerializationFormatWithAddress(getOtherAddress(otherSystem, "tcp")) - val remoteEchoHereTcp = system.actorFor(s"akka.tcp://remote-sys@localhost:${port(remoteSystem, "tcp")}/user/echo") + val remoteEchoHereTcp = + system.actorFor(s"akka.tcp://remote-sys@localhost:${port(remoteSystem, "tcp")}/user/echo") val proxyTcp = system.actorOf(Props(classOf[Proxy], remoteEchoHereTcp, testActor), "proxy-tcp") proxyTcp ! otherGuy expectMsg(3.seconds, ("pong", otherGuyRemoteTcp)) // now check that we fall back to default when we haven't got a corresponding transport val otherGuyRemoteTest = otherGuy.path.toSerializationFormatWithAddress(getOtherAddress(otherSystem, "test")) - val remoteEchoHereSsl = system.actorFor(s"akka.ssl.tcp://remote-sys@localhost:${port(remoteSystem, "ssl.tcp")}/user/echo") + val remoteEchoHereSsl = + system.actorFor(s"akka.ssl.tcp://remote-sys@localhost:${port(remoteSystem, "ssl.tcp")}/user/echo") val proxySsl = system.actorOf(Props(classOf[Proxy], remoteEchoHereSsl, testActor), "proxy-ssl") - EventFilter.warning(start = "Error while resolving ActorRef", occurrences = 1).intercept { - proxySsl ! otherGuy - expectMsg(3.seconds, ("pong", otherGuyRemoteTest)) - }(otherSystem) + EventFilter + .warning(start = "Error while resolving ActorRef", occurrences = 1) + .intercept { + proxySsl ! 
otherGuy + expectMsg(3.seconds, ("pong", otherGuyRemoteTest)) + }(otherSystem) } finally { shutdown(otherSystem) } @@ -616,7 +635,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D AddressTerminatedTopic(thisSystem).subscribe(terminatedListener) val probe = new TestProbe(thisSystem) - val otherSelection = thisSystem.actorSelection(ActorPath.fromString(remoteAddress.toString + "/user/noonethere")) + val otherSelection = + thisSystem.actorSelection(ActorPath.fromString(remoteAddress.toString + "/user/noonethere")) otherSelection.tell("ping", probe.ref) probe.expectNoMsg(1.second) @@ -651,11 +671,15 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D val remoteTransport = new TestTransport(rawRemoteAddress, registry) val remoteTransportProbe = TestProbe() - registry.registerTransport(remoteTransport, associationEventListenerFuture = Future.successful(new Transport.AssociationEventListener { - override def notify(ev: Transport.AssociationEvent): Unit = remoteTransportProbe.ref ! ev - })) + registry.registerTransport(remoteTransport, + associationEventListenerFuture = + Future.successful(new Transport.AssociationEventListener { + override def notify(ev: Transport.AssociationEvent): Unit = + remoteTransportProbe.ref ! 
ev + })) - val outboundHandle = new TestAssociationHandle(rawLocalAddress, rawRemoteAddress, remoteTransport, inbound = false) + val outboundHandle = + new TestAssociationHandle(rawLocalAddress, rawRemoteAddress, remoteTransport, inbound = false) // Hijack associations through the test transport awaitCond(registry.transportsReady(rawLocalAddress, rawRemoteAddress)) @@ -664,7 +688,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D // Force an outbound associate on the real system (which we will hijack) // we send no handshake packet, so this remains a pending connection - val dummySelection = thisSystem.actorSelection(ActorPath.fromString(remoteAddress.toString + "/user/noonethere")) + val dummySelection = + thisSystem.actorSelection(ActorPath.fromString(remoteAddress.toString + "/user/noonethere")) dummySelection.tell("ping", system.deadLetters) val remoteHandle = remoteTransportProbe.expectMsgType[Transport.InboundAssociation] @@ -683,7 +708,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D registry.getRemoteReadHandlerFor(inboundHandle.asInstanceOf[TestAssociationHandle]).get } - val handshakePacket = AkkaPduProtobufCodec.constructAssociate(HandshakeInfo(rawRemoteAddress, uid = 0, cookie = None)) + val handshakePacket = + AkkaPduProtobufCodec.constructAssociate(HandshakeInfo(rawRemoteAddress, uid = 0, cookie = None)) val brokenPacket = AkkaPduProtobufCodec.constructPayload(ByteString(0, 1, 2, 3, 4, 5, 6)) // Finish the inbound handshake so now it is handed up to Remoting @@ -730,11 +756,15 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D val remoteTransport = new TestTransport(rawRemoteAddress, registry) val remoteTransportProbe = TestProbe() - registry.registerTransport(remoteTransport, associationEventListenerFuture = Future.successful(new Transport.AssociationEventListener { - override def notify(ev: Transport.AssociationEvent): Unit = 
remoteTransportProbe.ref ! ev - })) + registry.registerTransport(remoteTransport, + associationEventListenerFuture = + Future.successful(new Transport.AssociationEventListener { + override def notify(ev: Transport.AssociationEvent): Unit = + remoteTransportProbe.ref ! ev + })) - val outboundHandle = new TestAssociationHandle(rawLocalAddress, rawRemoteAddress, remoteTransport, inbound = false) + val outboundHandle = + new TestAssociationHandle(rawLocalAddress, rawRemoteAddress, remoteTransport, inbound = false) // Hijack associations through the test transport awaitCond(registry.transportsReady(rawLocalAddress, rawRemoteAddress)) @@ -743,7 +773,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D // Force an outbound associate on the real system (which we will hijack) // we send no handshake packet, so this remains a pending connection - val dummySelection = thisSystem.actorSelection(ActorPath.fromString(remoteAddress.toString + "/user/noonethere")) + val dummySelection = + thisSystem.actorSelection(ActorPath.fromString(remoteAddress.toString + "/user/noonethere")) dummySelection.tell("ping", system.deadLetters) val remoteHandle = remoteTransportProbe.expectMsgType[Transport.InboundAssociation] @@ -762,7 +793,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D registry.getRemoteReadHandlerFor(inboundHandle.asInstanceOf[TestAssociationHandle]).get } - val handshakePacket = AkkaPduProtobufCodec.constructAssociate(HandshakeInfo(rawRemoteAddress, uid = remoteUID, cookie = None)) + val handshakePacket = + AkkaPduProtobufCodec.constructAssociate(HandshakeInfo(rawRemoteAddress, uid = remoteUID, cookie = None)) // Finish the inbound handshake so now it is handed up to Remoting inboundHandle.write(handshakePacket) @@ -799,7 +831,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D val otherConfig = ConfigFactory.parseString(s""" akka.remote.netty.tcp.port = 
${otherAddress.getPort} """).withFallback(config) - val otherSelection = thisSystem.actorSelection(s"akka.tcp://other-system@localhost:${otherAddress.getPort}/user/echo") + val otherSelection = + thisSystem.actorSelection(s"akka.tcp://other-system@localhost:${otherAddress.getPort}/user/echo") otherSelection.tell("ping", probeSender) probe.expectNoMsg(1.seconds) val otherSystem = ActorSystem("other-system", otherConfig) @@ -837,7 +870,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D val otherConfig = ConfigFactory.parseString(s""" akka.remote.netty.tcp.port = ${otherAddress.getPort} """).withFallback(config) - val otherSelection = thisSystem.actorSelection(s"akka.tcp://other-system@localhost:${otherAddress.getPort}/user/echo") + val otherSelection = + thisSystem.actorSelection(s"akka.tcp://other-system@localhost:${otherAddress.getPort}/user/echo") otherSelection.tell("ping", thisSender) thisProbe.expectNoMsg(1.seconds) val otherSystem = ActorSystem("other-system", otherConfig) @@ -846,7 +880,8 @@ class RemotingSpec extends AkkaSpec(RemotingSpec.cfg) with ImplicitSender with D thisProbe.expectNoMsg(2.seconds) val otherProbe = new TestProbe(otherSystem) val otherSender = otherProbe.ref - val thisSelection = otherSystem.actorSelection(s"akka.tcp://this-system@localhost:${port(thisSystem, "tcp")}/user/echo") + val thisSelection = + otherSystem.actorSelection(s"akka.tcp://this-system@localhost:${port(thisSystem, "tcp")}/user/echo") within(5.seconds) { awaitAssert { thisSelection.tell("ping", otherSender) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index df0611bc32..02cf97c4c8 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -11,7 +11,7 @@ import java.util.zip.GZIPOutputStream import akka.actor._ import 
akka.event.NoMarkerLogging import akka.pattern.ask -import akka.remote.Configuration.{ CipherConfig, getCipherConfig } +import akka.remote.Configuration.{ getCipherConfig, CipherConfig } import akka.remote.transport.netty.SSLSettings import akka.testkit._ import akka.util.Timeout @@ -55,16 +55,28 @@ object Configuration { } """ - final case class CipherConfig(runTest: Boolean, config: Config, cipher: String, localPort: Int, remotePort: Int, + final case class CipherConfig(runTest: Boolean, + config: Config, + cipher: String, + localPort: Int, + remotePort: Int, provider: Option[ConfigSSLEngineProvider]) def getCipherConfig(cipher: String, enabled: String*): CipherConfig = { - val localPort, remotePort = { val s = new java.net.ServerSocket(0); try s.getLocalPort finally s.close() } + val localPort, remotePort = { + val s = new java.net.ServerSocket(0); + try s.getLocalPort + finally s.close() + } try { //if (true) throw new IllegalArgumentException("Ticket1978*Spec isn't enabled") - val config = ConfigFactory.parseString(conf.format(localPort, trustStore, keyStore, cipher, enabled.mkString(", "))) - val fullConfig = config.withFallback(AkkaSpec.testConf).withFallback(ConfigFactory.load).getConfig("akka.remote.netty.ssl.security") + val config = + ConfigFactory.parseString(conf.format(localPort, trustStore, keyStore, cipher, enabled.mkString(", "))) + val fullConfig = config + .withFallback(AkkaSpec.testConf) + .withFallback(ConfigFactory.load) + .getConfig("akka.remote.netty.ssl.security") val settings = new SSLSettings(fullConfig) val sslEngineProvider = new ConfigSSLEngineProvider(NoMarkerLogging, settings) @@ -76,12 +88,12 @@ object Configuration { throw new NoSuchAlgorithmException(sRng) val engine = sslEngineProvider.createClientSSLEngine() - val gotAllSupported = enabled.toSet diff engine.getSupportedCipherSuites.toSet - val gotAllEnabled = enabled.toSet diff engine.getEnabledCipherSuites.toSet + val gotAllSupported = 
enabled.toSet.diff(engine.getSupportedCipherSuites.toSet) + val gotAllEnabled = enabled.toSet.diff(engine.getEnabledCipherSuites.toSet) gotAllSupported.isEmpty || (throw new IllegalArgumentException("Cipher Suite not supported: " + gotAllSupported)) gotAllEnabled.isEmpty || (throw new IllegalArgumentException("Cipher Suite not enabled: " + gotAllEnabled)) engine.getSupportedProtocols.contains(settings.SSLProtocol) || - (throw new IllegalArgumentException("Protocol not supported: " + settings.SSLProtocol)) + (throw new IllegalArgumentException("Protocol not supported: " + settings.SSLProtocol)) CipherConfig(true, config, cipher, localPort, remotePort, Some(sslEngineProvider)) } catch { @@ -91,21 +103,28 @@ object Configuration { } } -class Ticket1978SHA1PRNGSpec extends Ticket1978CommunicationSpec(getCipherConfig("SHA1PRNG", "TLS_RSA_WITH_AES_128_CBC_SHA")) +class Ticket1978SHA1PRNGSpec + extends Ticket1978CommunicationSpec(getCipherConfig("SHA1PRNG", "TLS_RSA_WITH_AES_128_CBC_SHA")) -class Ticket1978DefaultRNGSecureSpec extends Ticket1978CommunicationSpec(getCipherConfig("", "TLS_RSA_WITH_AES_128_CBC_SHA")) +class Ticket1978DefaultRNGSecureSpec + extends Ticket1978CommunicationSpec(getCipherConfig("", "TLS_RSA_WITH_AES_128_CBC_SHA")) -class Ticket1978CrappyRSAWithMD5OnlyHereToMakeSureThingsWorkSpec extends Ticket1978CommunicationSpec(getCipherConfig("", "SSL_RSA_WITH_NULL_MD5")) +class Ticket1978CrappyRSAWithMD5OnlyHereToMakeSureThingsWorkSpec + extends Ticket1978CommunicationSpec(getCipherConfig("", "SSL_RSA_WITH_NULL_MD5")) -class Ticket1978NonExistingRNGSecureSpec extends Ticket1978CommunicationSpec(CipherConfig(false, AkkaSpec.testConf, "NonExistingRNG", 12345, 12346, None)) +class Ticket1978NonExistingRNGSecureSpec + extends Ticket1978CommunicationSpec(CipherConfig(false, AkkaSpec.testConf, "NonExistingRNG", 12345, 12346, None)) -abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) extends AkkaSpec(cipherConfig.config) with ImplicitSender 
{ +abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) + extends AkkaSpec(cipherConfig.config) + with ImplicitSender { implicit val timeout: Timeout = Timeout(10.seconds) - lazy val other: ActorSystem = ActorSystem( - "remote-sys", - ConfigFactory.parseString("akka.remote.netty.ssl.port = " + cipherConfig.remotePort).withFallback(system.settings.config)) + lazy val other: ActorSystem = ActorSystem("remote-sys", + ConfigFactory + .parseString("akka.remote.netty.ssl.port = " + cipherConfig.remotePort) + .withFallback(system.settings.config)) override def afterTermination(): Unit = { if (cipherConfig.runTest) { @@ -117,8 +136,11 @@ abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) exten ("-") must { if (cipherConfig.runTest && preCondition) { - val ignoreMe = other.actorOf(Props(new Actor { def receive = { case ("ping", x) => sender() ! ((("pong", x), sender())) } }), "echo") - val otherAddress = other.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].transport.defaultAddress + val ignoreMe = other.actorOf(Props(new Actor { + def receive = { case ("ping", x) => sender() ! ((("pong", x), sender())) } + }), "echo") + val otherAddress = + other.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].transport.defaultAddress "generate random" in { val rng = cipherConfig.provider.get.createSecureRandom() @@ -169,7 +191,7 @@ abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) exten expectMsgType[ActorIdentity].ref.get } - val f = for (i <- 1 to 1000) yield here ? (("ping", i)) mapTo classTag[((String, Int), ActorRef)] + val f = for (i <- 1 to 1000) yield (here ? 
(("ping", i))).mapTo(classTag[((String, Int), ActorRef)]) Await.result(Future.sequence(f), remaining).map(_._1._1).toSet should ===(Set("pong")) } diff --git a/akka-remote/src/test/scala/akka/remote/TransientSerializationErrorSpec.scala b/akka-remote/src/test/scala/akka/remote/TransientSerializationErrorSpec.scala index 9efd417520..fa1e23a3ef 100644 --- a/akka-remote/src/test/scala/akka/remote/TransientSerializationErrorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/TransientSerializationErrorSpec.scala @@ -43,9 +43,10 @@ object TransientSerializationErrorSpec { } } -abstract class AbstractTransientSerializationErrorSpec(config: Config) extends AkkaSpec( - config.withFallback(ConfigFactory.parseString( - """ +abstract class AbstractTransientSerializationErrorSpec(config: Config) + extends AkkaSpec( + config.withFallback( + ConfigFactory.parseString(""" akka { loglevel = info actor { @@ -89,16 +90,12 @@ abstract class AbstractTransientSerializationErrorSpec(config: Config) extends A expectMsg("ping") // none of these should tear down the connection - List( - ManifestIllegal, - ManifestNotSerializable, - ToBinaryIllegal, - ToBinaryNotSerializable, - NotDeserializable, - IllegalOnDeserialize - ).foreach(msg => - selection.tell(msg, this.testActor) - ) + List(ManifestIllegal, + ManifestNotSerializable, + ToBinaryIllegal, + ToBinaryNotSerializable, + NotDeserializable, + IllegalOnDeserialize).foreach(msg => selection.tell(msg, this.testActor)) // make sure we still have a connection selection.tell("ping", this.testActor) @@ -112,7 +109,8 @@ abstract class AbstractTransientSerializationErrorSpec(config: Config) extends A } } -class TransientSerializationErrorSpec extends AbstractTransientSerializationErrorSpec(ConfigFactory.parseString(""" +class TransientSerializationErrorSpec + extends AbstractTransientSerializationErrorSpec(ConfigFactory.parseString(""" akka.remote.netty.tcp { hostname = localhost port = 0 diff --git 
a/akka-remote/src/test/scala/akka/remote/TypedActorRemoteDeploySpec.scala b/akka-remote/src/test/scala/akka/remote/TypedActorRemoteDeploySpec.scala index 9a4159dd40..cbc5b67e33 100644 --- a/akka-remote/src/test/scala/akka/remote/TypedActorRemoteDeploySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/TypedActorRemoteDeploySpec.scala @@ -8,7 +8,7 @@ import akka.testkit.AkkaSpec import com.typesafe.config._ import scala.concurrent.{ Await, Future } import TypedActorRemoteDeploySpec._ -import akka.actor.{ Deploy, ActorSystem, TypedProps, TypedActor } +import akka.actor.{ ActorSystem, Deploy, TypedActor, TypedProps } import akka.util.IgnoreForScala212 import scala.concurrent.duration._ @@ -37,8 +37,8 @@ class TypedActorRemoteDeploySpec extends AkkaSpec(conf) { def verify[T](f: RemoteNameService => Future[T], expected: T) = { val ts = TypedActor(system) - val echoService: RemoteNameService = ts.typedActorOf( - TypedProps[RemoteNameServiceImpl].withDeploy(Deploy(scope = RemoteScope(remoteAddress)))) + val echoService: RemoteNameService = + ts.typedActorOf(TypedProps[RemoteNameServiceImpl].withDeploy(Deploy(scope = RemoteScope(remoteAddress)))) Await.result(f(echoService), 3.seconds) should ===(expected) val actor = ts.getActorRefFor(echoService) system.stop(actor) diff --git a/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala b/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala index eee349e1ec..45322395d1 100644 --- a/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/UntrustedSpec.scala @@ -36,8 +36,8 @@ object UntrustedSpec { def receive = { case IdentifyReq(path) => context.actorSelection(path).tell(Identify(None), sender()) - case StopChild(name) => context.child(name) foreach context.stop - case msg => testActor forward msg + case StopChild(name) => context.child(name).foreach(context.stop) + case msg => testActor.forward(msg) } } @@ -46,14 +46,14 @@ object UntrustedSpec { testActor ! 
s"${self.path.name} stopped" } def receive = { - case msg => testActor forward msg + case msg => testActor.forward(msg) } } class FakeUser(testActor: ActorRef) extends Actor { context.actorOf(Props(classOf[Child], testActor), "receptionist") def receive = { - case msg => testActor forward msg + case msg => testActor.forward(msg) } } @@ -69,7 +69,8 @@ akka.loglevel = DEBUG # test verifies debug import UntrustedSpec._ - val client = ActorSystem("UntrustedSpec-client", ConfigFactory.parseString(""" + val client = ActorSystem("UntrustedSpec-client", + ConfigFactory.parseString(""" akka.actor.provider = remote akka.remote.netty.tcp.port = 0 """)) @@ -87,8 +88,7 @@ akka.loglevel = DEBUG # test verifies debug lazy val target2 = { val p = TestProbe()(client) - client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell( - IdentifyReq("child2"), p.ref) + client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell(IdentifyReq("child2"), p.ref) p.expectMsgType[ActorIdentity].ref.get } @@ -114,7 +114,7 @@ akka.loglevel = DEBUG # test verifies debug import Logging._ def receive = { case d @ Debug(_, _, msg: String) if msg contains "dropping" => logProbe.ref ! d - case _ => + case _ => } }).withDeploy(Deploy.local), "debugSniffer"), classOf[Logging.Debug]) @@ -134,7 +134,7 @@ akka.loglevel = DEBUG # test verifies debug client.actorOf(Props(new Actor { context.watch(target2) def receive = { - case x => testActor forward x + case x => testActor.forward(x) } }).withDeploy(Deploy.local)) receptionist ! 
StopChild("child2") @@ -151,8 +151,7 @@ akka.loglevel = DEBUG # test verifies debug "discard actor selection with non root anchor" in { val p = TestProbe()(client) - client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell( - Identify(None), p.ref) + client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell(Identify(None), p.ref) val clientReceptionistRef = p.expectMsgType[ActorIdentity].ref.get val sel = ActorSelection(clientReceptionistRef, receptionist.path.toStringWithoutAddress) diff --git a/akka-remote/src/test/scala/akka/remote/artery/ArteryMultiNodeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/ArteryMultiNodeSpec.scala index 7a234aac82..14813edffb 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/ArteryMultiNodeSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/ArteryMultiNodeSpec.scala @@ -14,8 +14,9 @@ import com.typesafe.config.{ Config, ConfigFactory } * Base class for remoting tests what needs to test interaction between a "local" actor system * which is always created (the usual AkkaSpec system), and multiple additional actor systems over artery */ -abstract class ArteryMultiNodeSpec(config: Config) extends AkkaSpec(config.withFallback(ArterySpecSupport.defaultConfig)) - with FlightRecorderSpecIntegration { +abstract class ArteryMultiNodeSpec(config: Config) + extends AkkaSpec(config.withFallback(ArterySpecSupport.defaultConfig)) + with FlightRecorderSpecIntegration { def this() = this(ConfigFactory.empty()) def this(extraConfig: String) = this(ConfigFactory.parseString(extraConfig)) @@ -41,16 +42,12 @@ abstract class ArteryMultiNodeSpec(config: Config) extends AkkaSpec(config.withF * @return A new actor system configured with artery enabled. The system will * automatically be terminated after test is completed to avoid leaks. 
*/ - def newRemoteSystem( - extraConfig: Option[String] = None, - name: Option[String] = None, - setup: Option[ActorSystemSetup] = None): ActorSystem = { + def newRemoteSystem(extraConfig: Option[String] = None, + name: Option[String] = None, + setup: Option[ActorSystemSetup] = None): ActorSystem = { val config = - ArterySpecSupport.newFlightRecorderConfig.withFallback(extraConfig.fold( - localSystem.settings.config - )( - str => ConfigFactory.parseString(str).withFallback(localSystem.settings.config) - )) + ArterySpecSupport.newFlightRecorderConfig.withFallback(extraConfig.fold(localSystem.settings.config)(str => + ConfigFactory.parseString(str).withFallback(localSystem.settings.config))) val sysName = name.getOrElse(nextGeneratedSystemName) val remoteSystem = setup match { diff --git a/akka-remote/src/test/scala/akka/remote/artery/ArterySpecSupport.scala b/akka-remote/src/test/scala/akka/remote/artery/ArterySpecSupport.scala index 247ccae16a..6437c16b4d 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/ArterySpecSupport.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/ArterySpecSupport.scala @@ -48,9 +48,10 @@ object ArterySpecSupport { * Artery enabled, flight recorder enabled, dynamic selection of port on localhost. 
* Combine with [[FlightRecorderSpecIntegration]] or remember to delete flight recorder file if using manually */ - def defaultConfig = newFlightRecorderConfig - .withFallback(staticArteryRemotingConfig) - .withFallback(tlsConfig) // TLS only used if transport=tls-tcp + def defaultConfig = + newFlightRecorderConfig + .withFallback(staticArteryRemotingConfig) + .withFallback(tlsConfig) // TLS only used if transport=tls-tcp // set the test key-store and trust-store properties // TLS only used if transport=tls-tcp, which can be set from specific tests or diff --git a/akka-remote/src/test/scala/akka/remote/artery/BindCanonicalAddressSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/BindCanonicalAddressSpec.scala index 1f5e3ab312..8cef3bc2bb 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/BindCanonicalAddressSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/BindCanonicalAddressSpec.scala @@ -23,8 +23,7 @@ trait BindCanonicalAddressBehaviors { val commonConfig = BindCanonicalAddressSpec.commonConfig(transport) "bind to a random port" in { - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka.remote.artery.canonical.port = 0 """) @@ -37,8 +36,7 @@ trait BindCanonicalAddressBehaviors { "bind to a random port but remoting accepts from a specified port" in { val address = SocketUtil.temporaryServerAddress(InetAddress.getLocalHost.getHostAddress, udp = isUDP) - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka.remote.artery.canonical.port = ${address.getPort} akka.remote.artery.bind.port = 0 """) @@ -47,21 +45,21 @@ trait BindCanonicalAddressBehaviors { getExternal should ===(address.toAkkaAddress("akka")) // May have selected the same random port - bind another in that case while the other still has the canonical port - val internals = if (getInternal.collect { case Address(_, _, _, Some(port)) => port }.toSeq.contains(address.getPort)) { - val sys2 = 
ActorSystem("sys", config.withFallback(commonConfig)) - val secondInternals = getInternal()(sys2) - Await.result(sys2.terminate(), Duration.Inf) - secondInternals - } else { - getInternal - } + val internals = + if (getInternal.collect { case Address(_, _, _, Some(port)) => port }.toSeq.contains(address.getPort)) { + val sys2 = ActorSystem("sys", config.withFallback(commonConfig)) + val secondInternals = getInternal()(sys2) + Await.result(sys2.terminate(), Duration.Inf) + secondInternals + } else { + getInternal + } internals should not contain address.toAkkaAddress("akka") Await.result(sys.terminate(), Duration.Inf) } "bind to a specified bind hostname and remoting aspects from canonical hostname" in { - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka.remote.artery.canonical.port = 0 akka.remote.artery.canonical.hostname = "127.0.0.1" akka.remote.artery.bind.hostname = "localhost" @@ -76,8 +74,7 @@ trait BindCanonicalAddressBehaviors { "bind to a specified port and remoting accepts from a bound port" in { val address = SocketUtil.temporaryServerAddress(InetAddress.getLocalHost.getHostAddress, udp = isUDP) - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka.remote.artery.canonical.port = 0 akka.remote.artery.bind.port = ${address.getPort} """) @@ -89,15 +86,14 @@ trait BindCanonicalAddressBehaviors { } "bind to all interfaces" in { - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka.remote.artery.bind.hostname = "0.0.0.0" """) implicit val sys = ActorSystem("sys", config.withFallback(commonConfig)) getInternal.flatMap(_.port) should contain(getExternal.port.get) - getInternal.map(x => x.host.get should include regex "0.0.0.0".r) // regexp dot is intentional to match IPv4 and 6 addresses + getInternal.map(x => (x.host.get should include).regex("0.0.0.0".r)) // regexp dot is intentional to match IPv4 and 6 addresses 
Await.result(sys.terminate(), Duration.Inf) } @@ -106,19 +102,18 @@ trait BindCanonicalAddressBehaviors { class BindCanonicalAddressSpec extends WordSpec with Matchers with BindCanonicalAddressBehaviors { s"artery with aeron-udp transport" should { - behave like arteryConnectionTest("aeron-udp", isUDP = true) + behave.like(arteryConnectionTest("aeron-udp", isUDP = true)) } s"artery with tcp transport" should { - behave like arteryConnectionTest("tcp", isUDP = false) + behave.like(arteryConnectionTest("tcp", isUDP = false)) } s"artery with tls-tcp transport" should { - behave like arteryConnectionTest("tls-tcp", isUDP = false) + behave.like(arteryConnectionTest("tls-tcp", isUDP = false)) } } object BindCanonicalAddressSpec { - def commonConfig(transport: String) = ConfigFactory.parseString( - s""" + def commonConfig(transport: String) = ConfigFactory.parseString(s""" akka { actor.provider = remote remote.artery.enabled = true diff --git a/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala index 6d0e0e3b1f..77f6de968e 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/DuplicateHandshakeSpec.scala @@ -33,8 +33,11 @@ class DuplicateHandshakeSpec extends AkkaSpec with ImplicitSender { val addressA = UniqueAddress(Address("akka", "sysA", "hostA", 1001), 1) val addressB = UniqueAddress(Address("akka", "sysB", "hostB", 1002), 2) - private def setupStream(inboundContext: InboundContext, timeout: FiniteDuration = 5.seconds): (TestPublisher.Probe[AnyRef], TestSubscriber.Probe[Any]) = { - TestSource.probe[AnyRef] + private def setupStream( + inboundContext: InboundContext, + timeout: FiniteDuration = 5.seconds): (TestPublisher.Probe[AnyRef], TestSubscriber.Probe[Any]) = { + TestSource + .probe[AnyRef] .map { msg => val association = inboundContext.association(addressA.uid) val ser = 
serialization.serializerFor(msg.getClass) @@ -45,8 +48,16 @@ class DuplicateHandshakeSpec extends AkkaSpec with ImplicitSender { } val env = new ReusableInboundEnvelope - env.init(recipient = OptionVal.None, sender = OptionVal.None, originUid = addressA.uid, - serializerId, manifest, flags = 0, envelopeBuffer = null, association, lane = 0) + env + .init(recipient = OptionVal.None, + sender = OptionVal.None, + originUid = addressA.uid, + serializerId, + manifest, + flags = 0, + envelopeBuffer = null, + association, + lane = 0) .withMessage(msg) env } diff --git a/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala index 64e620e95e..4f1b74965c 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/EnvelopeBufferSpec.scala @@ -16,20 +16,15 @@ class EnvelopeBufferSpec extends AkkaSpec { import CompressionTestUtils._ object TestCompressor extends InboundCompressions { - val refToIdx: Map[ActorRef, Int] = Map( - minimalRef("compressable0") -> 0, - minimalRef("compressable1") -> 1, - minimalRef("reallylongcompressablestring") -> 2) + val refToIdx: Map[ActorRef, Int] = Map(minimalRef("compressable0") -> 0, + minimalRef("compressable1") -> 1, + minimalRef("reallylongcompressablestring") -> 2) val idxToRef: Map[Int, ActorRef] = refToIdx.map(_.swap) - val serializerToIdx = Map( - "serializer0" -> 0, - "serializer1" -> 1) + val serializerToIdx = Map("serializer0" -> 0, "serializer1" -> 1) val idxToSer = serializerToIdx.map(_.swap) - val manifestToIdx = Map( - "manifest0" -> 0, - "manifest1" -> 1) + val manifestToIdx = Map("manifest0" -> 0, "manifest1" -> 1) val idxToManifest = manifestToIdx.map(_.swap) val outboundActorRefTable: CompressionTable[ActorRef] = @@ -39,11 +34,13 @@ class EnvelopeBufferSpec extends AkkaSpec { CompressionTable(17L, version = 35.toByte, manifestToIdx) override def 
hitActorRef(originUid: Long, remote: Address, ref: ActorRef, n: Int): Unit = () - override def decompressActorRef(originUid: Long, tableVersion: Byte, idx: Int): OptionVal[ActorRef] = OptionVal(idxToRef(idx)) + override def decompressActorRef(originUid: Long, tableVersion: Byte, idx: Int): OptionVal[ActorRef] = + OptionVal(idxToRef(idx)) override def confirmActorRefCompressionAdvertisement(originUid: Long, tableVersion: Byte): Unit = () override def hitClassManifest(originUid: Long, remote: Address, manifest: String, n: Int): Unit = () - override def decompressClassManifest(originUid: Long, tableVersion: Byte, idx: Int): OptionVal[String] = OptionVal(idxToManifest(idx)) + override def decompressClassManifest(originUid: Long, tableVersion: Byte, idx: Int): OptionVal[String] = + OptionVal(idxToManifest(idx)) override def confirmClassManifestCompressionAdvertisement(originUid: Long, tableVersion: Byte): Unit = () override def close(originUid: Long): Unit = () @@ -67,16 +64,17 @@ class EnvelopeBufferSpec extends AkkaSpec { val originUid = 1L "be able to encode and decode headers with compressed literals" in { - headerIn setVersion version - headerIn setUid 42 - headerIn setSerializer 4 - headerIn setRecipientActorRef minimalRef("compressable1") - headerIn setSenderActorRef minimalRef("compressable0") + headerIn.setVersion(version) + headerIn.setUid(42) + headerIn.setSerializer(4) + headerIn.setRecipientActorRef(minimalRef("compressable1")) + headerIn.setSenderActorRef(minimalRef("compressable0")) - headerIn setManifest "manifest1" + headerIn.setManifest("manifest1") envelope.writeHeader(headerIn) - envelope.byteBuffer.position() should ===(EnvelopeBuffer.MetadataContainerAndLiteralSectionOffset) // Fully compressed header + envelope.byteBuffer + .position() should ===(EnvelopeBuffer.MetadataContainerAndLiteralSectionOffset) // Fully compressed header envelope.byteBuffer.flip() envelope.parseHeader(headerOut) @@ -86,9 +84,11 @@ class EnvelopeBufferSpec extends AkkaSpec 
{ headerOut.inboundActorRefCompressionTableVersion should ===(28.toByte) headerOut.inboundClassManifestCompressionTableVersion should ===(35.toByte) headerOut.serializer should ===(4) - headerOut.senderActorRef(originUid).get.path.toSerializationFormat should ===("akka://EnvelopeBufferSpec/compressable0") + headerOut.senderActorRef(originUid).get.path.toSerializationFormat should ===( + "akka://EnvelopeBufferSpec/compressable0") headerOut.senderActorRefPath should ===(OptionVal.None) - headerOut.recipientActorRef(originUid).get.path.toSerializationFormat should ===("akka://EnvelopeBufferSpec/compressable1") + headerOut.recipientActorRef(originUid).get.path.toSerializationFormat should ===( + "akka://EnvelopeBufferSpec/compressable1") headerOut.recipientActorRefPath should ===(OptionVal.None) headerOut.manifest(originUid).get should ===("manifest1") } @@ -97,18 +97,18 @@ class EnvelopeBufferSpec extends AkkaSpec { val senderRef = minimalRef("uncompressable0") val recipientRef = minimalRef("uncompressable11") - headerIn setVersion version - headerIn setUid 42 - headerIn setSerializer 4 - headerIn setSenderActorRef senderRef - headerIn setRecipientActorRef recipientRef - headerIn setManifest "uncompressable3333" + headerIn.setVersion(version) + headerIn.setUid(42) + headerIn.setSerializer(4) + headerIn.setSenderActorRef(senderRef) + headerIn.setRecipientActorRef(recipientRef) + headerIn.setManifest("uncompressable3333") val expectedHeaderLength = EnvelopeBuffer.MetadataContainerAndLiteralSectionOffset + // Constant header part - 2 + lengthOfSerializedActorRefPath(senderRef) + // Length field + literal - 2 + lengthOfSerializedActorRefPath(recipientRef) + // Length field + literal - 2 + "uncompressable3333".length // Length field + literal + 2 + lengthOfSerializedActorRefPath(senderRef) + // Length field + literal + 2 + lengthOfSerializedActorRefPath(recipientRef) + // Length field + literal + 2 + "uncompressable3333".length // Length field + literal 
envelope.writeHeader(headerIn) envelope.byteBuffer.position() should ===(expectedHeaderLength) @@ -129,17 +129,17 @@ class EnvelopeBufferSpec extends AkkaSpec { "be able to encode and decode headers with mixed literals" in { val recipientRef = minimalRef("uncompressable1") - headerIn setVersion version - headerIn setUid 42 - headerIn setSerializer 4 - headerIn setSenderActorRef minimalRef("reallylongcompressablestring") - headerIn setRecipientActorRef recipientRef - headerIn setManifest "manifest1" + headerIn.setVersion(version) + headerIn.setUid(42) + headerIn.setSerializer(4) + headerIn.setSenderActorRef(minimalRef("reallylongcompressablestring")) + headerIn.setRecipientActorRef(recipientRef) + headerIn.setManifest("manifest1") envelope.writeHeader(headerIn) envelope.byteBuffer.position() should ===( EnvelopeBuffer.MetadataContainerAndLiteralSectionOffset + - 2 + lengthOfSerializedActorRefPath(recipientRef)) + 2 + lengthOfSerializedActorRefPath(recipientRef)) envelope.byteBuffer.flip() envelope.parseHeader(headerOut) @@ -147,7 +147,8 @@ class EnvelopeBufferSpec extends AkkaSpec { headerOut.version should ===(version) headerOut.uid should ===(42L) headerOut.serializer should ===(4) - headerOut.senderActorRef(originUid).get.path.toSerializationFormat should ===("akka://EnvelopeBufferSpec/reallylongcompressablestring") + headerOut.senderActorRef(originUid).get.path.toSerializationFormat should ===( + "akka://EnvelopeBufferSpec/reallylongcompressablestring") headerOut.senderActorRefPath should ===(OptionVal.None) headerOut.recipientActorRefPath should ===(OptionVal.Some("akka://EnvelopeBufferSpec/uncompressable1")) headerOut.recipientActorRef(originUid) should ===(OptionVal.None) @@ -155,18 +156,18 @@ class EnvelopeBufferSpec extends AkkaSpec { val senderRef = minimalRef("uncompressable0") - headerIn setVersion version - headerIn setUid Long.MinValue - headerIn setSerializer -1 - headerIn setSenderActorRef senderRef - headerIn setRecipientActorRef 
minimalRef("reallylongcompressablestring") - headerIn setManifest "longlonglongliteralmanifest" + headerIn.setVersion(version) + headerIn.setUid(Long.MinValue) + headerIn.setSerializer(-1) + headerIn.setSenderActorRef(senderRef) + headerIn.setRecipientActorRef(minimalRef("reallylongcompressablestring")) + headerIn.setManifest("longlonglongliteralmanifest") envelope.writeHeader(headerIn) envelope.byteBuffer.position() should ===( EnvelopeBuffer.MetadataContainerAndLiteralSectionOffset + - 2 + lengthOfSerializedActorRefPath(senderRef) + - 2 + "longlonglongliteralmanifest".length) + 2 + lengthOfSerializedActorRefPath(senderRef) + + 2 + "longlonglongliteralmanifest".length) envelope.byteBuffer.flip() envelope.parseHeader(headerOut) @@ -176,7 +177,8 @@ class EnvelopeBufferSpec extends AkkaSpec { headerOut.serializer should ===(-1) headerOut.senderActorRefPath should ===(OptionVal.Some("akka://EnvelopeBufferSpec/uncompressable0")) headerOut.senderActorRef(originUid) should ===(OptionVal.None) - headerOut.recipientActorRef(originUid).get.path.toSerializationFormat should ===("akka://EnvelopeBufferSpec/reallylongcompressablestring") + headerOut.recipientActorRef(originUid).get.path.toSerializationFormat should ===( + "akka://EnvelopeBufferSpec/reallylongcompressablestring") headerOut.recipientActorRefPath should ===(OptionVal.None) headerOut.manifest(originUid).get should ===("longlonglongliteralmanifest") } @@ -184,12 +186,12 @@ class EnvelopeBufferSpec extends AkkaSpec { "be able to encode and decode headers with mixed literals and payload" in { val payload = ByteString("Hello Artery!") - headerIn setVersion version - headerIn setUid 42 - headerIn setSerializer 4 - headerIn setSenderActorRef minimalRef("reallylongcompressablestring") - headerIn setRecipientActorRef minimalRef("uncompressable1") - headerIn setManifest "manifest1" + headerIn.setVersion(version) + headerIn.setUid(42) + headerIn.setSerializer(4) + 
headerIn.setSenderActorRef(minimalRef("reallylongcompressablestring")) + headerIn.setRecipientActorRef(minimalRef("uncompressable1")) + headerIn.setManifest("manifest1") envelope.writeHeader(headerIn) envelope.byteBuffer.put(payload.toByteBuffer) @@ -200,7 +202,8 @@ class EnvelopeBufferSpec extends AkkaSpec { headerOut.version should ===(version) headerOut.uid should ===(42L) headerOut.serializer should ===(4) - headerOut.senderActorRef(originUid).get.path.toSerializationFormat should ===("akka://EnvelopeBufferSpec/reallylongcompressablestring") + headerOut.senderActorRef(originUid).get.path.toSerializationFormat should ===( + "akka://EnvelopeBufferSpec/reallylongcompressablestring") headerOut.senderActorRefPath should ===(OptionVal.None) headerOut.recipientActorRefPath should ===(OptionVal.Some("akka://EnvelopeBufferSpec/uncompressable1")) headerOut.recipientActorRef(originUid) should ===(OptionVal.None) diff --git a/akka-remote/src/test/scala/akka/remote/artery/FlightRecorderSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/FlightRecorderSpec.scala index cdc52901e6..c75c31d6a8 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/FlightRecorderSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/FlightRecorderSpec.scala @@ -225,7 +225,8 @@ class FlightRecorderSpec extends AkkaSpec { entries.exists(_.dirty) should be(false) // Note the (2 * FlightRecorder.HiFreqBatchSize) initial sequence number. // This is because the overflow by 100 events rotates out two records, not just 100. 
- entries.map(_.code.toInt).sorted should ===((2 * FlightRecorder.HiFreqBatchSize) until (EffectiveHighFreqWindow + 100)) + entries.map(_.code.toInt).sorted should ===( + (2 * FlightRecorder.HiFreqBatchSize) until (EffectiveHighFreqWindow + 100)) entries.forall(entry => entry.param == 42) should be(true) // Timestamps are monotonic @@ -421,7 +422,8 @@ class FlightRecorderSpec extends AkkaSpec { randomAccessFile.setLength(FlightRecorder.TotalSize) randomAccessFile.close() - channel = FileChannel.open(file.toPath, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.READ) + channel = + FileChannel.open(file.toPath, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.READ) recorder = new FlightRecorder(channel) reader = new FlightRecorderReader(channel) body(recorder, reader, channel) diff --git a/akka-remote/src/test/scala/akka/remote/artery/HandshakeDenySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/HandshakeDenySpec.scala index aa2c8b9554..0374b550b2 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/HandshakeDenySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/HandshakeDenySpec.scala @@ -32,12 +32,14 @@ class HandshakeDenySpec extends ArteryMultiNodeSpec(HandshakeDenySpec.commonConf systemB.actorOf(TestActors.echoActorProps, "echo") - EventFilter.warning(start = "Dropping Handshake Request from").intercept { - sel ! Identify("hi echo") - // handshake timeout and Identify message in SendQueue is sent to deadLetters, - // which generates the ActorIdentity(None) - expectMsg(5.seconds, ActorIdentity("hi echo", None)) - }(systemB) + EventFilter + .warning(start = "Dropping Handshake Request from") + .intercept { + sel ! 
Identify("hi echo") + // handshake timeout and Identify message in SendQueue is sent to deadLetters, + // which generates the ActorIdentity(None) + expectMsg(5.seconds, ActorIdentity("hi echo", None)) + }(systemB) } } diff --git a/akka-remote/src/test/scala/akka/remote/artery/HandshakeFailureSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/HandshakeFailureSpec.scala index 2f5e8bf720..3a9c24b52c 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/HandshakeFailureSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/HandshakeFailureSpec.scala @@ -32,9 +32,8 @@ class HandshakeFailureSpec extends ArteryMultiNodeSpec(HandshakeFailureSpec.comm sel ! "hello" expectNoMessage(3.seconds) // longer than handshake-timeout - val systemB = newRemoteSystem( - name = Some("systemB"), - extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) + val systemB = + newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) systemB.actorOf(TestActors.echoActorProps, "echo") within(10.seconds) { diff --git a/akka-remote/src/test/scala/akka/remote/artery/HandshakeRetrySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/HandshakeRetrySpec.scala index 45ed52055a..be72336846 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/HandshakeRetrySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/HandshakeRetrySpec.scala @@ -31,10 +31,8 @@ class HandshakeRetrySpec extends ArteryMultiNodeSpec(HandshakeRetrySpec.commonCo sel ! 
"hello" expectNoMessage(1.second) - val systemB = newRemoteSystem( - name = Some("systemB"), - extraConfig = Some(s"akka.remote.artery.canonical.port = $portB") - ) + val systemB = + newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) systemB.actorOf(TestActors.echoActorProps, "echo") expectMsg("hello") diff --git a/akka-remote/src/test/scala/akka/remote/artery/ImmutableLongMapSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/ImmutableLongMapSpec.scala index add84ac8db..bbf755cb65 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/ImmutableLongMapSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/ImmutableLongMapSpec.scala @@ -38,16 +38,15 @@ class ImmutableLongMapSpec extends WordSpec with Matchers { val m5 = m4.updated(15L, "15") m5.keysIterator.toList should be(List(5L, 10L, 15L, 20L)) - m5.keysIterator.map(m5.get).toList should be(List(OptionVal("5"), OptionVal("10"), OptionVal("15"), - OptionVal("20"))) + m5.keysIterator.map(m5.get).toList should be( + List(OptionVal("5"), OptionVal("10"), OptionVal("15"), OptionVal("20"))) } "replace entries" in { val m1 = ImmutableLongMap.empty[String].updated(10L, "10a").updated(10, "10b") m1.keysIterator.map(m1.get).toList should be(List(OptionVal("10b"))) - val m2 = m1.updated(20L, "20a").updated(30L, "30a") - .updated(20L, "20b").updated(30L, "30b") + val m2 = m1.updated(20L, "20a").updated(30L, "30a").updated(20L, "20b").updated(30L, "30b") m2.keysIterator.map(m2.get).toList should be(List(OptionVal("10b"), OptionVal("20b"), OptionVal("30b"))) } @@ -68,13 +67,29 @@ class ImmutableLongMapSpec extends WordSpec with Matchers { ImmutableLongMap.empty[String].updated(10L, "10").updated(20, "20").updated(30, "30").hashCode should be( ImmutableLongMap.empty[String].updated(10L, "10").updated(20, "20").updated(30, "30").hashCode) - ImmutableLongMap.empty[String].updated(10L, "10").updated(20, "20") should not be 
(ImmutableLongMap.empty[String].updated(10L, "10")) + ImmutableLongMap.empty[String].updated(10L, "10").updated(20, "20") should not be (ImmutableLongMap + .empty[String] + .updated(10L, "10")) - ImmutableLongMap.empty[String].updated(10L, "10").updated(20, "20").updated(30, "30") should not be ( - ImmutableLongMap.empty[String].updated(10L, "10").updated(20, "20b").updated(30, "30")) + ImmutableLongMap + .empty[String] + .updated(10L, "10") + .updated(20, "20") + .updated(30, "30") should not be (ImmutableLongMap + .empty[String] + .updated(10L, "10") + .updated(20, "20b") + .updated(30, "30")) - ImmutableLongMap.empty[String].updated(10L, "10").updated(20, "20").updated(30, "30") should not be ( - ImmutableLongMap.empty[String].updated(10L, "10").updated(20, "20b").updated(31, "30")) + ImmutableLongMap + .empty[String] + .updated(10L, "10") + .updated(20, "20") + .updated(30, "30") should not be (ImmutableLongMap + .empty[String] + .updated(10L, "10") + .updated(20, "20b") + .updated(31, "30")) ImmutableLongMap.empty[String] should be(ImmutableLongMap.empty[String]) ImmutableLongMap.empty[String].hashCode should be(ImmutableLongMap.empty[String].hashCode) diff --git a/akka-remote/src/test/scala/akka/remote/artery/InboundControlJunctionSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/InboundControlJunctionSpec.scala index a5c9f04f02..87663ab617 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/InboundControlJunctionSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/InboundControlJunctionSpec.scala @@ -29,11 +29,12 @@ object InboundControlJunctionSpec { } class InboundControlJunctionSpec - extends AkkaSpec(""" + extends AkkaSpec(""" akka.actor.serialization-bindings { "akka.remote.artery.InboundControlJunctionSpec$TestControlMessage" = java } - """) with ImplicitSender { + """) + with ImplicitSender { import InboundControlJunctionSpec._ val matSettings = ActorMaterializerSettings(system).withFuzzing(true) @@ -49,7 +50,8 @@ class 
InboundControlJunctionSpec val inboundContext = new TestInboundContext(localAddress = addressB) val recipient = OptionVal.None // not used - val ((upstream, controlSubject), downstream) = TestSource.probe[AnyRef] + val ((upstream, controlSubject), downstream) = TestSource + .probe[AnyRef] .map(msg => InboundEnvelope(recipient, msg, OptionVal.None, addressA.uid, OptionVal.None)) .viaMat(new InboundControlJunction)(Keep.both) .map { case env: InboundEnvelope => env.message } diff --git a/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala index 142461bb7d..3a746322e6 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/InboundHandshakeSpec.scala @@ -36,11 +36,14 @@ class InboundHandshakeSpec extends AkkaSpec with ImplicitSender { val addressA = UniqueAddress(Address("akka", "sysA", "hostA", 1001), 1) val addressB = UniqueAddress(Address("akka", "sysB", "hostB", 1002), 2) - private def setupStream(inboundContext: InboundContext, timeout: FiniteDuration = 5.seconds): (TestPublisher.Probe[AnyRef], TestSubscriber.Probe[Any]) = { + private def setupStream( + inboundContext: InboundContext, + timeout: FiniteDuration = 5.seconds): (TestPublisher.Probe[AnyRef], TestSubscriber.Probe[Any]) = { val recipient = OptionVal.None // not used - TestSource.probe[AnyRef] - .map(msg => InboundEnvelope(recipient, msg, OptionVal.None, addressA.uid, - inboundContext.association(addressA.uid))) + TestSource + .probe[AnyRef] + .map(msg => + InboundEnvelope(recipient, msg, OptionVal.None, addressA.uid, inboundContext.association(addressA.uid))) .via(new InboundHandshake(inboundContext, inControlStream = true)) .map { case env: InboundEnvelope => env.message } .toMat(TestSink.probe[Any])(Keep.both) @@ -70,8 +73,9 @@ class InboundHandshakeSpec extends AkkaSpec with ImplicitSender { 
upstream.sendNext(HandshakeReq(addressA, addressB.address)) upstream.sendNext("msg1") downstream.expectNext("msg1") - val uniqueRemoteAddress = Await.result( - inboundContext.association(addressA.address).associationState.uniqueRemoteAddress, remainingOrDefault) + val uniqueRemoteAddress = + Await.result(inboundContext.association(addressA.address).associationState.uniqueRemoteAddress, + remainingOrDefault) uniqueRemoteAddress should ===(addressA) downstream.cancel() } diff --git a/akka-remote/src/test/scala/akka/remote/artery/LargeMessagesStreamSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/LargeMessagesStreamSpec.scala index b8bad241b2..370c6ea577 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/LargeMessagesStreamSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/LargeMessagesStreamSpec.scala @@ -24,8 +24,7 @@ object LargeMessagesStreamSpec { } } -class LargeMessagesStreamSpec extends ArteryMultiNodeSpec( - """ +class LargeMessagesStreamSpec extends ArteryMultiNodeSpec(""" akka { remote.artery.large-message-destinations = [ "/user/large" ] } diff --git a/akka-remote/src/test/scala/akka/remote/artery/LateConnectSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/LateConnectSpec.scala index 8c49648120..a43a41d34f 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/LateConnectSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/LateConnectSpec.scala @@ -26,9 +26,8 @@ object LateConnectSpec { class LateConnectSpec extends ArteryMultiNodeSpec(LateConnectSpec.config) with ImplicitSender { val portB = freePort() - lazy val systemB = newRemoteSystem( - name = Some("systemB"), - extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) + lazy val systemB = + newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) "Connection" must { diff --git a/akka-remote/src/test/scala/akka/remote/artery/LruBoundedCacheSpec.scala 
b/akka-remote/src/test/scala/akka/remote/artery/LruBoundedCacheSpec.scala index f2a39768c0..15d980571e 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/LruBoundedCacheSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/LruBoundedCacheSpec.scala @@ -11,7 +11,8 @@ import scala.util.Random class LruBoundedCacheSpec extends AkkaSpec { - class TestCache(_capacity: Int, threshold: Int, hashSeed: String = "") extends LruBoundedCache[String, String](_capacity, threshold) { + class TestCache(_capacity: Int, threshold: Int, hashSeed: String = "") + extends LruBoundedCache[String, String](_capacity, threshold) { private var cntr = 0 override protected def compute(k: String): String = { diff --git a/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala index 5f5af729b2..2ea47e9287 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/MetadataCarryingSpec.scala @@ -73,7 +73,11 @@ class TestInstrument(system: ExtendedActorSystem) extends RemoteInstrument { case _ => } - override def remoteMessageReceived(recipient: ActorRef, message: Object, sender: ActorRef, size: Int, time: Long): Unit = + override def remoteMessageReceived(recipient: ActorRef, + message: Object, + sender: ActorRef, + size: Int, + time: Long): Unit = message match { case _: MetadataCarryingSpec.Ping | ActorSelectionMessage(_: MetadataCarryingSpec.Ping, _, _) => MetadataCarryingSpy(system).ref.foreach(_ ! 
RemoteMessageReceived(recipient, message, sender, size, time)) @@ -93,8 +97,7 @@ object MetadataCarryingSpec { } } -class MetadataCarryingSpec extends ArteryMultiNodeSpec( - """ +class MetadataCarryingSpec extends ArteryMultiNodeSpec(""" akka { remote.artery.advanced { instruments = [ "akka.remote.artery.TestInstrument" ] diff --git a/akka-remote/src/test/scala/akka/remote/artery/OutboundControlJunctionSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/OutboundControlJunctionSpec.scala index 2093656f10..885127152e 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/OutboundControlJunctionSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/OutboundControlJunctionSpec.scala @@ -35,7 +35,8 @@ class OutboundControlJunctionSpec extends AkkaSpec with ImplicitSender { val inboundContext = new TestInboundContext(localAddress = addressA) val outboundContext = inboundContext.association(addressB.address) - val ((upstream, controlIngress), downstream) = TestSource.probe[String] + val ((upstream, controlIngress), downstream) = TestSource + .probe[String] .map(msg => outboundEnvelopePool.acquire().init(OptionVal.None, msg, OptionVal.None)) .viaMat(new OutboundControlJunction(outboundContext, outboundEnvelopePool))(Keep.both) .map(env => env.message) diff --git a/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala index d410aa140b..b2bcf20b06 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/OutboundHandshakeSpec.scala @@ -30,16 +30,24 @@ class OutboundHandshakeSpec extends AkkaSpec with ImplicitSender { private val outboundEnvelopePool = ReusableOutboundEnvelope.createObjectPool(capacity = 16) - private def setupStream( - outboundContext: OutboundContext, timeout: FiniteDuration = 5.seconds, - retryInterval: FiniteDuration = 10.seconds, - injectHandshakeInterval: 
FiniteDuration = 10.seconds, - livenessProbeInterval: Duration = Duration.Undefined): (TestPublisher.Probe[String], TestSubscriber.Probe[Any]) = { + private def setupStream(outboundContext: OutboundContext, + timeout: FiniteDuration = 5.seconds, + retryInterval: FiniteDuration = 10.seconds, + injectHandshakeInterval: FiniteDuration = 10.seconds, + livenessProbeInterval: Duration = Duration.Undefined) + : (TestPublisher.Probe[String], TestSubscriber.Probe[Any]) = { - TestSource.probe[String] + TestSource + .probe[String] .map(msg => outboundEnvelopePool.acquire().init(OptionVal.None, msg, OptionVal.None)) - .via(new OutboundHandshake(system, outboundContext, outboundEnvelopePool, timeout, retryInterval, - injectHandshakeInterval, livenessProbeInterval)) + .via( + new OutboundHandshake(system, + outboundContext, + outboundEnvelopePool, + timeout, + retryInterval, + injectHandshakeInterval, + livenessProbeInterval)) .map(env => env.message) .toMat(TestSink.probe[Any])(Keep.both) .run() diff --git a/akka-remote/src/test/scala/akka/remote/artery/OutboundIdleShutdownSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/OutboundIdleShutdownSpec.scala index d673061bb0..6b66727f49 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/OutboundIdleShutdownSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/OutboundIdleShutdownSpec.scala @@ -27,9 +27,8 @@ class OutboundIdleShutdownSpec extends ArteryMultiNodeSpec(s""" } """) with ImplicitSender with Eventually { - override implicit val patience: PatienceConfig = PatienceConfig( - testKitSettings.DefaultTimeout.duration * 2, - Span(200, org.scalatest.time.Millis)) + override implicit val patience: PatienceConfig = + PatienceConfig(testKitSettings.DefaultTimeout.duration * 2, Span(200, org.scalatest.time.Millis)) private def isArteryTcp: Boolean = RARP(system).provider.transport.asInstanceOf[ArteryTransport].settings.Transport == ArterySettings.Tcp @@ -50,19 +49,17 @@ class OutboundIdleShutdownSpec extends 
ArteryMultiNodeSpec(s""" "Outbound streams" should { - "be stopped when they are idle" in withAssociation { - (_, remoteAddress, remoteEcho, localArtery, localProbe) => + "be stopped when they are idle" in withAssociation { (_, remoteAddress, remoteEcho, localArtery, localProbe) => + val association = localArtery.association(remoteAddress) + withClue("When initiating a connection, both the control and ordinary streams are opened") { + assertStreamActive(association, Association.ControlQueueIndex, expected = true) + assertStreamActive(association, Association.OrdinaryQueueIndex, expected = true) + } - val association = localArtery.association(remoteAddress) - withClue("When initiating a connection, both the control and ordinary streams are opened") { - assertStreamActive(association, Association.ControlQueueIndex, expected = true) - assertStreamActive(association, Association.OrdinaryQueueIndex, expected = true) - } - - eventually { - assertStreamActive(association, Association.ControlQueueIndex, expected = false) - assertStreamActive(association, Association.OrdinaryQueueIndex, expected = false) - } + eventually { + assertStreamActive(association, Association.ControlQueueIndex, expected = false) + assertStreamActive(association, Association.OrdinaryQueueIndex, expected = false) + } } "still be resumable after they have been stopped" in withAssociation { @@ -89,7 +86,6 @@ class OutboundIdleShutdownSpec extends ArteryMultiNodeSpec(s""" "eliminate quarantined association when not used" in withAssociation { (_, remoteAddress, remoteEcho, localArtery, localProbe) => - val association = localArtery.association(remoteAddress) withClue("When initiating a connection, both the control and ordinary streams are opened") { assertStreamActive(association, Association.ControlQueueIndex, expected = true) @@ -113,7 +109,6 @@ class OutboundIdleShutdownSpec extends ArteryMultiNodeSpec(s""" "remove inbound compression after quarantine" in withAssociation { (_, remoteAddress, 
remoteEcho, localArtery, localProbe) => - val association = localArtery.association(remoteAddress) val remoteUid = association.associationState.uniqueRemoteAddress.futureValue.uid @@ -134,7 +129,6 @@ class OutboundIdleShutdownSpec extends ArteryMultiNodeSpec(s""" "remove inbound compression after restart with same host:port" in withAssociation { (remoteSystem, remoteAddress, remoteEcho, localArtery, localProbe) => - val association = localArtery.association(remoteAddress) val remoteUid = association.associationState.uniqueRemoteAddress.futureValue.uid @@ -145,7 +139,8 @@ class OutboundIdleShutdownSpec extends ArteryMultiNodeSpec(s""" val remoteSystem2 = newRemoteSystem(Some(s""" akka.remote.artery.canonical.hostname = ${remoteAddress.host.get} akka.remote.artery.canonical.port = ${remoteAddress.port.get} - """), name = Some(remoteAddress.system)) + """), + name = Some(remoteAddress.system)) try { remoteSystem2.actorOf(TestActors.echoActorProps, "echo2") diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteActorForSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteActorForSpec.scala index cbc363c78a..fad4d0435f 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteActorForSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteActorForSpec.scala @@ -47,9 +47,11 @@ class RemoteActorForSpec extends ArteryMultiNodeSpec("akka.loglevel=INFO") with } "send dead letters on remote if actor does not exist" in { - EventFilter.warning(pattern = "dead.*buh", occurrences = 1).intercept { - localSystem.actorFor(s"akka://${remoteSystem.name}@localhost:$remotePort/dead-letters-on-remote") ! "buh" - }(remoteSystem) + EventFilter + .warning(pattern = "dead.*buh", occurrences = 1) + .intercept { + localSystem.actorFor(s"akka://${remoteSystem.name}@localhost:$remotePort/dead-letters-on-remote") ! 
"buh" + }(remoteSystem) } // FIXME needs remote deployment section @@ -74,11 +76,12 @@ class RemoteActorForSpec extends ArteryMultiNodeSpec("akka.loglevel=INFO") with myref ! 44 expectMsg(44) lastSender should ===(grandchild) - lastSender should be theSameInstanceAs grandchild + (lastSender should be).theSameInstanceAs(grandchild) child.asInstanceOf[RemoteActorRef].getParent should ===(l) - localSystem.actorFor("/user/looker1/child") should be theSameInstanceAs child - (l ? ActorForReq("child/..")).mapTo[AnyRef].futureValue should be theSameInstanceAs l - (localSystem.actorFor(system / "looker1" / "child") ? ActorForReq("..")).mapTo[AnyRef].futureValue should be theSameInstanceAs l + (localSystem.actorFor("/user/looker1/child") should be).theSameInstanceAs(child) + (l ? ActorForReq("child/..")).mapTo[AnyRef].futureValue should be.theSameInstanceAs(l) + (localSystem.actorFor(system / "looker1" / "child") ? ActorForReq("..")).mapTo[AnyRef].futureValue should be + .theSameInstanceAs(l) watch(child) child ! 
PoisonPill diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteActorRefProviderSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteActorRefProviderSpec.scala index 8f72ddf6ae..ce2e9881b5 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteActorRefProviderSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteActorRefProviderSpec.scala @@ -40,7 +40,7 @@ class RemoteActorRefProviderSpec extends ArteryMultiNodeSpec { ref1.asInstanceOf[ActorRefScope].isLocal should ===(true) val ref2 = provider.resolveActorRef(path) - ref1 should be theSameInstanceAs (ref2) + (ref1 should be).theSameInstanceAs(ref2) } "not cache resolveActorRef for unresolved ref" in { @@ -60,12 +60,13 @@ class RemoteActorRefProviderSpec extends ArteryMultiNodeSpec { ref1.getClass should ===(classOf[RemoteActorRef]) val ref2 = provider.resolveActorRef(path) - ref1 should be theSameInstanceAs (ref2) + (ref1 should be).theSameInstanceAs(ref2) } "detect wrong protocol" in { EventFilter[IllegalArgumentException](start = "No root guardian at", occurrences = 1).intercept { - val sel = system.actorSelection(s"akka.tcp://${systemB.name}@${addressB.host.get}:${addressB.port.get}/user/echo") + val sel = + system.actorSelection(s"akka.tcp://${systemB.name}@${addressB.host.get}:${addressB.port.get}/user/echo") sel.anchor.getClass should ===(classOf[EmptyLocalActorRef]) } } diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteActorSelectionSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteActorSelectionSpec.scala index c9078d12cd..1f48e2c3f9 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteActorSelectionSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteActorSelectionSpec.scala @@ -4,7 +4,18 @@ package akka.remote.artery -import akka.actor.{ Actor, ActorIdentity, ActorLogging, ActorRef, ActorRefScope, ActorSelection, Identify, PoisonPill, Props, Terminated } +import akka.actor.{ + Actor, + 
ActorIdentity, + ActorLogging, + ActorRef, + ActorRefScope, + ActorSelection, + Identify, + PoisonPill, + Props, + Terminated +} import akka.testkit.{ ImplicitSender, TestActors } import scala.concurrent.duration._ @@ -54,13 +65,9 @@ class RemoteActorSelectionSpec extends ArteryMultiNodeSpec with ImplicitSender { } """ - val localSystem = newRemoteSystem( - extraConfig = Some(config(localPort)), - name = Some(localSysName)) + val localSystem = newRemoteSystem(extraConfig = Some(config(localPort)), name = Some(localSysName)) - newRemoteSystem( - extraConfig = Some(config(remotePort)), - name = Some(remoteSysName)) + newRemoteSystem(extraConfig = Some(config(remotePort)), name = Some(remoteSysName)) val localLooker2 = localSystem.actorOf(selectionActorProps, "looker2") @@ -79,7 +86,7 @@ class RemoteActorSelectionSpec extends ArteryMultiNodeSpec with ImplicitSender { localGrandchildSelection ! 54 expectMsg(54) lastSender should ===(localGrandchild) - lastSender should be theSameInstanceAs localGrandchild + (lastSender should be).theSameInstanceAs(localGrandchild) localGrandchildSelection ! Identify(localGrandchildSelection) val grandchild2 = expectMsgType[ActorIdentity].ref grandchild2 should ===(Some(localGrandchild)) @@ -89,11 +96,11 @@ class RemoteActorSelectionSpec extends ArteryMultiNodeSpec with ImplicitSender { localLooker2 ! ActorSelReq("child/..") expectMsgType[ActorSelection] ! Identify(None) - expectMsgType[ActorIdentity].ref.get should be theSameInstanceAs localLooker2 + (expectMsgType[ActorIdentity].ref.get should be).theSameInstanceAs(localLooker2) localSystem.actorSelection(localSystem / "looker2" / "child") ! ActorSelReq("..") expectMsgType[ActorSelection] ! Identify(None) - expectMsgType[ActorIdentity].ref.get should be theSameInstanceAs localLooker2 + (expectMsgType[ActorIdentity].ref.get should be).theSameInstanceAs(localLooker2) localGrandchild ! 
((TestActors.echoActorProps, "grandgrandchild")) val grandgrandchild = expectMsgType[ActorRef] diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteConnectionSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteConnectionSpec.scala index d373615b9f..a7c61b4eda 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteConnectionSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteConnectionSpec.scala @@ -12,10 +12,10 @@ import scala.concurrent.duration._ class RemoteConnectionSpec extends ArteryMultiNodeSpec("akka.remote.retry-gate-closed-for = 5s") with ImplicitSender { def muteSystem(system: ActorSystem): Unit = { - system.eventStream.publish(TestEvent.Mute( - EventFilter.error(start = "AssociationError"), - EventFilter.warning(start = "AssociationError"), - EventFilter.warning(pattern = "received dead letter.*"))) + system.eventStream.publish( + TestEvent.Mute(EventFilter.error(start = "AssociationError"), + EventFilter.warning(start = "AssociationError"), + EventFilter.warning(pattern = "received dead letter.*"))) } "Remoting between systems" should { diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala index 8133c6b1a2..8e7205411e 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeathWatchSpec.scala @@ -37,11 +37,14 @@ object RemoteDeathWatchSpec { """).withFallback(ArterySpecSupport.defaultConfig) } -class RemoteDeathWatchSpec extends ArteryMultiNodeSpec(RemoteDeathWatchSpec.config) with ImplicitSender with DefaultTimeout with DeathWatchSpec { +class RemoteDeathWatchSpec + extends ArteryMultiNodeSpec(RemoteDeathWatchSpec.config) + with ImplicitSender + with DefaultTimeout + with DeathWatchSpec { import RemoteDeathWatchSpec._ - system.eventStream.publish(TestEvent.Mute( - 
EventFilter[io.aeron.exceptions.RegistrationException]())) + system.eventStream.publish(TestEvent.Mute(EventFilter[io.aeron.exceptions.RegistrationException]())) val other = newRemoteSystem(name = Some("other"), extraConfig = Some(s"akka.remote.artery.canonical.port=$otherPort")) diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala index 645cbb362e..749fc99516 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeployerSpec.scala @@ -37,19 +37,20 @@ class RemoteDeployerSpec extends AkkaSpec(RemoteDeployerSpec.deployerConf) with val service = "/service2" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service.split("/").drop(1)) - deployment should ===(Some( - Deploy( - service, - deployment.get.config, - RoundRobinPool(3), - RemoteScope(Address("akka", "sys", "wallace", 2552)), - "mydispatcher"))) + deployment should ===( + Some( + Deploy(service, + deployment.get.config, + RoundRobinPool(3), + RemoteScope(Address("akka", "sys", "wallace", 2552)), + "mydispatcher"))) } "reject remote deployment when the source requires LocalScope" in { intercept[ConfigurationException] { system.actorOf(Props.empty.withDeploy(Deploy.local), "service2") - }.getMessage should ===("configuration requested remote deployment for local-only Props at [akka://RemoteDeployerSpec/user/service2]") + }.getMessage should ===( + "configuration requested remote deployment for local-only Props at [akka://RemoteDeployerSpec/user/service2]") } } diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala index f924376d00..ca600f8679 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala +++ 
b/akka-remote/src/test/scala/akka/remote/artery/RemoteDeploymentSpec.scala @@ -59,8 +59,8 @@ object RemoteDeploymentSpec { } } -class RemoteDeploymentSpec extends ArteryMultiNodeSpec( - ConfigFactory.parseString(""" +class RemoteDeploymentSpec + extends ArteryMultiNodeSpec(ConfigFactory.parseString(""" akka.remote.artery.advanced.inbound-lanes = 10 akka.remote.artery.advanced.outbound-lanes = 3 """).withFallback(ArterySpecSupport.defaultConfig)) { @@ -86,7 +86,8 @@ class RemoteDeploymentSpec extends ArteryMultiNodeSpec( "create and supervise children on remote node" in { val senderProbe = TestProbe()(masterSystem) val r = masterSystem.actorOf(Props[Echo1], "blub") - r.path.toString should ===(s"akka://${system.name}@localhost:${port}/remote/akka/${masterSystem.name}@localhost:${masterPort}/user/blub") + r.path.toString should ===( + s"akka://${system.name}@localhost:${port}/remote/akka/${masterSystem.name}@localhost:${masterPort}/user/blub") r.tell(42, senderProbe.ref) senderProbe.expectMsg(42) diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteFailureSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteFailureSpec.scala index c2254c7c7c..3830739257 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteFailureSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteFailureSpec.scala @@ -25,13 +25,13 @@ class RemoteFailureSpec extends ArteryMultiNodeSpec with ImplicitSender { "not be exhausted by sending to broken connections" in { val remoteSystems = Vector.fill(5)(newRemoteSystem()) - remoteSystems foreach { sys => - sys.eventStream.publish(TestEvent.Mute( - EventFilter[EndpointDisassociatedException](), - EventFilter.warning(pattern = "received dead letter.*"))) + remoteSystems.foreach { sys => + sys.eventStream.publish( + TestEvent.Mute(EventFilter[EndpointDisassociatedException](), + EventFilter.warning(pattern = "received dead letter.*"))) sys.actorOf(TestActors.echoActorProps, name = "echo") } - val 
remoteSelections = remoteSystems map { sys => + val remoteSelections = remoteSystems.map { sys => system.actorSelection(rootActorPath(sys) / "user" / "echo") } @@ -41,28 +41,32 @@ class RemoteFailureSpec extends ArteryMultiNodeSpec with ImplicitSender { val n = 100 // first everything is up and running - 1 to n foreach { x => + (1 to n).foreach { x => localSelection ! Ping("1") remoteSelections(x % remoteSystems.size) ! Ping("1") } within(5.seconds) { - receiveN(n * 2) foreach { reply => reply should ===(Ping("1")) } + receiveN(n * 2).foreach { reply => + reply should ===(Ping("1")) + } } // then we shutdown remote systems to simulate broken connections - remoteSystems foreach { sys => + remoteSystems.foreach { sys => shutdown(sys) } - 1 to n foreach { x => + (1 to n).foreach { x => localSelection ! Ping("2") remoteSelections(x % remoteSystems.size) ! Ping("2") } // ping messages to localEcho should go through even though we use many different broken connections within(5.seconds) { - receiveN(n) foreach { reply => reply should ===(Ping("2")) } + receiveN(n).foreach { reply => + reply should ===(Ping("2")) + } } } diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala index dfb614f7c3..51b88ef7a5 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteInstrumentsSerializationSpec.scala @@ -25,7 +25,7 @@ class RemoteInstrumentsSerializationSpec extends AkkaSpec("akka.loglevel = DEBUG if (messages.isEmpty) f else - EventFilter.debug(message = messages.head, occurrences = 1) intercept { + EventFilter.debug(message = messages.head, occurrences = 1).intercept { ensureDebugLog(messages.tail: _*)(f) } } @@ -54,70 +54,65 @@ class RemoteInstrumentsSerializationSpec extends AkkaSpec("akka.loglevel = DEBUG } "skip exitsing remote instruments not 
in the message" in { - ensureDebugLog( - "Skipping local RemoteInstrument 10 that has no matching data in the message") { - val p = TestProbe() - val instruments = Seq(testInstrument(7, "!"), testInstrument(10, ".."), testInstrument(21, "???")) - val riS = remoteInstruments(instruments(0), instruments(2)) - val riD = remoteInstruments(instruments: _*) - serializeDeserialize(riS, riD, p.ref, "baz") - p.expectMsgAllOf("baz-7-!", "baz-21-???") - p.expectNoMsg(100.millis) - } + ensureDebugLog("Skipping local RemoteInstrument 10 that has no matching data in the message") { + val p = TestProbe() + val instruments = Seq(testInstrument(7, "!"), testInstrument(10, ".."), testInstrument(21, "???")) + val riS = remoteInstruments(instruments(0), instruments(2)) + val riD = remoteInstruments(instruments: _*) + serializeDeserialize(riS, riD, p.ref, "baz") + p.expectMsgAllOf("baz-7-!", "baz-21-???") + p.expectNoMsg(100.millis) + } } "skip remote instruments in the message that are not existing" in { - ensureDebugLog( - "Skipping serialized data in message for RemoteInstrument 11 that has no local match") { - val p = TestProbe() - val instruments = Seq(testInstrument(6, "!"), testInstrument(11, ".."), testInstrument(19, "???")) - val riS = remoteInstruments(instruments: _*) - val riD = remoteInstruments(instruments(0), instruments(2)) - serializeDeserialize(riS, riD, p.ref, "buz") - p.expectMsgAllOf("buz-6-!", "buz-19-???") - p.expectNoMsg(100.millis) - } + ensureDebugLog("Skipping serialized data in message for RemoteInstrument 11 that has no local match") { + val p = TestProbe() + val instruments = Seq(testInstrument(6, "!"), testInstrument(11, ".."), testInstrument(19, "???")) + val riS = remoteInstruments(instruments: _*) + val riD = remoteInstruments(instruments(0), instruments(2)) + serializeDeserialize(riS, riD, p.ref, "buz") + p.expectMsgAllOf("buz-6-!", "buz-19-???") + p.expectNoMsg(100.millis) + } } "skip all remote instruments in the message if none are existing" in { - 
ensureDebugLog( - "Skipping serialized data in message for RemoteInstrument(s) [1, 10, 31] that has no local match") { - val p = TestProbe() - val instruments = Seq(testInstrument(1, "!"), testInstrument(10, ".."), testInstrument(31, "???")) - val riS = remoteInstruments(instruments: _*) - val riD = remoteInstruments() - serializeDeserialize(riS, riD, p.ref, "boz") - p.expectNoMsg(100.millis) - } + ensureDebugLog("Skipping serialized data in message for RemoteInstrument(s) [1, 10, 31] that has no local match") { + val p = TestProbe() + val instruments = Seq(testInstrument(1, "!"), testInstrument(10, ".."), testInstrument(31, "???")) + val riS = remoteInstruments(instruments: _*) + val riD = remoteInstruments() + serializeDeserialize(riS, riD, p.ref, "boz") + p.expectNoMsg(100.millis) + } } "skip serializing remote instrument that fails" in { - ensureDebugLog( - "Skipping serialization of RemoteInstrument 7 since it failed with boom", - "Skipping local RemoteInstrument 7 that has no matching data in the message") { - val p = TestProbe() - val instruments = Seq( - testInstrument(7, "!", sentThrowable = boom), testInstrument(10, ".."), testInstrument(21, "???")) - val ri = remoteInstruments(instruments: _*) - serializeDeserialize(ri, ri, p.ref, "woot") - p.expectMsgAllOf("woot-10-..", "woot-21-???") - p.expectNoMsg(100.millis) - } + ensureDebugLog("Skipping serialization of RemoteInstrument 7 since it failed with boom", + "Skipping local RemoteInstrument 7 that has no matching data in the message") { + val p = TestProbe() + val instruments = + Seq(testInstrument(7, "!", sentThrowable = boom), testInstrument(10, ".."), testInstrument(21, "???")) + val ri = remoteInstruments(instruments: _*) + serializeDeserialize(ri, ri, p.ref, "woot") + p.expectMsgAllOf("woot-10-..", "woot-21-???") + p.expectNoMsg(100.millis) + } } "skip deserializing remote instrument that fails" in { - ensureDebugLog( - "Skipping deserialization of RemoteInstrument 7 since it failed with boom", - 
"Skipping deserialization of RemoteInstrument 21 since it failed with boom") { - val p = TestProbe() - val instruments = Seq( - testInstrument(7, "!", receiveThrowable = boom), testInstrument(10, ".."), - testInstrument(21, "???", receiveThrowable = boom)) - val ri = remoteInstruments(instruments: _*) - serializeDeserialize(ri, ri, p.ref, "waat") - p.expectMsgAllOf("waat-10-..") - p.expectNoMsg(100.millis) - } + ensureDebugLog("Skipping deserialization of RemoteInstrument 7 since it failed with boom", + "Skipping deserialization of RemoteInstrument 21 since it failed with boom") { + val p = TestProbe() + val instruments = Seq(testInstrument(7, "!", receiveThrowable = boom), + testInstrument(10, ".."), + testInstrument(21, "???", receiveThrowable = boom)) + val ri = remoteInstruments(instruments: _*) + serializeDeserialize(ri, ri, p.ref, "waat") + p.expectMsgAllOf("waat-10-..") + p.expectNoMsg(100.millis) + } } } } @@ -136,7 +131,10 @@ object RemoteInstrumentsSerializationSpec { override def isDebugEnabled(logClass: Class[_], logSource: String): Boolean = logSource == "DebugSource" } - def testInstrument(id: Int, metadata: String, sentThrowable: Throwable = null, receiveThrowable: Throwable = null): RemoteInstrument = { + def testInstrument(id: Int, + metadata: String, + sentThrowable: Throwable = null, + receiveThrowable: Throwable = null): RemoteInstrument = { new RemoteInstrument { private val charset = Charset.forName("UTF-8") private val encoder = charset.newEncoder() @@ -144,7 +142,10 @@ object RemoteInstrumentsSerializationSpec { override def identifier: Byte = id.toByte - override def remoteWriteMetadata(recipient: ActorRef, message: Object, sender: ActorRef, buffer: ByteBuffer): Unit = { + override def remoteWriteMetadata(recipient: ActorRef, + message: Object, + sender: ActorRef, + buffer: ByteBuffer): Unit = { buffer.putInt(metadata.length) if (sentThrowable ne null) throw sentThrowable encoder.encode(CharBuffer.wrap(metadata), buffer, true) @@ -152,7 
+153,10 @@ object RemoteInstrumentsSerializationSpec { encoder.reset() } - override def remoteReadMetadata(recipient: ActorRef, message: Object, sender: ActorRef, buffer: ByteBuffer): Unit = { + override def remoteReadMetadata(recipient: ActorRef, + message: Object, + sender: ActorRef, + buffer: ByteBuffer): Unit = { val size = buffer.getInt if (receiveThrowable ne null) throw receiveThrowable val charBuffer = CharBuffer.allocate(size) @@ -163,9 +167,17 @@ object RemoteInstrumentsSerializationSpec { recipient ! s"$message-$identifier-$string" } - override def remoteMessageSent(recipient: ActorRef, message: Object, sender: ActorRef, size: Int, time: Long): Unit = () + override def remoteMessageSent(recipient: ActorRef, + message: Object, + sender: ActorRef, + size: Int, + time: Long): Unit = () - override def remoteMessageReceived(recipient: ActorRef, message: Object, sender: ActorRef, size: Int, time: Long): Unit = () + override def remoteMessageReceived(recipient: ActorRef, + message: Object, + sender: ActorRef, + size: Int, + time: Long): Unit = () } } @@ -182,7 +194,10 @@ object RemoteInstrumentsSerializationSpec { ri.deserializeRaw(mockInbound) } - def serializeDeserialize(riS: RemoteInstruments, riD: RemoteInstruments, recipient: ActorRef, message: AnyRef): Unit = { + def serializeDeserialize(riS: RemoteInstruments, + riD: RemoteInstruments, + recipient: ActorRef, + message: AnyRef): Unit = { val buffer = ByteBuffer.allocate(1024) serialize(riS, buffer) buffer.flip() diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala index 7fdf569afc..c4d7a9c8fc 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteMessageSerializationSpec.scala @@ -54,28 +54,31 @@ class RemoteMessageSerializationSpec extends ArteryMultiNodeSpec(""" "drop sent 
messages over payload size" in { val oversized = byteStringOfSize(maxPayloadBytes + 1) - EventFilter[OversizedPayloadException](start = "Failed to serialize oversized message", occurrences = 1).intercept { - verifySend(oversized) { - expectNoMsg(1.second) // No AssocitionErrorEvent should be published + EventFilter[OversizedPayloadException](start = "Failed to serialize oversized message", occurrences = 1) + .intercept { + verifySend(oversized) { + expectNoMsg(1.second) // No AssocitionErrorEvent should be published + } } - } } // TODO max payload size is not configurable yet, so we cannot send a too big message, it fails no sending side "drop received messages over payload size" ignore { // Receiver should reply with a message of size maxPayload + 1, which will be dropped and an error logged - EventFilter[OversizedPayloadException](pattern = ".*Discarding oversized payload received.*", occurrences = 1).intercept { - verifySend(maxPayloadBytes + 1) { - expectNoMsg(1.second) // No AssocitionErrorEvent should be published + EventFilter[OversizedPayloadException](pattern = ".*Discarding oversized payload received.*", occurrences = 1) + .intercept { + verifySend(maxPayloadBytes + 1) { + expectNoMsg(1.second) // No AssocitionErrorEvent should be published + } } - } } "be able to serialize a local actor ref from another actor system" in { remoteSystem.actorOf(TestActors.echoActorProps, "echo") val local = localSystem.actorOf(TestActors.echoActorProps, "echo") - val remoteEcho = system.actorSelection(rootActorPath(remoteSystem) / "user" / "echo").resolveOne(3.seconds).futureValue + val remoteEcho = + system.actorSelection(rootActorPath(remoteSystem) / "user" / "echo").resolveOne(3.seconds).futureValue remoteEcho ! 
local expectMsg(3.seconds, local) } diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala index 9fab6b9d96..2d32aee44a 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteRouterSpec.scala @@ -42,8 +42,7 @@ class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString(""" val port = RARP(system).provider.getDefaultAddress.port.get val sysName = system.name - val conf = ConfigFactory.parseString( - s""" + val conf = ConfigFactory.parseString(s""" akka { actor.deployment { /blub { @@ -76,9 +75,7 @@ class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString(""" target.nodes = ["akka://${sysName}@localhost:${port}"] } } - }""" - ).withFallback(ArterySpecSupport.newFlightRecorderConfig) - .withFallback(system.settings.config) + }""").withFallback(ArterySpecSupport.newFlightRecorderConfig).withFallback(system.settings.config) val masterSystem = ActorSystem("Master" + sysName, conf) @@ -106,20 +103,21 @@ class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString(""" val children = replies.toSet children should have size 2 children.map(_.parent) should have size 1 - children foreach (_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) masterSystem.stop(router) } "deploy its children on remote host driven by programatic definition" in { val probe = TestProbe()(masterSystem) - val router = masterSystem.actorOf(new RemoteRouterConfig( - RoundRobinPool(2), - Seq(Address("akka", sysName, "localhost", port))).props(echoActorProps), "blub2") + val router = + masterSystem.actorOf(new RemoteRouterConfig(RoundRobinPool(2), Seq(Address("akka", sysName, "localhost", port))) + .props(echoActorProps), + "blub2") val replies = collectRouteePaths(probe, router, 5) val children = 
replies.toSet children should have size 2 children.map(_.parent) should have size 1 - children foreach (_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) masterSystem.stop(router) } @@ -130,7 +128,7 @@ class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString(""" val children = replies.toSet children.size should be >= 2 children.map(_.parent) should have size 1 - children foreach (_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) masterSystem.stop(router) } @@ -144,14 +142,17 @@ class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString(""" val parents = children.map(_.parent) parents should have size 1 parents.head should ===(router.path) - children foreach (_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) masterSystem.stop(router) } "deploy remote routers based on explicit deployment" in { val probe = TestProbe()(masterSystem) - val router = masterSystem.actorOf(RoundRobinPool(2).props(echoActorProps) - .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"akka://${sysName}@localhost:${port}")))), "remote-blub2") + val router = masterSystem.actorOf( + RoundRobinPool(2) + .props(echoActorProps) + .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"akka://${sysName}@localhost:${port}")))), + "remote-blub2") router.path.address.toString should ===(s"akka://${sysName}@localhost:${port}") val replies = collectRouteePaths(probe, router, 5) val children = replies.toSet @@ -159,14 +160,17 @@ class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString(""" val parents = children.map(_.parent) parents should have size 1 parents.head should ===(router.path) - children foreach (_.address.toString should 
===(s"akka://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) masterSystem.stop(router) } "let remote deployment be overridden by local configuration" in { val probe = TestProbe()(masterSystem) - val router = masterSystem.actorOf(RoundRobinPool(2).props(echoActorProps) - .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"akka://${sysName}@localhost:${port}")))), "local-blub") + val router = masterSystem.actorOf( + RoundRobinPool(2) + .props(echoActorProps) + .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"akka://${sysName}@localhost:${port}")))), + "local-blub") router.path.address.toString should ===("akka://MasterRemoteRouterSpec") val replies = collectRouteePaths(probe, router, 5) val children = replies.toSet @@ -174,14 +178,17 @@ class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString(""" val parents = children.map(_.parent) parents should have size 1 parents.head.address should ===(Address("akka", sysName, "localhost", port)) - children foreach (_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) masterSystem.stop(router) } "let remote deployment router be overridden by local configuration" in { val probe = TestProbe()(masterSystem) - val router = masterSystem.actorOf(RoundRobinPool(2).props(echoActorProps) - .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"akka://${sysName}@localhost:${port}")))), "local-blub2") + val router = masterSystem.actorOf( + RoundRobinPool(2) + .props(echoActorProps) + .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"akka://${sysName}@localhost:${port}")))), + "local-blub2") router.path.address.toString should ===(s"akka://${sysName}@localhost:${port}") val replies = collectRouteePaths(probe, router, 5) val children = replies.toSet @@ -189,14 +196,17 @@ class RemoteRouterSpec extends 
AkkaSpec(ConfigFactory.parseString(""" val parents = children.map(_.parent) parents should have size 1 parents.head should ===(router.path) - children foreach (_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) masterSystem.stop(router) } "let remote deployment be overridden by remote configuration" in { val probe = TestProbe()(masterSystem) - val router = masterSystem.actorOf(RoundRobinPool(2).props(echoActorProps) - .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"akka://${sysName}@localhost:${port}")))), "remote-override") + val router = masterSystem.actorOf( + RoundRobinPool(2) + .props(echoActorProps) + .withDeploy(Deploy(scope = RemoteScope(AddressFromURIString(s"akka://${sysName}@localhost:${port}")))), + "remote-override") router.path.address.toString should ===(s"akka://${sysName}@localhost:${port}") val replies = collectRouteePaths(probe, router, 5) val children = replies.toSet @@ -204,7 +214,7 @@ class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString(""" val parents = children.map(_.parent) parents should have size 1 parents.head should ===(router.path) - children foreach (_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) + children.foreach(_.address.toString should ===(s"akka://${sysName}@localhost:${port}")) masterSystem.stop(router) } @@ -213,9 +223,10 @@ class RemoteRouterSpec extends AkkaSpec(ConfigFactory.parseString(""" val escalator = OneForOneStrategy() { case e => probe.ref ! 
e; SupervisorStrategy.Escalate } - val router = masterSystem.actorOf(new RemoteRouterConfig( - RoundRobinPool(1, supervisorStrategy = escalator), - Seq(Address("akka", sysName, "localhost", port))).props(Props.empty), "blub3") + val router = masterSystem.actorOf( + new RemoteRouterConfig(RoundRobinPool(1, supervisorStrategy = escalator), + Seq(Address("akka", sysName, "localhost", port))).props(Props.empty), + "blub3") router.tell(GetRoutees, probe.ref) EventFilter[ActorKilledException](occurrences = 1).intercept { diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala index 0c41c2ce71..4a438a6545 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteSendConsistencySpec.scala @@ -11,46 +11,49 @@ import com.typesafe.config.{ Config, ConfigFactory } import scala.concurrent.duration._ import akka.actor.ActorSelection -class ArteryUpdSendConsistencyWithOneLaneSpec extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" +class ArteryUpdSendConsistencyWithOneLaneSpec + extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" akka.remote.artery.advanced.outbound-lanes = 1 akka.remote.artery.advanced.inbound-lanes = 1 """).withFallback(ArterySpecSupport.defaultConfig)) -class ArteryUpdSendConsistencyWithThreeLanesSpec extends AbstractRemoteSendConsistencySpec( - ConfigFactory.parseString(""" +class ArteryUpdSendConsistencyWithThreeLanesSpec + extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" akka.remote.artery.advanced.outbound-lanes = 3 akka.remote.artery.advanced.inbound-lanes = 3 """).withFallback(ArterySpecSupport.defaultConfig)) -class ArteryTcpSendConsistencyWithOneLaneSpec extends AbstractRemoteSendConsistencySpec( - ConfigFactory.parseString(""" +class ArteryTcpSendConsistencyWithOneLaneSpec + extends 
AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" akka.remote.artery.transport = tcp akka.remote.artery.advanced.outbound-lanes = 1 akka.remote.artery.advanced.inbound-lanes = 1 """).withFallback(ArterySpecSupport.defaultConfig)) -class ArteryTcpSendConsistencyWithThreeLanesSpec extends AbstractRemoteSendConsistencySpec( - ConfigFactory.parseString(""" +class ArteryTcpSendConsistencyWithThreeLanesSpec + extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" akka.remote.artery.transport = tcp akka.remote.artery.advanced.outbound-lanes = 3 akka.remote.artery.advanced.inbound-lanes = 3 """).withFallback(ArterySpecSupport.defaultConfig)) -class ArteryTlsTcpSendConsistencyWithOneLaneSpec extends AbstractRemoteSendConsistencySpec( - ConfigFactory.parseString(""" +class ArteryTlsTcpSendConsistencyWithOneLaneSpec + extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" akka.remote.artery.transport = tls-tcp akka.remote.artery.advanced.outbound-lanes = 1 akka.remote.artery.advanced.inbound-lanes = 1 """).withFallback(ArterySpecSupport.defaultConfig)) -class ArteryTlsTcpSendConsistencyWithThreeLanesSpec extends AbstractRemoteSendConsistencySpec( - ConfigFactory.parseString(""" +class ArteryTlsTcpSendConsistencyWithThreeLanesSpec + extends AbstractRemoteSendConsistencySpec(ConfigFactory.parseString(""" akka.remote.artery.transport = tls-tcp akka.remote.artery.advanced.outbound-lanes = 1 akka.remote.artery.advanced.inbound-lanes = 1 """).withFallback(ArterySpecSupport.defaultConfig)) -abstract class AbstractRemoteSendConsistencySpec(config: Config) extends ArteryMultiNodeSpec(config) with ImplicitSender { +abstract class AbstractRemoteSendConsistencySpec(config: Config) + extends ArteryMultiNodeSpec(config) + with ImplicitSender { val systemB = newRemoteSystem(name = Some("systemB")) val addressB = address(systemB) @@ -126,22 +129,23 @@ abstract class AbstractRemoteSendConsistencySpec(config: Config) extends ArteryM 
expectMsgType[ActorIdentity].ref.get } - def senderProps(remoteRef: ActorRef) = Props(new Actor { - var counter = 1000 - remoteRef ! counter + def senderProps(remoteRef: ActorRef) = + Props(new Actor { + var counter = 1000 + remoteRef ! counter - override def receive: Receive = { - case i: Int => - if (i != counter) testActor ! s"Failed, expected $counter got $i" - else if (counter == 0) { - testActor ! "success" - context.stop(self) - } else { - counter -= 1 - remoteRef ! counter - } - } - }).withDeploy(Deploy.local) + override def receive: Receive = { + case i: Int => + if (i != counter) testActor ! s"Failed, expected $counter got $i" + else if (counter == 0) { + testActor ! "success" + context.stop(self) + } else { + counter -= 1 + remoteRef ! counter + } + } + }).withDeploy(Deploy.local) system.actorOf(senderProps(remoteRefA)) system.actorOf(senderProps(remoteRefB)) @@ -165,22 +169,23 @@ abstract class AbstractRemoteSendConsistencySpec(config: Config) extends ArteryM val selB = system.actorSelection(rootB / "user" / "echoB2") val selC = system.actorSelection(rootB / "user" / "echoC2") - def senderProps(sel: ActorSelection) = Props(new Actor { - var counter = 1000 - sel ! counter + def senderProps(sel: ActorSelection) = + Props(new Actor { + var counter = 1000 + sel ! counter - override def receive: Receive = { - case i: Int => - if (i != counter) testActor ! s"Failed, expected $counter got $i" - else if (counter == 0) { - testActor ! "success2" - context.stop(self) - } else { - counter -= 1 - sel ! counter - } - } - }).withDeploy(Deploy.local) + override def receive: Receive = { + case i: Int => + if (i != counter) testActor ! s"Failed, expected $counter got $i" + else if (counter == 0) { + testActor ! "success2" + context.stop(self) + } else { + counter -= 1 + sel ! 
counter + } + } + }).withDeploy(Deploy.local) system.actorOf(senderProps(selA)) system.actorOf(senderProps(selB)) diff --git a/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala index 69a81a4fc1..5e7f822a7c 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RemoteWatcherSpec.scala @@ -14,7 +14,7 @@ object RemoteWatcherSpec { class TestActorProxy(testActor: ActorRef) extends Actor { def receive = { - case msg => testActor forward msg + case msg => testActor.forward(msg) } } @@ -27,12 +27,11 @@ object RemoteWatcherSpec { def createFailureDetector(): FailureDetectorRegistry[Address] = { def createFailureDetector(): FailureDetector = - new PhiAccrualFailureDetector( - threshold = 8.0, - maxSampleSize = 200, - minStdDeviation = 100.millis, - acceptableHeartbeatPause = 3.seconds, - firstHeartbeatEstimate = 1.second) + new PhiAccrualFailureDetector(threshold = 8.0, + maxSampleSize = 200, + minStdDeviation = 100.millis, + acceptableHeartbeatPause = 3.seconds, + firstHeartbeatEstimate = 1.second) new DefaultFailureDetectorRegistry(() => createFailureDetector()) } @@ -42,11 +41,11 @@ object RemoteWatcherSpec { final case class Quarantined(address: Address, uid: Option[Long]) extends JavaSerializable } - class TestRemoteWatcher(heartbeatExpectedResponseAfter: FiniteDuration) extends RemoteWatcher( - createFailureDetector, - heartbeatInterval = TurnOff, - unreachableReaperInterval = TurnOff, - heartbeatExpectedResponseAfter = heartbeatExpectedResponseAfter) { + class TestRemoteWatcher(heartbeatExpectedResponseAfter: FiniteDuration) + extends RemoteWatcher(createFailureDetector, + heartbeatInterval = TurnOff, + unreachableReaperInterval = TurnOff, + heartbeatExpectedResponseAfter = heartbeatExpectedResponseAfter) { def this() = this(heartbeatExpectedResponseAfter = TurnOff) @@ -75,9 +74,9 @@ class 
RemoteWatcherSpec extends ArteryMultiNodeSpec(ArterySpecSupport.defaultCon val remoteAddress = address(remoteSystem) def remoteAddressUid = AddressUidExtension(remoteSystem).longAddressUid - Seq(system, remoteSystem).foreach(muteDeadLetters( - akka.remote.transport.AssociationHandle.Disassociated.getClass, - akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass)(_)) + Seq(system, remoteSystem).foreach( + muteDeadLetters(akka.remote.transport.AssociationHandle.Disassociated.getClass, + akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass)(_)) override def afterTermination(): Unit = { shutdown(remoteSystem) diff --git a/akka-remote/src/test/scala/akka/remote/artery/RollingEventLogSimulationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/RollingEventLogSimulationSpec.scala index 57dfde70f5..c0a4dfc844 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/RollingEventLogSimulationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/RollingEventLogSimulationSpec.scala @@ -101,9 +101,9 @@ class RollingEventLogSimulationSpec extends AkkaSpec { val instructions: Array[Instruction] = (Array(AdvanceHeader, TryMarkDirty) :+ - WriteId) ++ - Array.fill(EntrySize - 2)(WriteByte) :+ - Commit + WriteId) ++ + Array.fill(EntrySize - 2)(WriteByte) :+ + Commit def step(simulator: Simulator): String = { instructions(instructionPtr)(simulator) diff --git a/akka-remote/src/test/scala/akka/remote/artery/SendQueueSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/SendQueueSpec.scala index 45027e54c6..8b7612730e 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/SendQueueSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/SendQueueSpec.scala @@ -68,8 +68,8 @@ class SendQueueSpec extends AkkaSpec("akka.actor.serialize-messages = off") with "deliver all messages" in { val queue = createQueue[String](128) - val (sendQueue, downstream) = Source.fromGraph(new SendQueue[String](sendToDeadLetters)) - 
.toMat(TestSink.probe)(Keep.both).run() + val (sendQueue, downstream) = + Source.fromGraph(new SendQueue[String](sendToDeadLetters)).toMat(TestSink.probe)(Keep.both).run() downstream.request(10) sendQueue.inject(queue) @@ -87,8 +87,8 @@ class SendQueueSpec extends AkkaSpec("akka.actor.serialize-messages = off") with queue.offer("a") queue.offer("b") - val (sendQueue, downstream) = Source.fromGraph(new SendQueue[String](sendToDeadLetters)) - .toMat(TestSink.probe)(Keep.both).run() + val (sendQueue, downstream) = + Source.fromGraph(new SendQueue[String](sendToDeadLetters)).toMat(TestSink.probe)(Keep.both).run() downstream.request(10) downstream.expectNoMsg(200.millis) @@ -105,10 +105,12 @@ class SendQueueSpec extends AkkaSpec("akka.actor.serialize-messages = off") with // this test verifies that the wakeup signal is triggered correctly val queue = createQueue[Int](128) val burstSize = 100 - val (sendQueue, downstream) = Source.fromGraph(new SendQueue[Int](sendToDeadLetters)) + val (sendQueue, downstream) = Source + .fromGraph(new SendQueue[Int](sendToDeadLetters)) .grouped(burstSize) .async - .toMat(TestSink.probe)(Keep.both).run() + .toMat(TestSink.probe)(Keep.both) + .run() downstream.request(10) sendQueue.inject(queue) @@ -133,8 +135,8 @@ class SendQueueSpec extends AkkaSpec("akka.actor.serialize-messages = off") with // send 100 per producer before materializing producers.foreach(_ ! ProduceToQueue(0, 100, queue)) - val (sendQueue, downstream) = Source.fromGraph(new SendQueue[Msg](sendToDeadLetters)) - .toMat(TestSink.probe)(Keep.both).run() + val (sendQueue, downstream) = + Source.fromGraph(new SendQueue[Msg](sendToDeadLetters)).toMat(TestSink.probe)(Keep.both).run() sendQueue.inject(queue) producers.foreach(_ ! 
ProduceToQueueValue(100, 200, sendQueue)) @@ -163,8 +165,8 @@ class SendQueueSpec extends AkkaSpec("akka.actor.serialize-messages = off") with (1 to 100).foreach { n => val queue = createQueue[String](16) - val (sendQueue, downstream) = Source.fromGraph(new SendQueue[String](sendToDeadLetters)) - .toMat(TestSink.probe)(Keep.both).run() + val (sendQueue, downstream) = + Source.fromGraph(new SendQueue[String](sendToDeadLetters)).toMat(TestSink.probe)(Keep.both).run() f(queue, sendQueue, downstream) downstream.expectNext("a") diff --git a/akka-remote/src/test/scala/akka/remote/artery/SerializationErrorSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/SerializationErrorSpec.scala index b54d835f02..d0e05746ad 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/SerializationErrorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/SerializationErrorSpec.scala @@ -18,9 +18,8 @@ object SerializationErrorSpec { class SerializationErrorSpec extends ArteryMultiNodeSpec(ArterySpecSupport.defaultConfig) with ImplicitSender { import SerializationErrorSpec._ - val systemB = newRemoteSystem( - name = Some("systemB"), - extraConfig = Some(""" + val systemB = newRemoteSystem(name = Some("systemB"), + extraConfig = Some(""" akka.actor.serialization-identifiers { # this will cause deserialization error "akka.serialization.ByteArraySerializer" = -4 @@ -58,10 +57,11 @@ class SerializationErrorSpec extends ArteryMultiNodeSpec(ArterySpecSupport.defau remoteRef ! "ping" expectMsg("ping") - EventFilter.warning( - pattern = """Failed to deserialize message from \[.*\] with serializer id \[4\]""", occurrences = 1).intercept { - remoteRef ! "boom".getBytes("utf-8") - }(systemB) + EventFilter + .warning(pattern = """Failed to deserialize message from \[.*\] with serializer id \[4\]""", occurrences = 1) + .intercept { + remoteRef ! "boom".getBytes("utf-8") + }(systemB) remoteRef ! 
"ping2" expectMsg("ping2") diff --git a/akka-remote/src/test/scala/akka/remote/artery/SerializationTransportInformationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/SerializationTransportInformationSpec.scala index 5578ed0e4b..fca0d1a188 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/SerializationTransportInformationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/SerializationTransportInformationSpec.scala @@ -6,5 +6,5 @@ package akka.remote.artery import akka.remote.serialization.AbstractSerializationTransportInformationSpec -class SerializationTransportInformationSpec extends AbstractSerializationTransportInformationSpec( - ArterySpecSupport.defaultConfig) +class SerializationTransportInformationSpec + extends AbstractSerializationTransportInformationSpec(ArterySpecSupport.defaultConfig) diff --git a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageAckerSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageAckerSpec.scala index 716a8620b3..654e30e7eb 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageAckerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageAckerSpec.scala @@ -30,13 +30,15 @@ class SystemMessageAckerSpec extends AkkaSpec with ImplicitSender { val addressB = UniqueAddress(Address("akka", "sysB", "hostB", 1002), 2) val addressC = UniqueAddress(Address("akka", "sysC", "hostB", 1003), 3) - private def setupStream(inboundContext: InboundContext, timeout: FiniteDuration = 5.seconds): (TestPublisher.Probe[AnyRef], TestSubscriber.Probe[Any]) = { + private def setupStream( + inboundContext: InboundContext, + timeout: FiniteDuration = 5.seconds): (TestPublisher.Probe[AnyRef], TestSubscriber.Probe[Any]) = { val recipient = OptionVal.None // not used - TestSource.probe[AnyRef] + TestSource + .probe[AnyRef] .map { case sysMsg @ SystemMessageEnvelope(_, _, ackReplyTo) => - InboundEnvelope(recipient, sysMsg, OptionVal.None, ackReplyTo.uid, - 
inboundContext.association(ackReplyTo.uid)) + InboundEnvelope(recipient, sysMsg, OptionVal.None, ackReplyTo.uid, inboundContext.association(ackReplyTo.uid)) } .via(new SystemMessageAcker(inboundContext)) .map { case env: InboundEnvelope => env.message } diff --git a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala index c92aa98188..7ff943c16e 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/SystemMessageDeliverySpec.scala @@ -35,8 +35,7 @@ object SystemMessageDeliverySpec { case class TestSysMsg(s: String) extends SystemMessageDelivery.AckedDeliveryMessage - val config = ConfigFactory.parseString( - s""" + val config = ConfigFactory.parseString(s""" akka.loglevel = INFO akka.remote.artery.advanced.stop-idle-outbound-after = 1000 ms akka.remote.artery.advanced.inject-handshake-interval = 500 ms @@ -50,13 +49,9 @@ object SystemMessageDeliverySpec { class SystemMessageDeliverySpec extends ArteryMultiNodeSpec(SystemMessageDeliverySpec.config) with ImplicitSender { import SystemMessageDeliverySpec._ - val addressA = UniqueAddress( - address(system), - AddressUidExtension(system).longAddressUid) + val addressA = UniqueAddress(address(system), AddressUidExtension(system).longAddressUid) val systemB = newRemoteSystem(name = Some("systemB")) - val addressB = UniqueAddress( - address(systemB), - AddressUidExtension(systemB).longAddressUid) + val addressB = UniqueAddress(address(systemB), AddressUidExtension(systemB).longAddressUid) val rootB = RootActorPath(addressB.address) val matSettings = ActorMaterializerSettings(system).withFuzzing(true) implicit val mat = ActorMaterializer(matSettings)(system) @@ -66,7 +61,9 @@ class SystemMessageDeliverySpec extends ArteryMultiNodeSpec(SystemMessageDeliver system.eventStream.publish(TestEvent.Mute(EventFilter.warning(pattern = 
".*negative acknowledgement.*"))) systemB.eventStream.publish(TestEvent.Mute(EventFilter.warning(pattern = ".*negative acknowledgement.*"))) - private def send(sendCount: Int, resendInterval: FiniteDuration, outboundContext: OutboundContext): Source[OutboundEnvelope, NotUsed] = { + private def send(sendCount: Int, + resendInterval: FiniteDuration, + outboundContext: OutboundContext): Source[OutboundEnvelope, NotUsed] = { val deadLetters = TestProbe().ref Source(1 to sendCount) .map(n => outboundEnvelopePool.acquire().init(OptionVal.None, TestSysMsg("msg-" + n), OptionVal.None)) @@ -76,34 +73,32 @@ class SystemMessageDeliverySpec extends ArteryMultiNodeSpec(SystemMessageDeliver private def inbound(inboundContext: InboundContext): Flow[OutboundEnvelope, InboundEnvelope, NotUsed] = { val recipient = OptionVal.None // not used Flow[OutboundEnvelope] - .map(outboundEnvelope => outboundEnvelope.message match { - case sysEnv: SystemMessageEnvelope => - InboundEnvelope(recipient, sysEnv, OptionVal.None, addressA.uid, - inboundContext.association(addressA.uid)) - }) + .map(outboundEnvelope => + outboundEnvelope.message match { + case sysEnv: SystemMessageEnvelope => + InboundEnvelope(recipient, sysEnv, OptionVal.None, addressA.uid, inboundContext.association(addressA.uid)) + }) .async .via(new SystemMessageAcker(inboundContext)) } private def drop(dropSeqNumbers: Vector[Long]): Flow[OutboundEnvelope, OutboundEnvelope, NotUsed] = { - Flow[OutboundEnvelope] - .statefulMapConcat(() => { - var dropping = dropSeqNumbers + Flow[OutboundEnvelope].statefulMapConcat(() => { + var dropping = dropSeqNumbers - { - outboundEnvelope => - outboundEnvelope.message match { - case SystemMessageEnvelope(_, seqNo, _) => - val i = dropping.indexOf(seqNo) - if (i >= 0) { - dropping = dropping.updated(i, -1L) - Nil - } else - List(outboundEnvelope) - case _ => Nil - } + { outboundEnvelope => + outboundEnvelope.message match { + case SystemMessageEnvelope(_, seqNo, _) => + val i = 
dropping.indexOf(seqNo) + if (i >= 0) { + dropping = dropping.updated(i, -1L) + Nil + } else + List(outboundEnvelope) + case _ => Nil } - }) + } + }) } private def randomDrop[T](dropRate: Double): Flow[T, T, NotUsed] = Flow[T].mapConcat { elem => @@ -137,7 +132,8 @@ class SystemMessageDeliverySpec extends ArteryMultiNodeSpec(SystemMessageDeliver expectMsgType[ActorIdentity].ref.get } - val idleTimeout = RARP(system).provider.transport.asInstanceOf[ArteryTransport].settings.Advanced.StopIdleOutboundAfter + val idleTimeout = + RARP(system).provider.transport.asInstanceOf[ArteryTransport].settings.Advanced.StopIdleOutboundAfter val rnd = ThreadLocalRandom.current() (1 to 5).foreach { _ => diff --git a/akka-remote/src/test/scala/akka/remote/artery/TestContext.scala b/akka-remote/src/test/scala/akka/remote/artery/TestContext.scala index 813f37436b..8e3c864c7f 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/TestContext.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/TestContext.scala @@ -20,11 +20,11 @@ import akka.util.OptionVal import akka.dispatch.ExecutionContexts import com.typesafe.config.ConfigFactory -private[remote] class TestInboundContext( - override val localAddress: UniqueAddress, - val controlSubject: TestControlMessageSubject = new TestControlMessageSubject, - val controlProbe: Option[ActorRef] = None, - val replyDropRate: Double = 0.0) extends InboundContext { +private[remote] class TestInboundContext(override val localAddress: UniqueAddress, + val controlSubject: TestControlMessageSubject = new TestControlMessageSubject, + val controlProbe: Option[ActorRef] = None, + val replyDropRate: Double = 0.0) + extends InboundContext { private val associationsByAddress = new ConcurrentHashMap[Address, OutboundContext]() private val associationsByUid = new ConcurrentHashMap[Long, OutboundContext]() @@ -64,11 +64,11 @@ private[remote] class TestInboundContext( ArterySettings(ConfigFactory.load().getConfig("akka.remote.artery")) } 
-private[remote] class TestOutboundContext( - override val localAddress: UniqueAddress, - override val remoteAddress: Address, - override val controlSubject: TestControlMessageSubject, - val controlProbe: Option[ActorRef] = None) extends OutboundContext { +private[remote] class TestOutboundContext(override val localAddress: UniqueAddress, + override val remoteAddress: Address, + override val controlSubject: TestControlMessageSubject, + val controlProbe: Option[ActorRef] = None) + extends OutboundContext { // access to this is synchronized (it's a test utility) private var _associationState = AssociationState() @@ -95,8 +95,8 @@ private[remote] class TestOutboundContext( override def sendControl(message: ControlMessage) = { controlProbe.foreach(_ ! message) - controlSubject.sendControl(InboundEnvelope(OptionVal.None, message, OptionVal.None, localAddress.uid, - OptionVal.None)) + controlSubject.sendControl( + InboundEnvelope(OptionVal.None, message, OptionVal.None, localAddress.uid, OptionVal.None)) } override lazy val settings: ArterySettings = @@ -119,16 +119,15 @@ private[remote] class TestControlMessageSubject extends ControlMessageSubject { def sendControl(env: InboundEnvelope): Unit = { val iter = observers.iterator() - while (iter.hasNext()) - iter.next().notify(env) + while (iter.hasNext()) iter.next().notify(env) } } -private[remote] class ManualReplyInboundContext( - replyProbe: ActorRef, - localAddress: UniqueAddress, - controlSubject: TestControlMessageSubject) extends TestInboundContext(localAddress, controlSubject) { +private[remote] class ManualReplyInboundContext(replyProbe: ActorRef, + localAddress: UniqueAddress, + controlSubject: TestControlMessageSubject) + extends TestInboundContext(localAddress, controlSubject) { private var lastReply: Option[(Address, ControlMessage)] = None diff --git a/akka-remote/src/test/scala/akka/remote/artery/TransientSerializationErrorSpec.scala 
b/akka-remote/src/test/scala/akka/remote/artery/TransientSerializationErrorSpec.scala index 0ce834bdc3..f8479d40d1 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/TransientSerializationErrorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/TransientSerializationErrorSpec.scala @@ -6,5 +6,4 @@ package akka.remote.artery import akka.remote.AbstractTransientSerializationErrorSpec -class TransientSerializationErrorSpec extends AbstractTransientSerializationErrorSpec( - ArterySpecSupport.defaultConfig) +class TransientSerializationErrorSpec extends AbstractTransientSerializationErrorSpec(ArterySpecSupport.defaultConfig) diff --git a/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala index 500a7a8124..a06c0d5ba3 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/UntrustedSpec.scala @@ -35,8 +35,8 @@ object UntrustedSpec { def receive = { case IdentifyReq(path) => context.actorSelection(path).tell(Identify(None), sender()) - case StopChild(name) => context.child(name) foreach context.stop - case msg => testActor forward msg + case StopChild(name) => context.child(name).foreach(context.stop) + case msg => testActor.forward(msg) } } @@ -45,24 +45,22 @@ object UntrustedSpec { testActor ! 
s"${self.path.name} stopped" } def receive = { - case msg => testActor forward msg + case msg => testActor.forward(msg) } } class FakeUser(testActor: ActorRef) extends Actor { context.actorOf(Props(classOf[Child], testActor), "receptionist") def receive = { - case msg => testActor forward msg + case msg => testActor.forward(msg) } } - val config = ConfigFactory.parseString( - """ + val config = ConfigFactory.parseString(""" akka.remote.artery.untrusted-mode = on akka.remote.artery.trusted-selection-paths = ["/user/receptionist", ] akka.loglevel = DEBUG # test verifies debug - """ - ).withFallback(ArterySpecSupport.defaultConfig) + """).withFallback(ArterySpecSupport.defaultConfig) } @@ -85,8 +83,7 @@ class UntrustedSpec extends ArteryMultiNodeSpec(UntrustedSpec.config) with Impli lazy val target2 = { val p = TestProbe()(client) - client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell( - IdentifyReq("child2"), p.ref) + client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell(IdentifyReq("child2"), p.ref) p.expectMsgType[ActorIdentity].ref.get } @@ -108,7 +105,7 @@ class UntrustedSpec extends ArteryMultiNodeSpec(UntrustedSpec.config) with Impli import Logging._ def receive = { case d @ Debug(_, _, msg: String) if msg contains "dropping" => logProbe.ref ! d - case _ => + case _ => } }).withDeploy(Deploy.local), "debugSniffer"), classOf[Logging.Debug]) @@ -128,7 +125,7 @@ class UntrustedSpec extends ArteryMultiNodeSpec(UntrustedSpec.config) with Impli client.actorOf(Props(new Actor { context.watch(target2) def receive = { - case x => testActor forward x + case x => testActor.forward(x) } }).withDeploy(Deploy.local)) receptionist ! 
StopChild("child2") @@ -145,8 +142,7 @@ class UntrustedSpec extends ArteryMultiNodeSpec(UntrustedSpec.config) with Impli "discard actor selection with non root anchor" in { val p = TestProbe()(client) - client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell( - Identify(None), p.ref) + client.actorSelection(RootActorPath(address) / receptionist.path.elements).tell(Identify(None), p.ref) val clientReceptionistRef = p.expectMsgType[ActorIdentity].ref.get val sel = ActorSelection(clientReceptionistRef, receptionist.path.toStringWithoutAddress) diff --git a/akka-remote/src/test/scala/akka/remote/artery/aeron/AeronSinkSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/aeron/AeronSinkSpec.scala index f0ec93bbb2..58a340970c 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/aeron/AeronSinkSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/aeron/AeronSinkSpec.scala @@ -60,14 +60,16 @@ class AeronSinkSpec extends AkkaSpec with ImplicitSender { val port = SocketUtil.temporaryLocalPort(udp = true) val channel = s"aeron:udp?endpoint=localhost:$port" - Source.fromGraph(new AeronSource(channel, 1, aeron, taskRunner, pool, IgnoreEventSink, 0)) + Source + .fromGraph(new AeronSource(channel, 1, aeron, taskRunner, pool, IgnoreEventSink, 0)) // fail receiver stream on first message .map(_ => throw new RuntimeException("stop") with NoStackTrace) .runWith(Sink.ignore) // use large enough messages to fill up buffers val payload = new Array[Byte](100000) - val done = Source(1 to 1000).map(_ => payload) + val done = Source(1 to 1000) + .map(_ => payload) .map { n => val envelope = pool.acquire() envelope.byteBuffer.put(payload) diff --git a/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala index f521a7ea30..285bb121d5 100644 --- 
a/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/compress/CompressionIntegrationSpec.scala @@ -40,8 +40,9 @@ object CompressionIntegrationSpec { } -class CompressionIntegrationSpec extends ArteryMultiNodeSpec(CompressionIntegrationSpec.commonConfig) - with ImplicitSender { +class CompressionIntegrationSpec + extends ArteryMultiNodeSpec(CompressionIntegrationSpec.commonConfig) + with ImplicitSender { val systemB = newRemoteSystem(name = Some("systemB")) val messagesToExchange = 10 @@ -51,8 +52,10 @@ class CompressionIntegrationSpec extends ArteryMultiNodeSpec(CompressionIntegrat // listen for compression table events val aManifestProbe = TestProbe()(system) val bManifestProbe = TestProbe()(systemB) - system.eventStream.subscribe(aManifestProbe.ref, classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable]) - systemB.eventStream.subscribe(bManifestProbe.ref, classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable]) + system.eventStream.subscribe(aManifestProbe.ref, + classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable]) + systemB.eventStream.subscribe(bManifestProbe.ref, + classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable]) val aRefProbe = TestProbe()(system) val bRefProbe = TestProbe()(systemB) system.eventStream.subscribe(aRefProbe.ref, classOf[CompressionProtocol.Events.ReceivedActorRefCompressionTable]) @@ -65,7 +68,9 @@ class CompressionIntegrationSpec extends ArteryMultiNodeSpec(CompressionIntegrat // cause TestMessage manifest to become a heavy hitter // cause echo to become a heavy hitter - (1 to messagesToExchange).foreach { i => echoRefA ! TestMessage("hello") } + (1 to messagesToExchange).foreach { i => + echoRefA ! 
TestMessage("hello") + } receiveN(messagesToExchange) // the replies within(10.seconds) { @@ -189,15 +194,16 @@ class CompressionIntegrationSpec extends ArteryMultiNodeSpec(CompressionIntegrat "work when starting new ActorSystem with same hostname:port" in { val port = address(systemB).port.get shutdown(systemB) - val systemB2 = newRemoteSystem( - extraConfig = Some(s"akka.remote.artery.canonical.port=$port"), - name = Some("systemB")) + val systemB2 = + newRemoteSystem(extraConfig = Some(s"akka.remote.artery.canonical.port=$port"), name = Some("systemB")) // listen for compression table events val aManifestProbe = TestProbe()(system) val bManifestProbe = TestProbe()(systemB2) - system.eventStream.subscribe(aManifestProbe.ref, classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable]) - systemB2.eventStream.subscribe(bManifestProbe.ref, classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable]) + system.eventStream.subscribe(aManifestProbe.ref, + classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable]) + systemB2.eventStream.subscribe(bManifestProbe.ref, + classOf[CompressionProtocol.Events.ReceivedClassManifestCompressionTable]) val aRefProbe = TestProbe()(system) val bRefProbe = TestProbe()(systemB2) system.eventStream.subscribe(aRefProbe.ref, classOf[CompressionProtocol.Events.ReceivedActorRefCompressionTable]) @@ -218,7 +224,9 @@ class CompressionIntegrationSpec extends ArteryMultiNodeSpec(CompressionIntegrat val echoRefA = expectMsgType[ActorIdentity].ref.get // cause TestMessage manifest to become a heavy hitter - (1 to messagesToExchange).foreach { i => echoRefA ! TestMessage("hello") } + (1 to messagesToExchange).foreach { i => + echoRefA ! 
TestMessage("hello") + } receiveN(messagesToExchange) // the replies within(10.seconds) { @@ -267,9 +275,8 @@ class CompressionIntegrationSpec extends ArteryMultiNodeSpec(CompressionIntegrat val systemWrap = newRemoteSystem(extraConfig = Some(extraConfig)) val receivedActorRefCompressionTableProbe = TestProbe()(system) - system.eventStream.subscribe( - receivedActorRefCompressionTableProbe.ref, - classOf[CompressionProtocol.Events.ReceivedActorRefCompressionTable]) + system.eventStream.subscribe(receivedActorRefCompressionTableProbe.ref, + classOf[CompressionProtocol.Events.ReceivedActorRefCompressionTable]) def createAndIdentify(i: Int) = { val echoWrap = systemWrap.actorOf(TestActors.echoActorProps, s"echo_$i") @@ -281,7 +288,8 @@ class CompressionIntegrationSpec extends ArteryMultiNodeSpec(CompressionIntegrat // iterate from 2, since our assertion wants the locally created actor to be included in the table // which will only happen in the 2nd advertisement the earliest. val upToNTablesAcceptedAfterWrap = 6 - var remainingExpectedTableVersions = (Iterator.from(2).take(126) ++ Iterator.from(0).take(upToNTablesAcceptedAfterWrap + 1)).toList + var remainingExpectedTableVersions = + (Iterator.from(2).take(126) ++ Iterator.from(0).take(upToNTablesAcceptedAfterWrap + 1)).toList // so table version wraps around at least once var lastTable: CompressionTable[ActorRef] = null @@ -295,7 +303,9 @@ class CompressionIntegrationSpec extends ArteryMultiNodeSpec(CompressionIntegrat allRefs ::= echoWrap // cause echo to become a heavy hitter - (1 to messagesToExchange).foreach { i => echoWrap ! TestMessage("hello") } + (1 to messagesToExchange).foreach { i => + echoWrap ! 
TestMessage("hello") + } receiveN(messagesToExchange) // the replies var currentTable: CompressionTable[ActorRef] = null diff --git a/akka-remote/src/test/scala/akka/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala index 416d279534..fa771584b8 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/compress/HandshakeShouldDropCompressionTableSpec.scala @@ -34,17 +34,17 @@ object HandshakeShouldDropCompressionTableSpec { } -class HandshakeShouldDropCompressionTableSpec extends ArteryMultiNodeSpec(HandshakeShouldDropCompressionTableSpec.commonConfig) - with ImplicitSender with BeforeAndAfter { +class HandshakeShouldDropCompressionTableSpec + extends ArteryMultiNodeSpec(HandshakeShouldDropCompressionTableSpec.commonConfig) + with ImplicitSender + with BeforeAndAfter { implicit val t = Timeout(3.seconds) var systemB: ActorSystem = null val portB = freePort() before { - systemB = newRemoteSystem( - name = Some("systemB"), - extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) + systemB = newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) } "Outgoing compression table" must { @@ -64,7 +64,9 @@ class HandshakeShouldDropCompressionTableSpec extends ArteryMultiNodeSpec(Handsh systemB.actorOf(TestActors.echoActorProps, "echo") // cause testActor-1 to become a heavy hitter - (1 to messagesToExchange).foreach { i => echoSel ! s"hello-$i" } // does not reply, but a hot receiver should be advertised + (1 to messagesToExchange).foreach { i => + echoSel ! 
s"hello-$i" + } // does not reply, but a hot receiver should be advertised waitForEcho(this, s"hello-$messagesToExchange") systemBTransport.triggerCompressionAdvertisements(actorRef = true, manifest = false) @@ -73,7 +75,9 @@ class HandshakeShouldDropCompressionTableSpec extends ArteryMultiNodeSpec(Handsh a0.table.dictionary.keySet should contain(testActor) // cause a1Probe to become a heavy hitter (we want to not have it in the 2nd compression table later) - (1 to messagesToExchange).foreach { i => echoSel.tell(s"hello-$i", a1Probe.ref) } + (1 to messagesToExchange).foreach { i => + echoSel.tell(s"hello-$i", a1Probe.ref) + } waitForEcho(a1Probe, s"hello-$messagesToExchange") systemBTransport.triggerCompressionAdvertisements(actorRef = true, manifest = false) @@ -83,9 +87,8 @@ class HandshakeShouldDropCompressionTableSpec extends ArteryMultiNodeSpec(Handsh log.info("SHUTTING DOWN system {}...", systemB) shutdown(systemB) - systemB = newRemoteSystem( - name = Some("systemB"), - extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) + systemB = + newRemoteSystem(name = Some("systemB"), extraConfig = Some(s"akka.remote.artery.canonical.port = $portB")) Thread.sleep(1000) log.info("SYSTEM READY {}...", systemB) @@ -93,9 +96,11 @@ class HandshakeShouldDropCompressionTableSpec extends ArteryMultiNodeSpec(Handsh system.eventStream.subscribe(aNewProbe.ref, classOf[Event]) systemB.actorOf(TestActors.echoActorProps, "echo") // start it again - (1 to 5) foreach { _ => + (1 to 5).foreach { _ => // since some messages may end up being lost - (1 to messagesToExchange).foreach { i => echoSel ! s"hello-$i" } // does not reply, but a hot receiver should be advertised + (1 to messagesToExchange).foreach { i => + echoSel ! 
s"hello-$i" + } // does not reply, but a hot receiver should be advertised Thread.sleep(100) } waitForEcho(this, s"hello-$messagesToExchange", max = 10.seconds) @@ -106,7 +111,9 @@ class HandshakeShouldDropCompressionTableSpec extends ArteryMultiNodeSpec(Handsh a2.table.dictionary.keySet should contain(testActor) val aNew2Probe = TestProbe() - (1 to messagesToExchange).foreach { i => echoSel.tell(s"hello-$i", aNew2Probe.ref) } // does not reply, but a hot receiver should be advertised + (1 to messagesToExchange).foreach { i => + echoSel.tell(s"hello-$i", aNew2Probe.ref) + } // does not reply, but a hot receiver should be advertised waitForEcho(aNew2Probe, s"hello-$messagesToExchange") systemBTransport.triggerCompressionAdvertisements(actorRef = true, manifest = false) diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/TcpFramingSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/tcp/TcpFramingSpec.scala index 253b24fcfe..0adf3cdbff 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/tcp/TcpFramingSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/TcpFramingSpec.scala @@ -60,15 +60,14 @@ class TcpFramingSpec extends AkkaSpec with ImplicitSender { } "grab streamId from connection header in single chunk" in { - val frames = Source(List(TcpFraming.encodeConnectionHeader(1), frameBytes(1))).via(framingFlow) - .runWith(Sink.seq).futureValue + val frames = + Source(List(TcpFraming.encodeConnectionHeader(1), frameBytes(1))).via(framingFlow).runWith(Sink.seq).futureValue frames.head.streamId should ===(1) } "reject invalid magic" in { val bytes = frameBytes(2) - val fail = Source(List(bytes)).via(framingFlow).runWith(Sink.seq) - .failed.futureValue + val fail = Source(List(bytes)).via(framingFlow).runWith(Sink.seq).failed.futureValue fail shouldBe a[ParsingException] fail.getCause shouldBe a[FramingException] } @@ -99,8 +98,7 @@ class TcpFramingSpec extends AkkaSpec with ImplicitSender { "report truncated frames" in { val bytes = 
TcpFraming.encodeConnectionHeader(3) ++ frameBytes(3).drop(1) - Source(List(bytes)).via(framingFlow).runWith(Sink.seq) - .failed.futureValue shouldBe a[FramingException] + Source(List(bytes)).via(framingFlow).runWith(Sink.seq).failed.futureValue shouldBe a[FramingException] } "work with empty stream" in { diff --git a/akka-remote/src/test/scala/akka/remote/artery/tcp/TlsTcpSpec.scala b/akka-remote/src/test/scala/akka/remote/artery/tcp/TlsTcpSpec.scala index 86a8b985a7..6101e1f142 100644 --- a/akka-remote/src/test/scala/akka/remote/artery/tcp/TlsTcpSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/artery/tcp/TlsTcpSpec.scala @@ -27,21 +27,24 @@ import javax.net.ssl.SSLEngine class TlsTcpWithDefaultConfigSpec extends TlsTcpSpec(ConfigFactory.empty()) -class TlsTcpWithSHA1PRNGSpec extends TlsTcpSpec(ConfigFactory.parseString(""" +class TlsTcpWithSHA1PRNGSpec + extends TlsTcpSpec(ConfigFactory.parseString(""" akka.remote.artery.ssl.config-ssl-engine { random-number-generator = "SHA1PRNG" enabled-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA"] } """)) -class TlsTcpWithDefaultRNGSecureSpec extends TlsTcpSpec(ConfigFactory.parseString(""" +class TlsTcpWithDefaultRNGSecureSpec + extends TlsTcpSpec(ConfigFactory.parseString(""" akka.remote.artery.ssl.config-ssl-engine { random-number-generator = "" enabled-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA"] } """)) -class TlsTcpWithCrappyRSAWithMD5OnlyHereToMakeSureThingsWorkSpec extends TlsTcpSpec(ConfigFactory.parseString(""" +class TlsTcpWithCrappyRSAWithMD5OnlyHereToMakeSureThingsWorkSpec + extends TlsTcpSpec(ConfigFactory.parseString(""" akka.remote.artery.ssl.config-ssl-engine { random-number-generator = "" enabled-algorithms = [""SSL_RSA_WITH_NULL_MD5""] @@ -62,7 +65,8 @@ object TlsTcpSpec { } abstract class TlsTcpSpec(config: Config) - extends ArteryMultiNodeSpec(config.withFallback(TlsTcpSpec.config)) with ImplicitSender { + extends ArteryMultiNodeSpec(config.withFallback(TlsTcpSpec.config)) + with ImplicitSender { 
val systemB = newRemoteSystem(name = Some("systemB")) val addressB = address(systemB) @@ -83,12 +87,12 @@ abstract class TlsTcpSpec(config: Config) val port = address.port.get val engine = provider.createServerSSLEngine(host, port) - val gotAllSupported = provider.SSLEnabledAlgorithms diff engine.getSupportedCipherSuites.toSet - val gotAllEnabled = provider.SSLEnabledAlgorithms diff engine.getEnabledCipherSuites.toSet + val gotAllSupported = provider.SSLEnabledAlgorithms.diff(engine.getSupportedCipherSuites.toSet) + val gotAllEnabled = provider.SSLEnabledAlgorithms.diff(engine.getEnabledCipherSuites.toSet) gotAllSupported.isEmpty || (throw new IllegalArgumentException("Cipher Suite not supported: " + gotAllSupported)) gotAllEnabled.isEmpty || (throw new IllegalArgumentException("Cipher Suite not enabled: " + gotAllEnabled)) engine.getSupportedProtocols.contains(provider.SSLProtocol) || - (throw new IllegalArgumentException("Protocol not supported: " + provider.SSLProtocol)) + (throw new IllegalArgumentException("Protocol not supported: " + provider.SSLProtocol)) } catch { case e @ (_: IllegalArgumentException | _: NoSuchAlgorithmException) => info(e.toString) @@ -172,12 +176,13 @@ abstract class TlsTcpSpec(config: Config) } -class TlsTcpWithHostnameVerificationSpec extends ArteryMultiNodeSpec( - ConfigFactory.parseString(""" +class TlsTcpWithHostnameVerificationSpec + extends ArteryMultiNodeSpec(ConfigFactory.parseString(""" akka.remote.artery.ssl.config-ssl-engine { hostname-verification = on } - """).withFallback(TlsTcpSpec.config)) with ImplicitSender { + """).withFallback(TlsTcpSpec.config)) + with ImplicitSender { val systemB = newRemoteSystem(name = Some("systemB")) val addressB = address(systemB) @@ -196,24 +201,24 @@ class TlsTcpWithHostnameVerificationSpec extends ArteryMultiNodeSpec( } } -class TlsTcpWithActorSystemSetupSpec - extends ArteryMultiNodeSpec(TlsTcpSpec.config) with ImplicitSender { +class TlsTcpWithActorSystemSetupSpec extends 
ArteryMultiNodeSpec(TlsTcpSpec.config) with ImplicitSender { val sslProviderServerProbe = TestProbe() val sslProviderClientProbe = TestProbe() - val sslProviderSetup = SSLEngineProviderSetup(sys => new ConfigSSLEngineProvider(sys) { - override def createServerSSLEngine(hostname: String, port: Int): SSLEngine = { - sslProviderServerProbe.ref ! "createServerSSLEngine" - super.createServerSSLEngine(hostname, port) - } + val sslProviderSetup = SSLEngineProviderSetup(sys => + new ConfigSSLEngineProvider(sys) { + override def createServerSSLEngine(hostname: String, port: Int): SSLEngine = { + sslProviderServerProbe.ref ! "createServerSSLEngine" + super.createServerSSLEngine(hostname, port) + } - override def createClientSSLEngine(hostname: String, port: Int): SSLEngine = { - sslProviderClientProbe.ref ! "createClientSSLEngine" - super.createClientSSLEngine(hostname, port) - } + override def createClientSSLEngine(hostname: String, port: Int): SSLEngine = { + sslProviderClientProbe.ref ! "createClientSSLEngine" + super.createClientSSLEngine(hostname, port) + } - }) + }) val systemB = newRemoteSystem(name = Some("systemB"), setup = Some(ActorSystemSetup(sslProviderSetup))) val addressB = address(systemB) diff --git a/akka-remote/src/test/scala/akka/remote/serialization/AllowJavaSerializationOffSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/AllowJavaSerializationOffSpec.scala index eb12aaff5a..446bfac5b6 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/AllowJavaSerializationOffSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/AllowJavaSerializationOffSpec.scala @@ -22,10 +22,12 @@ object AllowJavaSerializationOffSpec { val dummySerializer = new FakeSerializer val serializationSettings = SerializationSetup { _ => - List( - SerializerDetails("test", dummySerializer, List(classOf[ProgrammaticDummy]))) + List(SerializerDetails("test", dummySerializer, List(classOf[ProgrammaticDummy]))) } - val bootstrapSettings = 
BootstrapSetup(None, Some(ConfigFactory.parseString(""" + val bootstrapSettings = BootstrapSetup(None, + Some( + ConfigFactory.parseString( + """ akka { actor { serialize-messages = off @@ -37,11 +39,13 @@ object AllowJavaSerializationOffSpec { } } } - """)), None) + """)), + None) val actorSystemSettings = ActorSystemSetup(bootstrapSettings, serializationSettings) - val noJavaSerializationSystem = ActorSystem("AllowJavaSerializationOffSpec" + "NoJavaSerialization", ConfigFactory.parseString( - """ + val noJavaSerializationSystem = ActorSystem("AllowJavaSerializationOffSpec" + "NoJavaSerialization", + ConfigFactory.parseString( + """ akka { actor { allow-java-serialization = off @@ -54,8 +58,8 @@ object AllowJavaSerializationOffSpec { } -class AllowJavaSerializationOffSpec extends AkkaSpec( - ActorSystem("AllowJavaSerializationOffSpec", AllowJavaSerializationOffSpec.actorSystemSettings)) { +class AllowJavaSerializationOffSpec + extends AkkaSpec(ActorSystem("AllowJavaSerializationOffSpec", AllowJavaSerializationOffSpec.actorSystemSettings)) { import AllowJavaSerializationOffSpec._ @@ -64,11 +68,15 @@ class AllowJavaSerializationOffSpec extends AkkaSpec( // allow-java-serialization=on to create the SerializationSetup and use that SerializationSetup // in another system with allow-java-serialization=off val addedJavaSerializationSettings = SerializationSetup { _ => - List( - SerializerDetails("test", dummySerializer, List(classOf[ProgrammaticDummy])), - SerializerDetails("java-manual", new JavaSerializer(system.asInstanceOf[ExtendedActorSystem]), List(classOf[ProgrammaticJavaDummy]))) + List(SerializerDetails("test", dummySerializer, List(classOf[ProgrammaticDummy])), + SerializerDetails("java-manual", + new JavaSerializer(system.asInstanceOf[ExtendedActorSystem]), + List(classOf[ProgrammaticJavaDummy]))) } - val addedJavaSerializationProgramaticallyButDisabledSettings = BootstrapSetup(None, Some(ConfigFactory.parseString(""" + val 
addedJavaSerializationProgramaticallyButDisabledSettings = BootstrapSetup(None, + Some( + ConfigFactory.parseString( + """ akka { loglevel = debug actor { @@ -78,10 +86,13 @@ class AllowJavaSerializationOffSpec extends AkkaSpec( warn-about-java-serializer-usage = on } } - """)), None) + """)), + None) val dontAllowJavaSystem = - ActorSystem("addedJavaSerializationSystem", ActorSystemSetup(addedJavaSerializationProgramaticallyButDisabledSettings, addedJavaSerializationSettings)) + ActorSystem( + "addedJavaSerializationSystem", + ActorSystemSetup(addedJavaSerializationProgramaticallyButDisabledSettings, addedJavaSerializationSettings)) "Disabling java serialization" should { @@ -91,7 +102,9 @@ class AllowJavaSerializationOffSpec extends AkkaSpec( }.getMessage should include("akka.actor.allow-java-serialization = off") intercept[DisabledJavaSerializer.JavaSerializationException] { - SerializationExtension(dontAllowJavaSystem).findSerializerFor(new ProgrammaticJavaDummy).toBinary(new ProgrammaticJavaDummy) + SerializationExtension(dontAllowJavaSystem) + .findSerializerFor(new ProgrammaticJavaDummy) + .toBinary(new ProgrammaticJavaDummy) } } @@ -100,8 +113,8 @@ class AllowJavaSerializationOffSpec extends AkkaSpec( val ser = SerializationExtension(dontAllowJavaSystem).findSerializerFor(some).asInstanceOf[MiscMessageSerializer] val bytes = ser.toBinary(some) ser.fromBinary(bytes, ser.manifest(some)) should ===(Some("foo")) - SerializationExtension(dontAllowJavaSystem).deserialize(bytes, ser.identifier, ser.manifest(some)) - .get should ===(Some("foo")) + SerializationExtension(dontAllowJavaSystem).deserialize(bytes, ser.identifier, ser.manifest(some)).get should ===( + Some("foo")) } "have replaced java serializer" in { diff --git a/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala index 6ce5e9831b..0c7238b825 100644 --- 
a/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/ArteryMessageSerializerSpec.scala @@ -9,7 +9,12 @@ import java.io.NotSerializableException import akka.actor._ import akka.remote.{ RemoteWatcher, UniqueAddress } import akka.remote.artery.OutboundHandshake.{ HandshakeReq, HandshakeRsp } -import akka.remote.artery.compress.CompressionProtocol.{ ActorRefCompressionAdvertisement, ActorRefCompressionAdvertisementAck, ClassManifestCompressionAdvertisement, ClassManifestCompressionAdvertisementAck } +import akka.remote.artery.compress.CompressionProtocol.{ + ActorRefCompressionAdvertisement, + ActorRefCompressionAdvertisementAck, + ClassManifestCompressionAdvertisement, + ClassManifestCompressionAdvertisementAck +} import akka.remote.artery.compress.CompressionTable import akka.remote.artery.{ ActorSystemTerminating, ActorSystemTerminatingAck, Quarantined, SystemMessageDelivery } import akka.serialization.SerializationExtension @@ -20,32 +25,36 @@ class ArteryMessageSerializerSpec extends AkkaSpec { val actorA = system.actorOf(Props.empty) val actorB = system.actorOf(Props.empty) - Seq( - "Quarantined" -> Quarantined(uniqueAddress(), uniqueAddress()), - "ActorSystemTerminating" -> ActorSystemTerminating(uniqueAddress()), - "ActorSystemTerminatingAck" -> ActorSystemTerminatingAck(uniqueAddress()), - "HandshakeReq" -> HandshakeReq(uniqueAddress(), uniqueAddress().address), - "HandshakeRsp" -> HandshakeRsp(uniqueAddress()), - "ActorRefCompressionAdvertisement" -> ActorRefCompressionAdvertisement(uniqueAddress(), CompressionTable(17L, 123, Map(actorA -> 123, actorB -> 456, system.deadLetters -> 0))), - "ActorRefCompressionAdvertisementAck" -> ActorRefCompressionAdvertisementAck(uniqueAddress(), 23), - "ClassManifestCompressionAdvertisement" -> ClassManifestCompressionAdvertisement(uniqueAddress(), CompressionTable(17L, 42, Map("a" -> 535, "b" -> 23))), - 
"ClassManifestCompressionAdvertisementAck" -> ClassManifestCompressionAdvertisementAck(uniqueAddress(), 23), - "SystemMessageDelivery.SystemMessageEnvelop" -> SystemMessageDelivery.SystemMessageEnvelope("test", 1234567890123L, uniqueAddress()), - "SystemMessageDelivery.Ack" -> SystemMessageDelivery.Ack(98765432109876L, uniqueAddress()), - "SystemMessageDelivery.Nack" -> SystemMessageDelivery.Nack(98765432109876L, uniqueAddress()), - "RemoteWatcher.ArteryHeartbeat" -> RemoteWatcher.ArteryHeartbeat, - "RemoteWatcher.ArteryHeartbeatRsp" -> RemoteWatcher.ArteryHeartbeatRsp(Long.MaxValue) - ).foreach { - case (scenario, item) => - s"resolve serializer for $scenario" in { - val serializer = SerializationExtension(system) - serializer.serializerFor(item.getClass).getClass should ===(classOf[ArteryMessageSerializer]) - } + Seq("Quarantined" -> Quarantined(uniqueAddress(), uniqueAddress()), + "ActorSystemTerminating" -> ActorSystemTerminating(uniqueAddress()), + "ActorSystemTerminatingAck" -> ActorSystemTerminatingAck(uniqueAddress()), + "HandshakeReq" -> HandshakeReq(uniqueAddress(), uniqueAddress().address), + "HandshakeRsp" -> HandshakeRsp(uniqueAddress()), + "ActorRefCompressionAdvertisement" -> ActorRefCompressionAdvertisement( + uniqueAddress(), + CompressionTable(17L, 123, Map(actorA -> 123, actorB -> 456, system.deadLetters -> 0))), + "ActorRefCompressionAdvertisementAck" -> ActorRefCompressionAdvertisementAck(uniqueAddress(), 23), + "ClassManifestCompressionAdvertisement" -> ClassManifestCompressionAdvertisement( + uniqueAddress(), + CompressionTable(17L, 42, Map("a" -> 535, "b" -> 23))), + "ClassManifestCompressionAdvertisementAck" -> ClassManifestCompressionAdvertisementAck(uniqueAddress(), 23), + "SystemMessageDelivery.SystemMessageEnvelop" -> SystemMessageDelivery.SystemMessageEnvelope("test", + 1234567890123L, + uniqueAddress()), + "SystemMessageDelivery.Ack" -> SystemMessageDelivery.Ack(98765432109876L, uniqueAddress()), + "SystemMessageDelivery.Nack" -> 
SystemMessageDelivery.Nack(98765432109876L, uniqueAddress()), + "RemoteWatcher.ArteryHeartbeat" -> RemoteWatcher.ArteryHeartbeat, + "RemoteWatcher.ArteryHeartbeatRsp" -> RemoteWatcher.ArteryHeartbeatRsp(Long.MaxValue)).foreach { + case (scenario, item) => + s"resolve serializer for $scenario" in { + val serializer = SerializationExtension(system) + serializer.serializerFor(item.getClass).getClass should ===(classOf[ArteryMessageSerializer]) + } - s"serialize and de-serialize $scenario" in { - verifySerialization(item) - } - } + s"serialize and de-serialize $scenario" in { + verifySerialization(item) + } + } "not support UniqueAddresses without host/port set" in pending @@ -72,4 +81,3 @@ class ArteryMessageSerializerSpec extends AkkaSpec { UniqueAddress(Address("abc", "def", "host", 12345), 2342) } } - diff --git a/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerSpec.scala index 15f4c2d432..6296d77544 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/DaemonMsgCreateSerializerSpec.scala @@ -6,7 +6,16 @@ package akka.remote.serialization import akka.actor.ActorSystem import akka.testkit.TestKit -import akka.actor.{ Actor, ActorRef, Address, Deploy, ExtendedActorSystem, OneForOneStrategy, Props, SupervisorStrategy } +import akka.actor.{ + Actor, + ActorRef, + Address, + Deploy, + ExtendedActorSystem, + OneForOneStrategy, + Props, + SupervisorStrategy +} import akka.remote.{ DaemonMsgCreate, RemoteScope } import akka.routing.{ FromConfig, RoundRobinPool } import akka.serialization.{ Serialization, SerializationExtension } @@ -33,14 +42,16 @@ trait SerializationVerification { self: AkkaSpec => def ser: Serialization def verifySerialization(msg: DaemonMsgCreate): Unit = { - assertDaemonMsgCreate(msg, ser.deserialize(ser.serialize(msg).get, 
classOf[DaemonMsgCreate]).get.asInstanceOf[DaemonMsgCreate]) + assertDaemonMsgCreate( + msg, + ser.deserialize(ser.serialize(msg).get, classOf[DaemonMsgCreate]).get.asInstanceOf[DaemonMsgCreate]) } def assertDaemonMsgCreate(expected: DaemonMsgCreate, got: DaemonMsgCreate): Unit = { // can't compare props.creator when function got.props.clazz should ===(expected.props.clazz) got.props.args.length should ===(expected.props.args.length) - got.props.args zip expected.props.args foreach { + got.props.args.zip(expected.props.args).foreach { case (g, e) => if (e.isInstanceOf[Function0[_]]) () else if (e.isInstanceOf[Function1[_, _]]) () @@ -67,47 +78,38 @@ class DaemonMsgCreateSerializerSpec extends AkkaSpec with SerializationVerificat "serialize and de-serialize DaemonMsgCreate with FromClassCreator" in { verifySerialization { - DaemonMsgCreate( - props = Props[MyActor], - deploy = Deploy(), - path = "foo", - supervisor = supervisor) + DaemonMsgCreate(props = Props[MyActor], deploy = Deploy(), path = "foo", supervisor = supervisor) } } "serialize and de-serialize DaemonMsgCreate with FromClassCreator, with null parameters for Props" in { verifySerialization { - DaemonMsgCreate( - props = Props(classOf[MyActorWithParam], null), - deploy = Deploy(), - path = "foo", - supervisor = supervisor) + DaemonMsgCreate(props = Props(classOf[MyActorWithParam], null), + deploy = Deploy(), + path = "foo", + supervisor = supervisor) } } "serialize and de-serialize DaemonMsgCreate with function creator" in { verifySerialization { - DaemonMsgCreate( - props = Props(new MyActor), - deploy = Deploy(), - path = "foo", - supervisor = supervisor) + DaemonMsgCreate(props = Props(new MyActor), deploy = Deploy(), path = "foo", supervisor = supervisor) } } "serialize and de-serialize DaemonMsgCreate with FromClassCreator, with function parameters for Props" in { verifySerialization { - DaemonMsgCreate( - props = Props(classOf[MyActorWithFunParam], (i: Int) => i + 1), - deploy = Deploy(), - path = 
"foo", - supervisor = supervisor) + DaemonMsgCreate(props = Props(classOf[MyActorWithFunParam], (i: Int) => i + 1), + deploy = Deploy(), + path = "foo", + supervisor = supervisor) } } "deserialize the old wire format with just class and field for props parameters (if possible)" in { - val system = ActorSystem("DaemonMsgCreateSerializer-old-wire-format", ConfigFactory.parseString( - """ + val system = ActorSystem("DaemonMsgCreateSerializer-old-wire-format", + ConfigFactory.parseString( + """ # in 2.4 this is off by default, but in 2.5+ its on so we wouldn't # get the right set of serializers (and since the old wire protocol doesn't # contain serializer ids that will go unnoticed with unpleasant consequences) @@ -124,17 +126,17 @@ class DaemonMsgCreateSerializerSpec extends AkkaSpec with SerializationVerificat val bytes = serializer.toBinary( DaemonMsgCreate(Props(classOf[MyActorWithParam], "a string"), Deploy.local, "/user/test", system.actorFor("/user"))) println(String.valueOf(encodeHex(bytes))) - */ + */ val oldBytesHex = "0a7112020a001a48616b6b612e72656d6f74652e73657269616c697a617" + - "4696f6e2e4461656d6f6e4d736743726561746553657269616c697a6572" + - "53706563244d794163746f7257697468506172616d220faced000574000" + - "86120737472696e672a106a6176612e6c616e672e537472696e67122f0a" + - "00222baced000573720016616b6b612e6163746f722e4c6f63616c53636" + - "f706524000000000000000102000078701a0a2f757365722f7465737422" + - "2b0a29616b6b613a2f2f4461656d6f6e4d7367437265617465536572696" + - "16c697a6572537065632f75736572" + "4696f6e2e4461656d6f6e4d736743726561746553657269616c697a6572" + + "53706563244d794163746f7257697468506172616d220faced000574000" + + "86120737472696e672a106a6176612e6c616e672e537472696e67122f0a" + + "00222baced000573720016616b6b612e6163746f722e4c6f63616c53636" + + "f706524000000000000000102000078701a0a2f757365722f7465737422" + + "2b0a29616b6b613a2f2f4461656d6f6e4d7367437265617465536572696" + + "16c697a6572537065632f75736572" import 
org.apache.commons.codec.binary.Hex.decodeHex val oldBytes = decodeHex(oldBytesHex.toCharArray) @@ -154,42 +156,38 @@ class DaemonMsgCreateSerializerSpec extends AkkaSpec with SerializationVerificat // Duration.Inf doesn't equal Duration.Inf, so we use another for test // we don't serialize the supervisor strategy, but always fallback to default val supervisorStrategy = SupervisorStrategy.defaultStrategy - val deploy1 = Deploy( - path = "path1", - config = ConfigFactory.parseString("a=1"), - routerConfig = RoundRobinPool(nrOfInstances = 5, supervisorStrategy = supervisorStrategy), - scope = RemoteScope(Address("akka", "Test", "host1", 1921)), - dispatcher = "mydispatcher") - val deploy2 = Deploy( - path = "path2", - config = ConfigFactory.parseString("a=2"), - routerConfig = FromConfig, - scope = RemoteScope(Address("akka", "Test", "host2", 1922)), - dispatcher = Deploy.NoDispatcherGiven) - DaemonMsgCreate( - props = Props[MyActor].withDispatcher("my-disp").withDeploy(deploy1), - deploy = deploy2, - path = "foo", - supervisor = supervisor) + val deploy1 = Deploy(path = "path1", + config = ConfigFactory.parseString("a=1"), + routerConfig = RoundRobinPool(nrOfInstances = 5, supervisorStrategy = supervisorStrategy), + scope = RemoteScope(Address("akka", "Test", "host1", 1921)), + dispatcher = "mydispatcher") + val deploy2 = Deploy(path = "path2", + config = ConfigFactory.parseString("a=2"), + routerConfig = FromConfig, + scope = RemoteScope(Address("akka", "Test", "host2", 1922)), + dispatcher = Deploy.NoDispatcherGiven) + DaemonMsgCreate(props = Props[MyActor].withDispatcher("my-disp").withDeploy(deploy1), + deploy = deploy2, + path = "foo", + supervisor = supervisor) } } "allows for mixing serializers with and without manifests for props parameters" in { verifySerialization { DaemonMsgCreate( - // parameters should trigger JavaSerializer for the first one and additional protobuf for the second (?) 
- props = Props(classOf[ActorWithDummyParameter], new DummyParameter("dummy"), system.deadLetters), - deploy = Deploy(), - path = "foo", - supervisor = supervisor) + // parameters should trigger JavaSerializer for the first one and additional protobuf for the second (?) + props = Props(classOf[ActorWithDummyParameter], new DummyParameter("dummy"), system.deadLetters), + deploy = Deploy(), + path = "foo", + supervisor = supervisor) } } } } -class DaemonMsgCreateSerializerNoJavaSerializationSpec extends AkkaSpec( - """ +class DaemonMsgCreateSerializerNoJavaSerializationSpec extends AkkaSpec(""" akka.actor.allow-java-serialization=off akka.actor.serialize-messages=off akka.actor.serialize-creators=off @@ -207,25 +205,21 @@ class DaemonMsgCreateSerializerNoJavaSerializationSpec extends AkkaSpec( case _ => SupervisorStrategy.Escalate } - val deploy1 = Deploy( - path = "path1", - config = ConfigFactory.parseString("a=1"), - // a whole can of worms: routerConfig = RoundRobinPool(nrOfInstances = 5, supervisorStrategy = supervisorStrategy), - scope = RemoteScope(Address("akka", "Test", "host1", 1921)), - dispatcher = "mydispatcher") - val deploy2 = Deploy( - path = "path2", - config = ConfigFactory.parseString("a=2"), - routerConfig = FromConfig, - scope = RemoteScope(Address("akka", "Test", "host2", 1922)), - dispatcher = Deploy.NoDispatcherGiven) - DaemonMsgCreate( - props = Props[MyActor].withDispatcher("my-disp").withDeploy(deploy1), - deploy = deploy2, - path = "foo", - supervisor = supervisor) + val deploy1 = Deploy(path = "path1", + config = ConfigFactory.parseString("a=1"), + // a whole can of worms: routerConfig = RoundRobinPool(nrOfInstances = 5, supervisorStrategy = supervisorStrategy), + scope = RemoteScope(Address("akka", "Test", "host1", 1921)), + dispatcher = "mydispatcher") + val deploy2 = Deploy(path = "path2", + config = ConfigFactory.parseString("a=2"), + routerConfig = FromConfig, + scope = RemoteScope(Address("akka", "Test", "host2", 1922)), + 
dispatcher = Deploy.NoDispatcherGiven) + DaemonMsgCreate(props = Props[MyActor].withDispatcher("my-disp").withDeploy(deploy1), + deploy = deploy2, + path = "foo", + supervisor = supervisor) } } } - diff --git a/akka-remote/src/test/scala/akka/remote/serialization/MessageContainerSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/MessageContainerSerializerSpec.scala index 18940b8643..9015117072 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/MessageContainerSerializerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/MessageContainerSerializerSpec.scala @@ -22,9 +22,15 @@ class MessageContainerSerializerSpec extends AkkaSpec { } "serialize and de-serialize ActorSelectionMessage" in { - verifySerialization(ActorSelectionMessage("hello", Vector( - SelectChildName("user"), SelectChildName("a"), SelectChildName("b"), SelectParent, - SelectChildPattern("*"), SelectChildName("c")), wildcardFanOut = true)) + verifySerialization( + ActorSelectionMessage("hello", + Vector(SelectChildName("user"), + SelectChildName("a"), + SelectChildName("b"), + SelectParent, + SelectChildPattern("*"), + SelectChildName("c")), + wildcardFanOut = true)) } def verifySerialization(msg: AnyRef): Unit = { @@ -33,4 +39,3 @@ class MessageContainerSerializerSpec extends AkkaSpec { } } - diff --git a/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala index 595abffc70..e88e9a810a 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/MiscMessageSerializerSpec.scala @@ -35,11 +35,11 @@ object MiscMessageSerializerSpec { override def equals(other: Any): Boolean = other match { case e: TestException => e.getMessage == getMessage && e.getCause == getCause && - // on JDK9+ the stacktraces aren't equal, something about how they 
are constructed - // they are alike enough to be roughly equal though - e.stackTrace.zip(stackTrace).forall { - case (t, o) => t.getClassName == o.getClassName && t.getFileName == o.getFileName - } + // on JDK9+ the stacktraces aren't equal, something about how they are constructed + // they are alike enough to be roughly equal though + e.stackTrace.zip(stackTrace).forall { + case (t, o) => t.getClassName == o.getClassName && t.getFileName == o.getFileName + } case _ => false } @@ -70,49 +70,52 @@ class MiscMessageSerializerSpec extends AkkaSpec(MiscMessageSerializerSpec.testC val ref = system.actorOf(Props.empty, "hello") "MiscMessageSerializer" must { - Seq( - "Identify" -> Identify("some-message"), - "Identify with None" -> Identify(None), - "Identify with Some" -> Identify(Some("value")), - "ActorIdentity without actor ref" -> ActorIdentity("some-message", ref = None), - "ActorIdentity with actor ref" -> ActorIdentity("some-message", ref = Some(testActor)), - "TestException" -> new TestException("err"), - "TestExceptionNoStack" -> new TestExceptionNoStack("err2"), - "TestException with cause" -> new TestException("err3", new TestException("cause")), - "Status.Success" -> Status.Success("value"), - "Status.Failure" -> Status.Failure(new TestException("err")), - "Status.Failure JavaSer" -> Status.Failure(new OtherException("exc")), // exc with JavaSerializer - "ActorRef" -> ref, - "Some" -> Some("value"), - "None" -> None, - "Optional.present" -> Optional.of("value2"), - "Optional.empty" -> Optional.empty(), - "Kill" -> Kill, - "PoisonPill" -> PoisonPill, - "RemoteWatcher.Heartbeat" -> RemoteWatcher.Heartbeat, - "RemoteWatcher.HertbeatRsp" -> RemoteWatcher.HeartbeatRsp(65537), - "Done" -> Done, - "NotUsed" -> NotUsed, - "Address" -> Address("akka", "system", "host", 1337), - "UniqueAddress" -> akka.remote.UniqueAddress(Address("akka", "system", "host", 1337), 82751), - "LocalScope" -> LocalScope, - "RemoteScope" -> RemoteScope(Address("akka", "system", 
"localhost", 2525)), - "Config" -> system.settings.config, - "Empty Config" -> ConfigFactory.empty(), - "FromConfig" -> FromConfig, - // routers - "DefaultResizer" -> DefaultResizer(), - "BalancingPool" -> BalancingPool(nrOfInstances = 25), - "BalancingPool with custom dispatcher" -> BalancingPool(nrOfInstances = 25, routerDispatcher = "my-dispatcher"), - "BroadcastPool" -> BroadcastPool(nrOfInstances = 25), - "BroadcastPool with custom dispatcher and resizer" -> BroadcastPool(nrOfInstances = 25, routerDispatcher = "my-dispatcher", usePoolDispatcher = true, resizer = Some(DefaultResizer())), - "RandomPool" -> RandomPool(nrOfInstances = 25), - "RandomPool with custom dispatcher" -> RandomPool(nrOfInstances = 25, routerDispatcher = "my-dispatcher"), - "RoundRobinPool" -> RoundRobinPool(25), - "ScatterGatherFirstCompletedPool" -> ScatterGatherFirstCompletedPool(25, within = 3.seconds), - "TailChoppingPool" -> TailChoppingPool(25, within = 3.seconds, interval = 1.second), - "RemoteRouterConfig" -> RemoteRouterConfig(local = RandomPool(25), nodes = List(Address("akka", "system", "localhost", 2525))) - ).foreach { + Seq("Identify" -> Identify("some-message"), + "Identify with None" -> Identify(None), + "Identify with Some" -> Identify(Some("value")), + "ActorIdentity without actor ref" -> ActorIdentity("some-message", ref = None), + "ActorIdentity with actor ref" -> ActorIdentity("some-message", ref = Some(testActor)), + "TestException" -> new TestException("err"), + "TestExceptionNoStack" -> new TestExceptionNoStack("err2"), + "TestException with cause" -> new TestException("err3", new TestException("cause")), + "Status.Success" -> Status.Success("value"), + "Status.Failure" -> Status.Failure(new TestException("err")), + "Status.Failure JavaSer" -> Status.Failure(new OtherException("exc")), // exc with JavaSerializer + "ActorRef" -> ref, + "Some" -> Some("value"), + "None" -> None, + "Optional.present" -> Optional.of("value2"), + "Optional.empty" -> Optional.empty(), + 
"Kill" -> Kill, + "PoisonPill" -> PoisonPill, + "RemoteWatcher.Heartbeat" -> RemoteWatcher.Heartbeat, + "RemoteWatcher.HertbeatRsp" -> RemoteWatcher.HeartbeatRsp(65537), + "Done" -> Done, + "NotUsed" -> NotUsed, + "Address" -> Address("akka", "system", "host", 1337), + "UniqueAddress" -> akka.remote.UniqueAddress(Address("akka", "system", "host", 1337), 82751), + "LocalScope" -> LocalScope, + "RemoteScope" -> RemoteScope(Address("akka", "system", "localhost", 2525)), + "Config" -> system.settings.config, + "Empty Config" -> ConfigFactory.empty(), + "FromConfig" -> FromConfig, + // routers + "DefaultResizer" -> DefaultResizer(), + "BalancingPool" -> BalancingPool(nrOfInstances = 25), + "BalancingPool with custom dispatcher" -> BalancingPool(nrOfInstances = 25, routerDispatcher = "my-dispatcher"), + "BroadcastPool" -> BroadcastPool(nrOfInstances = 25), + "BroadcastPool with custom dispatcher and resizer" -> BroadcastPool(nrOfInstances = 25, + routerDispatcher = "my-dispatcher", + usePoolDispatcher = true, + resizer = Some(DefaultResizer())), + "RandomPool" -> RandomPool(nrOfInstances = 25), + "RandomPool with custom dispatcher" -> RandomPool(nrOfInstances = 25, routerDispatcher = "my-dispatcher"), + "RoundRobinPool" -> RoundRobinPool(25), + "ScatterGatherFirstCompletedPool" -> ScatterGatherFirstCompletedPool(25, within = 3.seconds), + "TailChoppingPool" -> TailChoppingPool(25, within = 3.seconds, interval = 1.second), + "RemoteRouterConfig" -> RemoteRouterConfig(local = RandomPool(25), + nodes = List(Address("akka", "system", "localhost", 2525)))) + .foreach { case (scenario, item) => s"resolve serializer for $scenario" in { val serializer = SerializationExtension(system) @@ -146,13 +149,15 @@ class MiscMessageSerializerSpec extends AkkaSpec(MiscMessageSerializerSpec.testC // Separate tests due to missing equality on ActorInitializationException "resolve serializer for ActorInitializationException" in { val serializer = SerializationExtension(system) - 
serializer.serializerFor(classOf[ActorInitializationException]).getClass should ===(classOf[MiscMessageSerializer]) + serializer.serializerFor(classOf[ActorInitializationException]).getClass should ===( + classOf[MiscMessageSerializer]) } "serialize and deserialze ActorInitializationException" in { val aiex = ActorInitializationException(ref, "test", new TestException("err")) val serializer = new MiscMessageSerializer(system.asInstanceOf[ExtendedActorSystem]) - val deserialized = serializer.fromBinary(serializer.toBinary(aiex), serializer.manifest(aiex)) + val deserialized = serializer + .fromBinary(serializer.toBinary(aiex), serializer.manifest(aiex)) .asInstanceOf[ActorInitializationException] deserialized.getMessage should ===(aiex.getMessage) @@ -166,7 +171,8 @@ class MiscMessageSerializerSpec extends AkkaSpec(MiscMessageSerializerSpec.testC "serialize and deserialze ActorInitializationException if ref is null" in { val aiex = ActorInitializationException(null, "test", new TestException("err")) val serializer = new MiscMessageSerializer(system.asInstanceOf[ExtendedActorSystem]) - val deserialized = serializer.fromBinary(serializer.toBinary(aiex), serializer.manifest(aiex)) + val deserialized = serializer + .fromBinary(serializer.toBinary(aiex), serializer.manifest(aiex)) .asInstanceOf[ActorInitializationException] deserialized.getMessage should ===(aiex.getMessage) @@ -180,7 +186,8 @@ class MiscMessageSerializerSpec extends AkkaSpec(MiscMessageSerializerSpec.testC "serialize and deserialze ActorInitializationException if cause is null" in { val aiex = ActorInitializationException(ref, "test", null) val serializer = new MiscMessageSerializer(system.asInstanceOf[ExtendedActorSystem]) - val deserialized = serializer.fromBinary(serializer.toBinary(aiex), serializer.manifest(aiex)) + val deserialized = serializer + .fromBinary(serializer.toBinary(aiex), serializer.manifest(aiex)) .asInstanceOf[ActorInitializationException] deserialized.getMessage should 
===(aiex.getMessage) @@ -191,4 +198,3 @@ class MiscMessageSerializerSpec extends AkkaSpec(MiscMessageSerializerSpec.testC } } } - diff --git a/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala index c0863431e9..a36e640372 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/PrimitivesSerializationSpec.scala @@ -56,8 +56,9 @@ class PrimitivesSerializationSpec extends AkkaSpec(PrimitivesSerializationSpec.t } "LongSerializer" must { - Seq(0L, 1L, -1L, Long.MinValue, Long.MinValue + 1L, Long.MaxValue, Long.MaxValue - 1L).map(_.asInstanceOf[AnyRef]).foreach { - item => + Seq(0L, 1L, -1L, Long.MinValue, Long.MinValue + 1L, Long.MaxValue, Long.MaxValue - 1L) + .map(_.asInstanceOf[AnyRef]) + .foreach { item => s"resolve serializer for value $item" in { val serializer = SerializationExtension(system) serializer.serializerFor(item.getClass).getClass should ===(classOf[LongSerializer]) @@ -70,7 +71,7 @@ class PrimitivesSerializationSpec extends AkkaSpec(PrimitivesSerializationSpec.t s"serialize and de-serialize value $item using ByteBuffers" in { verifySerializationByteBuffer(item) } - } + } } @@ -94,11 +95,8 @@ class PrimitivesSerializationSpec extends AkkaSpec(PrimitivesSerializationSpec.t "StringSerializer" must { val random = Random.nextString(256) - Seq( - "empty string" -> "", - "hello" -> "hello", - "árvíztűrőütvefúrógép" -> "árvíztűrőütvefúrógép", - "random" -> random).foreach { + Seq("empty string" -> "", "hello" -> "hello", "árvíztűrőütvefúrógép" -> "árvíztűrőütvefúrógép", "random" -> random) + .foreach { case (scenario, item) => s"resolve serializer for [$scenario]" in { val serializer = SerializationExtension(system) @@ -117,25 +115,24 @@ class PrimitivesSerializationSpec extends AkkaSpec(PrimitivesSerializationSpec.t } 
"ByteStringSerializer" must { - Seq( - "empty string" -> ByteString.empty, - "simple content" -> ByteString("hello"), - "concatenated content" -> (ByteString("hello") ++ ByteString("world")), - "sliced content" -> ByteString("helloabc").take(5)).foreach { - case (scenario, item) => - s"resolve serializer for [$scenario]" in { - val serializer = SerializationExtension(system) - serializer.serializerFor(item.getClass).getClass should ===(classOf[ByteStringSerializer]) - } + Seq("empty string" -> ByteString.empty, + "simple content" -> ByteString("hello"), + "concatenated content" -> (ByteString("hello") ++ ByteString("world")), + "sliced content" -> ByteString("helloabc").take(5)).foreach { + case (scenario, item) => + s"resolve serializer for [$scenario]" in { + val serializer = SerializationExtension(system) + serializer.serializerFor(item.getClass).getClass should ===(classOf[ByteStringSerializer]) + } - s"serialize and de-serialize [$scenario]" in { - verifySerialization(item) - } + s"serialize and de-serialize [$scenario]" in { + verifySerialization(item) + } - s"serialize and de-serialize value [$scenario] using ByteBuffers" in { - verifySerializationByteBuffer(item) - } - } + s"serialize and de-serialize value [$scenario] using ByteBuffers" in { + verifySerializationByteBuffer(item) + } + } } diff --git a/akka-remote/src/test/scala/akka/remote/serialization/ProtobufSerializerSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/ProtobufSerializerSpec.scala index a4befb6247..a01495e278 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/ProtobufSerializerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/ProtobufSerializerSpec.scala @@ -35,4 +35,3 @@ class ProtobufSerializerSpec extends AkkaSpec { } } - diff --git a/akka-remote/src/test/scala/akka/remote/serialization/SerializationTransportInformationSpec.scala b/akka-remote/src/test/scala/akka/remote/serialization/SerializationTransportInformationSpec.scala index 
7940b3d033..427a68b824 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/SerializationTransportInformationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/SerializationTransportInformationSpec.scala @@ -57,8 +57,7 @@ object SerializationTransportInformationSpec { throw new IllegalStateException("currentTransportInformation was not set") case t => if (t.system ne system) - throw new IllegalStateException( - s"wrong system in currentTransportInformation, ${t.system} != $system") + throw new IllegalStateException(s"wrong system in currentTransportInformation, ${t.system} != $system") if (t.address != system.provider.getDefaultAddress) throw new IllegalStateException( s"wrong address in currentTransportInformation, ${t.address} != ${system.provider.getDefaultAddress}") @@ -67,9 +66,11 @@ object SerializationTransportInformationSpec { } } -abstract class AbstractSerializationTransportInformationSpec(config: Config) extends AkkaSpec( - config.withFallback(ConfigFactory.parseString( - """ +abstract class AbstractSerializationTransportInformationSpec(config: Config) + extends AkkaSpec( + config.withFallback( + ConfigFactory.parseString( + """ akka { loglevel = info actor { @@ -85,7 +86,8 @@ abstract class AbstractSerializationTransportInformationSpec(config: Config) ext } } } - """))) with ImplicitSender { + """))) + with ImplicitSender { import SerializationTransportInformationSpec._ @@ -127,7 +129,8 @@ abstract class AbstractSerializationTransportInformationSpec(config: Config) ext } } -class SerializationTransportInformationSpec extends AbstractSerializationTransportInformationSpec(ConfigFactory.parseString(""" +class SerializationTransportInformationSpec + extends AbstractSerializationTransportInformationSpec(ConfigFactory.parseString(""" akka.remote.netty.tcp { hostname = localhost port = 0 diff --git a/akka-remote/src/test/scala/akka/remote/serialization/SystemMessageSerializationSpec.scala 
b/akka-remote/src/test/scala/akka/remote/serialization/SystemMessageSerializationSpec.scala index e22eadcd6e..66b4db146e 100644 --- a/akka-remote/src/test/scala/akka/remote/serialization/SystemMessageSerializationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/serialization/SystemMessageSerializationSpec.scala @@ -32,28 +32,27 @@ class SystemMessageSerializationSpec extends AkkaSpec(PrimitivesSerializationSpe val testRef2 = TestProbe().ref.asInstanceOf[InternalActorRef] "ByteStringSerializer" must { - Seq( - "Create(None)" -> Create(None), - "Recreate(ex)" -> Recreate(new TestException("test2")), - "Suspend()" -> Suspend(), - "Resume(ex)" -> Resume(new TestException("test3")), - "Terminate()" -> Terminate(), - "Supervise(ref, async)" -> Supervise(testRef, async = true), - "Watch(ref, ref)" -> Watch(testRef, testRef2), - "Unwatch(ref, ref)" -> Unwatch(testRef, testRef2), - "Failed(ref, ex, uid)" -> Failed(testRef, new TestException("test4"), 42), - "DeathWatchNotification(ref, confimed, addressTerminated)" -> + Seq("Create(None)" -> Create(None), + "Recreate(ex)" -> Recreate(new TestException("test2")), + "Suspend()" -> Suspend(), + "Resume(ex)" -> Resume(new TestException("test3")), + "Terminate()" -> Terminate(), + "Supervise(ref, async)" -> Supervise(testRef, async = true), + "Watch(ref, ref)" -> Watch(testRef, testRef2), + "Unwatch(ref, ref)" -> Unwatch(testRef, testRef2), + "Failed(ref, ex, uid)" -> Failed(testRef, new TestException("test4"), 42), + "DeathWatchNotification(ref, confimed, addressTerminated)" -> DeathWatchNotification(testRef, existenceConfirmed = true, addressTerminated = true)).foreach { - case (scenario, item) => - s"resolve serializer for [$scenario]" in { - val serializer = SerializationExtension(system) - serializer.serializerFor(item.getClass).getClass should ===(classOf[SystemMessageSerializer]) - } + case (scenario, item) => + s"resolve serializer for [$scenario]" in { + val serializer = SerializationExtension(system) + 
serializer.serializerFor(item.getClass).getClass should ===(classOf[SystemMessageSerializer]) + } - s"serialize and de-serialize [$scenario]" in { - verifySerialization(item) - } - } + s"serialize and de-serialize [$scenario]" in { + verifySerialization(item) + } + } def verifySerialization(msg: AnyRef): Unit = { val serializer = new SystemMessageSerializer(system.asInstanceOf[ExtendedActorSystem]) diff --git a/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolSpec.scala b/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolSpec.scala index 155732dac8..fc765a4cc7 100644 --- a/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolSpec.scala @@ -5,13 +5,18 @@ package akka.remote.transport import akka.actor.{ Address } -import akka.remote.transport.AkkaPduCodec.{ Disassociate, Associate, Heartbeat } +import akka.remote.transport.AkkaPduCodec.{ Associate, Disassociate, Heartbeat } import akka.remote.transport.AkkaProtocolSpec.TestFailureDetector -import akka.remote.transport.AssociationHandle.{ DisassociateInfo, ActorHandleEventListener, Disassociated, InboundPayload } +import akka.remote.transport.AssociationHandle.{ + ActorHandleEventListener, + DisassociateInfo, + Disassociated, + InboundPayload +} import akka.remote.transport.TestTransport._ import akka.remote.transport.Transport._ -import akka.remote.{ WireFormats, FailureDetector } -import akka.testkit.{ ImplicitSender, AkkaSpec } +import akka.remote.{ FailureDetector, WireFormats } +import akka.testkit.{ AkkaSpec, ImplicitSender } import akka.util.ByteString import akka.protobuf.{ ByteString => PByteString } import com.typesafe.config.ConfigFactory @@ -36,8 +41,7 @@ object AkkaProtocolSpec { class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) with ImplicitSender { - val conf = ConfigFactory.parseString( - """ + val conf = ConfigFactory.parseString(""" akka.remote { 
transport-failure-detector { @@ -70,7 +74,8 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit val codec = AkkaPduProtobufCodec - val testMsg = WireFormats.SerializedMessage.newBuilder().setSerializerId(0).setMessage(PByteString.copyFromUtf8("foo")).build + val testMsg = + WireFormats.SerializedMessage.newBuilder().setSerializerId(0).setMessage(PByteString.copyFromUtf8("foo")).build val testEnvelope = codec.constructMessage(localAkkaAddress, testActor, testMsg, OptionVal.None) val testMsgPdu: ByteString = codec.constructPayload(testEnvelope) @@ -92,48 +97,54 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit } def lastActivityIsHeartbeat(registry: AssociationRegistry) = - if (registry.logSnapshot.isEmpty) false else registry.logSnapshot.last match { - case WriteAttempt(sender, recipient, payload) if sender == localAddress && recipient == remoteAddress => - codec.decodePdu(payload) match { - case Heartbeat => true - case _ => false - } - case _ => false - } + if (registry.logSnapshot.isEmpty) false + else + registry.logSnapshot.last match { + case WriteAttempt(sender, recipient, payload) if sender == localAddress && recipient == remoteAddress => + codec.decodePdu(payload) match { + case Heartbeat => true + case _ => false + } + case _ => false + } def lastActivityIsAssociate(registry: AssociationRegistry, uid: Long, cookie: Option[String]) = - if (registry.logSnapshot.isEmpty) false else registry.logSnapshot.last match { - case WriteAttempt(sender, recipient, payload) if sender == localAddress && recipient == remoteAddress => - codec.decodePdu(payload) match { - case Associate(info) => - info.cookie == cookie && info.origin == localAddress && info.uid == uid - case _ => false - } - case _ => false - } + if (registry.logSnapshot.isEmpty) false + else + registry.logSnapshot.last match { + case WriteAttempt(sender, recipient, payload) if sender == localAddress && recipient == remoteAddress => + 
codec.decodePdu(payload) match { + case Associate(info) => + info.cookie == cookie && info.origin == localAddress && info.uid == uid + case _ => false + } + case _ => false + } def lastActivityIsDisassociate(registry: AssociationRegistry) = - if (registry.logSnapshot.isEmpty) false else registry.logSnapshot.last match { - case WriteAttempt(sender, recipient, payload) if sender == localAddress && recipient == remoteAddress => - codec.decodePdu(payload) match { - case Disassociate(_) => true - case _ => false - } - case _ => false - } + if (registry.logSnapshot.isEmpty) false + else + registry.logSnapshot.last match { + case WriteAttempt(sender, recipient, payload) if sender == localAddress && recipient == remoteAddress => + codec.decodePdu(payload) match { + case Disassociate(_) => true + case _ => false + } + case _ => false + } "ProtocolStateActor" must { "register itself as reader on injecteted handles" in { val (failureDetector, _, _, handle) = collaborators - system.actorOf(ProtocolStateActor.inboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = None), - handle, - ActorAssociationEventListener(testActor), - new AkkaProtocolSettings(conf), - codec, - failureDetector)) + system.actorOf( + ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None), + handle, + ActorAssociationEventListener(testActor), + new AkkaProtocolSettings(conf), + codec, + failureDetector)) awaitCond(handle.readHandlerPromise.isCompleted) } @@ -141,13 +152,13 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit "in inbound mode accept payload after Associate PDU received" in { val (failureDetector, registry, _, handle) = collaborators - val reader = system.actorOf(ProtocolStateActor.inboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = None), - handle, - ActorAssociationEventListener(testActor), - new AkkaProtocolSettings(conf), - codec, - failureDetector)) + val reader = system.actorOf( + 
ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None), + handle, + ActorAssociationEventListener(testActor), + new AkkaProtocolSettings(conf), + codec, + failureDetector)) reader ! testAssociate(uid = 33, cookie = None) @@ -176,13 +187,13 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit "in inbound mode disassociate when an unexpected message arrives instead of Associate" in { val (failureDetector, registry, _, handle) = collaborators - val reader = system.actorOf(ProtocolStateActor.inboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = None), - handle, - ActorAssociationEventListener(testActor), - new AkkaProtocolSettings(conf), - codec, - failureDetector)) + val reader = system.actorOf( + ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None), + handle, + ActorAssociationEventListener(testActor), + new AkkaProtocolSettings(conf), + codec, + failureDetector)) // a stray message will force a disassociate reader ! 
testHeartbeat @@ -202,15 +213,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit val statusPromise: Promise[AssociationHandle] = Promise() - val reader = system.actorOf(ProtocolStateActor.outboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = None), - remoteAddress, - statusPromise, - transport, - new AkkaProtocolSettings(conf), - codec, - failureDetector, - refuseUid = None)) + val reader = system.actorOf( + ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None), + remoteAddress, + statusPromise, + transport, + new AkkaProtocolSettings(conf), + codec, + failureDetector, + refuseUid = None)) awaitCond(lastActivityIsAssociate(registry, 42, None)) failureDetector.called should ===(true) @@ -237,13 +248,16 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit "ignore incoming associations with wrong cookie" in { val (failureDetector, registry, _, handle) = collaborators - val reader = system.actorOf(ProtocolStateActor.inboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")), - handle, - ActorAssociationEventListener(testActor), - new AkkaProtocolSettings(ConfigFactory.parseString("akka.remote.require-cookie = on").withFallback(conf)), - codec, - failureDetector)) + val reader = system.actorOf( + ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")), + handle, + ActorAssociationEventListener(testActor), + new AkkaProtocolSettings( + ConfigFactory + .parseString("akka.remote.require-cookie = on") + .withFallback(conf)), + codec, + failureDetector)) reader ! 
testAssociate(uid = 33, Some("xyzzy")) @@ -256,13 +270,16 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit "accept incoming associations with correct cookie" in { val (failureDetector, registry, _, handle) = collaborators - val reader = system.actorOf(ProtocolStateActor.inboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")), - handle, - ActorAssociationEventListener(testActor), - new AkkaProtocolSettings(ConfigFactory.parseString("akka.remote.require-cookie = on").withFallback(conf)), - codec, - failureDetector)) + val reader = system.actorOf( + ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")), + handle, + ActorAssociationEventListener(testActor), + new AkkaProtocolSettings( + ConfigFactory + .parseString("akka.remote.require-cookie = on") + .withFallback(conf)), + codec, + failureDetector)) // Send the correct cookie reader ! testAssociate(uid = 33, Some("abcde")) @@ -288,15 +305,18 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit val statusPromise: Promise[AssociationHandle] = Promise() - system.actorOf(ProtocolStateActor.outboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")), - remoteAddress, - statusPromise, - transport, - new AkkaProtocolSettings(ConfigFactory.parseString("akka.remote.require-cookie = on").withFallback(conf)), - codec, - failureDetector, - refuseUid = None)) + system.actorOf( + ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = Some("abcde")), + remoteAddress, + statusPromise, + transport, + new AkkaProtocolSettings( + ConfigFactory + .parseString("akka.remote.require-cookie = on") + .withFallback(conf)), + codec, + failureDetector, + refuseUid = None)) awaitCond(lastActivityIsAssociate(registry, uid = 42, cookie = Some("abcde"))) } @@ -307,15 +327,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = 
remote """) wit val statusPromise: Promise[AssociationHandle] = Promise() - val reader = system.actorOf(ProtocolStateActor.outboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = None), - remoteAddress, - statusPromise, - transport, - new AkkaProtocolSettings(conf), - codec, - failureDetector, - refuseUid = None)) + val reader = system.actorOf( + ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None), + remoteAddress, + statusPromise, + transport, + new AkkaProtocolSettings(conf), + codec, + failureDetector, + refuseUid = None)) awaitCond(lastActivityIsAssociate(registry, uid = 42, cookie = None)) @@ -343,15 +363,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit val statusPromise: Promise[AssociationHandle] = Promise() - val reader = system.actorOf(ProtocolStateActor.outboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = None), - remoteAddress, - statusPromise, - transport, - new AkkaProtocolSettings(conf), - codec, - failureDetector, - refuseUid = None)) + val reader = system.actorOf( + ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None), + remoteAddress, + statusPromise, + transport, + new AkkaProtocolSettings(conf), + codec, + failureDetector, + refuseUid = None)) awaitCond(lastActivityIsAssociate(registry, uid = 42, cookie = None)) @@ -379,15 +399,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit val statusPromise: Promise[AssociationHandle] = Promise() - val stateActor = system.actorOf(ProtocolStateActor.outboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = None), - remoteAddress, - statusPromise, - transport, - new AkkaProtocolSettings(conf), - codec, - failureDetector, - refuseUid = None)) + val stateActor = system.actorOf( + ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None), + remoteAddress, + 
statusPromise, + transport, + new AkkaProtocolSettings(conf), + codec, + failureDetector, + refuseUid = None)) awaitCond(lastActivityIsAssociate(registry, uid = 42, cookie = None)) @@ -418,15 +438,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit val statusPromise: Promise[AssociationHandle] = Promise() - val stateActor = system.actorOf(ProtocolStateActor.outboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = None), - remoteAddress, - statusPromise, - transport, - new AkkaProtocolSettings(conf), - codec, - failureDetector, - refuseUid = None)) + val stateActor = system.actorOf( + ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None), + remoteAddress, + statusPromise, + transport, + new AkkaProtocolSettings(conf), + codec, + failureDetector, + refuseUid = None)) awaitCond(lastActivityIsAssociate(registry, uid = 42, cookie = None)) @@ -456,18 +476,17 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit val statusPromise: Promise[AssociationHandle] = Promise() - val conf2 = ConfigFactory.parseString("akka.remote.netty.tcp.connection-timeout = 500 ms"). 
- withFallback(conf) + val conf2 = ConfigFactory.parseString("akka.remote.netty.tcp.connection-timeout = 500 ms").withFallback(conf) - val stateActor = system.actorOf(ProtocolStateActor.outboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = None), - remoteAddress, - statusPromise, - transport, - new AkkaProtocolSettings(conf2), - codec, - failureDetector, - refuseUid = None)) + val stateActor = system.actorOf( + ProtocolStateActor.outboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None), + remoteAddress, + statusPromise, + transport, + new AkkaProtocolSettings(conf2), + codec, + failureDetector, + refuseUid = None)) watch(stateActor) intercept[TimeoutException] { @@ -479,16 +498,15 @@ class AkkaProtocolSpec extends AkkaSpec("""akka.actor.provider = remote """) wit "give up inbound after connection timeout" in { val (failureDetector, registry, _, handle) = collaborators - val conf2 = ConfigFactory.parseString("akka.remote.netty.tcp.connection-timeout = 500 ms"). 
- withFallback(conf) + val conf2 = ConfigFactory.parseString("akka.remote.netty.tcp.connection-timeout = 500 ms").withFallback(conf) - val reader = system.actorOf(ProtocolStateActor.inboundProps( - HandshakeInfo(origin = localAddress, uid = 42, cookie = None), - handle, - ActorAssociationEventListener(testActor), - new AkkaProtocolSettings(conf2), - codec, - failureDetector)) + val reader = system.actorOf( + ProtocolStateActor.inboundProps(HandshakeInfo(origin = localAddress, uid = 42, cookie = None), + handle, + ActorAssociationEventListener(testActor), + new AkkaProtocolSettings(conf2), + codec, + failureDetector)) watch(reader) expectTerminated(reader) diff --git a/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolStressTest.scala b/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolStressTest.scala index 7ed1206e68..534042ef1b 100644 --- a/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolStressTest.scala +++ b/akka-remote/src/test/scala/akka/remote/transport/AkkaProtocolStressTest.scala @@ -4,18 +4,19 @@ package akka.remote.transport -import akka.testkit.{ TimingTest, DefaultTimeout, ImplicitSender, AkkaSpec } +import akka.testkit.{ AkkaSpec, DefaultTimeout, ImplicitSender, TimingTest } import com.typesafe.config.{ Config, ConfigFactory } import AkkaProtocolStressTest._ import akka.actor._ import scala.concurrent.duration._ import akka.testkit._ -import akka.remote.{ RARP, EndpointException } -import akka.remote.transport.FailureInjectorTransportAdapter.{ One, Drop } +import akka.remote.{ EndpointException, RARP } +import akka.remote.transport.FailureInjectorTransportAdapter.{ Drop, One } import scala.concurrent.Await object AkkaProtocolStressTest { - val configA: Config = ConfigFactory parseString (""" + val configA: Config = + ConfigFactory.parseString(""" akka { #loglevel = DEBUG actor.serialize-messages = off @@ -53,12 +54,13 @@ object AkkaProtocolStressTest { def receive = { case "start" => self ! 
"sendNext" - case "sendNext" => if (nextSeq < limit) { - remote ! nextSeq - nextSeq += 1 - if (nextSeq % 2000 == 0) context.system.scheduler.scheduleOnce(500.milliseconds, self, "sendNext") - else self ! "sendNext" - } + case "sendNext" => + if (nextSeq < limit) { + remote ! nextSeq + nextSeq += 1 + if (nextSeq % 2000 == 0) context.system.scheduler.scheduleOnce(500.milliseconds, self, "sendNext") + else self ! "sendNext" + } case seq: Int => if (seq > maxSeq) { losses += seq - maxSeq - 1 @@ -119,13 +121,14 @@ class AkkaProtocolStressTest extends AkkaSpec(configA) with ImplicitSender with } override def beforeTermination(): Unit = { - system.eventStream.publish(TestEvent.Mute( - EventFilter.warning(source = "akka://AkkaProtocolStressTest/user/$a", start = "received dead letter"), - EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) - systemB.eventStream.publish(TestEvent.Mute( - EventFilter[EndpointException](), - EventFilter.error(start = "AssociationError"), - EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) + system.eventStream.publish( + TestEvent.Mute( + EventFilter.warning(source = "akka://AkkaProtocolStressTest/user/$a", start = "received dead letter"), + EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) + systemB.eventStream.publish( + TestEvent.Mute(EventFilter[EndpointException](), + EventFilter.error(start = "AssociationError"), + EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) } override def afterTermination(): Unit = shutdown(systemB) diff --git a/akka-remote/src/test/scala/akka/remote/transport/GenericTransportSpec.scala b/akka-remote/src/test/scala/akka/remote/transport/GenericTransportSpec.scala index a799e3afc2..cac84bb088 100644 --- a/akka-remote/src/test/scala/akka/remote/transport/GenericTransportSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/transport/GenericTransportSpec.scala @@ -4,19 
+4,20 @@ package akka.remote.transport -import akka.actor.{ ExtendedActorSystem, Address } +import akka.actor.{ Address, ExtendedActorSystem } import akka.remote.transport.AssociationHandle.{ ActorHandleEventListener, Disassociated, InboundPayload } import akka.remote.transport.TestTransport._ import akka.remote.transport.Transport._ -import akka.testkit.{ ImplicitSender, DefaultTimeout, AkkaSpec } +import akka.testkit.{ AkkaSpec, DefaultTimeout, ImplicitSender } import akka.util.ByteString -import scala.concurrent.{ Future, Await } +import scala.concurrent.{ Await, Future } import akka.remote.RemoteActorRefProvider -import akka.remote.transport.TestTransport.{ DisassociateAttempt, WriteAttempt, ListenAttempt, AssociateAttempt } +import akka.remote.transport.TestTransport.{ AssociateAttempt, DisassociateAttempt, ListenAttempt, WriteAttempt } abstract class GenericTransportSpec(withAkkaProtocol: Boolean = false) - extends AkkaSpec("""akka.actor.provider = remote """) - with DefaultTimeout with ImplicitSender { + extends AkkaSpec("""akka.actor.provider = remote """) + with DefaultTimeout + with ImplicitSender { def transportName: String def schemeIdentifier: String @@ -32,7 +33,10 @@ abstract class GenericTransportSpec(withAkkaProtocol: Boolean = false) def wrapTransport(transport: Transport): Transport = if (withAkkaProtocol) { val provider = system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider] - new AkkaProtocolTransport(transport, system, new AkkaProtocolSettings(provider.remoteSettings.config), AkkaPduProtobufCodec) + new AkkaProtocolTransport(transport, + system, + new AkkaProtocolSettings(provider.remoteSettings.config), + AkkaPduProtobufCodec) } else transport def newTransportA(registry: AssociationRegistry): Transport = @@ -85,7 +89,9 @@ abstract class GenericTransportSpec(withAkkaProtocol: Boolean = false) awaitCond(registry.transportsReady(addressATest)) // TestTransport throws InvalidAssociationException when trying to 
associate with non-existing system - intercept[InvalidAssociationException] { Await.result(transportA.associate(nonExistingAddress), timeout.duration) } + intercept[InvalidAssociationException] { + Await.result(transportA.associate(nonExistingAddress), timeout.duration) + } } "successfully send PDUs" in { @@ -121,7 +127,7 @@ abstract class GenericTransportSpec(withAkkaProtocol: Boolean = false) registry.logSnapshot.exists { case WriteAttempt(`addressATest`, `addressBTest`, sentPdu) => sentPdu == pdu - case _ => false + case _ => false } should ===(true) } @@ -157,9 +163,9 @@ abstract class GenericTransportSpec(withAkkaProtocol: Boolean = false) awaitCond(!registry.existsAssociation(addressATest, addressBTest)) awaitCond { - registry.logSnapshot exists { + registry.logSnapshot.exists { case DisassociateAttempt(`addressATest`, `addressBTest`) => true - case _ => false + case _ => false } } } diff --git a/akka-remote/src/test/scala/akka/remote/transport/SwitchableLoggedBehaviorSpec.scala b/akka-remote/src/test/scala/akka/remote/transport/SwitchableLoggedBehaviorSpec.scala index 1c84255bfd..1b4173cac0 100644 --- a/akka-remote/src/test/scala/akka/remote/transport/SwitchableLoggedBehaviorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/transport/SwitchableLoggedBehaviorSpec.scala @@ -4,7 +4,7 @@ package akka.remote.transport -import akka.testkit.{ DefaultTimeout, AkkaSpec } +import akka.testkit.{ AkkaSpec, DefaultTimeout } import akka.remote.transport.TestTransport.SwitchableLoggedBehavior import scala.concurrent.{ Await, Future, Promise } import scala.util.Failure diff --git a/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala b/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala index 562c02eb38..808407326a 100644 --- a/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala +++ 
b/akka-remote/src/test/scala/akka/remote/transport/SystemMessageDeliveryStressTest.scala @@ -8,13 +8,13 @@ import akka.remote.transport.ThrottlerTransportAdapter._ import akka.testkit.TimingTest import akka.testkit.DefaultTimeout import akka.testkit.ImplicitSender -import akka.testkit.{ TimingTest, DefaultTimeout, ImplicitSender, AkkaSpec } +import akka.testkit.{ AkkaSpec, DefaultTimeout, ImplicitSender, TimingTest } import com.typesafe.config.{ Config, ConfigFactory } import akka.actor._ import scala.concurrent.duration._ import akka.testkit._ -import akka.remote.{ QuarantinedEvent, EndpointException, RARP } -import akka.remote.transport.FailureInjectorTransportAdapter.{ One, Drop } +import akka.remote.{ EndpointException, QuarantinedEvent, RARP } +import akka.remote.transport.FailureInjectorTransportAdapter.{ Drop, One } import scala.concurrent.Await import akka.actor.ActorRef import akka.actor.Actor @@ -34,7 +34,7 @@ object SystemMessageDeliveryStressTest { val burstSize = 100 val burstDelay = 500.millis - val baseConfig: Config = ConfigFactory parseString (s""" + val baseConfig: Config = ConfigFactory.parseString(s""" akka { #loglevel = DEBUG actor.provider = remote @@ -77,7 +77,8 @@ object SystemMessageDeliveryStressTest { } } - class SystemMessageSender(val msgCount: Int, val burstSize: Int, val burstDelay: FiniteDuration, val target: ActorRef) extends Actor { + class SystemMessageSender(val msgCount: Int, val burstSize: Int, val burstDelay: FiniteDuration, val target: ActorRef) + extends Actor { import context.dispatcher var counter = 0 @@ -106,9 +107,9 @@ object SystemMessageDeliveryStressTest { } abstract class SystemMessageDeliveryStressTest(msg: String, cfg: String) - extends AkkaSpec(ConfigFactory.parseString(cfg).withFallback(SystemMessageDeliveryStressTest.baseConfig)) - with ImplicitSender - with DefaultTimeout { + extends AkkaSpec(ConfigFactory.parseString(cfg).withFallback(SystemMessageDeliveryStressTest.baseConfig)) + with ImplicitSender + with 
DefaultTimeout { import SystemMessageDeliveryStressTest._ override def expectedTestDuration: FiniteDuration = 200.seconds @@ -130,14 +131,14 @@ abstract class SystemMessageDeliveryStressTest(msg: String, cfg: String) val targetForB = RARP(systemB).provider.resolveActorRef(RootActorPath(addressA) / "temp" / sysMsgVerifierA.path.name) override def atStartup() = { - systemA.eventStream.publish(TestEvent.Mute( - EventFilter[EndpointException](), - EventFilter.error(start = "AssociationError"), - EventFilter.warning(pattern = "received dead .*"))) - systemB.eventStream.publish(TestEvent.Mute( - EventFilter[EndpointException](), - EventFilter.error(start = "AssociationError"), - EventFilter.warning(pattern = "received dead .*"))) + systemA.eventStream.publish( + TestEvent.Mute(EventFilter[EndpointException](), + EventFilter.error(start = "AssociationError"), + EventFilter.warning(pattern = "received dead .*"))) + systemB.eventStream.publish( + TestEvent.Mute(EventFilter[EndpointException](), + EventFilter.error(start = "AssociationError"), + EventFilter.warning(pattern = "received dead .*"))) systemA.eventStream.subscribe(probeA.ref, classOf[QuarantinedEvent]) systemB.eventStream.subscribe(probeB.ref, classOf[QuarantinedEvent]) @@ -178,25 +179,24 @@ abstract class SystemMessageDeliveryStressTest(msg: String, cfg: String) } override def beforeTermination(): Unit = { - system.eventStream.publish(TestEvent.Mute( - EventFilter.warning(source = s"akka://AkkaProtocolStressTest/user/$$a", start = "received dead letter"), - EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) - systemB.eventStream.publish(TestEvent.Mute( - EventFilter[EndpointException](), - EventFilter.error(start = "AssociationError"), - EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) + system.eventStream.publish( + TestEvent.Mute( + EventFilter.warning(source = s"akka://AkkaProtocolStressTest/user/$$a", start = "received dead letter"), + 
EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) + systemB.eventStream.publish( + TestEvent.Mute(EventFilter[EndpointException](), + EventFilter.error(start = "AssociationError"), + EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) } override def afterTermination(): Unit = shutdown(systemB) } -class SystemMessageDeliveryRetryGate extends SystemMessageDeliveryStressTest( - "passive connections on", - "akka.remote.retry-gate-closed-for = 0.5 s") -class SystemMessageDeliveryNoPassiveRetryGate extends SystemMessageDeliveryStressTest( - "passive connections off", - """ +class SystemMessageDeliveryRetryGate + extends SystemMessageDeliveryStressTest("passive connections on", "akka.remote.retry-gate-closed-for = 0.5 s") +class SystemMessageDeliveryNoPassiveRetryGate + extends SystemMessageDeliveryStressTest("passive connections off", """ akka.remote.use-passive-connections = off akka.remote.retry-gate-closed-for = 0.5 s """) diff --git a/akka-remote/src/test/scala/akka/remote/transport/TestTransportSpec.scala b/akka-remote/src/test/scala/akka/remote/transport/TestTransportSpec.scala index 65ff32d2aa..54561be75f 100644 --- a/akka-remote/src/test/scala/akka/remote/transport/TestTransportSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/transport/TestTransportSpec.scala @@ -61,7 +61,9 @@ class TestTransportSpec extends AkkaSpec with DefaultTimeout with ImplicitSender Await.result(transportA.listen, timeout.duration)._2.success(ActorAssociationEventListener(self)) // TestTransport throws IllegalAssociationException when trying to associate with non-existing system - intercept[InvalidAssociationException] { Await.result(transportA.associate(nonExistingAddress), timeout.duration) } + intercept[InvalidAssociationException] { + Await.result(transportA.associate(nonExistingAddress), timeout.duration) + } } @@ -133,9 +135,9 @@ class TestTransportSpec extends AkkaSpec with DefaultTimeout with 
ImplicitSender awaitCond(!registry.existsAssociation(addressA, addressB)) - registry.logSnapshot exists { + registry.logSnapshot.exists { case DisassociateAttempt(requester, remote) if requester == addressA && remote == addressB => true - case _ => false + case _ => false } should ===(true) } diff --git a/akka-remote/src/test/scala/akka/remote/transport/ThrottlerTransportAdapterSpec.scala b/akka-remote/src/test/scala/akka/remote/transport/ThrottlerTransportAdapterSpec.scala index 460b41cfae..73290564d5 100644 --- a/akka-remote/src/test/scala/akka/remote/transport/ThrottlerTransportAdapterSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/transport/ThrottlerTransportAdapterSpec.scala @@ -4,9 +4,9 @@ package akka.remote.transport -import com.typesafe.config.{ ConfigFactory, Config } +import com.typesafe.config.{ Config, ConfigFactory } import akka.actor._ -import akka.testkit.{ TimingTest, DefaultTimeout, ImplicitSender, AkkaSpec } +import akka.testkit.{ AkkaSpec, DefaultTimeout, ImplicitSender, TimingTest } import ThrottlerTransportAdapterSpec._ import scala.concurrent.duration._ import scala.concurrent.Await @@ -17,7 +17,7 @@ import akka.testkit.EventFilter import akka.remote.EndpointException object ThrottlerTransportAdapterSpec { - val configA: Config = ConfigFactory parseString (""" + val configA: Config = ConfigFactory.parseString(""" akka { actor.provider = remote @@ -53,11 +53,12 @@ object ThrottlerTransportAdapterSpec { case "start" => self ! "sendNext" startTime = System.nanoTime() - case "sendNext" => if (messageCount > 0) { - remote ! "ping" - self ! "sendNext" - messageCount -= 1 - } + case "sendNext" => + if (messageCount > 0) { + remote ! "ping" + self ! "sendNext" + messageCount -= 1 + } case "pong" => received += 1 if (received >= MessageCount) controller ! 
(System.nanoTime() - startTime) @@ -139,13 +140,14 @@ class ThrottlerTransportAdapterSpec extends AkkaSpec(configA) with ImplicitSende } override def beforeTermination(): Unit = { - system.eventStream.publish(TestEvent.Mute( - EventFilter.warning(source = "akka://AkkaProtocolStressTest/user/$a", start = "received dead letter"), - EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) - systemB.eventStream.publish(TestEvent.Mute( - EventFilter[EndpointException](), - EventFilter.error(start = "AssociationError"), - EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) + system.eventStream.publish( + TestEvent.Mute( + EventFilter.warning(source = "akka://AkkaProtocolStressTest/user/$a", start = "received dead letter"), + EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) + systemB.eventStream.publish( + TestEvent.Mute(EventFilter[EndpointException](), + EventFilter.error(start = "AssociationError"), + EventFilter.warning(pattern = "received dead letter.*(InboundPayload|Disassociate)"))) } override def afterTermination(): Unit = shutdown(systemB) diff --git a/akka-remote/src/test/scala/akka/remote/transport/netty/NettyTransportSpec.scala b/akka-remote/src/test/scala/akka/remote/transport/netty/NettyTransportSpec.scala index e633eb95de..1d36e81a70 100644 --- a/akka-remote/src/test/scala/akka/remote/transport/netty/NettyTransportSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/transport/netty/NettyTransportSpec.scala @@ -17,8 +17,7 @@ import scala.concurrent.Await import scala.concurrent.duration.Duration object NettyTransportSpec { - val commonConfig = ConfigFactory.parseString( - """ + val commonConfig = ConfigFactory.parseString(""" akka.actor.provider = remote """) @@ -44,12 +43,11 @@ class NettyTransportSpec extends WordSpec with Matchers with BindBehavior { import akka.remote.transport.netty.NettyTransportSpec._ "NettyTransport" should { - behave like 
theOneWhoKnowsTheDifferenceBetweenBoundAndRemotingAddress("tcp") - behave like theOneWhoKnowsTheDifferenceBetweenBoundAndRemotingAddress("udp") + behave.like(theOneWhoKnowsTheDifferenceBetweenBoundAndRemotingAddress("tcp")) + behave.like(theOneWhoKnowsTheDifferenceBetweenBoundAndRemotingAddress("udp")) "bind to a random port" in { - val bindConfig = ConfigFactory.parseString( - s""" + val bindConfig = ConfigFactory.parseString(s""" akka.remote.netty.tcp { port = 0 } @@ -66,8 +64,7 @@ class NettyTransportSpec extends WordSpec with Matchers with BindBehavior { val (openSS, address) = randomOpenServerSocket() try { - val bindConfig = ConfigFactory.parseString( - s""" + val bindConfig = ConfigFactory.parseString(s""" akka.remote.netty.tcp { port = ${address.getPort} bind-port = 0 @@ -94,8 +91,7 @@ class NettyTransportSpec extends WordSpec with Matchers with BindBehavior { "bind to a specified port and remoting accepts from a bound port" in { val address = SocketUtil.temporaryServerAddress(InetAddress.getLocalHost.getHostAddress, udp = false) - val bindConfig = ConfigFactory.parseString( - s""" + val bindConfig = ConfigFactory.parseString(s""" akka.remote.netty.tcp { port = 0 bind-port = ${address.getPort} @@ -110,8 +106,7 @@ class NettyTransportSpec extends WordSpec with Matchers with BindBehavior { } "bind to multiple transports" in { - val bindConfig = ConfigFactory.parseString( - s""" + val bindConfig = ConfigFactory.parseString(s""" akka.remote { netty.tcp.port = 0 netty.udp.port = 0 @@ -127,8 +122,7 @@ class NettyTransportSpec extends WordSpec with Matchers with BindBehavior { } "bind to all interfaces" in { - val bindConfig = ConfigFactory.parseString( - s""" + val bindConfig = ConfigFactory.parseString(s""" akka.remote { netty.tcp.bind-hostname = "0.0.0.0" } @@ -136,7 +130,7 @@ class NettyTransportSpec extends WordSpec with Matchers with BindBehavior { implicit val sys = ActorSystem("sys", bindConfig.withFallback(commonConfig)) getInternal.flatMap(_.port) should 
contain(getExternal.port.get) - getInternal.map(x => x.host.get should include regex "0.0.0.0".r) // regexp dot is intentional to match IPv4 and 6 addresses + getInternal.map(x => (x.host.get should include).regex("0.0.0.0".r)) // regexp dot is intentional to match IPv4 and 6 addresses Await.result(sys.terminate(), Duration.Inf) } @@ -152,8 +146,7 @@ trait BindBehavior { s"bind to default $proto address" in { val address = SocketUtil.temporaryServerAddress(udp = proto == "udp") - val bindConfig = ConfigFactory.parseString( - s""" + val bindConfig = ConfigFactory.parseString(s""" akka.remote { netty.$proto { hostname = ${address.getAddress.getHostAddress} @@ -181,8 +174,7 @@ trait BindBehavior { null } - val bindConfig = ConfigFactory.parseString( - s""" + val bindConfig = ConfigFactory.parseString(s""" akka.remote { netty.$proto { hostname = ${address.getAddress.getHostAddress} diff --git a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala index 1067249d66..8adde956d3 100644 --- a/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala +++ b/akka-slf4j/src/main/scala/akka/event/slf4j/Slf4jLogger.scala @@ -23,11 +23,12 @@ trait SLF4JLogging { * Logger is a factory for obtaining SLF4J-Loggers */ object Logger { + /** * @param logger - which logger * @return a Logger that corresponds for the given logger name */ - def apply(logger: String): SLFLogger = SLFLoggerFactory getLogger logger + def apply(logger: String): SLFLogger = SLFLoggerFactory.getLogger(logger) /** * @param logClass - the class to log for @@ -36,7 +37,7 @@ object Logger { */ def apply(logClass: Class[_], logSource: String): SLFLogger = logClass match { case c if c == classOf[DummyClassForStringSources] => apply(logSource) - case _ => SLFLoggerFactory getLogger logClass + case _ => SLFLoggerFactory.getLogger(logClass) } /** @@ -66,15 +67,21 @@ class Slf4jLogger extends Actor with SLF4JLogging with RequiresMessageQueue[Logg 
case Error.NoCause | null => Logger(logClass, logSource).error(markerIfPresent(event), if (message != null) message.toString else null) case _ => - Logger(logClass, logSource).error(markerIfPresent(event), if (message != null) message.toString else cause.getLocalizedMessage, cause) + Logger(logClass, logSource).error(markerIfPresent(event), + if (message != null) message.toString else cause.getLocalizedMessage, + cause) } } case event @ Warning(logSource, logClass, message) => withMdc(logSource, event) { event match { - case e: LogEventWithCause => Logger(logClass, logSource).warn(markerIfPresent(event), if (message != null) message.toString else e.cause.getLocalizedMessage, e.cause) - case _ => Logger(logClass, logSource).warn(markerIfPresent(event), if (message != null) message.toString else null) + case e: LogEventWithCause => + Logger(logClass, logSource).warn(markerIfPresent(event), + if (message != null) message.toString else e.cause.getLocalizedMessage, + e.cause) + case _ => + Logger(logClass, logSource).warn(markerIfPresent(event), if (message != null) message.toString else null) } } @@ -99,8 +106,9 @@ class Slf4jLogger extends Actor with SLF4JLogging with RequiresMessageQueue[Logg MDC.put(mdcThreadAttributeName, logEvent.thread.getName) MDC.put(mdcAkkaTimestamp, formatTimestamp(logEvent.timestamp)) MDC.put(mdcActorSystemAttributeName, actorSystemName) - logEvent.mdc foreach { case (k, v) => MDC.put(k, String.valueOf(v)) } - try logStatement finally { + logEvent.mdc.foreach { case (k, v) => MDC.put(k, String.valueOf(v)) } + try logStatement + finally { MDC.remove(mdcAkkaSourceAttributeName) MDC.remove(mdcThreadAttributeName) MDC.remove(mdcAkkaTimestamp) diff --git a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala index 3251f28f7e..4e023c014e 100644 --- a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala +++ 
b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggerSpec.scala @@ -93,7 +93,7 @@ class Slf4jLoggerSpec extends AkkaSpec(Slf4jLoggerSpec.config) with BeforeAndAft s should include("akkaSource=[akka://Slf4jLoggerSpec/user/logProducer]") s should include("level=[ERROR]") s should include("logger=[akka.event.slf4j.Slf4jLoggerSpec$LogProducer]") - s should include regex (sourceThreadRegex) + (s should include).regex(sourceThreadRegex) s should include("msg=[Simulated error]") s should include("java.lang.RuntimeException: Simulated error") s should include("at akka.event.slf4j.Slf4jLoggerSpec") @@ -107,7 +107,7 @@ class Slf4jLoggerSpec extends AkkaSpec(Slf4jLoggerSpec.config) with BeforeAndAft s should include("akkaSource=[akka://Slf4jLoggerSpec/user/logProducer]") s should include("level=[INFO]") s should include("logger=[akka.event.slf4j.Slf4jLoggerSpec$LogProducer]") - s should include regex (sourceThreadRegex) + (s should include).regex(sourceThreadRegex) s should include("msg=[test x=3 y=17]") } @@ -133,7 +133,9 @@ class Slf4jLoggerSpec extends AkkaSpec(Slf4jLoggerSpec.config) with BeforeAndAft "log info with slf4j marker and MDC" in { val slf4jMarker = MarkerFactory.getMarker("SLF") slf4jMarker.add(MarkerFactory.getMarker("ADDED")) // slf4j markers can have children - producer ! StringWithSlf4jMarkerMDC("security-wise interesting message", slf4jMarker, Map("ticketNumber" -> 3671, "ticketDesc" -> "Custom MDC Values")) + producer ! StringWithSlf4jMarkerMDC("security-wise interesting message", + slf4jMarker, + Map("ticketNumber" -> 3671, "ticketDesc" -> "Custom MDC Values")) awaitCond(outputString.contains("----"), 5 seconds) val s = outputString @@ -143,14 +145,15 @@ class Slf4jLoggerSpec extends AkkaSpec(Slf4jLoggerSpec.config) with BeforeAndAft } "put custom MDC values when specified" in { - producer ! StringWithMDC("Message with custom MDC values", Map("ticketNumber" -> 3671, "ticketDesc" -> "Custom MDC Values")) + producer ! 
StringWithMDC("Message with custom MDC values", + Map("ticketNumber" -> 3671, "ticketDesc" -> "Custom MDC Values")) awaitCond(outputString.contains("----"), 5 seconds) val s = outputString s should include("akkaSource=[akka://Slf4jLoggerSpec/user/logProducer]") s should include("level=[INFO]") s should include("logger=[akka.event.slf4j.Slf4jLoggerSpec$LogProducer]") - s should include regex (sourceThreadRegex) + (s should include).regex(sourceThreadRegex) s should include("mdc=[ticket-#3671: Custom MDC Values]") s should include("msg=[Message with custom MDC values]") } @@ -171,7 +174,7 @@ class Slf4jLoggerSpec extends AkkaSpec(Slf4jLoggerSpec.config) with BeforeAndAft s should include("akkaSource=[akka://Slf4jLoggerSpec/user/logProducer]") s should include("level=[INFO]") s should include("logger=[akka.event.slf4j.Slf4jLoggerSpec$LogProducer]") - s should include regex (sourceThreadRegex) + (s should include).regex(sourceThreadRegex) s should include("mdc=[ticket-#3671: null]") s should include("msg=[Message with null custom MDC values]") } diff --git a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala index 838991c057..ca0fe83c1c 100644 --- a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala +++ b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jLoggingFilterSpec.scala @@ -43,7 +43,7 @@ object Slf4jLoggingFilterSpec { ref ! ("OK") case event: LogEvent => println("# event: " + event) - target foreach { _ ! event } + target.foreach { _ ! 
event } } } diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala index 128b0c980a..159d8350fc 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/StreamTestKit.scala @@ -64,19 +64,22 @@ object TestPublisher { /** * Probe that implements [[org.reactivestreams.Publisher]] interface. */ - def manualProbe[T](autoOnSubscribe: Boolean = true)(implicit system: ActorSystem): ManualProbe[T] = new ManualProbe(autoOnSubscribe) + def manualProbe[T](autoOnSubscribe: Boolean = true)(implicit system: ActorSystem): ManualProbe[T] = + new ManualProbe(autoOnSubscribe) /** * Probe that implements [[org.reactivestreams.Publisher]] interface and tracks demand. */ - def probe[T](initialPendingRequests: Long = 0)(implicit system: ActorSystem): Probe[T] = new Probe(initialPendingRequests) + def probe[T](initialPendingRequests: Long = 0)(implicit system: ActorSystem): Probe[T] = + new Probe(initialPendingRequests) /** * Implementation of [[org.reactivestreams.Publisher]] that allows various assertions. * This probe does not track demand. Therefore you need to expect demand before sending * elements downstream. 
*/ - class ManualProbe[I] private[TestPublisher] (autoOnSubscribe: Boolean = true)(implicit system: ActorSystem) extends Publisher[I] { + class ManualProbe[I] private[TestPublisher] (autoOnSubscribe: Boolean = true)(implicit system: ActorSystem) + extends Publisher[I] { type Self <: ManualProbe[I] @@ -104,9 +107,8 @@ object TestPublisher { } def executeAfterSubscription[T](f: => T): T = { - subscribed.await( - probe.testKitSettings.DefaultTimeout.duration.length, - probe.testKitSettings.DefaultTimeout.duration.unit) + subscribed.await(probe.testKitSettings.DefaultTimeout.duration.length, + probe.testKitSettings.DefaultTimeout.duration.unit) f } @@ -114,7 +116,9 @@ object TestPublisher { * Expect a subscription. */ def expectSubscription(): PublisherProbeSubscription[I] = - executeAfterSubscription { probe.expectMsgType[Subscribe].subscription.asInstanceOf[PublisherProbeSubscription[I]] } + executeAfterSubscription { + probe.expectMsgType[Subscribe].subscription.asInstanceOf[PublisherProbeSubscription[I]] + } /** * Expect demand from a given subscription. @@ -163,7 +167,9 @@ object TestPublisher { /** * Receive messages for a given duration or until one does not match a given partial function. 
*/ - def receiveWhile[T](max: Duration = Duration.Undefined, idle: Duration = Duration.Inf, messages: Int = Int.MaxValue)(f: PartialFunction[PublisherEvent, T]): immutable.Seq[T] = + def receiveWhile[T](max: Duration = Duration.Undefined, + idle: Duration = Duration.Inf, + messages: Int = Int.MaxValue)(f: PartialFunction[PublisherEvent, T]): immutable.Seq[T] = executeAfterSubscription { probe.receiveWhile(max, idle, messages)(f.asInstanceOf[PartialFunction[AnyRef, T]]) } def expectEventPF[T](f: PartialFunction[PublisherEvent, T]): T = @@ -187,7 +193,9 @@ object TestPublisher { * } * }}} */ - def within[T](min: FiniteDuration, max: FiniteDuration)(f: => T): T = executeAfterSubscription { probe.within(min, max)(f) } + def within[T](min: FiniteDuration, max: FiniteDuration)(f: => T): T = executeAfterSubscription { + probe.within(min, max)(f) + } /** * Same as calling `within(0 seconds, max)(f)`. @@ -198,7 +206,8 @@ object TestPublisher { /** * Single subscription and demand tracking for [[TestPublisher.ManualProbe]]. */ - class Probe[T] private[TestPublisher] (initialPendingRequests: Long)(implicit system: ActorSystem) extends ManualProbe[T] { + class Probe[T] private[TestPublisher] (initialPendingRequests: Long)(implicit system: ActorSystem) + extends ManualProbe[T] { type Self = Probe[T] @@ -363,7 +372,8 @@ object TestSubscriber { * * Expect multiple stream elements. */ - @annotation.varargs def expectNext(e1: I, e2: I, es: I*): Self = + @annotation.varargs + def expectNext(e1: I, e2: I, es: I*): Self = expectNextN((e1 +: e2 +: es).iterator.map(identity).to(immutable.IndexedSeq)) /** @@ -371,7 +381,8 @@ object TestSubscriber { * * Expect multiple stream elements in arbitrary order. 
*/ - @annotation.varargs def expectNextUnordered(e1: I, e2: I, es: I*): Self = + @annotation.varargs + def expectNextUnordered(e1: I, e2: I, es: I*): Self = expectNextUnorderedN((e1 +: e2 +: es).iterator.map(identity).to(immutable.IndexedSeq)) /** @@ -403,7 +414,8 @@ object TestSubscriber { * Expect the given elements to be signalled in any order. */ def expectNextUnorderedN(all: immutable.Seq[I]): Self = { - @annotation.tailrec def expectOneOf(all: immutable.Seq[I]): Unit = all match { + @annotation.tailrec + def expectOneOf(all: immutable.Seq[I]): Unit = all match { case Nil => case list => val next = expectNext() @@ -662,17 +674,21 @@ object TestSubscriber { /** * Receive messages for a given duration or until one does not match a given partial function. */ - def receiveWhile[T](max: Duration = Duration.Undefined, idle: Duration = Duration.Inf, messages: Int = Int.MaxValue)(f: PartialFunction[SubscriberEvent, T]): immutable.Seq[T] = + def receiveWhile[T](max: Duration = Duration.Undefined, + idle: Duration = Duration.Inf, + messages: Int = Int.MaxValue)(f: PartialFunction[SubscriberEvent, T]): immutable.Seq[T] = probe.receiveWhile(max, idle, messages)(f.asInstanceOf[PartialFunction[AnyRef, T]]) /** * Drains a given number of messages */ def receiveWithin(max: FiniteDuration, messages: Int = Int.MaxValue): immutable.Seq[I] = - probe.receiveWhile(max, max, messages) { - case OnNext(i) => Some(i.asInstanceOf[I]) - case _ => None - }.flatten + probe + .receiveWhile(max, max, messages) { + case OnNext(i) => Some(i.asInstanceOf[I]) + case _ => None + } + .flatten /** * Attempt to drain the stream into a strict collection (by requesting `Long.MaxValue` elements). @@ -686,7 +702,9 @@ object TestSubscriber { @tailrec def drain(): immutable.Seq[I] = self.expectEvent(deadline.timeLeft) match { case OnError(ex) => - throw new AssertionError(s"toStrict received OnError while draining stream! 
Accumulated elements: ${b.result()}", ex) + throw new AssertionError( + s"toStrict received OnError while draining stream! Accumulated elements: ${b.result()}", + ex) case OnComplete => b.result() case OnNext(i: I @unchecked) => @@ -798,7 +816,8 @@ private[testkit] object StreamTestKit { override def cancel(): Unit = () } - final case class PublisherProbeSubscription[I](subscriber: Subscriber[_ >: I], publisherProbe: TestProbe) extends Subscription { + final case class PublisherProbeSubscription[I](subscriber: Subscriber[_ >: I], publisherProbe: TestProbe) + extends Subscription { def request(elements: Long): Unit = publisherProbe.ref ! RequestMore(this, elements) def cancel(): Unit = publisherProbe.ref ! CancelSubscription(this) @@ -819,22 +838,28 @@ private[testkit] object StreamTestKit { def sendOnSubscribe(): Unit = subscriber.onSubscribe(this) } - final class ProbeSource[T](val attributes: Attributes, shape: SourceShape[T])(implicit system: ActorSystem) extends SourceModule[T, TestPublisher.Probe[T]](shape) { + final class ProbeSource[T](val attributes: Attributes, shape: SourceShape[T])(implicit system: ActorSystem) + extends SourceModule[T, TestPublisher.Probe[T]](shape) { override def create(context: MaterializationContext) = { val probe = TestPublisher.probe[T]() (probe, probe) } - override protected def newInstance(shape: SourceShape[T]): SourceModule[T, TestPublisher.Probe[T]] = new ProbeSource[T](attributes, shape) - override def withAttributes(attr: Attributes): SourceModule[T, TestPublisher.Probe[T]] = new ProbeSource[T](attr, amendShape(attr)) + override protected def newInstance(shape: SourceShape[T]): SourceModule[T, TestPublisher.Probe[T]] = + new ProbeSource[T](attributes, shape) + override def withAttributes(attr: Attributes): SourceModule[T, TestPublisher.Probe[T]] = + new ProbeSource[T](attr, amendShape(attr)) } - final class ProbeSink[T](val attributes: Attributes, shape: SinkShape[T])(implicit system: ActorSystem) extends SinkModule[T, 
TestSubscriber.Probe[T]](shape) { + final class ProbeSink[T](val attributes: Attributes, shape: SinkShape[T])(implicit system: ActorSystem) + extends SinkModule[T, TestSubscriber.Probe[T]](shape) { override def create(context: MaterializationContext) = { val probe = TestSubscriber.probe[T]() (probe, probe) } - override protected def newInstance(shape: SinkShape[T]): SinkModule[T, TestSubscriber.Probe[T]] = new ProbeSink[T](attributes, shape) - override def withAttributes(attr: Attributes): SinkModule[T, TestSubscriber.Probe[T]] = new ProbeSink[T](attr, amendShape(attr)) + override protected def newInstance(shape: SinkShape[T]): SinkModule[T, TestSubscriber.Probe[T]] = + new ProbeSink[T](attributes, shape) + override def withAttributes(attr: Attributes): SinkModule[T, TestSubscriber.Probe[T]] = + new ProbeSink[T](attr, amendShape(attr)) } } diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala index e1ff5fc7e4..d67d10baaa 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/TestGraphStage.scala @@ -41,15 +41,13 @@ object TestSinkStage { * This allows for creation of a "normal" stream ending with the sink while still being * able to assert internal events. 
*/ - def apply[T, M]( - stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M], - probe: TestProbe): Sink[T, M] = Sink.fromGraph(new TestSinkStage(stageUnderTest, probe)) + def apply[T, M](stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M], probe: TestProbe): Sink[T, M] = + Sink.fromGraph(new TestSinkStage(stageUnderTest, probe)) } -private[testkit] class TestSinkStage[T, M]( - stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M], - probe: TestProbe) - extends GraphStageWithMaterializedValue[SinkShape[T], M] { +private[testkit] class TestSinkStage[T, M](stageUnderTest: GraphStageWithMaterializedValue[SinkShape[T], M], + probe: TestProbe) + extends GraphStageWithMaterializedValue[SinkShape[T], M] { val in = Inlet[T]("testSinkStage.in") override val shape: SinkShape[T] = SinkShape.of(in) @@ -106,16 +104,13 @@ object TestSourceStage { * This allows for creation of a "normal" stream starting with the source while still being * able to assert internal events. 
*/ - def apply[T, M]( - stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M], - probe: TestProbe): Source[T, M] = + def apply[T, M](stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M], probe: TestProbe): Source[T, M] = Source.fromGraph(new TestSourceStage(stageUnderTest, probe)) } -private[testkit] class TestSourceStage[T, M]( - stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M], - probe: TestProbe) - extends GraphStageWithMaterializedValue[SourceShape[T], M] { +private[testkit] class TestSourceStage[T, M](stageUnderTest: GraphStageWithMaterializedValue[SourceShape[T], M], + probe: TestProbe) + extends GraphStageWithMaterializedValue[SourceShape[T], M] { val out = Outlet[T]("testSourceStage.out") override val shape: SourceShape[T] = SourceShape.of(out) diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala index cfc1d80656..fa775707f7 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/StreamTestKit.scala @@ -51,9 +51,7 @@ object StreamTestKit { try probe.awaitAssert { supervisor.tell(StreamSupervisor.GetChildren, probe.ref) val children = probe.expectMsgType[StreamSupervisor.Children].children - assert( - children.isEmpty, - s"expected no StreamSupervisor children, but got [${children.mkString(", ")}]") + assert(children.isEmpty, s"expected no StreamSupervisor children, but got [${children.mkString(", ")}]") } catch { case ex: Throwable => import sys.dispatcher @@ -65,10 +63,9 @@ object StreamTestKit { /** INTERNAL API */ @InternalApi private[akka] def printDebugDump(streamSupervisor: ActorRef)(implicit ec: ExecutionContext): Unit = { - val doneDumping = MaterializerState.requestFromSupervisor(streamSupervisor) - .map(snapshots => - snapshots.foreach(s => 
println(snapshotString(s.asInstanceOf[StreamSnapshotImpl])) - )) + val doneDumping = MaterializerState + .requestFromSupervisor(streamSupervisor) + .map(snapshots => snapshots.foreach(s => println(snapshotString(s.asInstanceOf[StreamSnapshotImpl])))) Await.result(doneDumping, 5.seconds) } @@ -98,7 +95,8 @@ object StreamTestKit { builder.append("GraphInterpreterShell(\n logics: [\n") val logicsToPrint = shell.logics logicsToPrint.foreach { logic => - builder.append(" ") + builder + .append(" ") .append(logic.label) .append(" attrs: [") .append(logic.attributes.attributeList.mkString(", ")) @@ -109,7 +107,8 @@ object StreamTestKit { case running: RunningInterpreter => builder.append("\n ],\n connections: [\n") running.connections.foreach { connection => - builder.append(" ") + builder + .append(" ") .append("Connection(") .append(connection.asInstanceOf[ConnectionSnapshotImpl].id) .append(", ") @@ -157,7 +156,8 @@ object StreamTestKit { } builder.append("}\n================================================================\n") - builder.append(s"// ${snapshot.queueStatus} (running=${snapshot.runningLogicsCount}, shutdown=${snapshot.stoppedLogics.mkString(",")})") + builder.append( + s"// ${snapshot.queueStatus} (running=${snapshot.runningLogicsCount}, shutdown=${snapshot.stoppedLogics.mkString(",")})") builder.toString() } catch { case _: NoSuchElementException => builder.append("Not all logics has a stage listed, cannot create graph") @@ -165,4 +165,3 @@ object StreamTestKit { } } - diff --git a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSource.scala b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSource.scala index 1641aa1f5b..c75f9450aa 100644 --- a/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSource.scala +++ b/akka-stream-testkit/src/main/scala/akka/stream/testkit/scaladsl/TestSource.scala @@ -20,6 +20,7 @@ object TestSource { /** * A Source that materializes to a 
[[akka.stream.testkit.TestPublisher.Probe]]. */ - def probe[T](implicit system: ActorSystem) = Source.fromGraph[T, TestPublisher.Probe[T]](new ProbeSource(none, SourceShape(Outlet("ProbeSource.out")))) + def probe[T](implicit system: ActorSystem) = + Source.fromGraph[T, TestPublisher.Probe[T]](new ProbeSource(none, SourceShape(Outlet("ProbeSource.out")))) } diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/BaseTwoStreamsSetup.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/BaseTwoStreamsSetup.scala index ed0e615bca..d36e219596 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/BaseTwoStreamsSetup.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/BaseTwoStreamsSetup.scala @@ -14,8 +14,7 @@ import akka.testkit.AkkaSpec abstract class BaseTwoStreamsSetup extends AkkaSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val materializer = ActorMaterializer(settings) diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala index ec5f4e9ce7..363e7ba4ed 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ChainSetup.scala @@ -12,19 +12,20 @@ import org.reactivestreams.Publisher import akka.stream.ActorMaterializer class ChainSetup[In, Out, M]( - stream: Flow[In, In, NotUsed] => Flow[In, Out, M], - val settings: ActorMaterializerSettings, - materializer: ActorMaterializer, - toPublisher: (Source[Out, _], ActorMaterializer) => Publisher[Out])(implicit val system: ActorSystem) { + stream: Flow[In, In, NotUsed] => Flow[In, Out, M], + val settings: ActorMaterializerSettings, + materializer: ActorMaterializer, + toPublisher: (Source[Out, _], ActorMaterializer) => 
Publisher[Out])(implicit val system: ActorSystem) { - def this(stream: Flow[In, In, NotUsed] => Flow[In, Out, M], settings: ActorMaterializerSettings, toPublisher: (Source[Out, _], ActorMaterializer) => Publisher[Out])(implicit system: ActorSystem) = + def this(stream: Flow[In, In, NotUsed] => Flow[In, Out, M], + settings: ActorMaterializerSettings, + toPublisher: (Source[Out, _], ActorMaterializer) => Publisher[Out])(implicit system: ActorSystem) = this(stream, settings, ActorMaterializer(settings)(system), toPublisher)(system) - def this( - stream: Flow[In, In, NotUsed] => Flow[In, Out, M], - settings: ActorMaterializerSettings, - materializerCreator: (ActorMaterializerSettings, ActorRefFactory) => ActorMaterializer, - toPublisher: (Source[Out, _], ActorMaterializer) => Publisher[Out])(implicit system: ActorSystem) = + def this(stream: Flow[In, In, NotUsed] => Flow[In, Out, M], + settings: ActorMaterializerSettings, + materializerCreator: (ActorMaterializerSettings, ActorRefFactory) => ActorMaterializer, + toPublisher: (Source[Out, _], ActorMaterializer) => Publisher[Out])(implicit system: ActorSystem) = this(stream, settings, materializerCreator(settings, system), toPublisher)(system) val upstream = TestPublisher.manualProbe[In]() diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala index 6d0d091e60..f0d5675b4c 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/ScriptedTest.scala @@ -36,18 +36,23 @@ trait ScriptedTest extends Matchers { jumps ++= Vector.fill(ins.size - 1)(0) ++ Vector(outs.size) } - new Script(providedInputs, expectedOutputs, jumps, inputCursor = 0, outputCursor = 0, outputEndCursor = 0, completed = false) + new Script(providedInputs, + expectedOutputs, + jumps, + inputCursor = 0, + outputCursor = 0, + outputEndCursor = 0, + completed = 
false) } } - final class Script[In, Out]( - val providedInputs: Vector[In], - val expectedOutputs: Vector[Out], - val jumps: Vector[Int], - val inputCursor: Int, - val outputCursor: Int, - val outputEndCursor: Int, - val completed: Boolean) { + final class Script[In, Out](val providedInputs: Vector[In], + val expectedOutputs: Vector[Out], + val jumps: Vector[Int], + val inputCursor: Int, + val outputCursor: Int, + val outputEndCursor: Int, + val completed: Boolean) { require(jumps.size == providedInputs.size) def provideInput: (In, Script[In, Out]) = @@ -55,21 +60,36 @@ trait ScriptedTest extends Matchers { throw new ScriptException("Script cannot provide more input.") else (providedInputs(inputCursor), - new Script(providedInputs, expectedOutputs, jumps, inputCursor = inputCursor + 1, - outputCursor, outputEndCursor = outputEndCursor + jumps(inputCursor), completed)) + new Script(providedInputs, + expectedOutputs, + jumps, + inputCursor = inputCursor + 1, + outputCursor, + outputEndCursor = outputEndCursor + jumps(inputCursor), + completed)) def consumeOutput(out: Out): Script[In, Out] = { if (noOutsPending) throw new ScriptException(s"Tried to produce element ${out} but no elements should be produced right now.") out should be(expectedOutputs(outputCursor)) - new Script(providedInputs, expectedOutputs, jumps, inputCursor, - outputCursor = outputCursor + 1, outputEndCursor, completed) + new Script(providedInputs, + expectedOutputs, + jumps, + inputCursor, + outputCursor = outputCursor + 1, + outputEndCursor, + completed) } def complete(): Script[In, Out] = { if (finished) - new Script(providedInputs, expectedOutputs, jumps, inputCursor, - outputCursor = outputCursor + 1, outputEndCursor, completed = true) + new Script(providedInputs, + expectedOutputs, + jumps, + inputCursor, + outputCursor = outputCursor + 1, + outputEndCursor, + completed = true) else fail("received onComplete prematurely") } @@ -85,17 +105,19 @@ trait ScriptedTest extends Matchers { def 
noInsPending: Boolean = pendingIns == 0 def someInsPending: Boolean = !noInsPending - def debug: String = s"Script(pending=($pendingIns in, $pendingOuts out), remainingIns=${providedInputs.drop(inputCursor).mkString("/")}, remainingOuts=${expectedOutputs.drop(outputCursor).mkString("/")})" + def debug: String = + s"Script(pending=($pendingIns in, $pendingOuts out), remainingIns=${providedInputs + .drop(inputCursor) + .mkString("/")}, remainingOuts=${expectedOutputs.drop(outputCursor).mkString("/")})" } - class ScriptRunner[In, Out, M]( - op: Flow[In, In, NotUsed] => Flow[In, Out, M], - settings: ActorMaterializerSettings, - script: Script[In, Out], - maximumOverrun: Int, - maximumRequest: Int, - maximumBuffer: Int)(implicit _system: ActorSystem) - extends ChainSetup(op, settings, toPublisher) { + class ScriptRunner[In, Out, M](op: Flow[In, In, NotUsed] => Flow[In, Out, M], + settings: ActorMaterializerSettings, + script: Script[In, Out], + maximumOverrun: Int, + maximumRequest: Int, + maximumBuffer: Int)(implicit _system: ActorSystem) + extends ChainSetup(op, settings, toPublisher) { var _debugLog = Vector.empty[String] var currentScript = script @@ -125,7 +147,8 @@ trait ScriptedTest extends Matchers { outstandingDemand += demand } - def mayProvideInput: Boolean = currentScript.someInsPending && (pendingRequests > 0) && (currentScript.pendingOuts <= maximumBuffer) + def mayProvideInput: Boolean = + currentScript.someInsPending && (pendingRequests > 0) && (currentScript.pendingOuts <= maximumBuffer) def mayRequestMore: Boolean = remainingDemand > 0 def shakeIt(): Boolean = { @@ -151,7 +174,7 @@ trait ScriptedTest extends Matchers { true case _ => false // Ignore } - (u ++ d) exists (x => x) + (u ++ d).exists(x => x) } def run(): Unit = { @@ -198,8 +221,12 @@ trait ScriptedTest extends Matchers { } - def runScript[In, Out, M](script: Script[In, Out], settings: ActorMaterializerSettings, maximumOverrun: Int = 3, maximumRequest: Int = 3, maximumBuffer: Int = 3)( - op: 
Flow[In, In, NotUsed] => Flow[In, Out, M])(implicit system: ActorSystem): Unit = { + def runScript[In, Out, M]( + script: Script[In, Out], + settings: ActorMaterializerSettings, + maximumOverrun: Int = 3, + maximumRequest: Int = 3, + maximumBuffer: Int = 3)(op: Flow[In, In, NotUsed] => Flow[In, Out, M])(implicit system: ActorSystem): Unit = { new ScriptRunner(op, settings, script, maximumOverrun, maximumRequest, maximumBuffer).run() } diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamSpec.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamSpec.scala index d0eb004c40..b40ae31942 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamSpec.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamSpec.scala @@ -16,9 +16,7 @@ import scala.concurrent.duration._ class StreamSpec(_system: ActorSystem) extends AkkaSpec(_system) { def this(config: Config) = - this(ActorSystem( - AkkaSpec.getCallerName(getClass), - ConfigFactory.load(config.withFallback(AkkaSpec.testConf)))) + this(ActorSystem(AkkaSpec.getCallerName(getClass), ConfigFactory.load(config.withFallback(AkkaSpec.testConf)))) def this(s: String) = this(ConfigFactory.parseString(s)) @@ -35,18 +33,20 @@ class StreamSpec(_system: ActorSystem) extends AkkaSpec(_system) { // FIXME correction - I'm not sure this works at _all_ - supposed to dump stream state if test fails val streamSupervisors = system.actorSelection("/user/" + StreamSupervisor.baseName + "*") streamSupervisors.tell(StreamSupervisor.GetChildren, probe.ref) - val children: Seq[ActorRef] = probe.receiveWhile(2.seconds) { - case StreamSupervisor.Children(children) => children - }.flatten + val children: Seq[ActorRef] = probe + .receiveWhile(2.seconds) { + case StreamSupervisor.Children(children) => children + } + .flatten println("--- Stream actors debug dump ---") if (children.isEmpty) println("Stream is completed. 
No debug information is available") else { println("Stream actors alive: " + children) - Future.sequence(children.map(MaterializerState.requestFromChild)) + Future + .sequence(children.map(MaterializerState.requestFromChild)) .foreach(snapshots => snapshots.foreach(s => - akka.stream.testkit.scaladsl.StreamTestKit.snapshotString(s.asInstanceOf[StreamSnapshotImpl])) - ) + akka.stream.testkit.scaladsl.StreamTestKit.snapshotString(s.asInstanceOf[StreamSnapshotImpl]))) } failed case other => other diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala index 131ce45f2f..718a70a34d 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestDefaultMailbox.scala @@ -19,7 +19,9 @@ import akka.actor.Actor * This mailbox is only used in tests to verify that stream actors are using * the dispatcher defined in ActorMaterializerSettings. 
*/ -private[akka] final case class StreamTestDefaultMailbox() extends MailboxType with ProducesMessageQueue[UnboundedMailbox.MessageQueue] { +private[akka] final case class StreamTestDefaultMailbox() + extends MailboxType + with ProducesMessageQueue[UnboundedMailbox.MessageQueue] { def this(settings: ActorSystem.Settings, config: Config) = this() @@ -27,14 +29,14 @@ private[akka] final case class StreamTestDefaultMailbox() extends MailboxType wi owner match { case Some(r: ActorRefWithCell) => val actorClass = r.underlying.props.actorClass - assert(actorClass != classOf[Actor], s"Don't use anonymous actor classes, actor class for $r was [${actorClass.getName}]") + assert(actorClass != classOf[Actor], + s"Don't use anonymous actor classes, actor class for $r was [${actorClass.getName}]") // StreamTcpManager is allowed to use another dispatcher - assert( - !actorClass.getName.startsWith("akka.stream."), - s"$r with actor class [${actorClass.getName}] must not run on default dispatcher in tests. " + - "Did you forget to define `props.withDispatcher` when creating the actor? " + - "Or did you forget to configure the `akka.stream.materializer` setting accordingly or force the " + - """dispatcher using `ActorMaterializerSettings(sys).withDispatcher("akka.test.stream-dispatcher")` in the test?""") + assert(!actorClass.getName.startsWith("akka.stream."), + s"$r with actor class [${actorClass.getName}] must not run on default dispatcher in tests. " + + "Did you forget to define `props.withDispatcher` when creating the actor? 
" + + "Or did you forget to configure the `akka.stream.materializer` setting accordingly or force the " + + """dispatcher using `ActorMaterializerSettings(sys).withDispatcher("akka.test.stream-dispatcher")` in the test?""") case _ => } new UnboundedMailbox.MessageQueue diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala index 965d6a5e5d..82c672ca25 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/StreamTestKitSpec.scala @@ -22,27 +22,29 @@ class StreamTestKitSpec extends AkkaSpec { "A TestSink Probe" must { "#toStrict" in { - Source(1 to 4).runWith(TestSink.probe) - .toStrict(remainingOrDefault) should ===(List(1, 2, 3, 4)) + Source(1 to 4).runWith(TestSink.probe).toStrict(remainingOrDefault) should ===(List(1, 2, 3, 4)) } "#toStrict with failing source" in { system.eventStream.publish(Mute(EventFilter[Exception]())) try { val error = intercept[AssertionError] { - Source.fromIterator(() => new Iterator[Int] { - var i = 0 + Source + .fromIterator(() => + new Iterator[Int] { + var i = 0 - override def hasNext: Boolean = true + override def hasNext: Boolean = true - override def next(): Int = { - i += 1 - i match { - case 3 => throw ex - case n => n - } - } - }).runWith(TestSink.probe) + override def next(): Int = { + i += 1 + i match { + case 3 => throw ex + case n => n + } + } + }) + .runWith(TestSink.probe) .toStrict(remainingOrDefault) } @@ -61,84 +63,61 @@ class StreamTestKitSpec extends AkkaSpec { } "#expectNextOrError with right element" in { - Source(1 to 4).runWith(TestSink.probe) - .request(4) - .expectNextOrError(1, ex) + Source(1 to 4).runWith(TestSink.probe).request(4).expectNextOrError(1, ex) } "#expectNextOrError with right exception" in { - Source.failed[Int](ex).runWith(TestSink.probe) - .request(4) - .expectNextOrError(1, ex) + 
Source.failed[Int](ex).runWith(TestSink.probe).request(4).expectNextOrError(1, ex) } "#expectNextOrError fail if the next element is not the expected one" in { intercept[AssertionError] { - Source(1 to 4).runWith(TestSink.probe) - .request(4) - .expectNextOrError(100, ex) + Source(1 to 4).runWith(TestSink.probe).request(4).expectNextOrError(100, ex) }.getMessage should include("OnNext(1)") } "#expectError" in { - Source.failed[Int](ex).runWith(TestSink.probe) - .request(1) - .expectError() should ===(ex) + Source.failed[Int](ex).runWith(TestSink.probe).request(1).expectError() should ===(ex) } "#expectError fail if no error signalled" in { intercept[AssertionError] { - Source(1 to 4).runWith(TestSink.probe) - .request(1) - .expectError() + Source(1 to 4).runWith(TestSink.probe).request(1).expectError() }.getMessage should include("OnNext") } "#expectComplete should fail if error signalled" in { intercept[AssertionError] { - Source.failed[Int](ex).runWith(TestSink.probe) - .request(1) - .expectComplete() + Source.failed[Int](ex).runWith(TestSink.probe).request(1).expectComplete() }.getMessage should include("OnError") } "#expectComplete should fail if next element signalled" in { intercept[AssertionError] { - Source(1 to 4).runWith(TestSink.probe) - .request(1) - .expectComplete() + Source(1 to 4).runWith(TestSink.probe).request(1).expectComplete() }.getMessage should include("OnNext") } "#expectNextOrComplete with right element" in { - Source(1 to 4).runWith(TestSink.probe) - .request(4) - .expectNextOrComplete(1) + Source(1 to 4).runWith(TestSink.probe).request(4).expectNextOrComplete(1) } "#expectNextOrComplete with completion" in { - Source.single(1).runWith(TestSink.probe) - .request(4) - .expectNextOrComplete(1) - .expectNextOrComplete(1337) + Source.single(1).runWith(TestSink.probe).request(4).expectNextOrComplete(1).expectNextOrComplete(1337) } "#expectNextPF should pass with right element" in { - val result = Source.single(1).runWith(TestSink.probe) - 
.request(1) - .expectNextPF { - case 1 => "success" - } + val result = Source.single(1).runWith(TestSink.probe).request(1).expectNextPF { + case 1 => "success" + } result should be("success") } "#expectNextPF should fail with wrong element" in { intercept[AssertionError] { - Source.single(1).runWith(TestSink.probe) - .request(1) - .expectNextPF { - case 2 => - } + Source.single(1).runWith(TestSink.probe).request(1).expectNextPF { + case 2 => + } }.getMessage should include("message matching partial function") } @@ -146,7 +125,9 @@ class StreamTestKitSpec extends AkkaSpec { intercept[AssertionError] { val timeout = 100.millis val overTimeout = timeout + 50.millis - Source.tick(overTimeout, 1.millis, 1).runWith(TestSink.probe) + Source + .tick(overTimeout, 1.millis, 1) + .runWith(TestSink.probe) .request(1) .expectNextWithTimeoutPF(timeout, { case 1 => @@ -156,28 +137,26 @@ class StreamTestKitSpec extends AkkaSpec { } "#expectNextChainingPF should pass with right element" in { - Source.single(1).runWith(TestSink.probe) - .request(1) - .expectNextChainingPF { - case 1 => - } + Source.single(1).runWith(TestSink.probe).request(1).expectNextChainingPF { + case 1 => + } } "#expectNextChainingPF should allow to chain test methods" in { - Source(1 to 2).runWith(TestSink.probe) + Source(1 to 2) + .runWith(TestSink.probe) .request(2) .expectNextChainingPF { case 1 => - }.expectNext(2) + } + .expectNext(2) } "#expectNextChainingPF should fail with wrong element" in { intercept[AssertionError] { - Source.single(1).runWith(TestSink.probe) - .request(1) - .expectNextChainingPF { - case 2 => - } + Source.single(1).runWith(TestSink.probe).request(1).expectNextChainingPF { + case 2 => + } }.getMessage should include("message matching partial function") } @@ -185,7 +164,9 @@ class StreamTestKitSpec extends AkkaSpec { intercept[AssertionError] { val timeout = 100.millis val overTimeout = timeout + 50.millis - Source.tick(overTimeout, 1.millis, 1).runWith(TestSink.probe) + Source + 
.tick(overTimeout, 1.millis, 1) + .runWith(TestSink.probe) .request(1) .expectNextChainingPF(timeout, { case 1 => @@ -195,15 +176,11 @@ class StreamTestKitSpec extends AkkaSpec { } "#expectNextN given a number of elements" in { - Source(1 to 4).runWith(TestSink.probe) - .request(4) - .expectNextN(4) should ===(List(1, 2, 3, 4)) + Source(1 to 4).runWith(TestSink.probe).request(4).expectNextN(4) should ===(List(1, 2, 3, 4)) } "#expectNextN given specific elements" in { - Source(1 to 4).runWith(TestSink.probe) - .request(4) - .expectNextN(4) should ===(List(1, 2, 3, 4)) + Source(1 to 4).runWith(TestSink.probe).request(4).expectNextN(4) should ===(List(1, 2, 3, 4)) } } } diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala index adc7e30af3..2552e7adb0 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/TestPublisherSubscriberSpec.scala @@ -14,8 +14,7 @@ import akka.testkit.AkkaSpec class TestPublisherSubscriberSpec extends AkkaSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val materializer = ActorMaterializer(settings) @@ -32,7 +31,7 @@ class TestPublisherSubscriberSpec extends AkkaSpec { upstreamSubscription.sendNext(1) downstreamSubscription.request(1) upstream.expectEventPF { case RequestMore(_, e) => e } should ===(1L) - downstream.expectEventPF { case OnNext(e) => e } should ===(1) + downstream.expectEventPF { case OnNext(e) => e } should ===(1) upstreamSubscription.sendNext(1) downstreamSubscription.request(1) @@ -54,7 +53,7 @@ class TestPublisherSubscriberSpec extends AkkaSpec { upstreamSubscription.sendNext(1) downstreamSubscription.request(1) - 
an[AssertionError] should be thrownBy upstream.expectEventPF { case Subscribe(e) => e } + an[AssertionError] should be thrownBy upstream.expectEventPF { case Subscribe(e) => e } an[AssertionError] should be thrownBy downstream.expectNextPF[String] { case e: String => e } upstreamSubscription.sendComplete() @@ -66,9 +65,7 @@ class TestPublisherSubscriberSpec extends AkkaSpec { Source.fromPublisher(upstream).runWith(Sink.fromSubscriber(downstream)) - downstream - .expectSubscription() - .request(10) + downstream.expectSubscription().request(10) upstream.expectRequest() should ===(10L) upstream.sendNext(1) diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/TwoStreamsSetup.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/TwoStreamsSetup.scala index 91bff9895e..f5f443651c 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/TwoStreamsSetup.scala +++ b/akka-stream-testkit/src/test/scala/akka/stream/testkit/TwoStreamsSetup.scala @@ -20,15 +20,17 @@ abstract class TwoStreamsSetup extends BaseTwoStreamsSetup { override def setup(p1: Publisher[Int], p2: Publisher[Int]) = { val subscriber = TestSubscriber.probe[Outputs]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ - val f = fixture(b) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + import GraphDSL.Implicits._ + val f = fixture(b) - Source.fromPublisher(p1) ~> f.left - Source.fromPublisher(p2) ~> f.right - f.out ~> Sink.fromSubscriber(subscriber) - ClosedShape - }).run() + Source.fromPublisher(p1) ~> f.left + Source.fromPublisher(p2) ~> f.right + f.out ~> Sink.fromSubscriber(subscriber) + ClosedShape + }) + .run() subscriber } diff --git a/akka-stream-testkit/src/test/scala/akka/stream/testkit/Utils.scala b/akka-stream-testkit/src/test/scala/akka/stream/testkit/Utils.scala index 261f269fd5..2cb2c83938 100644 --- a/akka-stream-testkit/src/test/scala/akka/stream/testkit/Utils.scala +++ 
b/akka-stream-testkit/src/test/scala/akka/stream/testkit/Utils.scala @@ -12,14 +12,16 @@ import scala.util.control.NoStackTrace object Utils { /** Sets the default-mailbox to the usual [[akka.dispatch.UnboundedMailbox]] instead of [[StreamTestDefaultMailbox]]. */ - val UnboundedMailboxConfig = ConfigFactory.parseString("""akka.actor.default-mailbox.mailbox-type = "akka.dispatch.UnboundedMailbox"""") + val UnboundedMailboxConfig = + ConfigFactory.parseString("""akka.actor.default-mailbox.mailbox-type = "akka.dispatch.UnboundedMailbox"""") case class TE(message: String) extends RuntimeException(message) with NoStackTrace def assertDispatcher(ref: ActorRef, dispatcher: String): Unit = ref match { case r: ActorRefWithCell => if (r.underlying.props.dispatcher != dispatcher) - throw new AssertionError(s"Expected $ref to use dispatcher [$dispatcher], yet used: [${r.underlying.props.dispatcher}]") + throw new AssertionError( + s"Expected $ref to use dispatcher [$dispatcher], yet used: [${r.underlying.props.dispatcher}]") case _ => throw new Exception(s"Unable to determine dispatcher of $ref") } diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorPublisherTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorPublisherTest.scala index 161866e890..04b8f69c98 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorPublisherTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorPublisherTest.scala @@ -18,21 +18,24 @@ object ActorPublisherTest { class TestPublisher(allElements: Long) extends ActorPublisher[Int] { - val source: Iterator[Int] = (if (allElements == Long.MaxValue) 1 to Int.MaxValue else 0 until allElements.toInt).toIterator + val source: Iterator[Int] = + (if (allElements == Long.MaxValue) 1 to Int.MaxValue else 0 until allElements.toInt).toIterator override def receive: Receive = { case Request(elements) => loopDemand() case Produce if totalDemand > 0 && !isCompleted && source.hasNext => 
onNext(source.next()) - case Produce if !isCompleted && !source.hasNext => onComplete() - case Produce if isCompleted => // no-op - case _ => // no-op + case Produce if !isCompleted && !source.hasNext => onComplete() + case Produce if isCompleted => // no-op + case _ => // no-op } def loopDemand(): Unit = { val loopUntil = math.min(100, totalDemand) - 1 to loopUntil.toInt foreach { _ => self ! Produce } + (1 to loopUntil.toInt).foreach { _ => + self ! Produce + } if (loopUntil > 100) self ! Loop } } diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorSystemLifecycle.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorSystemLifecycle.scala index cb1cbaae74..3e5587066d 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorSystemLifecycle.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ActorSystemLifecycle.scala @@ -36,8 +36,9 @@ trait ActorSystemLifecycle { Await.ready(system.terminate(), shutdownTimeout) } catch { case _: TimeoutException => - val msg = "Failed to stop [%s] within [%s] \n%s".format(system.name, shutdownTimeout, - system.asInstanceOf[ActorSystemImpl].printTree) + val msg = "Failed to stop [%s] within [%s] \n%s".format(system.name, + shutdownTimeout, + system.asInstanceOf[ActorSystemImpl].printTree) throw new RuntimeException(msg) } } diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala index 9e5b56f545..a834c7b0a3 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaIdentityProcessorVerification.scala @@ -8,18 +8,20 @@ import java.util.concurrent.Executors import java.util.concurrent.ExecutorService import java.util.concurrent.TimeUnit import akka.stream.testkit.TestPublisher -import org.reactivestreams.{ Subscriber, 
Subscription, Processor, Publisher } +import org.reactivestreams.{ Processor, Publisher, Subscriber, Subscription } import org.reactivestreams.tck.IdentityProcessorVerification import org.reactivestreams.tck.TestEnvironment import org.scalatestplus.testng.TestNGSuiteLike import org.testng.annotations.AfterClass abstract class AkkaIdentityProcessorVerification[T](env: TestEnvironment, publisherShutdownTimeout: Long) - extends IdentityProcessorVerification[T](env, publisherShutdownTimeout) - with TestNGSuiteLike with ActorSystemLifecycle { + extends IdentityProcessorVerification[T](env, publisherShutdownTimeout) + with TestNGSuiteLike + with ActorSystemLifecycle { def this(printlnDebug: Boolean) = - this(new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug), Timeouts.publisherShutdownTimeoutMillis) + this(new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug), + Timeouts.publisherShutdownTimeoutMillis) def this() = this(false) diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala index c3f1d99413..b9ba15a31c 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaPublisherVerification.scala @@ -13,23 +13,24 @@ import org.reactivestreams.tck.{ PublisherVerification, TestEnvironment } import org.scalatestplus.testng.TestNGSuiteLike abstract class AkkaPublisherVerification[T](val env: TestEnvironment, publisherShutdownTimeout: Long) - extends PublisherVerification[T](env, publisherShutdownTimeout) - with TestNGSuiteLike with ActorSystemLifecycle { + extends PublisherVerification[T](env, publisherShutdownTimeout) + with TestNGSuiteLike + with ActorSystemLifecycle { def this(printlnDebug: Boolean) = - this(new 
TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug), Timeouts.publisherShutdownTimeoutMillis) + this(new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug), + Timeouts.publisherShutdownTimeoutMillis) def this() = this(false) - implicit lazy val materializer = ActorMaterializer( - ActorMaterializerSettings(system).withInputBuffer(initialSize = 512, maxSize = 512))(system) + implicit lazy val materializer = + ActorMaterializer(ActorMaterializerSettings(system).withInputBuffer(initialSize = 512, maxSize = 512))(system) override def createFailedPublisher(): Publisher[T] = TestPublisher.error(new Exception("Unable to serve subscribers right now!")) def iterable(elements: Long): immutable.Iterable[Int] = if (elements > Int.MaxValue) - new immutable.Iterable[Int] { override def iterator = Iterator from 0 } - else + new immutable.Iterable[Int] { override def iterator = Iterator.from(0) } else 0 until elements.toInt } diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaSubscriberVerification.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaSubscriberVerification.scala index 563e6d209e..01bc71b48c 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaSubscriberVerification.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/AkkaSubscriberVerification.scala @@ -13,8 +13,10 @@ import org.reactivestreams.tck.TestEnvironment import org.scalatestplus.testng.TestNGSuiteLike abstract class AkkaSubscriberBlackboxVerification[T](env: TestEnvironment) - extends SubscriberBlackboxVerification[T](env) with TestNGSuiteLike - with AkkaSubscriberVerificationLike with ActorSystemLifecycle { + extends SubscriberBlackboxVerification[T](env) + with TestNGSuiteLike + with AkkaSubscriberVerificationLike + with ActorSystemLifecycle { def this(printlnDebug: Boolean) = this(new TestEnvironment(Timeouts.defaultTimeoutMillis, 
Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug)) @@ -23,8 +25,9 @@ abstract class AkkaSubscriberBlackboxVerification[T](env: TestEnvironment) } abstract class AkkaSubscriberWhiteboxVerification[T](env: TestEnvironment) - extends SubscriberWhiteboxVerification[T](env) with TestNGSuiteLike - with AkkaSubscriberVerificationLike { + extends SubscriberWhiteboxVerification[T](env) + with TestNGSuiteLike + with AkkaSubscriberVerificationLike { def this(printlnDebug: Boolean) = this(new TestEnvironment(Timeouts.defaultTimeoutMillis, Timeouts.defaultNoSignalsTimeoutMillis, printlnDebug)) diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/EmptyPublisherTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/EmptyPublisherTest.scala index 10ddaaff6d..fe4957ae07 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/EmptyPublisherTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/EmptyPublisherTest.scala @@ -13,4 +13,3 @@ class EmptyPublisherTest extends AkkaPublisherVerification[Int] { override def maxElementsFromPublisher(): Long = 0 } - diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala index 7b795b6876..c5a479fc8f 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FanoutPublisherTest.scala @@ -13,8 +13,8 @@ class FanoutPublisherTest extends AkkaPublisherVerification[Int] { def createPublisher(elements: Long): Publisher[Int] = { val iterable: immutable.Iterable[Int] = - if (elements == 0) new immutable.Iterable[Int] { override def iterator = Iterator from 0 } - else 0 until elements.toInt + if (elements == 0) new immutable.Iterable[Int] { override def iterator = Iterator.from(0) } else + 0 until elements.toInt Source(iterable).runWith(Sink.asPublisher(true)) } diff --git 
a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FilePublisherTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FilePublisherTest.scala index 6cac4cbdd5..30987ed4e4 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FilePublisherTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FilePublisherTest.scala @@ -7,7 +7,7 @@ package akka.stream.tck import java.nio.file.Files import akka.actor.ActorSystem import akka.event.Logging -import akka.stream.scaladsl.{ Sink, FileIO } +import akka.stream.scaladsl.{ FileIO, Sink } import akka.stream.testkit.Utils._ import akka.testkit.{ EventFilter, TestEvent } import akka.util.ByteString @@ -37,9 +37,7 @@ class FilePublisherTest extends AkkaPublisherVerification[ByteString] { } def createPublisher(elements: Long): Publisher[ByteString] = - FileIO.fromPath(file, chunkSize = 512) - .take(elements) - .runWith(Sink.asPublisher(false)) + FileIO.fromPath(file, chunkSize = 512).take(elements).runWith(Sink.asPublisher(false)) @AfterClass def after() = Files.delete(file) diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FlatMapConcatDoubleSubscriberTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FlatMapConcatDoubleSubscriberTest.scala index 275817cfc3..d1a9c37cda 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FlatMapConcatDoubleSubscriberTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FlatMapConcatDoubleSubscriberTest.scala @@ -14,10 +14,13 @@ class FlatMapConcatDoubleSubscriberTest extends AkkaSubscriberBlackboxVerificati def createSubscriber(): Subscriber[Int] = { val subscriber = Promise[Subscriber[Int]]() - Source.single(Source.fromPublisher(new Publisher[Int] { - def subscribe(s: Subscriber[_ >: Int]): Unit = - subscriber.success(s.asInstanceOf[Subscriber[Int]]) - })).flatMapConcat(identity).runWith(Sink.ignore) + Source + .single(Source.fromPublisher(new Publisher[Int] { + def subscribe(s: Subscriber[_ >: 
Int]): Unit = + subscriber.success(s.asInstanceOf[Subscriber[Int]]) + })) + .flatMapConcat(identity) + .runWith(Sink.ignore) Await.result(subscriber.future, 1.second) } diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ForeachSinkSubscriberTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ForeachSinkSubscriberTest.scala index bffbd3ae94..5bfa46533b 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ForeachSinkSubscriberTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/ForeachSinkSubscriberTest.scala @@ -10,7 +10,10 @@ import org.reactivestreams.Subscriber class ForeachSinkSubscriberTest extends AkkaSubscriberBlackboxVerification[Int] { override def createSubscriber(): Subscriber[Int] = - Flow[Int].to(Sink.foreach { _ => }).runWith(Source.asSubscriber) + Flow[Int] + .to(Sink.foreach { _ => + }) + .runWith(Source.asSubscriber) override def createElement(element: Int): Int = element } diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FusableProcessorTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FusableProcessorTest.scala index 0d420588ea..e7d10194e3 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FusableProcessorTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/FusableProcessorTest.scala @@ -11,8 +11,8 @@ import org.reactivestreams.Processor class FusableProcessorTest extends AkkaIdentityProcessorVerification[Int] { override def createIdentityProcessor(maxBufferSize: Int): Processor[Int, Int] = { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = maxBufferSize / 2, maxSize = maxBufferSize) + val settings = + ActorMaterializerSettings(system).withInputBuffer(initialSize = maxBufferSize / 2, maxSize = maxBufferSize) implicit val materializer = ActorMaterializer(settings)(system) diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/InputStreamSourceTest.scala 
b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/InputStreamSourceTest.scala index c6598aa034..11ed5bdbcb 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/InputStreamSourceTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/InputStreamSourceTest.scala @@ -14,15 +14,17 @@ import org.reactivestreams.Publisher class InputStreamSourceTest extends AkkaPublisherVerification[ByteString] { def createPublisher(elements: Long): Publisher[ByteString] = { - StreamConverters.fromInputStream(() => new InputStream { - @volatile var num = 0 - override def read(): Int = { - num += 1 - num - } - }).withAttributes(ActorAttributes.dispatcher("akka.test.stream-dispatcher")) + StreamConverters + .fromInputStream(() => + new InputStream { + @volatile var num = 0 + override def read(): Int = { + num += 1 + num + } + }) + .withAttributes(ActorAttributes.dispatcher("akka.test.stream-dispatcher")) .take(elements) .runWith(Sink.asPublisher(false)) } } - diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/MaybeSourceTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/MaybeSourceTest.scala index 6a255be864..1ee4a7e9ec 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/MaybeSourceTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/MaybeSourceTest.scala @@ -5,16 +5,15 @@ package akka.stream.tck import org.reactivestreams.Publisher -import akka.stream.scaladsl.{ Keep, Source, Sink } +import akka.stream.scaladsl.{ Keep, Sink, Source } class MaybeSourceTest extends AkkaPublisherVerification[Int] { def createPublisher(elements: Long): Publisher[Int] = { val (p, pub) = Source.maybe[Int].toMat(Sink.asPublisher(false))(Keep.both).run() - p success Some(1) + p.success(Some(1)) pub } override def maxElementsFromPublisher(): Long = 1 } - diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/SingleElementSourceTest.scala 
b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/SingleElementSourceTest.scala index a3ea632549..87b34fe2a4 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/SingleElementSourceTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/SingleElementSourceTest.scala @@ -16,4 +16,3 @@ class SingleElementSourceTest extends AkkaPublisherVerification[Int] { override def maxElementsFromPublisher(): Long = 1 } - diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/SinkholeSubscriberTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/SinkholeSubscriberTest.scala index efec8126ae..b03d919737 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/SinkholeSubscriberTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/SinkholeSubscriberTest.scala @@ -6,12 +6,12 @@ package akka.stream.tck import akka.Done import akka.stream.impl.SinkholeSubscriber -import org.reactivestreams.tck.{ TestEnvironment, SubscriberWhiteboxVerification } +import org.reactivestreams.tck.{ SubscriberWhiteboxVerification, TestEnvironment } import org.reactivestreams.tck.SubscriberWhiteboxVerification.{ SubscriberPuppet, WhiteboxSubscriberProbe } import org.scalatestplus.testng.{ TestNGSuiteLike } import java.lang.{ Integer => JInt } import scala.concurrent.Promise -import org.reactivestreams.{ Subscription, Subscriber } +import org.reactivestreams.{ Subscriber, Subscription } class SinkholeSubscriberTest extends SubscriberWhiteboxVerification[JInt](new TestEnvironment()) with TestNGSuiteLike { override def createSubscriber(probe: WhiteboxSubscriberProbe[JInt]): Subscriber[JInt] = { @@ -45,4 +45,3 @@ class SinkholeSubscriberTest extends SubscriberWhiteboxVerification[JInt](new Te override def createElement(element: Int): JInt = element } - diff --git a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/TransformProcessorTest.scala b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/TransformProcessorTest.scala 
index ae6976860e..8d015612ce 100644 --- a/akka-stream-tests-tck/src/test/scala/akka/stream/tck/TransformProcessorTest.scala +++ b/akka-stream-tests-tck/src/test/scala/akka/stream/tck/TransformProcessorTest.scala @@ -13,18 +13,19 @@ import org.reactivestreams.Processor class TransformProcessorTest extends AkkaIdentityProcessorVerification[Int] { override def createIdentityProcessor(maxBufferSize: Int): Processor[Int, Int] = { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = maxBufferSize / 2, maxSize = maxBufferSize) + val settings = + ActorMaterializerSettings(system).withInputBuffer(initialSize = maxBufferSize / 2, maxSize = maxBufferSize) implicit val materializer = ActorMaterializer(settings)(system) val stage = new SimpleLinearGraphStage[Int] { - override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { - override def onPush(): Unit = push(out, grab(in)) - override def onPull(): Unit = pull(in) - setHandlers(in, out, this) - } + override def createLogic(inheritedAttributes: Attributes) = + new GraphStageLogic(shape) with InHandler with OutHandler { + override def onPush(): Unit = push(out, grab(in)) + override def onPull(): Unit = pull(in) + setHandlers(in, out, this) + } } Flow[Int].via(stage).toProcessor.run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala index c869919c2f..5343c8c150 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/ActorMaterializerSpec.scala @@ -41,21 +41,23 @@ class ActorMaterializerSpec extends StreamSpec with ImplicitSender { "refuse materialization after shutdown" in { val m = ActorMaterializer.create(system) m.shutdown() - the[IllegalStateException] thrownBy { + (the[IllegalStateException] thrownBy { Source(1 to 5).runWith(Sink.ignore)(m) - } 
should have message "Trying to materialize stream after materializer has been shutdown" + } should have).message("Trying to materialize stream after materializer has been shutdown") } "refuse materialization when shutdown while materializing" in { val m = ActorMaterializer.create(system) - the[IllegalStateException] thrownBy { - Source(1 to 5).mapMaterializedValue { _ => - // shutdown while materializing - m.shutdown() - Thread.sleep(100) - }.runWith(Sink.ignore)(m) - } should have message "Materializer shutdown while materializing stream" + (the[IllegalStateException] thrownBy { + Source(1 to 5) + .mapMaterializedValue { _ => + // shutdown while materializing + m.shutdown() + Thread.sleep(100) + } + .runWith(Sink.ignore)(m) + } should have).message("Materializer shutdown while materializing stream") } "shut down the supervisor actor it encapsulates" in { @@ -83,9 +85,8 @@ class ActorMaterializerSpec extends StreamSpec with ImplicitSender { "handle properly broken Props" in { val m = ActorMaterializer.create(system) an[IllegalArgumentException] should be thrownBy - Await.result( - Source.actorPublisher(Props(classOf[TestActor], "wrong", "arguments")).runWith(Sink.head)(m), - 3.seconds) + Await.result(Source.actorPublisher(Props(classOf[TestActor], "wrong", "arguments")).runWith(Sink.head)(m), + 3.seconds) } "report correctly if it has been shut down from the side" in { @@ -100,10 +101,12 @@ class ActorMaterializerSpec extends StreamSpec with ImplicitSender { object ActorMaterializerSpec { class ActorWithMaterializer(p: TestProbe) extends Actor { - private val settings: ActorMaterializerSettings = ActorMaterializerSettings(context.system).withDispatcher("akka.test.stream-dispatcher") + private val settings: ActorMaterializerSettings = + ActorMaterializerSettings(context.system).withDispatcher("akka.test.stream-dispatcher") implicit val mat = ActorMaterializer(settings)(context) - Source.repeat("hello") + Source + .repeat("hello") .take(1) .concat(Source.maybe) 
.map(p.ref ! _) diff --git a/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala index 4af7cf3029..73e2747fb9 100755 --- a/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/DslConsistencySpec.scala @@ -10,8 +10,14 @@ import org.scalatest.Matchers import org.scalatest.WordSpec object DslConsistencySpec { - class ScalaSubSource[Out, Mat] extends impl.SubFlowImpl[Out, Out, Mat, scaladsl.Source[Out, Mat]#Repr, scaladsl.RunnableGraph[Mat]](null, null, null) - class ScalaSubFlow[In, Out, Mat] extends impl.SubFlowImpl[Out, Out, Mat, scaladsl.Flow[In, Out, Mat]#Repr, scaladsl.Sink[In, Mat]](null, null, null) + class ScalaSubSource[Out, Mat] + extends impl.SubFlowImpl[Out, Out, Mat, scaladsl.Source[Out, Mat]#Repr, scaladsl.RunnableGraph[Mat]](null, + null, + null) + class ScalaSubFlow[In, Out, Mat] + extends impl.SubFlowImpl[Out, Out, Mat, scaladsl.Flow[In, Out, Mat]#Repr, scaladsl.Sink[In, Mat]](null, + null, + null) } class DslConsistencySpec extends WordSpec with Matchers { @@ -36,78 +42,105 @@ class DslConsistencySpec extends WordSpec with Matchers { val ignore: Set[String] = Set("equals", "hashCode", "notify", "notifyAll", "wait", "toString", "getClass") ++ - Set("productArity", "canEqual", "productPrefix", "copy", "productIterator", "productElement") ++ - Set("productElementName", "productElementNames") ++ - Set("create", "apply", "ops", "appendJava", "andThen", "andThenMat", "isIdentity", "withAttributes", "transformMaterializing") ++ - Set("asScala", "asJava", "deprecatedAndThen", "deprecatedAndThenMat") + Set("productArity", "canEqual", "productPrefix", "copy", "productIterator", "productElement") ++ + Set("productElementName", "productElementNames") ++ + Set("create", + "apply", + "ops", + "appendJava", + "andThen", + "andThenMat", + "isIdentity", + "withAttributes", + "transformMaterializing") ++ + 
Set("asScala", "asJava", "deprecatedAndThen", "deprecatedAndThenMat") - val graphHelpers = Set("zipGraph", "zipWithGraph", "zipLatestGraph", "zipLatestWithGraph", "mergeGraph", "mergeSortedGraph", "interleaveGraph", "concatGraph", "prependGraph", "alsoToGraph", "wireTapGraph", "orElseGraph", "divertToGraph") + val graphHelpers = Set("zipGraph", + "zipWithGraph", + "zipLatestGraph", + "zipLatestWithGraph", + "mergeGraph", + "mergeSortedGraph", + "interleaveGraph", + "concatGraph", + "prependGraph", + "alsoToGraph", + "wireTapGraph", + "orElseGraph", + "divertToGraph") - val allowMissing: Map[Class[_], Set[String]] = Map( - jFlowClass -> graphHelpers, - jSourceClass -> (graphHelpers ++ Set("watch", "ask")), - // Java subflows can only be nested using .via and .to (due to type system restrictions) - jSubFlowClass -> (graphHelpers ++ Set("groupBy", "splitAfter", "splitWhen", "subFlow", "watch", "ask")), - jSubSourceClass -> (graphHelpers ++ Set("groupBy", "splitAfter", "splitWhen", "subFlow", "watch", "ask")), - - sFlowClass -> Set("of"), - sSourceClass -> Set("adapt", "from", "watch"), - sSinkClass -> Set("adapt"), - sSubFlowClass -> Set(), - sSubSourceClass -> Set(), - - sRunnableGraphClass -> Set("builder")) + val allowMissing: Map[Class[_], Set[String]] = Map(jFlowClass -> graphHelpers, + jSourceClass -> (graphHelpers ++ Set("watch", "ask")), + // Java subflows can only be nested using .via and .to (due to type system restrictions) + jSubFlowClass -> (graphHelpers ++ Set("groupBy", + "splitAfter", + "splitWhen", + "subFlow", + "watch", + "ask")), + jSubSourceClass -> (graphHelpers ++ Set("groupBy", + "splitAfter", + "splitWhen", + "subFlow", + "watch", + "ask")), + sFlowClass -> Set("of"), + sSourceClass -> Set("adapt", "from", "watch"), + sSinkClass -> Set("adapt"), + sSubFlowClass -> Set(), + sSubSourceClass -> Set(), + sRunnableGraphClass -> Set("builder")) def materializing(m: Method): Boolean = m.getParameterTypes.contains(classOf[ActorMaterializer]) def 
assertHasMethod(c: Class[_], name: String): Unit = { // include class name to get better error message if (!allowMissing.getOrElse(c, Set.empty).contains(name)) - c.getMethods.collect { case m if !ignore(m.getName) => c.getName + "." + m.getName } should contain(c.getName + "." + name) + c.getMethods.collect { case m if !ignore(m.getName) => c.getName + "." + m.getName } should contain( + c.getName + "." + name) } "Java and Scala DSLs" must { (("Source" -> List[Class[_]](sSourceClass, jSourceClass)) :: - ("SubSource" -> List[Class[_]](sSubSourceClass, jSubSourceClass)) :: - ("Flow" -> List[Class[_]](sFlowClass, jFlowClass)) :: - ("SubFlow" -> List[Class[_]](sSubFlowClass, jSubFlowClass)) :: - ("Sink" -> List[Class[_]](sSinkClass, jSinkClass)) :: - ("RunnableFlow" -> List[Class[_]](sRunnableGraphClass, jRunnableGraphClass)) :: - Nil).foreach { - case (element, classes) => + ("SubSource" -> List[Class[_]](sSubSourceClass, jSubSourceClass)) :: + ("Flow" -> List[Class[_]](sFlowClass, jFlowClass)) :: + ("SubFlow" -> List[Class[_]](sSubFlowClass, jSubFlowClass)) :: + ("Sink" -> List[Class[_]](sSinkClass, jSinkClass)) :: + ("RunnableFlow" -> List[Class[_]](sRunnableGraphClass, jRunnableGraphClass)) :: + Nil).foreach { + case (element, classes) => + s"provide same $element transforming operators" in { + val allOps = + (for { + c <- classes + m <- c.getMethods + if !Modifier.isStatic(m.getModifiers) + if !ignore(m.getName) + if !m.getName.contains("$") + if !materializing(m) + } yield m.getName).toSet - s"provide same $element transforming operators" in { - val allOps = - (for { - c <- classes - m <- c.getMethods - if !Modifier.isStatic(m.getModifiers) - if !ignore(m.getName) - if !m.getName.contains("$") - if !materializing(m) - } yield m.getName).toSet + for (c <- classes; op <- allOps) + assertHasMethod(c, op) + } - for (c <- classes; op <- allOps) - assertHasMethod(c, op) - } + s"provide same $element materializing operators" in { + val materializingOps = + (for { + c 
<- classes + m <- c.getMethods + if !Modifier.isStatic(m.getModifiers) + if !ignore(m.getName) + if !m.getName.contains("$") + if materializing(m) + } yield m.getName).toSet - s"provide same $element materializing operators" in { - val materializingOps = - (for { - c <- classes - m <- c.getMethods - if !Modifier.isStatic(m.getModifiers) - if !ignore(m.getName) - if !m.getName.contains("$") - if materializing(m) - } yield m.getName).toSet + for (c <- classes; op <- materializingOps) + assertHasMethod(c, op) + } - for (c <- classes; op <- materializingOps) - assertHasMethod(c, op) - } - - } + } } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala index c9db03ead6..77038600ec 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/DslFactoriesConsistencySpec.scala @@ -12,19 +12,27 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers { // configuration // val scalaIgnore = - Set("equals", "hashCode", "notify", "notifyAll", "wait", "toString", "getClass", "shape", "identityTraversalBuilder") + Set("equals", + "hashCode", + "notify", + "notifyAll", + "wait", + "toString", + "getClass", + "shape", + "identityTraversalBuilder") val javaIgnore = Set("adapt") // the scaladsl -> javadsl bridge val `scala -> java aliases` = ("apply" -> "create") :: - ("apply" -> "of") :: - ("apply" -> "from") :: - ("apply" -> "fromGraph") :: - ("apply" -> "fromIterator") :: - ("apply" -> "fromFunctions") :: - Nil + ("apply" -> "of") :: + ("apply" -> "from") :: + ("apply" -> "fromGraph") :: + ("apply" -> "fromIterator") :: + ("apply" -> "fromFunctions") :: + Nil // format: OFF val `scala -> java types` = @@ -43,7 +51,7 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers { (classOf[akka.stream.scaladsl.Sink[_, _]], classOf[akka.stream.javadsl.Sink[_, _]]) :: 
(classOf[akka.stream.scaladsl.Flow[_, _, _]], classOf[akka.stream.javadsl.Flow[_, _, _]]) :: (classOf[akka.stream.scaladsl.RunnableGraph[_]], classOf[akka.stream.javadsl.RunnableGraph[_]]) :: - ((2 to 22) map { i => (Class.forName(s"scala.Function$i"), Class.forName(s"akka.japi.function.Function$i")) }).toList + (2 to 22) .map { i => (Class.forName(s"scala.Function$i"), Class.forName(s"akka.japi.function.Function$i")) }.toList // format: ON val sSource = classOf[scaladsl.Source[_, _]] @@ -69,32 +77,35 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers { TestCase(name, Some(sClass), Some(jClass), None) } - val testCases = Seq( - TestCase("Source", scaladsl.Source.getClass, javadsl.Source.getClass), - TestCase("Flow", scaladsl.Flow.getClass, javadsl.Flow.getClass), - TestCase("Sink", scaladsl.Sink.getClass, javadsl.Sink.getClass), - TestCase("BidiFlow", scaladsl.BidiFlow.getClass, javadsl.BidiFlow.getClass), - TestCase("GraphDSL", scaladsl.GraphDSL.getClass, javadsl.GraphDSL.getClass, classOf[javadsl.GraphCreate]), - TestCase("ZipWith", Some(scaladsl.ZipWith.getClass), None, Some(javadsl.ZipWith.getClass)), - TestCase("Merge", scaladsl.Merge.getClass, javadsl.Merge.getClass), - TestCase("MergePreferred", scaladsl.MergePreferred.getClass, javadsl.MergePreferred.getClass), - TestCase("Broadcast", scaladsl.Broadcast.getClass, javadsl.Broadcast.getClass), - TestCase("Balance", scaladsl.Balance.getClass, javadsl.Balance.getClass), - TestCase("Zip", scaladsl.Zip.getClass, javadsl.Zip.getClass), - TestCase("UnZip", scaladsl.Unzip.getClass, javadsl.Unzip.getClass), - TestCase("Concat", scaladsl.Concat.getClass, javadsl.Concat.getClass), - TestCase("FileIO", scaladsl.FileIO.getClass, javadsl.FileIO.getClass), - TestCase("StreamConverters", scaladsl.StreamConverters.getClass, javadsl.StreamConverters.getClass)) + val testCases = Seq(TestCase("Source", scaladsl.Source.getClass, javadsl.Source.getClass), + TestCase("Flow", scaladsl.Flow.getClass, 
javadsl.Flow.getClass), + TestCase("Sink", scaladsl.Sink.getClass, javadsl.Sink.getClass), + TestCase("BidiFlow", scaladsl.BidiFlow.getClass, javadsl.BidiFlow.getClass), + TestCase("GraphDSL", + scaladsl.GraphDSL.getClass, + javadsl.GraphDSL.getClass, + classOf[javadsl.GraphCreate]), + TestCase("ZipWith", Some(scaladsl.ZipWith.getClass), None, Some(javadsl.ZipWith.getClass)), + TestCase("Merge", scaladsl.Merge.getClass, javadsl.Merge.getClass), + TestCase("MergePreferred", scaladsl.MergePreferred.getClass, javadsl.MergePreferred.getClass), + TestCase("Broadcast", scaladsl.Broadcast.getClass, javadsl.Broadcast.getClass), + TestCase("Balance", scaladsl.Balance.getClass, javadsl.Balance.getClass), + TestCase("Zip", scaladsl.Zip.getClass, javadsl.Zip.getClass), + TestCase("UnZip", scaladsl.Unzip.getClass, javadsl.Unzip.getClass), + TestCase("Concat", scaladsl.Concat.getClass, javadsl.Concat.getClass), + TestCase("FileIO", scaladsl.FileIO.getClass, javadsl.FileIO.getClass), + TestCase("StreamConverters", + scaladsl.StreamConverters.getClass, + javadsl.StreamConverters.getClass)) "Java DSL" must provide { - testCases foreach { + testCases.foreach { case TestCase(name, Some(sClass), jClass, jFactoryOption) => - name which { + name.which { s"allows creating the same ${name}s as Scala DSL" in { - runSpec( - getSMethods(sClass), - jClass.toList.flatMap(getJMethods) ++ - jFactoryOption.toList.flatMap(f => getJMethods(f).map(unspecializeName andThen curryLikeJava))) + runSpec(getSMethods(sClass), + jClass.toList.flatMap(getJMethods) ++ + jFactoryOption.toList.flatMap(f => getJMethods(f).map(unspecializeName.andThen(curryLikeJava)))) } } } @@ -102,35 +113,44 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers { // here be dragons... 
- private def getJMethods(jClass: Class[_]): List[Method] = jClass.getDeclaredMethods.filterNot(javaIgnore contains _.getName).map(toMethod).filterNot(ignore).toList - private def getSMethods(sClass: Class[_]): List[Method] = sClass.getMethods.filterNot(scalaIgnore contains _.getName).map(toMethod).filterNot(ignore).toList + private def getJMethods(jClass: Class[_]): List[Method] = + jClass.getDeclaredMethods.filterNot(javaIgnore contains _.getName).map(toMethod).filterNot(ignore).toList + private def getSMethods(sClass: Class[_]): List[Method] = + sClass.getMethods.filterNot(scalaIgnore contains _.getName).map(toMethod).filterNot(ignore).toList private def toMethod(m: java.lang.reflect.Method): Method = Method(m.getName, List(m.getParameterTypes: _*), m.getReturnType, m.getDeclaringClass) - private case class Ignore(cls: Class[_] => Boolean, name: String => Boolean, parameters: Int => Boolean, paramTypes: List[Class[_]] => Boolean) + private case class Ignore(cls: Class[_] => Boolean, + name: String => Boolean, + parameters: Int => Boolean, + paramTypes: List[Class[_]] => Boolean) private def ignore(m: Method): Boolean = { val ignores = Seq( - // private scaladsl method - Ignore(_ == akka.stream.scaladsl.Source.getClass, _ == "apply", _ == 1, _ == List(classOf[akka.stream.impl.SourceModule[_, _]])), - // corresponding matches on java side would need to have Function23 - Ignore(_ == akka.stream.scaladsl.Source.getClass, _ == "apply", _ == 24, _ => true), - Ignore(_ == akka.stream.scaladsl.Flow.getClass, _ == "apply", _ == 24, _ => true), - Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "apply", _ == 24, _ => true), - Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "collection", _ => true, _ => true), - Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "actorRef", _ => true, _ => true), // Internal in scaladsl - Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "actorRefWithAck", _ => true, _ => true), // Internal in scaladsl - Ignore(_ == 
akka.stream.scaladsl.Source.getClass, _ == "actorRef", _ => true, _ => true), // Internal in scaladsl - Ignore(_ == akka.stream.scaladsl.BidiFlow.getClass, _ == "apply", _ == 24, _ => true), - Ignore(_ == akka.stream.scaladsl.GraphDSL.getClass, _ == "runnable", _ == 24, _ => true), - Ignore(_ == akka.stream.scaladsl.GraphDSL.getClass, _ == "create", _ == 24, _ => true), - // all generated methods like scaladsl.Sink$.akka$stream$scaladsl$Sink$$newOnCompleteStage$1 - Ignore(_ => true, _.contains("$"), _ => true, _ => true)) + // private scaladsl method + Ignore(_ == akka.stream.scaladsl.Source.getClass, + _ == "apply", + _ == 1, + _ == List(classOf[akka.stream.impl.SourceModule[_, _]])), + // corresponding matches on java side would need to have Function23 + Ignore(_ == akka.stream.scaladsl.Source.getClass, _ == "apply", _ == 24, _ => true), + Ignore(_ == akka.stream.scaladsl.Flow.getClass, _ == "apply", _ == 24, _ => true), + Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "apply", _ == 24, _ => true), + Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "collection", _ => true, _ => true), + Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "actorRef", _ => true, _ => true), // Internal in scaladsl + Ignore(_ == akka.stream.scaladsl.Sink.getClass, _ == "actorRefWithAck", _ => true, _ => true), // Internal in scaladsl + Ignore(_ == akka.stream.scaladsl.Source.getClass, _ == "actorRef", _ => true, _ => true), // Internal in scaladsl + Ignore(_ == akka.stream.scaladsl.BidiFlow.getClass, _ == "apply", _ == 24, _ => true), + Ignore(_ == akka.stream.scaladsl.GraphDSL.getClass, _ == "runnable", _ == 24, _ => true), + Ignore(_ == akka.stream.scaladsl.GraphDSL.getClass, _ == "create", _ == 24, _ => true), + // all generated methods like scaladsl.Sink$.akka$stream$scaladsl$Sink$$newOnCompleteStage$1 + Ignore(_ => true, _.contains("$"), _ => true, _ => true)) ignores.foldLeft(false) { case (acc, i) => - acc || (i.cls(m.declaringClass) && i.name(m.name) && 
i.parameters(m.parameterTypes.length) && i.paramTypes(m.parameterTypes)) + acc || (i.cls(m.declaringClass) && i.name(m.name) && i.parameters(m.parameterTypes.length) && i.paramTypes( + m.parameterTypes)) } } @@ -149,7 +169,8 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers { */ private val curryLikeJava: PartialFunction[Method, Method] = { case m if m.parameterTypes.size > 1 => - m.copy(name = m.name.filter(Character.isLetter), parameterTypes = m.parameterTypes.dropRight(1) :+ classOf[akka.japi.function.Function[_, _]]) + m.copy(name = m.name.filter(Character.isLetter), + parameterTypes = m.parameterTypes.dropRight(1) :+ classOf[akka.japi.function.Function[_, _]]) case m => m } @@ -168,21 +189,34 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers { if (matches.length == 0) { warnings += 1 alert("No match for " + row._1) - row._2 foreach { m => alert(s" > ${m.j.toString}: ${m.reason}") } + row._2.foreach { m => + alert(s" > ${m.j.toString}: ${m.reason}") + } } else if (matches.length == 1) { - info("Matched: Scala:" + row._1.name + "(" + row._1.parameterTypes.map(_.getName).mkString(",") + "): " + returnTypeString(row._1) + + info( + "Matched: Scala:" + row._1.name + "(" + row._1.parameterTypes + .map(_.getName) + .mkString(",") + "): " + returnTypeString(row._1) + " == " + - "Java:" + matches.head.j.name + "(" + matches.head.j.parameterTypes.map(_.getName).mkString(",") + "): " + returnTypeString(matches.head.j)) + "Java:" + matches.head.j.name + "(" + matches.head.j.parameterTypes + .map(_.getName) + .mkString(",") + "): " + returnTypeString(matches.head.j)) } else { warnings += 1 alert("Multiple matches for " + row._1 + "!") - matches foreach { m => alert(s" > ${m.j.toString}") } + matches.foreach { m => + alert(s" > ${m.j.toString}") + } } } if (warnings > 0) { - jMethods foreach { m => info(" java: " + m + ": " + returnTypeString(m)) } - sMethods foreach { m => info(" scala: " + m + ": " + returnTypeString(m)) } + 
jMethods.foreach { m => + info(" java: " + m + ": " + returnTypeString(m)) + } + sMethods.foreach { m => + info(" scala: " + m + ": " + returnTypeString(m)) + } fail("Warnings were issued! Fix name / type mappings or delegation code!") } } @@ -231,10 +265,10 @@ class DslFactoriesConsistencySpec extends WordSpec with Matchers { */ def returnTypeMatch(s: Class[_], j: Class[_]): Boolean = (sSource.isAssignableFrom(s) && jSource.isAssignableFrom(j)) || - (sSink.isAssignableFrom(s) && jSink.isAssignableFrom(j)) || - (sFlow.isAssignableFrom(s) && jFlow.isAssignableFrom(j)) || - (sRunnableGraph.isAssignableFrom(s) && jRunnableGraph.isAssignableFrom(j)) || - (graph.isAssignableFrom(s) && graph.isAssignableFrom(j)) + (sSink.isAssignableFrom(s) && jSink.isAssignableFrom(j)) || + (sFlow.isAssignableFrom(s) && jFlow.isAssignableFrom(j)) || + (sRunnableGraph.isAssignableFrom(s) && jRunnableGraph.isAssignableFrom(j)) || + (graph.isAssignableFrom(s) && graph.isAssignableFrom(j)) def typeMatch(scalaParams: List[Class[_]], javaParams: List[Class[_]]): Boolean = (scalaParams.toList, javaParams.toList) match { diff --git a/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala index c112a939f2..9b646a76d3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/FusingSpec.scala @@ -64,7 +64,8 @@ class FusingSpec extends StreamSpec { .via(snitchFlow.async) .mergeSubstreams .runWith(Sink.seq) - .futureValue.sorted should ===(in) + .futureValue + .sorted should ===(in) val refs = receiveN(in.size + in.size) // each element through the first map, then the second map refs.toSet should have size (in.size + 1) // outer/main actor + 1 actor per subflow @@ -79,7 +80,8 @@ class FusingSpec extends StreamSpec { .async .mergeSubstreams .runWith(Sink.seq) - .futureValue.sorted should ===(in) + .futureValue + .sorted should ===(in) val refs = receiveN(in.size + 
in.size) // each element through the first map, then the second map refs.toSet should have size (in.size + 1) // outer/main actor + 1 actor per subflow } diff --git a/akka-stream-tests/src/test/scala/akka/stream/actor/ActorPublisherSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/actor/ActorPublisherSpec.scala index 54102bf574..d3c05f80fa 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/actor/ActorPublisherSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/actor/ActorPublisherSpec.scala @@ -5,7 +5,7 @@ package akka.stream.actor import akka.actor.{ ActorRef, PoisonPill, Props } -import akka.stream.{ ClosedShape, ActorMaterializer, ActorMaterializerSettings, ActorAttributes } +import akka.stream.{ ActorAttributes, ActorMaterializer, ActorMaterializerSettings, ClosedShape } import akka.stream.scaladsl._ import akka.stream.testkit._ import akka.stream.testkit.scaladsl.StreamTestKit._ @@ -101,11 +101,11 @@ object ActorPublisherSpec { if (totalDemand <= Int.MaxValue) { val (use, keep) = buf.splitAt(totalDemand.toInt) buf = keep - use foreach onNext + use.foreach(onNext) } else { val (use, keep) = buf.splitAt(Int.MaxValue) buf = keep - use foreach onNext + use.foreach(onNext) deliverBuf() } } @@ -319,7 +319,8 @@ class ActorPublisherSpec extends StreamSpec(ActorPublisherSpec.config) with Impl s.expectSubscription() val s2 = TestSubscriber.manualProbe[String]() ActorPublisher[String](ref).subscribe(s2) - s2.expectSubscriptionAndError().getMessage should be(s"ActorPublisher ${ReactiveStreamsCompliance.SupportsOnlyASingleSubscriber}") + s2.expectSubscriptionAndError().getMessage should be( + s"ActorPublisher ${ReactiveStreamsCompliance.SupportsOnlyASingleSubscriber}") } "can not subscribe the same subscriber multiple times" in { @@ -350,19 +351,24 @@ class ActorPublisherSpec extends StreamSpec(ActorPublisherSpec.config) with Impl val source: Source[Int, ActorRef] = Source.actorPublisher(senderProps) val sink: Sink[String, ActorRef] = 
Sink.actorSubscriber(receiverProps(probe.ref)) - val (snd, rcv) = source.collect { - case n if n % 2 == 0 => "elem-" + n - }.toMat(sink)(Keep.both).run() + val (snd, rcv) = source + .collect { + case n if n % 2 == 0 => "elem-" + n + } + .toMat(sink)(Keep.both) + .run() - (1 to 3) foreach { snd ! _ } + (1 to 3).foreach { snd ! _ } probe.expectMsg("elem-2") - (4 to 500) foreach { n => + (4 to 500).foreach { n => if (n % 19 == 0) Thread.sleep(50) // simulate bursts snd ! n } - (4 to 500 by 2) foreach { n => probe.expectMsg("elem-" + n) } + (4 to 500 by 2).foreach { n => + probe.expectMsg("elem-" + n) + } watch(snd) rcv ! PoisonPill @@ -381,21 +387,23 @@ class ActorPublisherSpec extends StreamSpec(ActorPublisherSpec.config) with Impl val sink1 = Sink.fromSubscriber(ActorSubscriber[String](system.actorOf(receiverProps(probe1.ref)))) val sink2: Sink[String, ActorRef] = Sink.actorSubscriber(receiverProps(probe2.ref)) - val senderRef2 = RunnableGraph.fromGraph(GraphDSL.create(Source.actorPublisher[Int](senderProps)) { implicit b => source2 => - import GraphDSL.Implicits._ + val senderRef2 = RunnableGraph + .fromGraph(GraphDSL.create(Source.actorPublisher[Int](senderProps)) { implicit b => source2 => + import GraphDSL.Implicits._ - val merge = b.add(Merge[Int](2)) - val bcast = b.add(Broadcast[String](2)) + val merge = b.add(Merge[Int](2)) + val bcast = b.add(Broadcast[String](2)) - source1 ~> merge.in(0) - source2.out ~> merge.in(1) + source1 ~> merge.in(0) + source2.out ~> merge.in(1) - merge.out.map(_.toString) ~> bcast.in + merge.out.map(_.toString) ~> bcast.in - bcast.out(0).map(_ + "mark") ~> sink1 - bcast.out(1) ~> sink2 - ClosedShape - }).run() + bcast.out(0).map(_ + "mark") ~> sink1 + bcast.out(1) ~> sink2 + ClosedShape + }) + .run() (0 to 10).foreach { senderRef1 ! 
_ @@ -447,10 +455,10 @@ class ActorPublisherSpec extends StreamSpec(ActorPublisherSpec.config) with Impl } "use dispatcher from materializer settings" in { - implicit val materializer = ActorMaterializer( - ActorMaterializerSettings(system).withDispatcher("my-dispatcher1")) + implicit val materializer = ActorMaterializer(ActorMaterializerSettings(system).withDispatcher("my-dispatcher1")) val s = TestSubscriber.manualProbe[String]() - val ref = Source.actorPublisher(testPublisherProps(testActor, useTestDispatcher = false)).to(Sink.fromSubscriber(s)).run() + val ref = + Source.actorPublisher(testPublisherProps(testActor, useTestDispatcher = false)).to(Sink.fromSubscriber(s)).run() ref ! ThreadName expectMsgType[String] should include("my-dispatcher1") } @@ -458,9 +466,11 @@ class ActorPublisherSpec extends StreamSpec(ActorPublisherSpec.config) with Impl "use dispatcher from operation attributes" in { implicit val materializer = ActorMaterializer() val s = TestSubscriber.manualProbe[String]() - val ref = Source.actorPublisher(testPublisherProps(testActor, useTestDispatcher = false)) + val ref = Source + .actorPublisher(testPublisherProps(testActor, useTestDispatcher = false)) .withAttributes(ActorAttributes.dispatcher("my-dispatcher1")) - .to(Sink.fromSubscriber(s)).run() + .to(Sink.fromSubscriber(s)) + .run() ref ! 
ThreadName expectMsgType[String] should include("my-dispatcher1") } @@ -468,9 +478,11 @@ class ActorPublisherSpec extends StreamSpec(ActorPublisherSpec.config) with Impl "use dispatcher from props" in { implicit val materializer = ActorMaterializer() val s = TestSubscriber.manualProbe[String]() - val ref = Source.actorPublisher(testPublisherProps(testActor, useTestDispatcher = false).withDispatcher("my-dispatcher1")) + val ref = Source + .actorPublisher(testPublisherProps(testActor, useTestDispatcher = false).withDispatcher("my-dispatcher1")) .withAttributes(ActorAttributes.dispatcher("my-dispatcher2")) - .to(Sink.fromSubscriber(s)).run() + .to(Sink.fromSubscriber(s)) + .run() ref ! ThreadName expectMsgType[String] should include("my-dispatcher1") } diff --git a/akka-stream-tests/src/test/scala/akka/stream/actor/ActorSubscriberSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/actor/ActorSubscriberSpec.scala index 3374b4b733..74ebb82656 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/actor/ActorSubscriberSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/actor/ActorSubscriberSpec.scala @@ -143,7 +143,9 @@ class ActorSubscriberSpec extends StreamSpec with ImplicitSender { ref ! "ready" ref ! "ready" ref ! "boom" - (3 to 6) foreach { n => expectMsg(OnNext(n)) } + (3 to 6).foreach { n => + expectMsg(OnNext(n)) + } expectNoMsg(200.millis) ref ! 
"ready" expectMsg(OnNext(7)) @@ -187,7 +189,8 @@ class ActorSubscriberSpec extends StreamSpec with ImplicitSender { } "work with WatermarkRequestStrategy" in { - Source(1 to 17).runWith(Sink.actorSubscriber(requestStrategySubscriberProps(testActor, WatermarkRequestStrategy(highWatermark = 10)))) + Source(1 to 17).runWith( + Sink.actorSubscriber(requestStrategySubscriberProps(testActor, WatermarkRequestStrategy(highWatermark = 10)))) for (n <- 1 to 17) expectMsg(OnNext(n)) expectMsg(OnComplete) } @@ -239,7 +242,7 @@ class ActorSubscriberSpec extends StreamSpec with ImplicitSender { queue += "b" queue += "c" strat.requestDemand(5) should be(2) - ('d' to 'j') foreach { queue += _.toString } + ('d' to 'j').foreach { queue += _.toString } queue.size should be(10) strat.requestDemand(0) should be(0) strat.requestDemand(1) should be(0) diff --git a/akka-stream-tests/src/test/scala/akka/stream/extra/FlowTimedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/extra/FlowTimedSpec.scala index a140c54649..19e6f7caf3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/extra/FlowTimedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/extra/FlowTimedSpec.scala @@ -4,8 +4,8 @@ package akka.stream.extra -import akka.stream.{ ActorMaterializerSettings, ActorMaterializer } -import akka.stream.scaladsl.{ Source, Flow } +import akka.stream.{ ActorMaterializer, ActorMaterializerSettings } +import akka.stream.scaladsl.{ Flow, Source } import akka.stream.scaladsl.Sink import akka.stream.testkit._ import akka.stream.testkit.scaladsl.StreamTestKit._ @@ -16,8 +16,7 @@ class FlowTimedSpec extends StreamSpec with ScriptedTest { import scala.concurrent.duration._ - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -37,15 +36,19 @@ class FlowTimedSpec extends StreamSpec 
with ScriptedTest { val n = 20 val testRuns = 1 to 2 - def script = Script((1 to n) map { x => Seq(x) -> Seq(x) }: _*) - testRuns foreach (_ => runScript(script, settings) { flow => - flow. - map(identity). - timedIntervalBetween(_ % measureBetweenEvery == 0, onInterval = printInfo) - }) + def script = + Script((1 to n).map { x => + Seq(x) -> Seq(x) + }: _*) + testRuns.foreach(_ => + runScript(script, settings) { flow => + flow.map(identity).timedIntervalBetween(_ % measureBetweenEvery == 0, onInterval = printInfo) + }) val expectedNrOfOnIntervalCalls = testRuns.size * ((n / measureBetweenEvery) - 1) // first time has no value to compare to, so skips calling onInterval - 1 to expectedNrOfOnIntervalCalls foreach { _ => testActor.expectMsgType[Duration] } + (1 to expectedNrOfOnIntervalCalls).foreach { _ => + testActor.expectMsgType[Duration] + } } "measure time it takes from start to complete, by wrapping operations" in { @@ -59,12 +62,18 @@ class FlowTimedSpec extends StreamSpec with ScriptedTest { val testRuns = 1 to 3 - def script = Script((1 to n) map { x => Seq(x) -> Seq(x) }: _*) - testRuns foreach (_ => runScript(script, settings) { flow => - flow.timed(_.map(identity), onComplete = printInfo) - }) + def script = + Script((1 to n).map { x => + Seq(x) -> Seq(x) + }: _*) + testRuns.foreach(_ => + runScript(script, settings) { flow => + flow.timed(_.map(identity), onComplete = printInfo) + }) - testRuns foreach { _ => testActor.expectMsgType[Duration] } + testRuns.foreach { _ => + testActor.expectMsgType[Duration] + } testActor.expectNoMsg(1.second) } @@ -98,14 +107,13 @@ class FlowTimedSpec extends StreamSpec with ScriptedTest { // making sure the types come out as expected val flow: Flow[Int, String, _] = - Flow[Int]. - timed(_. - map(_.toDouble). - map(_.toInt). - map(_.toString), duration => probe.ref ! duration). - map { s: String => s + "!" } + Flow[Int].timed(_.map(_.toDouble).map(_.toInt).map(_.toString), duration => probe.ref ! 
duration).map { + s: String => + s + "!" + } - val (flowIn: Subscriber[Int], flowOut: Publisher[String]) = flow.runWith(Source.asSubscriber[Int], Sink.asPublisher[String](false)) + val (flowIn: Subscriber[Int], flowOut: Publisher[String]) = + flow.runWith(Source.asSubscriber[Int], Sink.asPublisher[String](false)) val c1 = TestSubscriber.manualProbe[String]() val c2 = flowOut.subscribe(c1) @@ -115,7 +123,9 @@ class FlowTimedSpec extends StreamSpec with ScriptedTest { val s = c1.expectSubscription() s.request(200) - 0 to 100 foreach { i => c1.expectNext(i.toString + "!") } + (0 to 100).foreach { i => + c1.expectNext(i.toString + "!") + } c1.expectComplete() val duration = probe.expectMsgType[Duration] diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala index 185b5603ef..7ddee57f78 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/GraphStageLogicSpec.scala @@ -115,7 +115,8 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S new GraphStageLogic(shape) { setHandler(shape.in, EagerTerminateInput) setHandler(shape.out, EagerTerminateOutput) - override def preStart(): Unit = readN(shape.in, n)(e => emitMultiple(shape.out, e.iterator, () => completeStage()), (_) => ()) + override def preStart(): Unit = + readN(shape.in, n)(e => emitMultiple(shape.out, e.iterator, () => completeStage()), (_) => ()) } } @@ -127,36 +128,35 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S setHandler(shape.in, EagerTerminateInput) setHandler(shape.out, EagerTerminateOutput) override def preStart(): Unit = - readN(shape.in, n)( - _ => failStage(new IllegalStateException("Shouldn't happen!")), - e => emitMultiple(shape.out, e.iterator, () => completeStage())) + readN(shape.in, n)(_ => failStage(new 
IllegalStateException("Shouldn't happen!")), + e => emitMultiple(shape.out, e.iterator, () => completeStage())) } } "A GraphStageLogic" must { "read N and emit N before completing" in assertAllStagesStopped { - Source(1 to 10).via(ReadNEmitN(2)).runWith(TestSink.probe) - .request(10) - .expectNext(1, 2) - .expectComplete() + Source(1 to 10).via(ReadNEmitN(2)).runWith(TestSink.probe).request(10).expectNext(1, 2).expectComplete() } "read N should not emit if upstream completes before N is sent" in assertAllStagesStopped { - Source(1 to 5).via(ReadNEmitN(6)).runWith(TestSink.probe) - .request(10) - .expectComplete() + Source(1 to 5).via(ReadNEmitN(6)).runWith(TestSink.probe).request(10).expectComplete() } "read N should not emit if upstream fails before N is sent" in assertAllStagesStopped { val error = new IllegalArgumentException("Don't argue like that!") - Source(1 to 5).map(x => if (x > 3) throw error else x).via(ReadNEmitN(6)).runWith(TestSink.probe) + Source(1 to 5) + .map(x => if (x > 3) throw error else x) + .via(ReadNEmitN(6)) + .runWith(TestSink.probe) .request(10) .expectError(error) } "read N should provide elements read if onComplete happens before N elements have been seen" in assertAllStagesStopped { - Source(1 to 5).via(ReadNEmitRestOnComplete(6)).runWith(TestSink.probe) + Source(1 to 5) + .via(ReadNEmitRestOnComplete(6)) + .runWith(TestSink.probe) .request(10) .expectNext(1, 2, 3, 4, 5) .expectComplete() @@ -164,7 +164,9 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S "emit all things before completing" in assertAllStagesStopped { - Source.empty.via(emit1234.named("testStage")).runWith(TestSink.probe) + Source.empty + .via(emit1234.named("testStage")) + .runWith(TestSink.probe) .request(5) .expectNext(1) //emitting with callback gives nondeterminism whether 2 or 3 will be pushed first @@ -264,7 +266,8 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S "give a good error message if in 
handler missing" in { val ex = intercept[IllegalStateException] { - Source.maybe[String] + Source + .maybe[String] .via(new GraphStage[FlowShape[String, String]] { val in = Inlet[String]("in") val out = Outlet[String]("out") @@ -276,14 +279,16 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S } override def toString = "stage-name" - }).runWith(Sink.ignore) + }) + .runWith(Sink.ignore) } ex.getMessage should startWith("No handler defined in stage [stage-name] for in port [in") } "give a good error message if out handler missing" in { val ex = intercept[IllegalStateException] { - Source.maybe[String] + Source + .maybe[String] .via(new GraphStage[FlowShape[String, String]] { val in = Inlet[String]("in") val out = Outlet[String]("out") @@ -308,7 +313,8 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S "give a good error message if out handler missing with downstream boundary" in { val ex = intercept[IllegalStateException] { - Source.maybe[String] + Source + .maybe[String] .via(new GraphStage[FlowShape[String, String]] { val in = Inlet[String]("in") val out = Outlet[String]("out") @@ -323,14 +329,17 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S } override def toString = "stage-name" - }).runWith(Sink.ignore.async) + }) + .runWith(Sink.ignore.async) } ex.getMessage should startWith("No handler defined in stage [stage-name] for out port [out") } "give a good error message if handler missing with downstream publisher" in { val ex = intercept[IllegalStateException] { - Source.maybe[String].async + Source + .maybe[String] + .async .via(new GraphStage[FlowShape[String, String]] { val in = Inlet[String]("in") val out = Outlet[String]("out") @@ -345,14 +354,16 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S } override def toString = "stage-name" - }).runWith(Sink.ignore) + }) + .runWith(Sink.ignore) } ex.getMessage should startWith("No 
handler defined in stage [stage-name] for out port [out") } "give a good error message if handler missing when stage is an island" in { val ex = intercept[IllegalStateException] { - Source.maybe[String] + Source + .maybe[String] .via(new GraphStage[FlowShape[String, String]] { val in = Inlet[String]("in") val out = Outlet[String]("out") @@ -367,7 +378,9 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S } override def toString = "stage-name" - }).async.runWith(Sink.ignore) + }) + .async + .runWith(Sink.ignore) } ex.getMessage should startWith("No handler defined in stage [stage-name] for out port [out") } @@ -375,7 +388,8 @@ class GraphStageLogicSpec extends StreamSpec with GraphInterpreterSpecKit with S "give a good error message if sub source is pushed twice" in { intercept[Exception] { Source.fromGraph(new SubstreamEmit()).async.runWith(Sink.ignore).futureValue - }.getCause.getMessage should startWith("Cannot push port (SubSourceOutlet(subOut)) twice, or before it being pulled") + }.getCause.getMessage should startWith( + "Cannot push port (SubSourceOutlet(subOut)) twice, or before it being pulled") } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/LinearTraversalBuilderSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/LinearTraversalBuilderSpec.scala index 6cfae289f4..856e665ba7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/LinearTraversalBuilderSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/LinearTraversalBuilderSpec.scala @@ -27,8 +27,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "work with a single Source and Sink" in { val builder = - source.traversalBuilder - .append(sink.traversalBuilder, sink.shape, Keep.left) + source.traversalBuilder.append(sink.traversalBuilder, sink.shape, Keep.left) val mat = testMaterialize(builder) @@ -111,8 +110,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "work with a nested Flow chain" in { val nestedFlows 
= - flow1.traversalBuilder - .append(flow2.traversalBuilder, flow2.shape, Keep.left) + flow1.traversalBuilder.append(flow2.traversalBuilder, flow2.shape, Keep.left) val builder = source.traversalBuilder .append(nestedFlows, FlowShape(flow1.in, flow2.out), Keep.left) @@ -131,8 +129,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "work with a nested Flow chain used twice (appended to self)" in { val nestedFlows = - flow1.traversalBuilder - .append(flow2.traversalBuilder, flow2.shape, Keep.left) + flow1.traversalBuilder.append(flow2.traversalBuilder, flow2.shape, Keep.left) val builder = source.traversalBuilder .append(nestedFlows, FlowShape(flow1.in, flow2.out), Keep.left) @@ -166,9 +163,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "work with a two Flows wired back to self" in { val builder = - flow1.traversalBuilder - .append(flow2.traversalBuilder, flow2.shape, Keep.left) - .wire(flow2.out, flow1.in) + flow1.traversalBuilder.append(flow2.traversalBuilder, flow2.shape, Keep.left).wire(flow2.out, flow1.in) val mat = testMaterialize(builder) @@ -181,9 +176,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "work with Flow appended to self then wired back to self" in { val builder = - flow1.traversalBuilder - .append(flow1.traversalBuilder, flow1.shape, Keep.left) - .wire(flow1.out, flow1.in) + flow1.traversalBuilder.append(flow1.traversalBuilder, flow1.shape, Keep.left).wire(flow1.out, flow1.in) val mat = testMaterialize(builder) @@ -222,8 +215,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "be able to be joined with a composite flow" in { val embeddedFlow = - flow1.traversalBuilder - .append(flow2.traversalBuilder, flow2.shape, Keep.left) + flow1.traversalBuilder.append(flow2.traversalBuilder, flow2.shape, Keep.left) val builder = compositeFlow1.traversalBuilder @@ -261,9 +253,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "be able to add a flow to an empty composite and join to itself" in { val builder = - 
TraversalBuilder.empty() - .add(flow1.traversalBuilder, flow1.shape, Keep.left) - .wire(flow1.out, flow1.in) + TraversalBuilder.empty().add(flow1.traversalBuilder, flow1.shape, Keep.left).wire(flow1.out, flow1.in) val mat = testMaterialize(builder) @@ -274,8 +264,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "be able to embed a composite sink in a linear traversal" in { val builder = - source.traversalBuilder - .append(compositeSink.traversalBuilder, compositeSink.shape, Keep.left) + source.traversalBuilder.append(compositeSink.traversalBuilder, compositeSink.shape, Keep.left) val mat = testMaterialize(builder) @@ -305,7 +294,8 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "be able to embed a composite sink with an irregular wiring" in { val sinkBuilder = - TraversalBuilder.empty() + TraversalBuilder + .empty() .add(compositeFlow2.traversalBuilder, compositeFlow2.shape, Keep.left) .add(compositeSink.traversalBuilder, compositeSink.shape, Keep.left) .add(compositeFlow1.traversalBuilder, compositeFlow1.shape, Keep.left) @@ -313,8 +303,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { .wire(compositeFlow1.out, compositeFlow2.in) val builder = - source.traversalBuilder - .append(sinkBuilder, SinkShape(compositeFlow1.in), Keep.left) + source.traversalBuilder.append(sinkBuilder, SinkShape(compositeFlow1.in), Keep.left) val mat = testMaterialize(builder) @@ -382,8 +371,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val shape = FlowShape(compositeFlow1.in, flow1.out) val embeddedBuilder = - LinearTraversalBuilder.empty() - .append(compositeBuilder, shape, Keep.left) + LinearTraversalBuilder.empty().append(compositeBuilder, shape, Keep.left) val builder = source.traversalBuilder @@ -416,8 +404,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "properly propagate materialized value with Keep.left" in { val builder = - source.traversalBuilder - .append(sink.traversalBuilder, sink.shape, Keep.left) + 
source.traversalBuilder.append(sink.traversalBuilder, sink.shape, Keep.left) val mat = testMaterialize(builder) @@ -426,7 +413,8 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "keep mapped materialized value of empty builder" in { val builder = - LinearTraversalBuilder.empty() + LinearTraversalBuilder + .empty() .transformMat((_: Any) => "NOTUSED") .append(source.traversalBuilder, source.shape, Keep.left) .append(sink.traversalBuilder, sink.shape, Keep.left) @@ -438,8 +426,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "properly propagate materialized value with Keep.right" in { val builder = - source.traversalBuilder - .append(sink.traversalBuilder, sink.shape, Keep.right) + source.traversalBuilder.append(sink.traversalBuilder, sink.shape, Keep.right) val mat = testMaterialize(builder) @@ -448,8 +435,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "properly propagate materialized value with Keep.both" in { val builder = - source.traversalBuilder - .append(sink.traversalBuilder, sink.shape, Keep.both) + source.traversalBuilder.append(sink.traversalBuilder, sink.shape, Keep.both) val mat = testMaterialize(builder) @@ -488,7 +474,8 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "properly propagate materialized value with Keep.right with composite Source as start" in { val builder = - LinearTraversalBuilder.empty() + LinearTraversalBuilder + .empty() .append(compositeSource.traversalBuilder, compositeSource.shape, Keep.right) .append(flow1.traversalBuilder, flow1.shape, Keep.left) .append(sink.traversalBuilder, sink.shape, Keep.left) @@ -511,7 +498,8 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "properly propagate materialized value with Keep.both and all composite" in { val builder = - LinearTraversalBuilder.empty() + LinearTraversalBuilder + .empty() .append(compositeSource.traversalBuilder, compositeSource.shape, Keep.both) .append(compositeFlow1.traversalBuilder, compositeFlow1.shape, Keep.both) 
.append(compositeSink.traversalBuilder, compositeSink.shape, Keep.both) @@ -564,8 +552,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "properly map materialized value (nested)" in { val flowBuilder = - flow1.traversalBuilder - .transformMat("M1: " + (_: String)) + flow1.traversalBuilder.transformMat("M1: " + (_: String)) val builder = source.traversalBuilder .append(flowBuilder, flow1.shape, Keep.right) @@ -584,10 +571,9 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.attributesAssignments should ===(List( - sink -> (Attributes.name("test") and Attributes.name("testSink")), - source -> (Attributes.name("test") and Attributes.name("testSource")) - )) + mat.attributesAssignments should ===( + List(sink -> (Attributes.name("test") and Attributes.name("testSink")), + source -> (Attributes.name("test") and Attributes.name("testSource")))) } "properly accumulate attributes in chain" in { @@ -598,10 +584,9 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.attributesAssignments should ===(List( - sink -> (Attributes.name("test") and Attributes.name("testSink")), - source -> (Attributes.name("test") and Attributes.name("source")) - )) + mat.attributesAssignments should ===( + List(sink -> (Attributes.name("test") and Attributes.name("testSink")), + source -> (Attributes.name("test") and Attributes.name("source")))) } "overwrite last attributes until a new module is added" in { @@ -614,10 +599,9 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.attributesAssignments should ===(List( - sink -> (Attributes.name("test2") and Attributes.name("testSink")), - source -> (Attributes.name("test2") and Attributes.name("source2")) - )) + mat.attributesAssignments should ===( + List(sink -> (Attributes.name("test2") and Attributes.name("testSink")), + source -> (Attributes.name("test2") and Attributes.name("source2")))) } "propagate 
attributes to embedded linear sink and source" in { @@ -629,10 +613,9 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.attributesAssignments should ===(List( - sink -> (Attributes.name("test") and Attributes.name("sink")), - source -> (Attributes.name("test") and Attributes.name("source")) - )) + mat.attributesAssignments should ===( + List(sink -> (Attributes.name("test") and Attributes.name("sink")), + source -> (Attributes.name("test") and Attributes.name("source")))) } "propagate attributes to embedded linear flow" in { @@ -646,11 +629,10 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.attributesAssignments should ===(List( - sink -> (Attributes.name("test") and Attributes.name("sink")), - flow1 -> (Attributes.name("test") and Attributes.name("compositeSource") and Attributes.name("flow")), - source -> (Attributes.name("test") and Attributes.name("compositeSource") and Attributes.name("source")) - )) + mat.attributesAssignments should ===( + List(sink -> (Attributes.name("test") and Attributes.name("sink")), + flow1 -> (Attributes.name("test") and Attributes.name("compositeSource") and Attributes.name("flow")), + source -> (Attributes.name("test") and Attributes.name("compositeSource") and Attributes.name("source")))) } "propagate attributes to embedded composite sink" in { @@ -663,21 +645,19 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.attributesAssignments should ===(List( - compositeSink -> (Attributes.name("test") and Attributes.name("sink")), - flow1 -> (Attributes.name("test") and Attributes.name("flow")), - source -> (Attributes.name("test") and Attributes.name("source")) - )) + mat.attributesAssignments should ===( + List(compositeSink -> (Attributes.name("test") and Attributes.name("sink")), + flow1 -> (Attributes.name("test") and Attributes.name("flow")), + source -> (Attributes.name("test") and 
Attributes.name("source")))) } "propagate attributes to embedded composite source" in { val builder = - LinearTraversalBuilder.empty() - .append( - compositeSource.traversalBuilder - .setAttributes(Attributes.name("source")), - compositeSource.shape, - Keep.left) + LinearTraversalBuilder + .empty() + .append(compositeSource.traversalBuilder.setAttributes(Attributes.name("source")), + compositeSource.shape, + Keep.left) .setAttributes(Attributes.name("source-outer")) .append(flow1.traversalBuilder.setAttributes(Attributes.name("flow")), flow1.shape, Keep.left) .append(sink.traversalBuilder.setAttributes(Attributes.name("sink")), compositeSink.shape, Keep.left) @@ -685,33 +665,35 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.attributesAssignments should ===(List( - sink -> (Attributes.name("test") and Attributes.name("sink")), - flow1 -> (Attributes.name("test") and Attributes.name("flow")), - compositeSource -> (Attributes.name("test") and Attributes.name("source-outer") and Attributes.name("source")) - )) + mat.attributesAssignments should ===( + List(sink -> (Attributes.name("test") and Attributes.name("sink")), + flow1 -> (Attributes.name("test") and Attributes.name("flow")), + compositeSource -> (Attributes.name("test") and Attributes.name("source-outer") and Attributes.name( + "source")))) } "propagate attributes to embedded composite flow" in { val builder = source.traversalBuilder .setAttributes(Attributes.name("source")) - .append(compositeFlow1.traversalBuilder.setAttributes(Attributes.name("flow")), compositeFlow1.shape, Keep.left) + .append(compositeFlow1.traversalBuilder.setAttributes(Attributes.name("flow")), + compositeFlow1.shape, + Keep.left) .append(sink.traversalBuilder.setAttributes(Attributes.name("sink")), compositeSink.shape, Keep.left) .setAttributes(Attributes.name("test")) val mat = testMaterialize(builder) - mat.attributesAssignments should ===(List( - sink -> (Attributes.name("test") and 
Attributes.name("sink")), - compositeFlow1 -> (Attributes.name("test") and Attributes.name("flow")), - source -> (Attributes.name("test") and Attributes.name("source")) - )) + mat.attributesAssignments should ===( + List(sink -> (Attributes.name("test") and Attributes.name("sink")), + compositeFlow1 -> (Attributes.name("test") and Attributes.name("flow")), + source -> (Attributes.name("test") and Attributes.name("source")))) } "properly append a Source to empty linear" in { val builder = - LinearTraversalBuilder.empty() + LinearTraversalBuilder + .empty() .append(source.traversalBuilder, source.shape, Keep.right) .append(sink.traversalBuilder, sink.shape, Keep.right) @@ -724,12 +706,10 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "properly append a Sink to empty linear" in { val nestedSink = - LinearTraversalBuilder.empty() - .append(sink.traversalBuilder, sink.shape, Keep.right) + LinearTraversalBuilder.empty().append(sink.traversalBuilder, sink.shape, Keep.right) val builder = - source.traversalBuilder - .append(nestedSink, sink.shape, Keep.right) + source.traversalBuilder.append(nestedSink, sink.shape, Keep.right) val mat = testMaterialize(builder) @@ -740,8 +720,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "properly append a Flow to empty linear" in { val nestedFlow = - LinearTraversalBuilder.empty() - .append(flow1.traversalBuilder, flow1.shape, Keep.right) + LinearTraversalBuilder.empty().append(flow1.traversalBuilder, flow1.shape, Keep.right) val builder = source.traversalBuilder @@ -759,7 +738,8 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "properly append a composite Source to empty linear" in { val builder = - LinearTraversalBuilder.empty() + LinearTraversalBuilder + .empty() .append(compositeSource.traversalBuilder, compositeSource.shape, Keep.right) .append(sink.traversalBuilder, sink.shape, Keep.right) @@ -772,12 +752,10 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "properly append a composite Sink to empty 
linear" in { val nestedSink = - LinearTraversalBuilder.empty() - .append(compositeSink.traversalBuilder, compositeSink.shape, Keep.right) + LinearTraversalBuilder.empty().append(compositeSink.traversalBuilder, compositeSink.shape, Keep.right) val builder = - source.traversalBuilder - .append(nestedSink, compositeSink.shape, Keep.right) + source.traversalBuilder.append(nestedSink, compositeSink.shape, Keep.right) val mat = testMaterialize(builder) @@ -788,8 +766,7 @@ class LinearTraversalBuilderSpec extends AkkaSpec { "properly append a composite Flow to empty linear" in { val nestedFlow = - LinearTraversalBuilder.empty() - .append(compositeFlow1.traversalBuilder, compositeFlow1.shape, Keep.right) + LinearTraversalBuilder.empty().append(compositeFlow1.traversalBuilder, compositeFlow1.shape, Keep.right) val builder = source.traversalBuilder @@ -818,12 +795,11 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.islandAssignments should ===(List( - (sink, Attributes.none, TestDefaultIsland), - (flow2, Attributes.none, TestDefaultIsland), - (flow1, Attributes.name("island2"), TestIsland2), - (source, Attributes.name("island2") and Attributes.name("island1"), TestIsland1) - )) + mat.islandAssignments should ===( + List((sink, Attributes.none, TestDefaultIsland), + (flow2, Attributes.none, TestDefaultIsland), + (flow1, Attributes.name("island2"), TestIsland2), + (source, Attributes.name("island2") and Attributes.name("island1"), TestIsland1))) } "properly nest flow with islands" in { @@ -842,12 +818,11 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.islandAssignments should ===(List( - (sink, Attributes.none, TestDefaultIsland), - (flow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), - (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), - (source, Attributes.none, TestDefaultIsland) - )) + mat.islandAssignments 
should ===( + List((sink, Attributes.none, TestDefaultIsland), + (flow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), + (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), + (source, Attributes.none, TestDefaultIsland))) } "properly nest flow with island inside another island" in { @@ -867,19 +842,16 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.islandAssignments should ===(List( - (sink, Attributes.none, TestDefaultIsland), - (flow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), - (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), - (source, Attributes.name("wholeThing"), TestIsland2) - )) + mat.islandAssignments should ===( + List((sink, Attributes.none, TestDefaultIsland), + (flow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), + (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), + (source, Attributes.name("wholeThing"), TestIsland2))) } "properly nest flow with islands starting from linear enclosing a composite" in { val nestedFlow = - flow2.traversalBuilder - .setAttributes(Attributes.name("nestedFlow")) - .makeIsland(TestIsland1) + flow2.traversalBuilder.setAttributes(Attributes.name("nestedFlow")).makeIsland(TestIsland1) val builder = source.traversalBuilder @@ -891,12 +863,11 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.islandAssignments should ===(List( - (sink, Attributes.none, TestDefaultIsland), - (flow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), - (compositeFlow1, Attributes.name("wholeThing"), TestIsland2), - (source, Attributes.name("wholeThing"), TestIsland2) - )) + mat.islandAssignments should ===( + List((sink, Attributes.none, TestDefaultIsland), + (flow2, Attributes.name("wholeThing") and 
Attributes.name("nestedFlow"), TestIsland1), + (compositeFlow1, Attributes.name("wholeThing"), TestIsland2), + (source, Attributes.name("wholeThing"), TestIsland2))) } "properly nest flow containing composite with islands" in { @@ -914,12 +885,11 @@ class LinearTraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.islandAssignments should ===(List( - (sink, Attributes.none, TestDefaultIsland), - (compositeFlow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), - (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), - (source, Attributes.none, TestDefaultIsland) - )) + mat.islandAssignments should ===( + List((sink, Attributes.none, TestDefaultIsland), + (compositeFlow2, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), + (flow1, Attributes.name("wholeThing") and Attributes.name("nestedFlow"), TestIsland1), + (source, Attributes.none, TestDefaultIsland))) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/ResizableMultiReaderRingBufferSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/ResizableMultiReaderRingBufferSpec.scala index 86afed03d0..c4f748e76e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/ResizableMultiReaderRingBufferSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/ResizableMultiReaderRingBufferSpec.scala @@ -182,14 +182,17 @@ class ResizableMultiReaderRingBufferSpec extends WordSpec with Matchers { } } - class TestBuffer(iSize: Int, mSize: Int, cursors: Cursors) extends ResizableMultiReaderRingBuffer[Int](iSize, mSize, cursors) { + class TestBuffer(iSize: Int, mSize: Int, cursors: Cursors) + extends ResizableMultiReaderRingBuffer[Int](iSize, mSize, cursors) { def inspect: String = underlyingArray.map(x => if (x == null) 0 else x).mkString("", " ", " " + toString.dropWhile(_ != '(')) } - class Test(iSize: Int, mSize: Int, cursorCount: Int) extends TestBuffer(iSize, mSize, new 
SimpleCursors(cursorCount)) { + class Test(iSize: Int, mSize: Int, cursorCount: Int) + extends TestBuffer(iSize, mSize, new SimpleCursors(cursorCount)) { def read(cursorIx: Int): Integer = - try read(cursors.cursors(cursorIx)) catch { case NothingToReadException => null } + try read(cursors.cursors(cursorIx)) + catch { case NothingToReadException => null } } class SimpleCursors(cursorCount: Int) extends Cursors { diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/StreamLayoutSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/StreamLayoutSpec.scala index e69de29bb2..8b13789179 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/StreamLayoutSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/StreamLayoutSpec.scala @@ -0,0 +1 @@ + diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala index 13db3179cc..cfc897719f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/TimeoutsSpec.scala @@ -23,28 +23,21 @@ class TimeoutsSpec extends StreamSpec { "InitialTimeout" must { "pass through elements unmodified" in assertAllStagesStopped { - Await.result( - Source(1 to 100).initialTimeout(2.seconds).grouped(200).runWith(Sink.head), - 3.seconds) should ===(1 to 100) + Await.result(Source(1 to 100).initialTimeout(2.seconds).grouped(200).runWith(Sink.head), 3.seconds) should ===( + 1 to 100) } "pass through error unmodified" in assertAllStagesStopped { a[TE] shouldBe thrownBy { Await.result( - Source(1 to 100) - .concat(Source.failed(TE("test"))) - .initialTimeout(2.seconds) - .grouped(200) - .runWith(Sink.head), + Source(1 to 100).concat(Source.failed(TE("test"))).initialTimeout(2.seconds).grouped(200).runWith(Sink.head), 3.seconds) } } "fail if no initial element passes until timeout" in assertAllStagesStopped { val downstreamProbe = 
TestSubscriber.probe[Int]() - Source.maybe[Int] - .initialTimeout(1.second) - .runWith(Sink.fromSubscriber(downstreamProbe)) + Source.maybe[Int].initialTimeout(1.second).runWith(Sink.fromSubscriber(downstreamProbe)) downstreamProbe.expectSubscription() downstreamProbe.expectNoMsg(500.millis) @@ -58,27 +51,25 @@ class TimeoutsSpec extends StreamSpec { "CompletionTimeout" must { "pass through elements unmodified" in assertAllStagesStopped { - Await.result( - Source(1 to 100).completionTimeout(2.seconds).grouped(200).runWith(Sink.head), - 3.seconds) should ===(1 to 100) + Await.result(Source(1 to 100).completionTimeout(2.seconds).grouped(200).runWith(Sink.head), 3.seconds) should ===( + 1 to 100) } "pass through error unmodified" in assertAllStagesStopped { a[TE] shouldBe thrownBy { - Await.result( - Source(1 to 100).concat(Source.failed(TE("test"))) - .completionTimeout(2.seconds) - .grouped(200).runWith(Sink.head), - 3.seconds) + Await.result(Source(1 to 100) + .concat(Source.failed(TE("test"))) + .completionTimeout(2.seconds) + .grouped(200) + .runWith(Sink.head), + 3.seconds) } } "fail if not completed until timeout" in assertAllStagesStopped { val upstreamProbe = TestPublisher.probe[Int]() val downstreamProbe = TestSubscriber.probe[Int]() - Source.fromPublisher(upstreamProbe) - .completionTimeout(2.seconds) - .runWith(Sink.fromSubscriber(downstreamProbe)) + Source.fromPublisher(upstreamProbe).completionTimeout(2.seconds).runWith(Sink.fromSubscriber(downstreamProbe)) upstreamProbe.sendNext(1) downstreamProbe.requestNext(1) @@ -97,17 +88,14 @@ class TimeoutsSpec extends StreamSpec { "IdleTimeout" must { "pass through elements unmodified" in assertAllStagesStopped { - Await.result( - Source(1 to 100).idleTimeout(2.seconds).grouped(200).runWith(Sink.head), - 3.seconds) should ===(1 to 100) + Await.result(Source(1 to 100).idleTimeout(2.seconds).grouped(200).runWith(Sink.head), 3.seconds) should ===( + 1 to 100) } "pass through error unmodified" in 
assertAllStagesStopped { a[TE] shouldBe thrownBy { Await.result( - Source(1 to 100).concat(Source.failed(TE("test"))) - .idleTimeout(2.seconds) - .grouped(200).runWith(Sink.head), + Source(1 to 100).concat(Source.failed(TE("test"))).idleTimeout(2.seconds).grouped(200).runWith(Sink.head), 3.seconds) } } @@ -115,9 +103,7 @@ class TimeoutsSpec extends StreamSpec { "fail if time between elements is too large" in assertAllStagesStopped { val upstreamProbe = TestPublisher.probe[Int]() val downstreamProbe = TestSubscriber.probe[Int]() - Source.fromPublisher(upstreamProbe) - .idleTimeout(1.seconds) - .runWith(Sink.fromSubscriber(downstreamProbe)) + Source.fromPublisher(upstreamProbe).idleTimeout(1.seconds).runWith(Sink.fromSubscriber(downstreamProbe)) // Two seconds in overall, but won't timeout until time between elements is large enough // (i.e. this works differently from completionTimeout) @@ -136,15 +122,14 @@ class TimeoutsSpec extends StreamSpec { "BackpressureTimeout" must { "pass through elements unmodified" in assertAllStagesStopped { - Await.result(Source(1 to 100).backpressureTimeout(1.second).grouped(200).runWith(Sink.head), 3.seconds) should ===(1 to 100) + Await.result(Source(1 to 100).backpressureTimeout(1.second).grouped(200).runWith(Sink.head), 3.seconds) should ===( + 1 to 100) } "succeed if subscriber demand arrives" in assertAllStagesStopped { val subscriber = TestSubscriber.probe[Int]() - Source(1 to 4) - .backpressureTimeout(1.second) - .runWith(Sink.fromSubscriber(subscriber)) + Source(1 to 4).backpressureTimeout(1.second).runWith(Sink.fromSubscriber(subscriber)) for (i <- 1 to 3) { subscriber.requestNext(i) @@ -159,9 +144,7 @@ class TimeoutsSpec extends StreamSpec { val publisher = TestPublisher.probe[String]() val subscriber = TestSubscriber.probe[String]() - Source.fromPublisher(publisher) - .backpressureTimeout(1.second) - .runWith(Sink.fromSubscriber(subscriber)) + 
Source.fromPublisher(publisher).backpressureTimeout(1.second).runWith(Sink.fromSubscriber(subscriber)) subscriber.request(2) @@ -181,9 +164,7 @@ class TimeoutsSpec extends StreamSpec { val publisher = TestPublisher.probe[String]() val subscriber = TestSubscriber.probe[String]() - Source.fromPublisher(publisher) - .backpressureTimeout(1.second) - .runWith(Sink.fromSubscriber(subscriber)) + Source.fromPublisher(publisher).backpressureTimeout(1.second).runWith(Sink.fromSubscriber(subscriber)) subscriber.request(16) subscriber.expectNoMsg(2.second) @@ -196,9 +177,7 @@ class TimeoutsSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.probe[Int]() - Source.fromPublisher(publisher) - .backpressureTimeout(1.second) - .runWith(Sink.fromSubscriber(subscriber)) + Source.fromPublisher(publisher).backpressureTimeout(1.second).runWith(Sink.fromSubscriber(subscriber)) subscriber.request(1) publisher.sendNext(1) @@ -213,9 +192,7 @@ class TimeoutsSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.probe[Int]() - Source.fromPublisher(publisher) - .backpressureTimeout(1.second) - .runWith(Sink.fromSubscriber(subscriber)) + Source.fromPublisher(publisher).backpressureTimeout(1.second).runWith(Sink.fromSubscriber(subscriber)) subscriber.expectSubscription() @@ -228,9 +205,7 @@ class TimeoutsSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.probe[Int]() - Source.fromPublisher(publisher) - .backpressureTimeout(1.second) - .runWith(Sink.fromSubscriber(subscriber)) + Source.fromPublisher(publisher).backpressureTimeout(1.second).runWith(Sink.fromSubscriber(subscriber)) subscriber.request(2) publisher.sendNext(1) @@ -249,9 +224,8 @@ class TimeoutsSpec extends StreamSpec { "not signal error in simple loopback case and pass through elements unmodified" in assertAllStagesStopped { val timeoutIdentity = BidiFlow.bidirectionalIdleTimeout[Int, 
Int](2.seconds).join(Flow[Int]) - Await.result( - Source(1 to 100).via(timeoutIdentity).grouped(200).runWith(Sink.head), - 3.seconds) should ===(1 to 100) + Await.result(Source(1 to 100).via(timeoutIdentity).grouped(200).runWith(Sink.head), 3.seconds) should ===( + 1 to 100) } "not signal error if traffic is one-way" in assertAllStagesStopped { @@ -288,15 +262,17 @@ class TimeoutsSpec extends StreamSpec { val downWrite = TestPublisher.probe[Int]() val downRead = TestSubscriber.probe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ - val timeoutStage = b.add(BidiFlow.bidirectionalIdleTimeout[String, Int](2.seconds)) - Source.fromPublisher(upWrite) ~> timeoutStage.in1 - timeoutStage.out1 ~> Sink.fromSubscriber(downRead) - Sink.fromSubscriber(upRead) <~ timeoutStage.out2 - timeoutStage.in2 <~ Source.fromPublisher(downWrite) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + import GraphDSL.Implicits._ + val timeoutStage = b.add(BidiFlow.bidirectionalIdleTimeout[String, Int](2.seconds)) + Source.fromPublisher(upWrite) ~> timeoutStage.in1 + timeoutStage.out1 ~> Sink.fromSubscriber(downRead) + Sink.fromSubscriber(upRead) <~ timeoutStage.out2 + timeoutStage.in2 <~ Source.fromPublisher(downWrite) + ClosedShape + }) + .run() // Request enough for the whole test upRead.request(100) @@ -336,15 +312,17 @@ class TimeoutsSpec extends StreamSpec { val downWrite = TestPublisher.probe[Int]() val downRead = TestSubscriber.probe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ - val timeoutStage = b.add(BidiFlow.bidirectionalIdleTimeout[String, Int](2.seconds)) - Source.fromPublisher(upWrite) ~> timeoutStage.in1 - timeoutStage.out1 ~> Sink.fromSubscriber(downRead) - Sink.fromSubscriber(upRead) <~ timeoutStage.out2 - timeoutStage.in2 <~ Source.fromPublisher(downWrite) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { 
implicit b => + import GraphDSL.Implicits._ + val timeoutStage = b.add(BidiFlow.bidirectionalIdleTimeout[String, Int](2.seconds)) + Source.fromPublisher(upWrite) ~> timeoutStage.in1 + timeoutStage.out1 ~> Sink.fromSubscriber(downRead) + Sink.fromSubscriber(upRead) <~ timeoutStage.out2 + timeoutStage.in2 <~ Source.fromPublisher(downWrite) + ClosedShape + }) + .run() val te = TE("test") diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalBuilderSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalBuilderSpec.scala index 47a7b6eeea..3ca897bf7e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalBuilderSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalBuilderSpec.scala @@ -25,9 +25,7 @@ class TraversalBuilderSpec extends AkkaSpec { "work with a single Source and Sink" in { val builder = - source.traversalBuilder - .add(sink.traversalBuilder, sink.shape, Keep.left) - .wire(source.out, sink.in) + source.traversalBuilder.add(sink.traversalBuilder, sink.shape, Keep.left).wire(source.out, sink.in) val mat = testMaterialize(builder) @@ -39,13 +37,10 @@ class TraversalBuilderSpec extends AkkaSpec { "work with a nested Source and Sink" in { val nestedBuilder = - TraversalBuilder.empty() - .add(source.traversalBuilder, source.shape, Keep.left) + TraversalBuilder.empty().add(source.traversalBuilder, source.shape, Keep.left) val builder = - sink.traversalBuilder - .add(nestedBuilder, source.shape, Keep.left) - .wire(source.out, sink.in) + sink.traversalBuilder.add(nestedBuilder, source.shape, Keep.left).wire(source.out, sink.in) val mat = testMaterialize(builder) @@ -58,9 +53,8 @@ class TraversalBuilderSpec extends AkkaSpec { val remappedShape = SourceShape(Outlet[Any]("remapped.out")) remappedShape.out.mappedTo = source.out - val builder = sink.traversalBuilder - .add(source.traversalBuilder, remappedShape, Keep.left) - .wire(remappedShape.out, sink.in) + val builder = + 
sink.traversalBuilder.add(source.traversalBuilder, remappedShape, Keep.left).wire(remappedShape.out, sink.in) val mat = testMaterialize(builder) @@ -155,9 +149,7 @@ class TraversalBuilderSpec extends AkkaSpec { val nestedFlowShape = FlowShape(flow1.in, flow2.out) val nestedFlows = - flow1.traversalBuilder - .add(flow2.traversalBuilder, flow2.shape, Keep.left) - .wire(flow1.out, flow2.in) + flow1.traversalBuilder.add(flow2.traversalBuilder, flow2.shape, Keep.left).wire(flow1.out, flow2.in) val builder = source.traversalBuilder .add(nestedFlows, nestedFlowShape, Keep.left) @@ -182,9 +174,7 @@ class TraversalBuilderSpec extends AkkaSpec { importedFlowShape.out.mappedTo = flow2.out val nestedFlows = - flow1.traversalBuilder - .add(flow2.traversalBuilder, flow2.shape, Keep.left) - .wire(flow1.out, flow2.in) + flow1.traversalBuilder.add(flow2.traversalBuilder, flow2.shape, Keep.left).wire(flow1.out, flow2.in) val builder = source.traversalBuilder .add(nestedFlows, importedFlowShape, Keep.left) @@ -225,9 +215,7 @@ class TraversalBuilderSpec extends AkkaSpec { "properly propagate materialized value with Keep.left" in { val builder = - source.traversalBuilder - .add(sink.traversalBuilder, sink.shape, Keep.left) - .wire(source.out, sink.in) + source.traversalBuilder.add(sink.traversalBuilder, sink.shape, Keep.left).wire(source.out, sink.in) val mat = testMaterialize(builder) @@ -236,7 +224,8 @@ class TraversalBuilderSpec extends AkkaSpec { "keep mapped materialized value of empty builder" in { val builder = - TraversalBuilder.empty() + TraversalBuilder + .empty() .transformMat((_: Any) => "NOTUSED") .add(source.traversalBuilder, source.shape, Keep.left) .add(sink.traversalBuilder, sink.shape, Keep.left) @@ -249,9 +238,7 @@ class TraversalBuilderSpec extends AkkaSpec { "properly propagate materialized value with Keep.right" in { val builder = - source.traversalBuilder - .add(sink.traversalBuilder, sink.shape, Keep.right) - .wire(source.out, sink.in) + 
source.traversalBuilder.add(sink.traversalBuilder, sink.shape, Keep.right).wire(source.out, sink.in) val mat = testMaterialize(builder) @@ -260,9 +247,7 @@ class TraversalBuilderSpec extends AkkaSpec { "properly propagate materialized value with Keep.both" in { val builder = - source.traversalBuilder - .add(sink.traversalBuilder, sink.shape, Keep.both) - .wire(source.out, sink.in) + source.traversalBuilder.add(sink.traversalBuilder, sink.shape, Keep.both).wire(source.out, sink.in) val mat = testMaterialize(builder) @@ -344,8 +329,7 @@ class TraversalBuilderSpec extends AkkaSpec { "properly map materialized value (nested)" in { val flowBuilder = - flow1.traversalBuilder - .transformMat("M1: " + (_: String)) + flow1.traversalBuilder.transformMat("M1: " + (_: String)) val builder = source.traversalBuilder .add(flowBuilder, flow1.shape, Keep.right) @@ -367,10 +351,9 @@ class TraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.attributesAssignments should ===(List( - source -> (Attributes.name("test") and Attributes.name("testSource")), - sink -> (Attributes.name("test") and Attributes.name("testSink")) - )) + mat.attributesAssignments should ===( + List(source -> (Attributes.name("test") and Attributes.name("testSource")), + sink -> (Attributes.name("test") and Attributes.name("testSink")))) } "overwrite last attributes until embedded in other builder" in { @@ -381,23 +364,22 @@ class TraversalBuilderSpec extends AkkaSpec { .setAttributes(Attributes.name("test2")) val builder = - TraversalBuilder.empty() + TraversalBuilder + .empty() .add(innerBuilder, ClosedShape, Keep.left) .setAttributes(Attributes.name("outer")) .setAttributes(Attributes.name("outer2")) val mat = testMaterialize(builder) - mat.attributesAssignments should ===(List( - source -> (Attributes.name("outer2") and Attributes.name("test2") and Attributes.name("testSource")), - sink -> (Attributes.name("outer2") and Attributes.name("test2") and Attributes.name("testSinkB")) - )) 
+ mat.attributesAssignments should ===( + List(source -> (Attributes.name("outer2") and Attributes.name("test2") and Attributes.name("testSource")), + sink -> (Attributes.name("outer2") and Attributes.name("test2") and Attributes.name("testSinkB")))) } "propagate attributes to embedded flow" in { val flowBuilder = - flow1.traversalBuilder - .setAttributes(Attributes.name("flow")) + flow1.traversalBuilder.setAttributes(Attributes.name("flow")) val builder = source.traversalBuilder .add(flowBuilder, flow1.shape, Keep.left) @@ -408,18 +390,15 @@ class TraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.attributesAssignments should ===(List( - source -> (Attributes.name("test") and Attributes.name("testSource")), - flow1 -> (Attributes.name("test") and Attributes.name("flow")), - sink -> (Attributes.name("test") and Attributes.name("testSink")) - )) + mat.attributesAssignments should ===( + List(source -> (Attributes.name("test") and Attributes.name("testSource")), + flow1 -> (Attributes.name("test") and Attributes.name("flow")), + sink -> (Attributes.name("test") and Attributes.name("testSink")))) } "properly track embedded island and its attributes" in { val flowBuilder = - flow1.traversalBuilder - .makeIsland(TestIsland1) - .setAttributes(Attributes.name("flow")) + flow1.traversalBuilder.makeIsland(TestIsland1).setAttributes(Attributes.name("flow")) val builder = source.traversalBuilder .add(flowBuilder, flow1.shape, Keep.left) @@ -430,19 +409,15 @@ class TraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.islandAssignments should ===(List( - (source, Attributes.none, TestDefaultIsland), - (flow1, Attributes.name("test") and Attributes.name("flow"), TestIsland1), - (sink, Attributes.none, TestDefaultIsland) - )) + mat.islandAssignments should ===( + List((source, Attributes.none, TestDefaultIsland), + (flow1, Attributes.name("test") and Attributes.name("flow"), TestIsland1), + (sink, Attributes.none, 
TestDefaultIsland))) } "properly ignore redundant island assignment" in { val flowBuilder = - flow1.traversalBuilder - .makeIsland(TestIsland1) - .makeIsland(TestIsland2) - .setAttributes(Attributes.name("flow")) + flow1.traversalBuilder.makeIsland(TestIsland1).makeIsland(TestIsland2).setAttributes(Attributes.name("flow")) val builder = source.traversalBuilder .add(flowBuilder, flow1.shape, Keep.left) @@ -453,11 +428,10 @@ class TraversalBuilderSpec extends AkkaSpec { val mat = testMaterialize(builder) - mat.islandAssignments should ===(List( - (source, Attributes.none, TestDefaultIsland), - (flow1, Attributes.name("test") and Attributes.name("flow"), TestIsland1), - (sink, Attributes.none, TestDefaultIsland) - )) + mat.islandAssignments should ===( + List((source, Attributes.none, TestDefaultIsland), + (flow1, Attributes.name("test") and Attributes.name("flow"), TestIsland1), + (sink, Attributes.none, TestDefaultIsland))) } //TODO: Dummy test cases just for smoke-testing. Should be removed. 
@@ -505,10 +479,7 @@ class TraversalBuilderSpec extends AkkaSpec { "islands 3" in { implicit val mat = PhasedFusingActorMaterializer() val sub = TestSubscriber.probe[Int]() - Source - .repeat(1) - .take(10) - .runWith(Sink.fromSubscriber(sub)) + Source.repeat(1).take(10).runWith(Sink.fromSubscriber(sub)) sub.request(10) sub.expectNextN(List(1, 1, 1, 1, 1, 1, 1, 1, 1, 1)) diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalTestUtils.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalTestUtils.scala index 1efdbbea9b..06552c6270 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalTestUtils.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/TraversalTestUtils.scala @@ -68,13 +68,12 @@ object TraversalTestUtils { override def toString = s"TestFlow$tag" } - class MaterializationResult( - val connections: Int, - val inlets: Array[InPort], - val outlets: Array[OutPort], - val matValue: Any, - val attributesAssignments: List[(AtomicModule[Shape, Any], Attributes)], - val islandAssignments: List[(AtomicModule[Shape, Any], Attributes, IslandTag)]) { + class MaterializationResult(val connections: Int, + val inlets: Array[InPort], + val outlets: Array[OutPort], + val matValue: Any, + val attributesAssignments: List[(AtomicModule[Shape, Any], Attributes)], + val islandAssignments: List[(AtomicModule[Shape, Any], Attributes, IslandTag)]) { override def toString = { outlets.iterator.zip(inlets.iterator).mkString("connections: ", ", ", "") @@ -174,7 +173,12 @@ object TraversalTestUtils { } } - new MaterializationResult(connections, inlets, outlets, matValueStack.peekLast(), attributesResult.reverse, islandsResult.reverse) + new MaterializationResult(connections, + inlets, + outlets, + matValueStack.peekLast(), + attributesResult.reverse, + islandsResult.reverse) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala index fc11f83681..3868ef62bd 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ActorGraphInterpreterSpec.scala @@ -29,23 +29,15 @@ class ActorGraphInterpreterSpec extends StreamSpec { "be able to interpret a simple identity graph stage" in assertAllStagesStopped { val identity = GraphStages.identity[Int] - Await.result( - Source(1 to 100).via(identity).grouped(200).runWith(Sink.head), - 3.seconds) should ===(1 to 100) + Await.result(Source(1 to 100).via(identity).grouped(200).runWith(Sink.head), 3.seconds) should ===(1 to 100) } "be able to reuse a simple identity graph stage" in assertAllStagesStopped { val identity = GraphStages.identity[Int] - Await.result( - Source(1 to 100) - .via(identity) - .via(identity) - .via(identity) - .grouped(200) - .runWith(Sink.head), - 3.seconds) should ===(1 to 100) + Await.result(Source(1 to 100).via(identity).via(identity).via(identity).grouped(200).runWith(Sink.head), + 3.seconds) should ===(1 to 100) } "be able to interpret a simple bidi stage" in assertAllStagesStopped { @@ -81,11 +73,13 @@ class ActorGraphInterpreterSpec extends StreamSpec { override def toString = "IdentityBidi" } - val identity = BidiFlow.fromGraph(identityBidi).join(Flow[Int].map { x => x }) + val identity = BidiFlow + .fromGraph(identityBidi) + .join(Flow[Int].map { x => + x + }) - Await.result( - Source(1 to 10).via(identity).grouped(100).runWith(Sink.head), - 3.seconds) should ===(1 to 10) + Await.result(Source(1 to 10).via(identity).grouped(100).runWith(Sink.head), 3.seconds) should ===(1 to 10) } @@ -127,11 +121,14 @@ class ActorGraphInterpreterSpec extends StreamSpec { } val identityBidiF = BidiFlow.fromGraph(identityBidi) - val identity = (identityBidiF atop identityBidiF atop identityBidiF).join(Flow[Int].map { x => x }) + val identity = 
identityBidiF + .atop(identityBidiF) + .atop(identityBidiF) + .join(Flow[Int].map { x => + x + }) - Await.result( - Source(1 to 10).via(identity).grouped(100).runWith(Sink.head), - 3.seconds) should ===(1 to 10) + Await.result(Source(1 to 10).via(identity).grouped(100).runWith(Sink.head), 3.seconds) should ===(1 to 10) } @@ -173,11 +170,14 @@ class ActorGraphInterpreterSpec extends StreamSpec { } val identityBidiF = BidiFlow.fromGraph(identityBidi) - val identity = (identityBidiF atop identityBidiF atop identityBidiF).join(Flow[Int].map { x => x }) + val identity = identityBidiF + .atop(identityBidiF) + .atop(identityBidiF) + .join(Flow[Int].map { x => + x + }) - Await.result( - Source(1 to 10).via(identity).grouped(100).runWith(Sink.head), - 3.seconds) should ===(1 to 10) + Await.result(Source(1 to 10).via(identity).grouped(100).runWith(Sink.head), 3.seconds) should ===(1 to 10) } @@ -223,17 +223,19 @@ class ActorGraphInterpreterSpec extends StreamSpec { val takeAll = Flow[Int].grouped(200).toMat(Sink.head)(Keep.right) - val (f1, f2) = RunnableGraph.fromGraph(GraphDSL.create(takeAll, takeAll)(Keep.both) { implicit b => (out1, out2) => - import GraphDSL.Implicits._ - val bidi = b.add(rotatedBidi) + val (f1, f2) = RunnableGraph + .fromGraph(GraphDSL.create(takeAll, takeAll)(Keep.both) { implicit b => (out1, out2) => + import GraphDSL.Implicits._ + val bidi = b.add(rotatedBidi) - Source(1 to 10) ~> bidi.in1 - out2 <~ bidi.out2 + Source(1 to 10) ~> bidi.in1 + out2 <~ bidi.out2 - bidi.in2 <~ Source(1 to 100) - bidi.out1 ~> out1 - ClosedShape - }).run() + bidi.in2 <~ Source(1 to 100) + bidi.out1 ~> out1 + ClosedShape + }) + .run() Await.result(f1, 3.seconds) should ===(1 to 100) Await.result(f2, 3.seconds) should ===(1 to 10) @@ -287,10 +289,8 @@ class ActorGraphInterpreterSpec extends StreamSpec { */ val failyStage = new GraphStage[FanOutShape2[Int, Int, Int]] { - override val shape: FanOutShape2[Int, Int, Int] = new FanOutShape2( - Inlet[Int]("test.in"), - 
Outlet[Int]("test.out0"), - Outlet[Int]("test.out1")) + override val shape: FanOutShape2[Int, Int, Int] = + new FanOutShape2(Inlet[Int]("test.in"), Outlet[Int]("test.out0"), Outlet[Int]("test.out1")) override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { @@ -311,16 +311,18 @@ class ActorGraphInterpreterSpec extends StreamSpec { val upstream = TestPublisher.probe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ - val faily = b.add(failyStage) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + import GraphDSL.Implicits._ + val faily = b.add(failyStage) - Source.fromPublisher(upstream) ~> faily.in - faily.out0 ~> Sink.fromSubscriber(downstream0) - faily.out1 ~> Sink.fromSubscriber(downstream1) + Source.fromPublisher(upstream) ~> faily.in + faily.out0 ~> Sink.fromSubscriber(downstream0) + faily.out1 ~> Sink.fromSubscriber(downstream1) - ClosedShape - }).run()(noFuzzMat) + ClosedShape + }) + .run()(noFuzzMat) evilLatch.countDown() downstream0.expectSubscriptionAndError(te) @@ -349,9 +351,8 @@ class ActorGraphInterpreterSpec extends StreamSpec { val upstream = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[Int]() - Source.combine( - Source.fromPublisher(filthyPublisher), - Source.fromPublisher(upstream))(count => Merge(count)) + Source + .combine(Source.fromPublisher(filthyPublisher), Source.fromPublisher(upstream))(count => Merge(count)) .runWith(Sink.fromSubscriber(downstream)) upstream.ensureSubscription() @@ -363,7 +364,7 @@ class ActorGraphInterpreterSpec extends StreamSpec { ise shouldBe an[IllegalStateException] ise.getCause shouldBe a[SpecViolation] ise.getCause.getCause shouldBe a[TE] - ise.getCause.getCause should (have message ("violating your spec")) + ise.getCause.getCause should (have.message("violating your spec")) } "be able to handle Subscriber spec violations without leaking" in assertAllStagesStopped { @@ -377,7 +378,8 @@ 
class ActorGraphInterpreterSpec extends StreamSpec { val upstream = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[Int]() - Source.fromPublisher(upstream) + Source + .fromPublisher(upstream) .alsoTo(Sink.fromSubscriber(downstream)) .runWith(Sink.fromSubscriber(filthySubscriber)) @@ -388,7 +390,7 @@ class ActorGraphInterpreterSpec extends StreamSpec { ise shouldBe an[IllegalStateException] ise.getCause shouldBe a[SpecViolation] ise.getCause.getCause shouldBe a[TE] - ise.getCause.getCause should (have message ("violating your spec")) + ise.getCause.getCause should (have.message("violating your spec")) upstream.expectCancellation() } @@ -414,10 +416,7 @@ class ActorGraphInterpreterSpec extends StreamSpec { val downstream = TestSubscriber.probe[String]() - Source.repeat("whatever") - .via(PostStopSnitchFlow) - .to(Sink.fromSubscriber(downstream)) - .run()(mat) + Source.repeat("whatever").via(PostStopSnitchFlow).to(Sink.fromSubscriber(downstream)).run()(mat) downstream.requestNext() @@ -431,24 +430,26 @@ class ActorGraphInterpreterSpec extends StreamSpec { // reproduces #24719 "not allow a second subscriber" in { val done = Promise[Done]() - Source.single(Source.fromPublisher(new Publisher[Int] { - def subscribe(s: Subscriber[_ >: Int]): Unit = { - s.onSubscribe(new Subscription { - def cancel(): Unit = () - def request(n: Long): Unit = () - }) - // reactive streams 2.5 - must cancel if called with onSubscribe when already have one running - s.onSubscribe(new Subscription { - def cancel(): Unit = - done.trySuccess(Done) - def request(n: Long): Unit = - done.tryFailure(new IllegalStateException("request should not have been invoked")) - }) - } - })).flatMapConcat(identity).runWith(Sink.ignore) + Source + .single(Source.fromPublisher(new Publisher[Int] { + def subscribe(s: Subscriber[_ >: Int]): Unit = { + s.onSubscribe(new Subscription { + def cancel(): Unit = () + def request(n: Long): Unit = () + }) + // reactive streams 2.5 - must cancel if called 
with onSubscribe when already have one running + s.onSubscribe(new Subscription { + def cancel(): Unit = + done.trySuccess(Done) + def request(n: Long): Unit = + done.tryFailure(new IllegalStateException("request should not have been invoked")) + }) + } + })) + .flatMapConcat(identity) + .runWith(Sink.ignore) done.future.futureValue // would throw on failure } } } - diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala index aa162d4fc8..0b41071cbf 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/AsyncCallbackSpec.scala @@ -25,7 +25,7 @@ class AsyncCallbackSpec extends AkkaSpec { case object Stopped class AsyncCallbackGraphStage(probe: ActorRef, early: Option[AsyncCallback[AnyRef] => Unit] = None) - extends GraphStageWithMaterializedValue[FlowShape[Int, Int], AsyncCallback[AnyRef]] { + extends GraphStageWithMaterializedValue[FlowShape[Int, Int], AsyncCallback[AnyRef]] { val in = Inlet[Int]("in") val out = Outlet[Int]("out") @@ -73,7 +73,8 @@ class AsyncCallbackSpec extends AkkaSpec { val probe = TestProbe() val in = TestPublisher.probe[Int]() val out = TestSubscriber.probe[Int]() - val callback = Source.fromPublisher(in) + val callback = Source + .fromPublisher(in) .viaMat(new AsyncCallbackGraphStage(probe.ref))(Keep.right) .to(Sink.fromSubscriber(out)) .run() @@ -98,7 +99,8 @@ class AsyncCallbackSpec extends AkkaSpec { val probe = TestProbe() val in = TestPublisher.probe[Int]() val out = TestSubscriber.probe[Int]() - val callback = Source.fromPublisher(in) + val callback = Source + .fromPublisher(in) .viaMat(new AsyncCallbackGraphStage(probe.ref))(Keep.right) .to(Sink.fromSubscriber(out)) .run() @@ -121,10 +123,7 @@ class AsyncCallbackSpec extends AkkaSpec { "fail the feedback future if stage is stopped" in { val probe = TestProbe() - val 
callback = Source.empty - .viaMat(new AsyncCallbackGraphStage(probe.ref))(Keep.right) - .to(Sink.ignore) - .run() + val callback = Source.empty.viaMat(new AsyncCallbackGraphStage(probe.ref))(Keep.right).to(Sink.ignore).run() probe.expectMsg(Started) probe.expectMsg(Stopped) @@ -136,11 +135,9 @@ class AsyncCallbackSpec extends AkkaSpec { "invoke early" in { val probe = TestProbe() val in = TestPublisher.probe[Int]() - val callback = Source.fromPublisher(in) - .viaMat(new AsyncCallbackGraphStage( - probe.ref, - Some(asyncCb => asyncCb.invoke("early")) - ))(Keep.right) + val callback = Source + .fromPublisher(in) + .viaMat(new AsyncCallbackGraphStage(probe.ref, Some(asyncCb => asyncCb.invoke("early"))))(Keep.right) .to(Sink.ignore) .run() @@ -160,11 +157,12 @@ class AsyncCallbackSpec extends AkkaSpec { val probe = TestProbe() val earlyFeedback = Promise[Done]() val in = TestPublisher.probe[Int]() - val callback = Source.fromPublisher(in) - .viaMat(new AsyncCallbackGraphStage( - probe.ref, - Some(asyncCb => earlyFeedback.completeWith(asyncCb.invokeWithFeedback("early"))) - ))(Keep.right) + val callback = Source + .fromPublisher(in) + .viaMat( + new AsyncCallbackGraphStage(probe.ref, + Some(asyncCb => + earlyFeedback.completeWith(asyncCb.invokeWithFeedback("early")))))(Keep.right) .to(Sink.ignore) .run() @@ -185,10 +183,8 @@ class AsyncCallbackSpec extends AkkaSpec { "accept concurrent input" in { val probe = TestProbe() val in = TestPublisher.probe[Int]() - val callback = Source.fromPublisher(in) - .viaMat(new AsyncCallbackGraphStage(probe.ref))(Keep.right) - .to(Sink.ignore) - .run() + val callback = + Source.fromPublisher(in).viaMat(new AsyncCallbackGraphStage(probe.ref))(Keep.right).to(Sink.ignore).run() import system.dispatcher val feedbacks = (1 to 100).map { n => @@ -208,10 +204,8 @@ class AsyncCallbackSpec extends AkkaSpec { "fail the feedback if the handler throws" in { val probe = TestProbe() val in = TestPublisher.probe() - val callback = 
Source.fromPublisher(in) - .viaMat(new AsyncCallbackGraphStage(probe.ref))(Keep.right) - .to(Sink.ignore) - .run() + val callback = + Source.fromPublisher(in).viaMat(new AsyncCallbackGraphStage(probe.ref))(Keep.right).to(Sink.ignore).run() probe.expectMsg(Started) callback.invokeWithFeedback("happy-case").futureValue should ===(Done) @@ -227,10 +221,7 @@ class AsyncCallbackSpec extends AkkaSpec { "fail the feedback if the handler fails the stage" in { val probe = TestProbe() - val callback = Source.empty - .viaMat(new AsyncCallbackGraphStage(probe.ref))(Keep.right) - .to(Sink.ignore) - .run() + val callback = Source.empty.viaMat(new AsyncCallbackGraphStage(probe.ref))(Keep.right).to(Sink.ignore).run() probe.expectMsg(Started) probe.expectMsg(Stopped) @@ -243,7 +234,8 @@ class AsyncCallbackSpec extends AkkaSpec { "behave with multiple async callbacks" in { import system.dispatcher - class ManyAsyncCallbacksStage(probe: ActorRef) extends GraphStageWithMaterializedValue[SourceShape[String], Set[AsyncCallback[AnyRef]]] { + class ManyAsyncCallbacksStage(probe: ActorRef) + extends GraphStageWithMaterializedValue[SourceShape[String], Set[AsyncCallback[AnyRef]]] { val out = Outlet[String]("out") val shape = SourceShape(out) def createLogicAndMaterializedValue(inheritedAttributes: Attributes) = { @@ -261,14 +253,11 @@ class AsyncCallbackSpec extends AkkaSpec { val out = TestSubscriber.probe[String]() - val acbs = Source.fromGraph(new ManyAsyncCallbacksStage(acbProbe.ref)) - .toMat(Sink.fromSubscriber(out))(Keep.left) - .run() + val acbs = + Source.fromGraph(new ManyAsyncCallbacksStage(acbProbe.ref)).toMat(Sink.fromSubscriber(out))(Keep.left).run() val happyPathFeedbacks = - acbs.map(acb => - Future { acb.invokeWithFeedback("bö") }.flatMap(identity) - ) + acbs.map(acb => Future { acb.invokeWithFeedback("bö") }.flatMap(identity)) Future.sequence(happyPathFeedbacks).futureValue // will throw on fail or timeout on not completed for (_ <- 0 to 10) acbProbe.expectMsg("bö") diff 
--git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ChasingEventsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ChasingEventsSpec.scala index 67b3d0ea86..f32684db0c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ChasingEventsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/ChasingEventsSpec.scala @@ -20,17 +20,18 @@ class ChasingEventsSpec extends AkkaSpec { val out = Outlet[Int]("Propagate.out") override val shape: FlowShape[Int, Int] = FlowShape(in, out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - private var first = true - override def onPush(): Unit = push(out, grab(in)) - override def onPull(): Unit = { - pull(in) - if (!first) cancel(in) - first = false - } + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + private var first = true + override def onPush(): Unit = push(out, grab(in)) + override def onPull(): Unit = { + pull(in) + if (!first) cancel(in) + first = false + } - setHandlers(in, out, this) - } + setHandlers(in, out, this) + } } class CompleteInChasedPush extends GraphStage[FlowShape[Int, Int]] { @@ -38,16 +39,17 @@ class ChasingEventsSpec extends AkkaSpec { val out = Outlet[Int]("Propagate.out") override val shape: FlowShape[Int, Int] = FlowShape(in, out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - private var first = true - override def onPush(): Unit = { - push(out, grab(in)) - complete(out) - } - override def onPull(): Unit = pull(in) + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + private var first = true + override def onPush(): Unit = { + push(out, grab(in)) + complete(out) + } + 
override def onPull(): Unit = pull(in) - setHandlers(in, out, this) - } + setHandlers(in, out, this) + } } class FailureInChasedPush extends GraphStage[FlowShape[Int, Int]] { @@ -55,16 +57,17 @@ class ChasingEventsSpec extends AkkaSpec { val out = Outlet[Int]("Propagate.out") override val shape: FlowShape[Int, Int] = FlowShape(in, out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - private var first = true - override def onPush(): Unit = { - push(out, grab(in)) - fail(out, TE("test failure")) - } - override def onPull(): Unit = pull(in) + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + private var first = true + override def onPush(): Unit = { + push(out, grab(in)) + fail(out, TE("test failure")) + } + override def onPull(): Unit = pull(in) - setHandlers(in, out, this) - } + setHandlers(in, out, this) + } } class ChasableSink extends GraphStage[SinkShape[Int]] { @@ -72,11 +75,12 @@ class ChasingEventsSpec extends AkkaSpec { override val shape: SinkShape[Int] = SinkShape(in) @throws(classOf[Exception]) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler { - override def preStart(): Unit = pull(in) - override def onPush(): Unit = pull(in) - setHandler(in, this) - } + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler { + override def preStart(): Unit = pull(in) + override def onPush(): Unit = pull(in) + setHandler(in, this) + } } "Event chasing" must { diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterFailureModesSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterFailureModesSpec.scala index c6b8137851..49b0230daf 100644 --- 
a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterFailureModesSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterFailureModesSpec.scala @@ -113,4 +113,3 @@ class GraphInterpreterFailureModesSpec extends StreamSpec with GraphInterpreterS } } - diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala index 185d661ccb..d9713639d5 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterPortsSpec.scala @@ -337,7 +337,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit an[IllegalArgumentException] should be thrownBy { in.grab() } } - s"propagate complete while push is in flight and keep ungrabbed element (chasing = $chasing)" in new PortTestSetup(chasing) { + s"propagate complete while push is in flight and keep ungrabbed element (chasing = $chasing)" in new PortTestSetup( + chasing) { in.pull() stepAll() clearEvents() @@ -358,7 +359,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit in.grab() should ===(0) } - s"propagate complete while push is in flight and pulled after the push (chasing = $chasing)" in new PortTestSetup(chasing) { + s"propagate complete while push is in flight and pulled after the push (chasing = $chasing)" in new PortTestSetup( + chasing) { in.pull() stepAll() clearEvents() @@ -718,7 +720,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit an[IllegalArgumentException] should be thrownBy { in.grab() } } - s"ignore any completion if they are concurrent (complete first) (chasing = $chasing)" in new PortTestSetup(chasing) { + s"ignore any completion if they are concurrent (complete first) (chasing = $chasing)" in new PortTestSetup( + 
chasing) { out.complete() in.cancel() @@ -735,7 +738,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit an[IllegalArgumentException] should be thrownBy { in.grab() } } - s"ignore completion from a push-complete if cancelled while in flight (chasing = $chasing)" in new PortTestSetup(chasing) { + s"ignore completion from a push-complete if cancelled while in flight (chasing = $chasing)" in new PortTestSetup( + chasing) { in.pull() stepAll() clearEvents() @@ -757,7 +761,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit an[IllegalArgumentException] should be thrownBy { in.grab() } } - s"ignore completion from a push-complete if cancelled after onPush (chasing = $chasing)" in new PortTestSetup(chasing) { + s"ignore completion from a push-complete if cancelled after onPush (chasing = $chasing)" in new PortTestSetup( + chasing) { in.pull() stepAll() clearEvents() @@ -1049,7 +1054,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit an[IllegalArgumentException] should be thrownBy { in.grab() } } - s"propagate failure while push is in flight and keep ungrabbed element (chasing = $chasing)" in new PortTestSetup(chasing) { + s"propagate failure while push is in flight and keep ungrabbed element (chasing = $chasing)" in new PortTestSetup( + chasing) { in.pull() stepAll() clearEvents() @@ -1088,7 +1094,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit an[IllegalArgumentException] should be thrownBy { in.grab() } } - s"ignore any failure completion if they are concurrent (cancel first) (chasing = $chasing)" in new PortTestSetup(chasing) { + s"ignore any failure completion if they are concurrent (cancel first) (chasing = $chasing)" in new PortTestSetup( + chasing) { in.cancel() out.fail(TE("test")) @@ -1105,7 +1112,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit an[IllegalArgumentException] should be thrownBy 
{ in.grab() } } - s"ignore any failure completion if they are concurrent (complete first) (chasing = $chasing)" in new PortTestSetup(chasing) { + s"ignore any failure completion if they are concurrent (complete first) (chasing = $chasing)" in new PortTestSetup( + chasing) { out.fail(TE("test")) in.cancel() @@ -1122,7 +1130,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit an[IllegalArgumentException] should be thrownBy { in.grab() } } - s"ignore failure from a push-then-fail if cancelled while in flight (chasing = $chasing)" in new PortTestSetup(chasing) { + s"ignore failure from a push-then-fail if cancelled while in flight (chasing = $chasing)" in new PortTestSetup( + chasing) { in.pull() stepAll() clearEvents() @@ -1144,7 +1153,8 @@ class GraphInterpreterPortsSpec extends StreamSpec with GraphInterpreterSpecKit an[IllegalArgumentException] should be thrownBy { in.grab() } } - s"ignore failure from a push-then-fail if cancelled after onPush (chasing = $chasing)" in new PortTestSetup(chasing) { + s"ignore failure from a push-then-fail if cancelled after onPush (chasing = $chasing)" in new PortTestSetup( + chasing) { in.pull() stepAll() clearEvents() diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpec.scala index 40ebdc1731..d5d2e3f9da 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpec.scala @@ -25,10 +25,7 @@ class GraphInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { val source = new UpstreamProbe[Int]("source") val sink = new DownstreamProbe[Int]("sink") - builder(identity) - .connect(source, identity.in) - .connect(identity.out, sink) - .init() + builder(identity).connect(source, identity.in).connect(identity.out, sink).init() lastEvents() should 
===(Set.empty) @@ -61,10 +58,7 @@ class GraphInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { val source = new UpstreamProbe[Int]("source") val sink = new DownstreamProbe[Int]("sink") - builder(detach) - .connect(source, detach.shape.in) - .connect(detach.shape.out, sink) - .init() + builder(detach).connect(source, detach.shape.in).connect(detach.shape.out, sink).init() lastEvents() should ===(Set.empty) @@ -96,11 +90,7 @@ class GraphInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { val source2 = new UpstreamProbe[String]("source2") val sink = new DownstreamProbe[(Int, String)]("sink") - builder(zip) - .connect(source1, zip.in0) - .connect(source2, zip.in1) - .connect(zip.out, sink) - .init() + builder(zip).connect(source1, zip.in0).connect(source2, zip.in1).connect(zip.out, sink).init() lastEvents() should ===(Set.empty) @@ -119,11 +109,7 @@ class GraphInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { val sink1 = new DownstreamProbe[Int]("sink1") val sink2 = new DownstreamProbe[Int]("sink2") - builder(bcast) - .connect(source, bcast.in) - .connect(bcast.out(0), sink1) - .connect(bcast.out(1), sink2) - .init() + builder(bcast).connect(source, bcast.in).connect(bcast.out(0), sink1).connect(bcast.out(1), sink2).init() lastEvents() should ===(Set.empty) @@ -191,7 +177,8 @@ class GraphInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { lastEvents() should ===(Set.empty) source2.onNext(2) - lastEvents() should ===(Set(OnNext(sink1, (1, 2)), OnNext(sink2, (1, 2)), RequestOne(source1), RequestOne(source2))) + lastEvents() should ===( + Set(OnNext(sink1, (1, 2)), OnNext(sink2, (1, 2)), RequestOne(source1), RequestOne(source2))) } @@ -200,11 +187,7 @@ class GraphInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { val source2 = new UpstreamProbe[Int]("source2") val sink = new DownstreamProbe[Int]("sink") - builder(merge) - .connect(source1, merge.in(0)) - .connect(source2, merge.in(1)) - 
.connect(merge.out, sink) - .init() + builder(merge).connect(source1, merge.in(0)).connect(source2, merge.in(1)).connect(merge.out, sink).init() lastEvents() should ===(Set.empty) @@ -239,11 +222,7 @@ class GraphInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { val sink1 = new DownstreamProbe[Int]("sink1") val sink2 = new DownstreamProbe[Int]("sink2") - builder(balance) - .connect(source, balance.in) - .connect(balance.out(0), sink1) - .connect(balance.out(1), sink2) - .init() + builder(balance).connect(source, balance.in).connect(balance.out(0), sink1).connect(balance.out(1), sink2).init() lastEvents() should ===(Set.empty) @@ -332,10 +311,7 @@ class GraphInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { val sink = new DownstreamProbe[String]("sink") val buffer = Buffer[String](2, OverflowStrategy.backpressure) - builder(buffer) - .connect(source, buffer.in) - .connect(buffer.out, sink) - .init() + builder(buffer).connect(source, buffer.in).connect(buffer.out, sink).init() stepAll() lastEvents() should ===(Set(RequestOne(source))) @@ -361,4 +337,3 @@ class GraphInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { } } - diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala index 21159cf485..34329c4824 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/GraphInterpreterSpecKit.scala @@ -7,7 +7,12 @@ package akka.stream.impl.fusing import akka.event.Logging import akka.stream.Supervision.Decider import akka.stream._ -import akka.stream.impl.fusing.GraphInterpreter.{ Connection, DownstreamBoundaryStageLogic, Failed, UpstreamBoundaryStageLogic } +import akka.stream.impl.fusing.GraphInterpreter.{ + Connection, + DownstreamBoundaryStageLogic, + Failed, + UpstreamBoundaryStageLogic +} 
import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler, _ } import akka.stream.testkit.StreamSpec import akka.stream.testkit.Utils.TE @@ -26,11 +31,11 @@ object GraphInterpreterSpecKit { * @param attributes Optional set of attributes to pass to the stages when creating the logics * @return Created logics and the maps of all inlets respective outlets to those logics */ - private[stream] def createLogics( - stages: Array[GraphStageWithMaterializedValue[_ <: Shape, _]], - upstreams: Array[UpstreamBoundaryStageLogic[_]], - downstreams: Array[DownstreamBoundaryStageLogic[_]], - attributes: Array[Attributes] = Array.empty): (Array[GraphStageLogic], SMap[Inlet[_], GraphStageLogic], SMap[Outlet[_], GraphStageLogic]) = { + private[stream] def createLogics(stages: Array[GraphStageWithMaterializedValue[_ <: Shape, _]], + upstreams: Array[UpstreamBoundaryStageLogic[_]], + downstreams: Array[DownstreamBoundaryStageLogic[_]], + attributes: Array[Attributes] = Array.empty) + : (Array[GraphStageLogic], SMap[Inlet[_], GraphStageLogic], SMap[Outlet[_], GraphStageLogic]) = { if (attributes.nonEmpty && attributes.length != stages.length) throw new IllegalArgumentException("Attributes must be either empty or one per stage") @@ -102,49 +107,47 @@ object GraphInterpreterSpecKit { */ private[stream] def createLinearFlowConnections(logics: Seq[GraphStageLogic]): Array[Connection] = { require(logics.length >= 2, s"$logics is too short to create a linear flow") - logics.sliding(2).zipWithIndex.map { - case (window, idx) => - val outOwner = window(0) - val inOwner = window(1) + logics + .sliding(2) + .zipWithIndex + .map { + case (window, idx) => + val outOwner = window(0) + val inOwner = window(1) - val connection = new Connection( - id = idx, - outOwner = outOwner, - outHandler = outOwner.outHandler(0), - inOwner = inOwner, - inHandler = inOwner.inHandler(0) - ) + val connection = new Connection(id = idx, + outOwner = outOwner, + outHandler = outOwner.outHandler(0), + 
inOwner = inOwner, + inHandler = inOwner.inHandler(0)) - outOwner.portToConn(outOwner.inCount) = connection - inOwner.portToConn(0) = connection + outOwner.portToConn(outOwner.inCount) = connection + inOwner.portToConn(0) = connection - connection - }.toArray + connection + } + .toArray } /** * Create interpreter connections for all the given `connectedPorts`. */ - private[stream] def createConnections( - logics: Seq[GraphStageLogic], - connectedPorts: Seq[(Outlet[_], Inlet[_])], - inOwners: SMap[Inlet[_], GraphStageLogic], - outOwners: SMap[Outlet[_], GraphStageLogic]): Array[Connection] = { + private[stream] def createConnections(logics: Seq[GraphStageLogic], + connectedPorts: Seq[(Outlet[_], Inlet[_])], + inOwners: SMap[Inlet[_], GraphStageLogic], + outOwners: SMap[Outlet[_], GraphStageLogic]): Array[Connection] = { val connections = new Array[Connection](connectedPorts.size) connectedPorts.zipWithIndex.foreach { case ((outlet, inlet), idx) => - val outOwner = outOwners(outlet) val inOwner = inOwners(inlet) - val connection = new Connection( - id = idx, - outOwner = outOwner, - outHandler = outOwner.outHandler(outlet.id), - inOwner = inOwner, - inHandler = inOwner.inHandler(inlet.id) - ) + val connection = new Connection(id = idx, + outOwner = outOwner, + outHandler = outOwner.outHandler(outlet.id), + inOwner = inOwner, + inHandler = inOwner.inHandler(inlet.id)) connections(idx) = connection inOwner.portToConn(inlet.id) = connection @@ -163,7 +166,7 @@ object GraphInterpreterSpecKit { } private def setPortIds(stage: GraphStageWithMaterializedValue[_ <: Shape, _]): Unit = { - stage.shape.inlets.zipWithIndex.foreach { case (inlet, idx) => inlet.id = idx } + stage.shape.inlets.zipWithIndex.foreach { case (inlet, idx) => inlet.id = idx } stage.shape.outlets.zipWithIndex.foreach { case (inlet, idx) => inlet.id = idx } } @@ -246,14 +249,13 @@ trait GraphInterpreterSpecKit extends StreamSpec { } def manualInit(logics: Array[GraphStageLogic], connections: 
Array[Connection]): Unit = { - _interpreter = new GraphInterpreter( - NoMaterializer, - logger, - logics, - connections, - onAsyncInput = (_, _, _, _) => (), - fuzzingMode = false, - context = null) + _interpreter = new GraphInterpreter(NoMaterializer, + logger, + logics, + connections, + onAsyncInput = (_, _, _, _) => (), + fuzzingMode = false, + context = null) _interpreter.init(null) } @@ -353,15 +355,16 @@ trait GraphInterpreterSpecKit extends StreamSpec { out.id = 0 override val shape: FlowShape[Int, Int] = FlowShape(in, out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - override def onPush(): Unit = push(out, grab(in)) - override def onPull(): Unit = pull(in) - override def onUpstreamFinish(): Unit = complete(out) - override def onUpstreamFailure(ex: Throwable): Unit = fail(out, ex) - override def onDownstreamFinish(): Unit = cancel(in) + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + override def onPush(): Unit = push(out, grab(in)) + override def onPull(): Unit = pull(in) + override def onUpstreamFinish(): Unit = complete(out) + override def onUpstreamFailure(ex: Throwable): Unit = fail(out, ex) + override def onDownstreamFinish(): Unit = cancel(in) - setHandlers(in, out, this) - } + setHandlers(in, out, this) + } override def toString = "EventPropagateStage" } @@ -487,13 +490,11 @@ trait GraphInterpreterSpecKit extends StreamSpec { def cancel(): Unit = cancel(in) } - builder(sandwitchStage) - .connect(upstream, stagein) - .connect(stageout, downstream) - .init() + builder(sandwitchStage).connect(upstream, stagein).connect(stageout, downstream).init() } - abstract class OneBoundedSetupWithDecider[T](decider: Decider, ops: GraphStageWithMaterializedValue[Shape, Any]*) extends Builder { + abstract class OneBoundedSetupWithDecider[T](decider: Decider, ops: 
GraphStageWithMaterializedValue[Shape, Any]*) + extends Builder { val upstream = new UpstreamOneBoundedProbe[T] val downstream = new DownstreamOneBoundedPortProbe[T] @@ -513,11 +514,7 @@ trait GraphInterpreterSpecKit extends StreamSpec { private def initialize(): Unit = { val supervision = ActorAttributes.supervisionStrategy(decider) val attributes = Array.fill[Attributes](ops.length)(supervision) - val (logics, _, _) = createLogics( - ops.toArray, - Array(upstream), - Array(downstream), - attributes) + val (logics, _, _) = createLogics(ops.toArray, Array(upstream), Array(downstream), attributes) val connections = createLinearFlowConnections(logics) manualInit(logics, connections) } @@ -535,14 +532,15 @@ trait GraphInterpreterSpecKit extends StreamSpec { val out = Outlet[TT]("out") out.id = 0 - setHandler(out, new OutHandler { - override def onPull(): Unit = { - if (lastEvent.contains(RequestOne)) lastEvent += RequestAnother - else lastEvent += RequestOne - } + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + if (lastEvent.contains(RequestOne)) lastEvent += RequestAnother + else lastEvent += RequestOne + } - override def onDownstreamFinish(): Unit = lastEvent += Cancel - }) + override def onDownstreamFinish(): Unit = lastEvent += Cancel + }) def onNext(elem: TT): Unit = { push(out, elem) @@ -595,6 +593,5 @@ trait GraphInterpreterSpecKit extends StreamSpec { } abstract class OneBoundedSetup[T](_ops: GraphStageWithMaterializedValue[Shape, Any]*) - extends OneBoundedSetupWithDecider[T](Supervision.stoppingDecider, _ops: _*) + extends OneBoundedSetupWithDecider[T](Supervision.stoppingDecider, _ops: _*) } - diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala index 1a15c0d0a7..fa2b49de4d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSpec.scala @@ -43,10 +43,9 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { lastEvents() should be(Set(OnComplete)) } - "implement chain of maps correctly" in new OneBoundedSetup[Int]( - Map((x: Int) => x + 1), - Map((x: Int) => x * 2), - Map((x: Int) => x + 1)) { + "implement chain of maps correctly" in new OneBoundedSetup[Int](Map((x: Int) => x + 1), + Map((x: Int) => x * 2), + Map((x: Int) => x + 1)) { lastEvents() should be(Set.empty) @@ -79,9 +78,8 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { lastEvents() should be(Set(OnComplete)) } - "implement one-to-many many-to-one chain correctly" in new OneBoundedSetup[Int]( - Doubler(), - Filter((x: Int) => x != 0)) { + "implement one-to-many many-to-one chain correctly" in new OneBoundedSetup[Int](Doubler(), + Filter((x: Int) => x != 0)) { lastEvents() should be(Set.empty) @@ -104,9 +102,8 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { lastEvents() should be(Set(OnComplete)) } - "implement many-to-one one-to-many chain correctly" in new OneBoundedSetup[Int]( - Filter((x: Int) => x != 0), - Doubler()) { + "implement many-to-one one-to-many chain correctly" in new OneBoundedSetup[Int](Filter((x: Int) => x != 0), + Doubler()) { lastEvents() should be(Set.empty) @@ -146,10 +143,9 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { lastEvents() should be(Set(OnNext(1), Cancel, OnComplete)) } - "implement take inside a chain" in new OneBoundedSetup[Int]( - Filter((x: Int) => x != 0), - takeTwo, - Map((x: Int) => x + 1)) { + "implement take inside a chain" in new OneBoundedSetup[Int](Filter((x: Int) => x != 0), + takeTwo, + Map((x: Int) => x + 1)) { lastEvents() should be(Set.empty) @@ -208,7 +204,8 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { lastEvents() should be(Set(Cancel)) } - "work if fold completes while not in a 
push position" in new OneBoundedSetup[Int](Fold(0, (agg: Int, x: Int) => agg + x)) { + "work if fold completes while not in a push position" in new OneBoundedSetup[Int]( + Fold(0, (agg: Int, x: Int) => agg + x)) { lastEvents() should be(Set.empty) @@ -244,11 +241,8 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { lastEvents() should be(Set(OnNext(Vector(3)), OnComplete)) } - "implement batch (conflate)" in new OneBoundedSetup[Int](Batch( - 1L, - ConstantFun.zeroLong, - (in: Int) => in, - (agg: Int, x: Int) => agg + x)) { + "implement batch (conflate)" in new OneBoundedSetup[Int]( + Batch(1L, ConstantFun.zeroLong, (in: Int) => in, (agg: Int, x: Int) => agg + x)) { lastEvents() should be(Set(RequestOne)) @@ -304,16 +298,8 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { } "work with batch-batch (conflate-conflate)" in new OneBoundedSetup[Int]( - Batch( - 1L, - ConstantFun.zeroLong, - (in: Int) => in, - (agg: Int, x: Int) => agg + x), - Batch( - 1L, - ConstantFun.zeroLong, - (in: Int) => in, - (agg: Int, x: Int) => agg + x)) { + Batch(1L, ConstantFun.zeroLong, (in: Int) => in, (agg: Int, x: Int) => agg + x), + Batch(1L, ConstantFun.zeroLong, (in: Int) => in, (agg: Int, x: Int) => agg + x)) { lastEvents() should be(Set(RequestOne)) @@ -343,9 +329,8 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { } - "work with expand-expand" in new OneBoundedSetup[Int]( - new Expand((x: Int) => Iterator.from(x)), - new Expand((x: Int) => Iterator.from(x))) { + "work with expand-expand" in new OneBoundedSetup[Int](new Expand((x: Int) => Iterator.from(x)), + new Expand((x: Int) => Iterator.from(x))) { lastEvents() should be(Set(RequestOne)) @@ -377,11 +362,7 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { } "implement batch-expand (conflate-expand)" in new OneBoundedSetup[Int]( - Batch( - 1L, - ConstantFun.zeroLong, - (in: Int) => in, - (agg: Int, x: Int) => agg + x), + Batch(1L, 
ConstantFun.zeroLong, (in: Int) => in, (agg: Int, x: Int) => agg + x), new Expand(Iterator.continually(_: Int))) { lastEvents() should be(Set(RequestOne)) @@ -413,11 +394,7 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { "implement doubler-conflate (doubler-batch)" in new OneBoundedSetup[Int]( Doubler(), - Batch( - 1L, - ConstantFun.zeroLong, - (in: Int) => in, - (agg: Int, x: Int) => agg + x)) { + Batch(1L, ConstantFun.zeroLong, (in: Int) => in, (agg: Int, x: Int) => agg + x)) { lastEvents() should be(Set(RequestOne)) upstream.onNext(1) @@ -432,12 +409,11 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { } // Note, the new interpreter has no jumpback table, still did not want to remove the test - "work with jumpback table and completed elements" in new OneBoundedSetup[Int]( - Map((x: Int) => x), - Map((x: Int) => x), - KeepGoing(), - Map((x: Int) => x), - Map((x: Int) => x)) { + "work with jumpback table and completed elements" in new OneBoundedSetup[Int](Map((x: Int) => x), + Map((x: Int) => x), + KeepGoing(), + Map((x: Int) => x), + Map((x: Int) => x)) { lastEvents() should be(Set.empty) @@ -489,9 +465,8 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { lastEvents() should be(Set(OnNext(1), OnComplete)) } - "work with pushAndFinish if upstream completes with pushAndFinish and downstream immediately pulls" in new OneBoundedSetup[Int]( - new PushFinishStage, - Fold(0, (x: Int, y: Int) => x + y)) { + "work with pushAndFinish if upstream completes with pushAndFinish and downstream immediately pulls" in new OneBoundedSetup[ + Int](new PushFinishStage, Fold(0, (x: Int, y: Int) => x + y)) { lastEvents() should be(Set.empty) @@ -512,8 +487,7 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { setHandlers(in, out, this) } - } - ) { + }) { lastEvents() should be(Set.empty) downstream.requestOne() @@ -530,9 +504,7 @@ class InterpreterSpec extends StreamSpec with 
GraphInterpreterSpecKit { } should be(true) } - "implement take-take" in new OneBoundedSetup[Int]( - takeOne, - takeOne) { + "implement take-take" in new OneBoundedSetup[Int](takeOne, takeOne) { lastEvents() should be(Set.empty) downstream.requestOne() @@ -543,9 +515,7 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { } - "implement take-take with pushAndFinish from upstream" in new OneBoundedSetup[Int]( - takeOne, - takeOne) { + "implement take-take with pushAndFinish from upstream" in new OneBoundedSetup[Int](takeOne, takeOne) { lastEvents() should be(Set.empty) downstream.requestOne() diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterStressSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterStressSpec.scala index 566fe86a10..83c04e7f9c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterStressSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterStressSpec.scala @@ -23,7 +23,8 @@ class InterpreterStressSpec extends StreamSpec with GraphInterpreterSpecKit { "Interpreter" must { - "work with a massive chain of maps" taggedAs LongRunningTest in new OneBoundedSetup[Int](Vector.fill(chainLength)(map): _*) { + "work with a massive chain of maps" taggedAs LongRunningTest in new OneBoundedSetup[Int]( + Vector.fill(chainLength)(map): _*) { lastEvents() should be(Set.empty) val tstamp = System.nanoTime() @@ -47,8 +48,8 @@ class InterpreterStressSpec extends StreamSpec with GraphInterpreterSpecKit { "work with a massive chain of maps with early complete" taggedAs LongRunningTest in new OneBoundedSetup[Int]( Vector.fill(halfLength)(map) ++ - Seq(takeHalfOfRepetition) ++ - Vector.fill(halfLength)(map): _*) { + Seq(takeHalfOfRepetition) ++ + Vector.fill(halfLength)(map): _*) { lastEvents() should be(Set.empty) val tstamp = System.nanoTime() @@ -105,11 +106,7 @@ class InterpreterStressSpec extends StreamSpec with 
GraphInterpreterSpecKit { "work with a massive chain of batches by overflowing to the heap" in { - val batch = Batch( - 0L, - ConstantFun.zeroLong, - (in: Int) => in, - (agg: Int, in: Int) => agg + in) + val batch = Batch(0L, ConstantFun.zeroLong, (in: Int) => in, (agg: Int, in: Int) => agg + in) new OneBoundedSetup[Int](Vector.fill(chainLength / 10)(batch): _*) { @@ -126,4 +123,3 @@ class InterpreterStressSpec extends StreamSpec with GraphInterpreterSpecKit { } } - diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSupervisionSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSupervisionSpec.scala index f60e38080e..229596e9a1 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSupervisionSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/InterpreterSupervisionSpec.scala @@ -52,10 +52,8 @@ class InterpreterSupervisionSpec extends StreamSpec with GraphInterpreterSpecKit lastEvents() should be(Set(Cancel, OnError(TE))) } - "resume when Map throws" in new OneBoundedSetupWithDecider[Int]( - Supervision.resumingDecider, - Map((x: Int) => if (x == 0) throw TE else x) - ) { + "resume when Map throws" in new OneBoundedSetupWithDecider[Int](Supervision.resumingDecider, + Map((x: Int) => if (x == 0) throw TE else x)) { downstream.requestOne() lastEvents() should be(Set(RequestOne)) upstream.onNext(2) @@ -83,8 +81,7 @@ class InterpreterSupervisionSpec extends StreamSpec with GraphInterpreterSpecKit Supervision.resumingDecider, Map((x: Int) => x + 1), Map((x: Int) => if (x == 0) throw TE else x + 10), - Map((x: Int) => x + 100) - ) { + Map((x: Int) => x + 100)) { downstream.requestOne() lastEvents() should be(Set(RequestOne)) @@ -142,8 +139,8 @@ class InterpreterSupervisionSpec extends StreamSpec with GraphInterpreterSpecKit lastEvents() should be(Set(OnNext(Vector(13, 14)), OnComplete)) } - "fail when Expand `seed` throws" in new OneBoundedSetup[Int]( - new 
Expand((in: Int) => if (in == 2) throw TE else Iterator(in) ++ Iterator.continually(-math.abs(in)))) { + "fail when Expand `seed` throws" in new OneBoundedSetup[Int](new Expand((in: Int) => + if (in == 2) throw TE else Iterator(in) ++ Iterator.continually(-math.abs(in)))) { lastEvents() should be(Set(RequestOne)) @@ -163,8 +160,8 @@ class InterpreterSupervisionSpec extends StreamSpec with GraphInterpreterSpecKit lastEvents() should be(Set(OnError(TE), Cancel)) } - "fail when Expand `expander` throws" in new OneBoundedSetup[Int]( - new Expand((in: Int) => if (in == 2) Iterator.continually(throw TE) else Iterator(in) ++ Iterator.continually(-math.abs(in)))) { + "fail when Expand `expander` throws" in new OneBoundedSetup[Int](new Expand((in: Int) => + if (in == 2) Iterator.continually(throw TE) else Iterator(in) ++ Iterator.continually(-math.abs(in)))) { lastEvents() should be(Set(RequestOne)) diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala index 00ccbfb463..18e7573a3f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/KeepGoingStageSpec.scala @@ -4,15 +4,15 @@ package akka.stream.impl.fusing -import akka.actor.{ NoSerializationVerificationNeeded, ActorRef } +import akka.actor.{ ActorRef, NoSerializationVerificationNeeded } import akka.stream.scaladsl.{ Keep, Source } import akka.stream.testkit.StreamSpec -import akka.stream.{ Attributes, Inlet, SinkShape, ActorMaterializer } -import akka.stream.stage.{ InHandler, AsyncCallback, GraphStageLogic, GraphStageWithMaterializedValue } +import akka.stream.{ ActorMaterializer, Attributes, Inlet, SinkShape } +import akka.stream.stage.{ AsyncCallback, GraphStageLogic, GraphStageWithMaterializedValue, InHandler } import akka.stream.testkit.Utils._ import 
akka.stream.testkit.scaladsl.StreamTestKit._ -import scala.concurrent.{ Await, Promise, Future } +import scala.concurrent.{ Await, Future, Promise } import scala.concurrent.duration._ class KeepGoingStageSpec extends StreamSpec { @@ -43,7 +43,8 @@ class KeepGoingStageSpec extends StreamSpec { class PingableSink(keepAlive: Boolean) extends GraphStageWithMaterializedValue[SinkShape[Int], Future[PingRef]] { val shape = SinkShape[Int](Inlet("ping.in")) - override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[PingRef]) = { + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (GraphStageLogic, Future[PingRef]) = { val promise = Promise[PingRef]() val logic = new GraphStageLogic(shape) { diff --git a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala index fa8a5f24ae..e906a57ca9 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/impl/fusing/LifecycleInterpreterSpec.scala @@ -28,9 +28,12 @@ class LifecycleInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { } "call postStop in order on stages - when upstream completes" in new OneBoundedSetup[String]( - PreStartAndPostStopIdentity(onUpstreamCompleted = () => testActor ! "complete-a", onStop = () => testActor ! "stop-a"), - PreStartAndPostStopIdentity(onUpstreamCompleted = () => testActor ! "complete-b", onStop = () => testActor ! "stop-b"), - PreStartAndPostStopIdentity(onUpstreamCompleted = () => testActor ! "complete-c", onStop = () => testActor ! "stop-c")) { + PreStartAndPostStopIdentity(onUpstreamCompleted = () => testActor ! "complete-a", + onStop = () => testActor ! "stop-a"), + PreStartAndPostStopIdentity(onUpstreamCompleted = () => testActor ! "complete-b", + onStop = () => testActor ! 
"stop-b"), + PreStartAndPostStopIdentity(onUpstreamCompleted = () => testActor ! "complete-c", + onStop = () => testActor ! "stop-c")) { upstream.onComplete() expectMsg("complete-a") expectMsg("stop-a") @@ -42,9 +45,8 @@ class LifecycleInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { } "call postStop in order on stages - when upstream onErrors" in new OneBoundedSetup[String]( - PreStartAndPostStopIdentity( - onUpstreamFailed = ex => testActor ! ex.getMessage, - onStop = () => testActor ! "stop-c")) { + PreStartAndPostStopIdentity(onUpstreamFailed = ex => testActor ! ex.getMessage, + onStop = () => testActor ! "stop-c")) { val msg = "Boom! Boom! Boom!" upstream.onError(TE(msg)) expectMsg(msg) @@ -72,26 +74,24 @@ class LifecycleInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { expectNoMsg(300.millis) } - "onError when preStart fails" in new OneBoundedSetup[String]( - PreStartFailer(() => throw TE("Boom!"))) { + "onError when preStart fails" in new OneBoundedSetup[String](PreStartFailer(() => throw TE("Boom!"))) { lastEvents() should ===(Set(Cancel, OnError(TE("Boom!")))) } - "not blow up when postStop fails" in new OneBoundedSetup[String]( - PostStopFailer(() => throw TE("Boom!"))) { + "not blow up when postStop fails" in new OneBoundedSetup[String](PostStopFailer(() => throw TE("Boom!"))) { upstream.onComplete() lastEvents() should ===(Set(OnComplete)) } - "onError when preStart fails with stages after" in new OneBoundedSetup[String]( - Map((x: Int) => x), - PreStartFailer(() => throw TE("Boom!")), - Map((x: Int) => x)) { + "onError when preStart fails with stages after" in new OneBoundedSetup[String](Map((x: Int) => x), + PreStartFailer( + () => throw TE("Boom!")), + Map((x: Int) => x)) { lastEvents() should ===(Set(Cancel, OnError(TE("Boom!")))) } - "continue with stream shutdown when postStop fails" in new OneBoundedSetup[String]( - PostStopFailer(() => throw TE("Boom!"))) { + "continue with stream shutdown when postStop fails" in 
new OneBoundedSetup[String](PostStopFailer(() => + throw TE("Boom!"))) { lastEvents() should ===(Set()) upstream.onComplete() @@ -111,10 +111,8 @@ class LifecycleInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { expectMsg("stop") } - "postStop when pushAndFinish called with pushAndFinish if indirect upstream completes with pushAndFinish" in new OneBoundedSetup[String]( - Map((x: Any) => x), - new PushFinishStage(onPostStop = () => testActor ! "stop"), - Map((x: Any) => x)) { + "postStop when pushAndFinish called with pushAndFinish if indirect upstream completes with pushAndFinish" in new OneBoundedSetup[ + String](Map((x: Any) => x), new PushFinishStage(onPostStop = () => testActor ! "stop"), Map((x: Any) => x)) { lastEvents() should be(Set.empty) @@ -126,9 +124,8 @@ class LifecycleInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { expectMsg("stop") } - "postStop when pushAndFinish called with pushAndFinish if upstream completes with pushAndFinish and downstream immediately pulls" in new OneBoundedSetup[String]( - new PushFinishStage(onPostStop = () => testActor ! "stop"), - Fold("", (x: String, y: String) => x + y)) { + "postStop when pushAndFinish called with pushAndFinish if upstream completes with pushAndFinish and downstream immediately pulls" in new OneBoundedSetup[ + String](new PushFinishStage(onPostStop = () => testActor ! 
"stop"), Fold("", (x: String, y: String) => x + y)) { lastEvents() should be(Set.empty) @@ -142,11 +139,11 @@ class LifecycleInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { } - private[akka] case class PreStartAndPostStopIdentity[T]( - onStart: () => Unit = () => (), - onStop: () => Unit = () => (), - onUpstreamCompleted: () => Unit = () => (), - onUpstreamFailed: Throwable => Unit = ex => ()) extends SimpleLinearGraphStage[T] { + private[akka] case class PreStartAndPostStopIdentity[T](onStart: () => Unit = () => (), + onStop: () => Unit = () => (), + onUpstreamCompleted: () => Unit = () => (), + onUpstreamFailed: Throwable => Unit = ex => ()) + extends SimpleLinearGraphStage[T] { override def createLogic(attributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { @@ -225,4 +222,3 @@ class LifecycleInterpreterSpec extends StreamSpec with GraphInterpreterSpecKit { override def toString = "PushFinish" } } - diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/ByteStringParserSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/ByteStringParserSpec.scala index f7237130ea..4381cbb4c3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/ByteStringParserSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/ByteStringParserSpec.scala @@ -42,7 +42,8 @@ class ByteStringParserSpec extends StreamSpec { // The Chunker produces two frames for one incoming 4 byte chunk. Hence, the rate in the incoming // side of the Chunker should only be half than on its outgoing side. 
- val result = Source.repeat(ByteString("abcd")) + val result = Source + .repeat(ByteString("abcd")) .take(500) .throttle(1000, 1.second, 10, ThrottleMode.Enforcing) .via(new Chunker) @@ -73,10 +74,8 @@ class ByteStringParserSpec extends StreamSpec { def run(data: ByteString*): ByteString = Await.result( - Source[ByteString](data.toVector) - .via(MultistepParsing) - .fold(ByteString.empty)(_ ++ _) - .runWith(Sink.head), 5.seconds) + Source[ByteString](data.toVector).via(MultistepParsing).fold(ByteString.empty)(_ ++ _).runWith(Sink.head), + 5.seconds) run(ByteString(0xca), ByteString(0xfe), ByteString(0xef, 0x12)) shouldEqual ByteString(0xef, 0x12) run(ByteString(0xca), ByteString(0xfe, 0xef, 0x12)) shouldEqual ByteString(0xef, 0x12) @@ -103,12 +102,10 @@ class ByteStringParserSpec extends StreamSpec { } (the[IllegalStateException] thrownBy Await.result( - Source.single(ByteString("abc")) - .via(SpinningLogic) - .runWith(Sink.ignore), + Source.single(ByteString("abc")).via(SpinningLogic).runWith(Sink.ignore), 3.seconds)).getMessage shouldBe "Parsing logic didn't produce result after 10 steps. " + - "Aborting processing to avoid infinite cycles. In the unlikely case that the parsing logic needs more recursion, " + - "override ParsingLogic.recursionLimit." + "Aborting processing to avoid infinite cycles. In the unlikely case that the parsing logic needs more recursion, " + + "override ParsingLogic.recursionLimit." 
} "complete eagerly" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala index 140ca55962..a2961da5cf 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/FileSinkSpec.scala @@ -46,8 +46,7 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) { "FileSink" must { "write lines to a file" in assertAllStagesStopped { targetFile { f => - val completion = Source(TestByteStrings) - .runWith(FileIO.toPath(f)) + val completion = Source(TestByteStrings).runWith(FileIO.toPath(f)) val result = Await.result(completion, 3.seconds) result.count should equal(6006) @@ -57,8 +56,7 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) { "create new file if not exists" in assertAllStagesStopped { targetFile({ f => - val completion = Source(TestByteStrings) - .runWith(FileIO.toPath(f)) + val completion = Source(TestByteStrings).runWith(FileIO.toPath(f)) val result = Await.result(completion, 3.seconds) result.count should equal(6006) @@ -88,9 +86,7 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) { "by default replace the existing file" in assertAllStagesStopped { targetFile { f => def write(lines: List[String]) = - Source(lines) - .map(ByteString(_)) - .runWith(FileIO.toPath(f)) + Source(lines).map(ByteString(_)).runWith(FileIO.toPath(f)) val completion1 = write(TestLines) Await.result(completion1, 3.seconds) @@ -107,9 +103,7 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) { "allow appending to file" in assertAllStagesStopped { targetFile { f => def write(lines: List[String] = TestLines) = - Source(lines) - .map(ByteString(_)) - .runWith(FileIO.toPath(f, Set(StandardOpenOption.APPEND))) + Source(lines).map(ByteString(_)).runWith(FileIO.toPath(f, Set(StandardOpenOption.APPEND))) val completion1 = write() val result1 = Await.result(completion1, 
3.seconds) @@ -134,7 +128,8 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) { b.toList } - val commonByteString = TestLinesCommon.map(ByteString(_)).foldLeft[ByteString](ByteString.empty)((acc, line) => acc ++ line).compact + val commonByteString = + TestLinesCommon.map(ByteString(_)).foldLeft[ByteString](ByteString.empty)((acc, line) => acc ++ line).compact val startPosition = commonByteString.size val testLinesPart2: List[String] = { @@ -167,7 +162,10 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) { try { Source.fromIterator(() => Iterator.continually(TestByteStrings.head)).runWith(FileIO.toPath(f))(materializer) - materializer.asInstanceOf[PhasedFusingActorMaterializer].supervisor.tell(StreamSupervisor.GetChildren, testActor) + materializer + .asInstanceOf[PhasedFusingActorMaterializer] + .supervisor + .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "fileSink").get assertDispatcher(ref, "akka.stream.default-blocking-io-dispatcher") } finally shutdown(sys) @@ -180,11 +178,15 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) { val materializer = ActorMaterializer()(sys) try { - Source.fromIterator(() => Iterator.continually(TestByteStrings.head)) + Source + .fromIterator(() => Iterator.continually(TestByteStrings.head)) .to(FileIO.toPath(f).addAttributes(ActorAttributes.dispatcher("akka.actor.default-dispatcher"))) .run()(materializer) - materializer.asInstanceOf[PhasedFusingActorMaterializer].supervisor.tell(StreamSupervisor.GetChildren, testActor) + materializer + .asInstanceOf[PhasedFusingActorMaterializer] + .supervisor + .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "fileSink").get assertDispatcher(ref, "akka.actor.default-dispatcher") } finally shutdown(sys) @@ -194,9 +196,9 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) { "write single line to a 
file from lazy sink" in assertAllStagesStopped { //LazySink must wait for result of initialization even if got upstreamComplete targetFile { f => - val completion = Source(List(TestByteStrings.head)) - .runWith(Sink.lazyInitAsync( - () => Future.successful(FileIO.toPath(f))) + val completion = Source(List(TestByteStrings.head)).runWith( + Sink + .lazyInitAsync(() => Future.successful(FileIO.toPath(f))) // map a Future[Option[Future[IOResult]]] into a Future[Option[IOResult]] .mapMaterializedValue(_.flatMap { case Some(future) => future.map(Some(_))(ExecutionContexts.sameThreadExecutionContext) @@ -225,8 +227,8 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) { } "complete with failure when file cannot be open" in { - val completion = Source.single(ByteString("42")) - .runWith(FileIO.toPath(fs.getPath("/I/hope/this/file/doesnt/exist.txt"))) + val completion = + Source.single(ByteString("42")).runWith(FileIO.toPath(fs.getPath("/I/hope/this/file/doesnt/exist.txt"))) completion.failed.futureValue shouldBe an[NoSuchFileException] } @@ -235,7 +237,8 @@ class FileSinkSpec extends StreamSpec(UnboundedMailboxConfig) { private def targetFile(block: Path => Unit, create: Boolean = true): Unit = { val targetFile = Files.createTempFile(fs.getPath("/"), "synchronous-file-sink", ".tmp") if (!create) Files.delete(targetFile) - try block(targetFile) finally Files.delete(targetFile) + try block(targetFile) + finally Files.delete(targetFile) } def checkFileContents(f: Path, contents: String): Unit = { diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/FileSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/FileSourceSpec.scala index bcf307a440..65c96e1b6a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/FileSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/FileSourceSpec.scala @@ -37,11 +37,11 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val TestText = { ("a" * 1000) + - ("b" * 1000) + - 
("c" * 1000) + - ("d" * 1000) + - ("e" * 1000) + - ("f" * 1000) + ("b" * 1000) + + ("c" * 1000) + + ("d" * 1000) + + ("e" * 1000) + + ("f" * 1000) } val testFile = { @@ -75,7 +75,8 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "read contents from a file" in assertAllStagesStopped { val chunkSize = 512 - val p = FileIO.fromPath(testFile, chunkSize) + val p = FileIO + .fromPath(testFile, chunkSize) .addAttributes(Attributes.inputBuffer(1, 2)) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[ByteString]() @@ -110,9 +111,11 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "complete future even when abrupt termination happened" in { val chunkSize = 512 val mat = ActorMaterializer() - val (future, p) = FileIO.fromPath(testFile, chunkSize) + val (future, p) = FileIO + .fromPath(testFile, chunkSize) .addAttributes(Attributes.inputBuffer(1, 2)) - .toMat(TestSink.probe)(Keep.both).run()(mat) + .toMat(TestSink.probe)(Keep.both) + .run()(mat) p.request(1) p.expectNext().utf8String should ===(TestText.splitAt(chunkSize)._1) mat.shutdown() @@ -124,7 +127,8 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val startPosition = 1000 val bufferAttributes = Attributes.inputBuffer(1, 2) - val p = FileIO.fromPath(testFile, chunkSize, startPosition) + val p = FileIO + .fromPath(testFile, chunkSize, startPosition) .withAttributes(bufferAttributes) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[ByteString]() @@ -148,9 +152,11 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "be able to read not whole file" in { val chunkSize = 512 - val (future, p) = FileIO.fromPath(testFile, chunkSize) + val (future, p) = FileIO + .fromPath(testFile, chunkSize) .addAttributes(Attributes.inputBuffer(1, 2)) - .toMat(TestSink.probe)(Keep.both).run() + .toMat(TestSink.probe)(Keep.both) + .run() p.request(1) p.expectNext().utf8String should ===(TestText.splitAt(chunkSize)._1) p.cancel() 
@@ -162,7 +168,8 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val demandAllButOneChunks = TestText.length / chunkSize - 1 - val p = FileIO.fromPath(testFile, chunkSize) + val p = FileIO + .fromPath(testFile, chunkSize) .addAttributes(Attributes.inputBuffer(4, 8)) .runWith(Sink.asPublisher(false)) @@ -216,22 +223,21 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { r.futureValue.status.isFailure shouldBe true } - List( - Settings(chunkSize = 512, readAhead = 2), - Settings(chunkSize = 512, readAhead = 4), - Settings(chunkSize = 2048, readAhead = 2), - Settings(chunkSize = 2048, readAhead = 4)) foreach { settings => - import settings._ + List(Settings(chunkSize = 512, readAhead = 2), + Settings(chunkSize = 512, readAhead = 4), + Settings(chunkSize = 2048, readAhead = 2), + Settings(chunkSize = 2048, readAhead = 4)).foreach { settings => + import settings._ - s"count lines in real file (chunkSize = $chunkSize, readAhead = $readAhead)" in { - val s = FileIO.fromPath(manyLines, chunkSize = chunkSize) - .withAttributes(Attributes.inputBuffer(readAhead, readAhead)) + s"count lines in real file (chunkSize = $chunkSize, readAhead = $readAhead)" in { + val s = + FileIO.fromPath(manyLines, chunkSize = chunkSize).withAttributes(Attributes.inputBuffer(readAhead, readAhead)) - val f = s.runWith(Sink.fold(0) { case (acc, l) => acc + l.utf8String.count(_ == '\n') }) + val f = s.runWith(Sink.fold(0) { case (acc, l) => acc + l.utf8String.count(_ == '\n') }) - f.futureValue should ===(LinesCount) - } + f.futureValue should ===(LinesCount) } + } "use dedicated blocking-io-dispatcher by default" in assertAllStagesStopped { val sys = ActorSystem("dispatcher-testing", UnboundedMailboxConfig) @@ -239,9 +245,13 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { try { val p = FileIO.fromPath(manyLines).runWith(TestSink.probe)(materializer) - 
materializer.asInstanceOf[PhasedFusingActorMaterializer].supervisor.tell(StreamSupervisor.GetChildren, testActor) + materializer + .asInstanceOf[PhasedFusingActorMaterializer] + .supervisor + .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "fileSource").get - try assertDispatcher(ref, "akka.stream.default-blocking-io-dispatcher") finally p.cancel() + try assertDispatcher(ref, "akka.stream.default-blocking-io-dispatcher") + finally p.cancel() } finally shutdown(sys) } @@ -250,18 +260,24 @@ class FileSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val materializer = ActorMaterializer()(sys) try { - val p = FileIO.fromPath(manyLines) + val p = FileIO + .fromPath(manyLines) .addAttributes(ActorAttributes.dispatcher("akka.actor.default-dispatcher")) .runWith(TestSink.probe)(materializer) - materializer.asInstanceOf[PhasedFusingActorMaterializer].supervisor.tell(StreamSupervisor.GetChildren, testActor) + materializer + .asInstanceOf[PhasedFusingActorMaterializer] + .supervisor + .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "fileSource").get - try assertDispatcher(ref, "akka.actor.default-dispatcher") finally p.cancel() + try assertDispatcher(ref, "akka.actor.default-dispatcher") + finally p.cancel() } finally shutdown(sys) } "not signal onComplete more than once" in { - FileIO.fromPath(testFile, 2 * TestText.length) + FileIO + .fromPath(testFile, 2 * TestText.length) .runWith(TestSink.probe) .requestNext(ByteString(TestText, UTF_8.name)) .expectComplete() diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala index 7ca11c65b7..54813461d5 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSinkSpec.scala @@ -60,8 
+60,7 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { "read bytes correctly if requested by InputStream not in chunk size" in assertAllStagesStopped { val sinkProbe = TestProbe() val byteString2 = randomByteString(3) - val inputStream = Source(byteString :: byteString2 :: Nil) - .runWith(testSink(sinkProbe)) + val inputStream = Source(byteString :: byteString2 :: Nil).runWith(testSink(sinkProbe)) sinkProbe.expectMsgAllOf(GraphStageMessages.Push, GraphStageMessages.Push) @@ -126,8 +125,7 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { "return all data when upstream is completed" in assertAllStagesStopped { val sinkProbe = TestProbe() - val (probe, inputStream) = TestSource.probe[ByteString] - .toMat(testSink(sinkProbe))(Keep.both).run() + val (probe, inputStream) = TestSource.probe[ByteString].toMat(testSink(sinkProbe))(Keep.both).run() val bytes = randomByteString(1) probe.sendNext(bytes) @@ -162,11 +160,12 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { "successfully read several chunks at once" in assertAllStagesStopped { val bytes = List.fill(4)(randomByteString(4)) val sinkProbe = TestProbe() - val inputStream = Source[ByteString](bytes) - .runWith(testSink(sinkProbe)) + val inputStream = Source[ByteString](bytes).runWith(testSink(sinkProbe)) //need to wait while all elements arrive to sink - bytes foreach { _ => sinkProbe.expectMsg(GraphStageMessages.Push) } + bytes.foreach { _ => + sinkProbe.expectMsg(GraphStageMessages.Push) + } for (i <- 0 to 1) readN(inputStream, 8) should ===((8, bytes(i * 2) ++ bytes(i * 2 + 1))) @@ -219,7 +218,10 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { val materializer = ActorMaterializer()(sys) try { TestSource.probe[ByteString].runWith(StreamConverters.asInputStream())(materializer) - materializer.asInstanceOf[PhasedFusingActorMaterializer].supervisor.tell(StreamSupervisor.GetChildren, testActor) + materializer + 
.asInstanceOf[PhasedFusingActorMaterializer] + .supervisor + .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "inputStreamSink").get assertDispatcher(ref, "akka.stream.default-blocking-io-dispatcher") } finally shutdown(sys) @@ -243,14 +245,13 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { "fail to materialize with zero sized input buffer" in { an[IllegalArgumentException] shouldBe thrownBy { - Source.single(byteString) - .runWith(StreamConverters.asInputStream(timeout).withAttributes(inputBuffer(0, 0))) + Source.single(byteString).runWith(StreamConverters.asInputStream(timeout).withAttributes(inputBuffer(0, 0))) /* With Source.single we test the code path in which the sink itself throws an exception when being materialized. If Source.empty is used, the same exception is thrown by Materializer. - */ + */ } } @@ -267,9 +268,8 @@ class InputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { "propagate error to InputStream" in { val readTimeout = 3.seconds - val (probe, inputStream) = TestSource.probe[ByteString] - .toMat(StreamConverters.asInputStream(readTimeout))(Keep.both) - .run() + val (probe, inputStream) = + TestSource.probe[ByteString].toMat(StreamConverters.asInputStream(readTimeout))(Keep.both).run() probe.sendNext(ByteString("one")) val error = new RuntimeException("failure") diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSourceSpec.scala index 8eb2e067b5..8190cd4441 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/InputStreamSourceSpec.scala @@ -25,29 +25,30 @@ class InputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "InputStreamSource" must { "not signal when no demand" in { - val f = StreamConverters.fromInputStream(() => new InputStream { 
- override def read(): Int = 42 - }) + val f = StreamConverters.fromInputStream(() => + new InputStream { + override def read(): Int = 42 + }) - Await.result(f - .takeWithin(5.seconds) - .runForeach(it => ()), 10.seconds) + Await.result(f.takeWithin(5.seconds).runForeach(it => ()), 10.seconds) } "read bytes from InputStream" in assertAllStagesStopped { - val f = StreamConverters.fromInputStream(() => new InputStream { - @volatile var buf = List("a", "b", "c").map(_.charAt(0).toInt) - override def read(): Int = { - buf match { - case head :: tail => - buf = tail - head - case Nil => - -1 - } + val f = StreamConverters + .fromInputStream(() => + new InputStream { + @volatile var buf = List("a", "b", "c").map(_.charAt(0).toInt) + override def read(): Int = { + buf match { + case head :: tail => + buf = tail + head + case Nil => + -1 + } - } - }) + } + }) .runWith(Sink.head) f.futureValue should ===(ByteString("abc")) @@ -55,18 +56,21 @@ class InputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "emit as soon as read" in assertAllStagesStopped { val latch = new CountDownLatch(1) - val probe = StreamConverters.fromInputStream(() => new InputStream { - @volatile var emitted = false - override def read(): Int = { - if (!emitted) { - emitted = true - 'M'.toInt - } else { - latch.await() - -1 - } - } - }, chunkSize = 1) + val probe = StreamConverters + .fromInputStream(() => + new InputStream { + @volatile var emitted = false + override def read(): Int = { + if (!emitted) { + emitted = true + 'M'.toInt + } else { + latch.await() + -1 + } + } + }, + chunkSize = 1) .runWith(TestSink.probe) probe.request(4) diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSinkSpec.scala index a69b3022f0..10d29fca50 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSinkSpec.scala 
@@ -27,8 +27,8 @@ class OutputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { val p = TestProbe() val datas = List(ByteString("a"), ByteString("c"), ByteString("c")) - val completion = Source(datas) - .runWith(StreamConverters.fromOutputStream(() => new OutputStream { + val completion = Source(datas).runWith(StreamConverters.fromOutputStream(() => + new OutputStream { override def write(i: Int): Unit = () override def write(bytes: Array[Byte]): Unit = p.ref ! ByteString(bytes).utf8String })) @@ -41,29 +41,33 @@ class OutputStreamSinkSpec extends StreamSpec(UnboundedMailboxConfig) { "close underlying stream when error received" in assertAllStagesStopped { val p = TestProbe() - Source.failed(TE("Boom!")) - .runWith(StreamConverters.fromOutputStream(() => new OutputStream { - override def write(i: Int): Unit = () - override def close() = p.ref ! "closed" - })) + Source + .failed(TE("Boom!")) + .runWith(StreamConverters.fromOutputStream(() => + new OutputStream { + override def write(i: Int): Unit = () + override def close() = p.ref ! "closed" + })) p.expectMsg("closed") } "complete materialized value with the error" in assertAllStagesStopped { - val completion = Source.failed(TE("Boom!")) - .runWith(StreamConverters.fromOutputStream(() => new OutputStream { - override def write(i: Int): Unit = () - override def close() = () - })) + val completion = Source + .failed(TE("Boom!")) + .runWith(StreamConverters.fromOutputStream(() => + new OutputStream { + override def write(i: Int): Unit = () + override def close() = () + })) completion.failed.futureValue shouldBe an[AbruptIOTerminationException] } "close underlying stream when completion received" in assertAllStagesStopped { val p = TestProbe() - Source.empty - .runWith(StreamConverters.fromOutputStream(() => new OutputStream { + Source.empty.runWith(StreamConverters.fromOutputStream(() => + new OutputStream { override def write(i: Int): Unit = () override def write(bytes: Array[Byte]): Unit = p.ref ! 
ByteString(bytes).utf8String override def close() = p.ref ! "closed" diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala index c5aaff84a9..eb8960a6c0 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/OutputStreamSourceSpec.scala @@ -47,11 +47,15 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { def assertNoBlockedThreads(): Unit = { def threadsBlocked = - ManagementFactory.getThreadMXBean.dumpAllThreads(true, true).toSeq - .filter(t => t.getThreadName.startsWith("OutputStreamSourceSpec") && - t.getLockName != null && - t.getLockName.startsWith("java.util.concurrent.locks.AbstractQueuedSynchronizer") && - t.getStackTrace.exists(s => s.getClassName.startsWith(classOf[OutputStreamSourceStage].getName))) + ManagementFactory.getThreadMXBean + .dumpAllThreads(true, true) + .toSeq + .filter( + t => + t.getThreadName.startsWith("OutputStreamSourceSpec") && + t.getLockName != null && + t.getLockName.startsWith("java.util.concurrent.locks.AbstractQueuedSynchronizer") && + t.getStackTrace.exists(s => s.getClassName.startsWith(classOf[OutputStreamSourceStage].getName))) awaitAssert(threadsBlocked should ===(Seq()), 5.seconds, interval = 500.millis) } @@ -72,7 +76,8 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "not truncate the stream on close" in assertAllStagesStopped { for (_ <- 1 to 10) { val (outputStream, result) = - StreamConverters.asOutputStream() + StreamConverters + .asOutputStream() .toMat(Sink.fold[ByteString, ByteString](ByteString.empty)(_ ++ _))(Keep.both) .run outputStream.write(bytesArray) @@ -100,11 +105,16 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { } "block writes when buffer is full" in assertAllStagesStopped { - val (outputStream, probe) = 
StreamConverters.asOutputStream().toMat(TestSink.probe[ByteString])(Keep.both) - .withAttributes(Attributes.inputBuffer(16, 16)).run + val (outputStream, probe) = StreamConverters + .asOutputStream() + .toMat(TestSink.probe[ByteString])(Keep.both) + .withAttributes(Attributes.inputBuffer(16, 16)) + .run val s = probe.expectSubscription() - (1 to 16).foreach { _ => outputStream.write(bytesArray) } + (1 to 16).foreach { _ => + outputStream.write(bytesArray) + } //blocked call val f = Future(outputStream.write(bytesArray)) @@ -130,8 +140,7 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { } "throw IOException when writing to the stream after the subscriber has cancelled the reactive stream" in assertAllStagesStopped { - val (outputStream, sink) = StreamConverters.asOutputStream() - .toMat(TestSink.probe[ByteString])(Keep.both).run + val (outputStream, sink) = StreamConverters.asOutputStream().toMat(TestSink.probe[ByteString])(Keep.both).run val s = sink.expectSubscription() @@ -148,15 +157,13 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "fail to materialize with zero sized input buffer" in { an[IllegalArgumentException] shouldBe thrownBy { - StreamConverters.asOutputStream(timeout) - .withAttributes(inputBuffer(0, 0)) - .runWith(Sink.head) + StreamConverters.asOutputStream(timeout).withAttributes(inputBuffer(0, 0)).runWith(Sink.head) /* With Sink.head we test the code path in which the source itself throws an exception when being materialized. If Sink.ignore is used, the same exception is thrown by Materializer. 
- */ + */ } } @@ -164,8 +171,8 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { // make sure previous tests didn't leak assertNoBlockedThreads() - val (outputStream, probe) = StreamConverters.asOutputStream(timeout) - .toMat(TestSink.probe[ByteString])(Keep.both).run()(materializer) + val (outputStream, probe) = + StreamConverters.asOutputStream(timeout).toMat(TestSink.probe[ByteString])(Keep.both).run()(materializer) val sub = probe.expectSubscription() @@ -179,8 +186,8 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "not leave blocked threads when materializer shutdown" in { val materializer2 = ActorMaterializer(settings) - val (outputStream, probe) = StreamConverters.asOutputStream(timeout) - .toMat(TestSink.probe[ByteString])(Keep.both).run()(materializer2) + val (outputStream, probe) = + StreamConverters.asOutputStream(timeout).toMat(TestSink.probe[ByteString])(Keep.both).run()(materializer2) val sub = probe.expectSubscription() @@ -197,9 +204,11 @@ class OutputStreamSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val bufSize = 4 val sourceProbe = TestProbe() - val (outputStream, probe) = StreamConverters.asOutputStream(timeout) + val (outputStream, probe) = StreamConverters + .asOutputStream(timeout) .addAttributes(Attributes.inputBuffer(bufSize, bufSize)) - .toMat(TestSink.probe[ByteString])(Keep.both).run + .toMat(TestSink.probe[ByteString])(Keep.both) + .run // fill the buffer up (1 to (bufSize - 1)).foreach(outputStream.write) diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/TcpHelper.scala b/akka-stream-tests/src/test/scala/akka/stream/io/TcpHelper.scala index 4291cf3b2a..45b455e0b6 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/TcpHelper.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/TcpHelper.scala @@ -5,7 +5,7 @@ package akka.stream.io import akka.actor._ -import akka.io.Tcp.{ ResumeReading, ConnectionClosed } +import akka.io.Tcp.{ ConnectionClosed, 
ResumeReading } import akka.io.{ IO, Tcp } import akka.stream.testkit._ import akka.stream.{ ActorMaterializer, ActorMaterializerSettings } @@ -117,8 +117,7 @@ object TcpHelper { trait TcpHelper { this: TestKitBase => import akka.stream.io.TcpHelper._ - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 4, maxSize = 4) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 4, maxSize = 4) implicit val materializer = ActorMaterializer(settings) diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala index 615f76abe1..26e5d2b41c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/TcpSpec.scala @@ -45,7 +45,11 @@ class TcpSpec extends StreamSpec(""" val tcpReadProbe = new TcpReadProbe() val tcpWriteProbe = new TcpWriteProbe() - Source.fromPublisher(tcpWriteProbe.publisherProbe).via(Tcp().outgoingConnection(server.address)).to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)).run() + Source + .fromPublisher(tcpWriteProbe.publisherProbe) + .via(Tcp().outgoingConnection(server.address)) + .to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)) + .run() val serverConnection = server.waitAccept() validateServerClientCommunication(testData, serverConnection, tcpReadProbe, tcpWriteProbe) @@ -75,7 +79,8 @@ class TcpSpec extends StreamSpec(""" val idle = new TcpWriteProbe() // Just register an idle upstream val resultFuture = - Source.fromPublisher(idle.publisherProbe) + Source + .fromPublisher(idle.publisherProbe) .via(Tcp().outgoingConnection(server.address)) .runFold(ByteString.empty)((acc, in) => acc ++ in) val serverConnection = server.waitAccept() @@ -91,8 +96,10 @@ class TcpSpec extends StreamSpec(""" "fail the materialized future when the connection fails" in assertAllStagesStopped { val tcpWriteProbe = new TcpWriteProbe() - val future = 
Source.fromPublisher(tcpWriteProbe.publisherProbe) - .viaMat(Tcp().outgoingConnection(InetSocketAddress.createUnresolved("example.com", 666), connectTimeout = 1.second))(Keep.right) + val future = Source + .fromPublisher(tcpWriteProbe.publisherProbe) + .viaMat(Tcp().outgoingConnection(InetSocketAddress.createUnresolved("example.com", 666), + connectTimeout = 1.second))(Keep.right) .toMat(Sink.ignore)(Keep.left) .run() @@ -105,7 +112,11 @@ class TcpSpec extends StreamSpec(""" val tcpWriteProbe = new TcpWriteProbe() val tcpReadProbe = new TcpReadProbe() - Source.fromPublisher(tcpWriteProbe.publisherProbe).via(Tcp().outgoingConnection(server.address)).to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)).run() + Source + .fromPublisher(tcpWriteProbe.publisherProbe) + .via(Tcp().outgoingConnection(server.address)) + .to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)) + .run() val serverConnection = server.waitAccept() // Client can still write @@ -135,7 +146,11 @@ class TcpSpec extends StreamSpec(""" val tcpWriteProbe = new TcpWriteProbe() val tcpReadProbe = new TcpReadProbe() - Source.fromPublisher(tcpWriteProbe.publisherProbe).via(Tcp().outgoingConnection(server.address)).to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)).run() + Source + .fromPublisher(tcpWriteProbe.publisherProbe) + .via(Tcp().outgoingConnection(server.address)) + .to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)) + .run() val serverConnection = server.waitAccept() // Server can still write @@ -163,7 +178,11 @@ class TcpSpec extends StreamSpec(""" val tcpWriteProbe = new TcpWriteProbe() val tcpReadProbe = new TcpReadProbe() - Source.fromPublisher(tcpWriteProbe.publisherProbe).via(Tcp().outgoingConnection(server.address)).to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)).run() + Source + .fromPublisher(tcpWriteProbe.publisherProbe) + .via(Tcp().outgoingConnection(server.address)) + .to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)) + .run() val serverConnection = 
server.waitAccept() // Server can still write @@ -195,7 +214,11 @@ class TcpSpec extends StreamSpec(""" val tcpWriteProbe = new TcpWriteProbe() val tcpReadProbe = new TcpReadProbe() - Source.fromPublisher(tcpWriteProbe.publisherProbe).via(Tcp().outgoingConnection(server.address)).to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)).run() + Source + .fromPublisher(tcpWriteProbe.publisherProbe) + .via(Tcp().outgoingConnection(server.address)) + .to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)) + .run() val serverConnection = server.waitAccept() // Client can still write @@ -228,7 +251,11 @@ class TcpSpec extends StreamSpec(""" val tcpWriteProbe = new TcpWriteProbe() val tcpReadProbe = new TcpReadProbe() - Source.fromPublisher(tcpWriteProbe.publisherProbe).via(Tcp().outgoingConnection(server.address)).to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)).run() + Source + .fromPublisher(tcpWriteProbe.publisherProbe) + .via(Tcp().outgoingConnection(server.address)) + .to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)) + .run() val serverConnection = server.waitAccept() // Server can still write @@ -258,7 +285,11 @@ class TcpSpec extends StreamSpec(""" val tcpWriteProbe = new TcpWriteProbe() val tcpReadProbe = new TcpReadProbe() - Source.fromPublisher(tcpWriteProbe.publisherProbe).via(Tcp().outgoingConnection(server.address)).to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)).run() + Source + .fromPublisher(tcpWriteProbe.publisherProbe) + .via(Tcp().outgoingConnection(server.address)) + .to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)) + .run() val serverConnection = server.waitAccept() // Server can still write @@ -285,7 +316,11 @@ class TcpSpec extends StreamSpec(""" val tcpWriteProbe = new TcpWriteProbe() val tcpReadProbe = new TcpReadProbe() - Source.fromPublisher(tcpWriteProbe.publisherProbe).via(Tcp().outgoingConnection(server.address)).to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)).run() + Source + 
.fromPublisher(tcpWriteProbe.publisherProbe) + .via(Tcp().outgoingConnection(server.address)) + .to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)) + .run() val serverConnection = server.waitAccept() // Server can still write @@ -314,7 +349,11 @@ class TcpSpec extends StreamSpec(""" val tcpWriteProbe = new TcpWriteProbe() val tcpReadProbe = new TcpReadProbe() - Source.fromPublisher(tcpWriteProbe.publisherProbe).via(Tcp().outgoingConnection(server.address)).to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)).run() + Source + .fromPublisher(tcpWriteProbe.publisherProbe) + .via(Tcp().outgoingConnection(server.address)) + .to(Sink.fromSubscriber(tcpReadProbe.subscriberProbe)) + .run() val serverConnection = server.waitAccept() serverConnection.abort() @@ -336,12 +375,15 @@ class TcpSpec extends StreamSpec(""" val outgoingConnection = Tcp().outgoingConnection(server.address) val conn1F = - Source.fromPublisher(tcpWriteProbe1.publisherProbe) + Source + .fromPublisher(tcpWriteProbe1.publisherProbe) .viaMat(outgoingConnection)(Keep.right) - .to(Sink.fromSubscriber(tcpReadProbe1.subscriberProbe)).run() + .to(Sink.fromSubscriber(tcpReadProbe1.subscriberProbe)) + .run() val serverConnection1 = server.waitAccept() val conn2F = - Source.fromPublisher(tcpWriteProbe2.publisherProbe) + Source + .fromPublisher(tcpWriteProbe2.publisherProbe) .viaMat(outgoingConnection)(Keep.right) .to(Sink.fromSubscriber(tcpReadProbe2.subscriberProbe)) .run() @@ -370,13 +412,16 @@ class TcpSpec extends StreamSpec(""" Flow.fromSinkAndSourceMat(Sink.ignore, Source.single(ByteString("Early response")))(Keep.right) val binding = - Tcp().bind(serverAddress.getHostString, serverAddress.getPort, halfClose = false).toMat(Sink.foreach { conn => - conn.flow.join(writeButIgnoreRead).run() - })(Keep.left) + Tcp() + .bind(serverAddress.getHostString, serverAddress.getPort, halfClose = false) + .toMat(Sink.foreach { conn => + conn.flow.join(writeButIgnoreRead).run() + })(Keep.left) .run() .futureValue - val 
(promise, result) = Source.maybe[ByteString] + val (promise, result) = Source + .maybe[ByteString] .via(Tcp().outgoingConnection(serverAddress.getHostString, serverAddress.getPort)) .toMat(Sink.fold(ByteString.empty)(_ ++ _))(Keep.both) .run() @@ -391,9 +436,11 @@ class TcpSpec extends StreamSpec(""" val serverAddress = temporaryServerAddress() val binding = - Tcp().bind(serverAddress.getHostString, serverAddress.getPort, halfClose = false).toMat(Sink.foreach { conn => - conn.flow.join(Flow[ByteString]).run() - })(Keep.left) + Tcp() + .bind(serverAddress.getHostString, serverAddress.getPort, halfClose = false) + .toMat(Sink.foreach { conn => + conn.flow.join(Flow[ByteString]).run() + })(Keep.left) .run() .futureValue @@ -407,8 +454,8 @@ class TcpSpec extends StreamSpec(""" } "handle when connection actor terminates unexpectedly" in { - val system2 = ActorSystem("TcpSpec-unexpected-system2", ConfigFactory.parseString( - """ + val system2 = ActorSystem("TcpSpec-unexpected-system2", + ConfigFactory.parseString(""" akka.loglevel = DEBUG # issue #21660 """).withFallback(system.settings.config)) @@ -417,15 +464,20 @@ class TcpSpec extends StreamSpec(""" val mat2 = ActorMaterializer.create(system2) val serverAddress = temporaryServerAddress() - val binding = Tcp(system2).bindAndHandle(Flow[ByteString], serverAddress.getHostString, serverAddress.getPort)(mat2).futureValue + val binding = Tcp(system2) + .bindAndHandle(Flow[ByteString], serverAddress.getHostString, serverAddress.getPort)(mat2) + .futureValue val probe = TestProbe() val testMsg = ByteString(0) val result = - Source.single(testMsg) + Source + .single(testMsg) .concat(Source.maybe[ByteString]) .via(Tcp(system2).outgoingConnection(serverAddress)) - .runForeach { msg => probe.ref ! msg }(mat2) + .runForeach { msg => + probe.ref ! 
msg + }(mat2) // Ensure first that the actor is there probe.expectMsg(testMsg) @@ -475,10 +527,7 @@ class TcpSpec extends StreamSpec(""" "be able to implement echo" in { val serverAddress = temporaryServerAddress() val (bindingFuture, echoServerFinish) = - Tcp() - .bind(serverAddress.getHostString, serverAddress.getPort) - .toMat(echoHandler)(Keep.both) - .run() + Tcp().bind(serverAddress.getHostString, serverAddress.getPort).toMat(echoHandler)(Keep.both).run() // make sure that the server has bound to the socket val binding = bindingFuture.futureValue @@ -498,10 +547,7 @@ class TcpSpec extends StreamSpec(""" "work with a chain of echoes" in { val serverAddress = temporaryServerAddress() val (bindingFuture, echoServerFinish) = - Tcp() - .bind(serverAddress.getHostString, serverAddress.getPort) - .toMat(echoHandler)(Keep.both) - .run() + Tcp().bind(serverAddress.getHostString, serverAddress.getPort).toMat(echoHandler)(Keep.both).run() // make sure that the server has bound to the socket val binding = bindingFuture.futureValue @@ -568,12 +614,12 @@ class TcpSpec extends StreamSpec(""" """) val serverSystem = ActorSystem("server", config) val clientSystem = ActorSystem("client", config) - val serverMaterializer = ActorMaterializer(ActorMaterializerSettings(serverSystem) - .withSubscriptionTimeoutSettings(StreamSubscriptionTimeoutSettings( - StreamSubscriptionTimeoutTerminationMode.cancel, 42.seconds)))(serverSystem) - val clientMaterializer = ActorMaterializer(ActorMaterializerSettings(clientSystem) - .withSubscriptionTimeoutSettings(StreamSubscriptionTimeoutSettings( - StreamSubscriptionTimeoutTerminationMode.cancel, 42.seconds)))(clientSystem) + val serverMaterializer = ActorMaterializer( + ActorMaterializerSettings(serverSystem).withSubscriptionTimeoutSettings( + StreamSubscriptionTimeoutSettings(StreamSubscriptionTimeoutTerminationMode.cancel, 42.seconds)))(serverSystem) + val clientMaterializer = ActorMaterializer( + 
ActorMaterializerSettings(clientSystem).withSubscriptionTimeoutSettings( + StreamSubscriptionTimeoutSettings(StreamSubscriptionTimeoutTerminationMode.cancel, 42.seconds)))(clientSystem) try { @@ -595,7 +641,8 @@ class TcpSpec extends StreamSpec(""" import serverSystem.dispatcher val futureBinding: Future[ServerBinding] = - Tcp(serverSystem).bind(address.getHostString, address.getPort) + Tcp(serverSystem) + .bind(address.getHostString, address.getPort) // accept one connection, then cancel .take(1) // keep the accepted request hanging @@ -657,35 +704,35 @@ class TcpSpec extends StreamSpec(""" val connectionCounter = new AtomicInteger(0) val accept2ConnectionSink: Sink[IncomingConnection, NotUsed] = - Flow[IncomingConnection].take(2) + Flow[IncomingConnection] + .take(2) .mapAsync(2) { incoming => val connectionNr = connectionCounter.incrementAndGet() if (connectionNr == 1) { // echo - incoming.flow.joinMat( - Flow[ByteString].mapMaterializedValue { mat => - firstClientConnected.trySuccess(()) - mat - }.watchTermination()(Keep.right) - )(Keep.right).run() + incoming.flow + .joinMat(Flow[ByteString] + .mapMaterializedValue { mat => + firstClientConnected.trySuccess(()) + mat + } + .watchTermination()(Keep.right))(Keep.right) + .run() } else { // just ignore it secondClientIgnored.trySuccess(()) Future.successful(Done) } - }.to(Sink.ignore) + } + .to(Sink.ignore) - val serverBound = Tcp().bind(address.getHostString, address.getPort) - .toMat(accept2ConnectionSink)(Keep.left) - .run() + val serverBound = Tcp().bind(address.getHostString, address.getPort).toMat(accept2ConnectionSink)(Keep.left).run() // make sure server has started serverBound.futureValue val firstProbe = TestPublisher.probe[ByteString]() - val firstResult = Source.fromPublisher(firstProbe) - .via(Tcp().outgoingConnection(address)) - .runWith(Sink.seq) + val firstResult = Source.fromPublisher(firstProbe).via(Tcp().outgoingConnection(address)).runWith(Sink.seq) // create the first connection and wait 
until the flow is running server side firstClientConnected.future.futureValue(Timeout(5.seconds)) @@ -716,10 +763,7 @@ class TcpSpec extends StreamSpec(""" // Ensure server is running bindingFuture.futureValue // and is possible to communicate with - Source.single(ByteString(0)) - .via(Tcp().outgoingConnection(address)) - .runWith(Sink.ignore) - .futureValue + Source.single(ByteString(0)).via(Tcp().outgoingConnection(address)).runWith(Sink.ignore).futureValue sys2.terminate().futureValue @@ -737,24 +781,24 @@ class TcpSpec extends StreamSpec(""" val (sslContext, firstSession) = initSslMess() val address = temporaryServerAddress() - Tcp().bindAndHandleTls( - // just echo charactes until we reach '\n', then complete stream - // also - byte is our framing - Flow[ByteString].mapConcat(_.utf8String.toList) - .takeWhile(_ != '\n') - .map(c => ByteString(c)), - address.getHostName, - address.getPort, - sslContext, - firstSession - ).futureValue + Tcp() + .bindAndHandleTls( + // just echo charactes until we reach '\n', then complete stream + // also - byte is our framing + Flow[ByteString].mapConcat(_.utf8String.toList).takeWhile(_ != '\n').map(c => ByteString(c)), + address.getHostName, + address.getPort, + sslContext, + firstSession) + .futureValue system.log.info(s"Server bound to ${address.getHostString}:${address.getPort}") val connectionFlow = Tcp().outgoingTlsConnection(address.getHostName, address.getPort, sslContext, firstSession) val chars = "hello\n".toList.map(_.toString) val (connectionF, result) = - Source(chars).map(c => ByteString(c)) + Source(chars) + .map(c => ByteString(c)) .concat(Source.maybe) // do not complete it from our side .viaMat(connectionFlow)(Keep.right) .map(_.utf8String) @@ -818,11 +862,10 @@ class TcpSpec extends StreamSpec(""" } - def validateServerClientCommunication( - testData: ByteString, - serverConnection: ServerConnection, - readProbe: TcpReadProbe, - writeProbe: TcpWriteProbe): Unit = { + def 
validateServerClientCommunication(testData: ByteString, + serverConnection: ServerConnection, + readProbe: TcpReadProbe, + writeProbe: TcpWriteProbe): Unit = { serverConnection.write(testData) serverConnection.read(5) readProbe.read(5) should be(testData) diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala index 89016bcfc4..c649013fa3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/TlsSpec.scala @@ -60,7 +60,8 @@ object TlsSpec { * independent of the traffic going through. The purpose is to include the last seen * element in the exception message to help in figuring out what went wrong. */ - class Timeout(duration: FiniteDuration)(implicit system: ActorSystem) extends GraphStage[FlowShape[ByteString, ByteString]] { + class Timeout(duration: FiniteDuration)(implicit system: ActorSystem) + extends GraphStage[FlowShape[ByteString, ByteString]] { private val in = Inlet[ByteString]("in") private val out = Outlet[ByteString]("out") @@ -115,94 +116,88 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing x } - val cipherSuites = NegotiateNewSession.withCipherSuites("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA") + val cipherSuites = + NegotiateNewSession.withCipherSuites("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA") def clientTls(closing: TLSClosing) = TLS(sslContext, None, cipherSuites, Client, closing) def badClientTls(closing: TLSClosing) = TLS(initWithTrust("/badtruststore"), None, cipherSuites, Client, closing) def serverTls(closing: TLSClosing) = TLS(sslContext, None, cipherSuites, Server, closing) trait Named { def name: String = - getClass.getName - .reverse - .dropWhile(c => "$0123456789".indexOf(c) != -1) - .takeWhile(_ != '$') - .reverse + getClass.getName.reverse.dropWhile(c => "$0123456789".indexOf(c) != -1).takeWhile(_ 
!= '$').reverse } trait CommunicationSetup extends Named { - def decorateFlow(leftClosing: TLSClosing, rightClosing: TLSClosing, + def decorateFlow(leftClosing: TLSClosing, + rightClosing: TLSClosing, rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]): Flow[SslTlsOutbound, SslTlsInbound, NotUsed] def cleanup(): Unit = () } object ClientInitiates extends CommunicationSetup { - def decorateFlow(leftClosing: TLSClosing, rightClosing: TLSClosing, + def decorateFlow(leftClosing: TLSClosing, + rightClosing: TLSClosing, rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) = - clientTls(leftClosing) atop serverTls(rightClosing).reversed join rhs + clientTls(leftClosing).atop(serverTls(rightClosing).reversed).join(rhs) } object ServerInitiates extends CommunicationSetup { - def decorateFlow(leftClosing: TLSClosing, rightClosing: TLSClosing, + def decorateFlow(leftClosing: TLSClosing, + rightClosing: TLSClosing, rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) = - serverTls(leftClosing) atop clientTls(rightClosing).reversed join rhs + serverTls(leftClosing).atop(clientTls(rightClosing).reversed).join(rhs) } def server(flow: Flow[ByteString, ByteString, Any]) = { - val server = Tcp() - .bind("localhost", 0) - .to(Sink.foreach(c => c.flow.join(flow).run())) - .run() + val server = Tcp().bind("localhost", 0).to(Sink.foreach(c => c.flow.join(flow).run())).run() Await.result(server, 2.seconds) } object ClientInitiatesViaTcp extends CommunicationSetup { var binding: Tcp.ServerBinding = null - def decorateFlow(leftClosing: TLSClosing, rightClosing: TLSClosing, + def decorateFlow(leftClosing: TLSClosing, + rightClosing: TLSClosing, rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) = { - binding = server(serverTls(rightClosing).reversed join rhs) - clientTls(leftClosing) join Tcp().outgoingConnection(binding.localAddress) + binding = server(serverTls(rightClosing).reversed.join(rhs)) + clientTls(leftClosing).join(Tcp().outgoingConnection(binding.localAddress)) } override def cleanup(): Unit = 
binding.unbind() } object ServerInitiatesViaTcp extends CommunicationSetup { var binding: Tcp.ServerBinding = null - def decorateFlow(leftClosing: TLSClosing, rightClosing: TLSClosing, + def decorateFlow(leftClosing: TLSClosing, + rightClosing: TLSClosing, rhs: Flow[SslTlsInbound, SslTlsOutbound, Any]) = { - binding = server(clientTls(rightClosing).reversed join rhs) - serverTls(leftClosing) join Tcp().outgoingConnection(binding.localAddress) + binding = server(clientTls(rightClosing).reversed.join(rhs)) + serverTls(leftClosing).join(Tcp().outgoingConnection(binding.localAddress)) } override def cleanup(): Unit = binding.unbind() } val communicationPatterns = - Seq( - ClientInitiates, - ServerInitiates, - ClientInitiatesViaTcp, - ServerInitiatesViaTcp) + Seq(ClientInitiates, ServerInitiates, ClientInitiatesViaTcp, ServerInitiatesViaTcp) trait PayloadScenario extends Named { def flow: Flow[SslTlsInbound, SslTlsOutbound, Any] = - Flow[SslTlsInbound] - .map { - var session: SSLSession = null - def setSession(s: SSLSession) = { - session = s - system.log.debug(s"new session: $session (${session.getId mkString ","})") - } - - { - case SessionTruncated => SendBytes(ByteString("TRUNCATED")) - case SessionBytes(s, b) if session == null => - setSession(s) - SendBytes(b) - case SessionBytes(s, b) if s != session => - setSession(s) - SendBytes(ByteString("NEWSESSION") ++ b) - case SessionBytes(s, b) => SendBytes(b) - } + Flow[SslTlsInbound].map { + var session: SSLSession = null + def setSession(s: SSLSession) = { + session = s + system.log.debug(s"new session: $session (${session.getId.mkString(",")})") } + + { + case SessionTruncated => SendBytes(ByteString("TRUNCATED")) + case SessionBytes(s, b) if session == null => + setSession(s) + SendBytes(b) + case SessionBytes(s, b) if s != session => + setSession(s) + SendBytes(ByteString("NEWSESSION") ++ b) + case SessionBytes(s, b) => SendBytes(b) + } + } def leftClosing: TLSClosing = IgnoreComplete def rightClosing: TLSClosing = 
IgnoreComplete @@ -220,15 +215,15 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing } object MediumMessages extends PayloadScenario { - val strs = "0123456789" map (d => d.toString * (rnd.nextInt(9000) + 1000)) - def inputs = strs map (s => SendBytes(ByteString(s))) + val strs = "0123456789".map(d => d.toString * (rnd.nextInt(9000) + 1000)) + def inputs = strs.map(s => SendBytes(ByteString(s))) def output = ByteString(strs.foldRight("")(_ ++ _)) } object LargeMessages extends PayloadScenario { // TLS max packet size is 16384 bytes - val strs = "0123456789" map (d => d.toString * (rnd.nextInt(9000) + 17000)) - def inputs = strs map (s => SendBytes(ByteString(s))) + val strs = "0123456789".map(d => d.toString * (rnd.nextInt(9000) + 17000)) + def inputs = strs.map(s => SendBytes(ByteString(s))) def output = ByteString(strs.foldRight("")(_ ++ _)) } @@ -309,23 +304,22 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing def output = ByteString(str + "NEWSESSIONhello world") } - val logCipherSuite = Flow[SslTlsInbound] - .map { - var session: SSLSession = null - def setSession(s: SSLSession) = { - session = s - system.log.debug(s"new session: $session (${session.getId mkString ","})") - } - - { - case SessionTruncated => SendBytes(ByteString("TRUNCATED")) - case SessionBytes(s, b) if s != session => - setSession(s) - SendBytes(ByteString(s.getCipherSuite) ++ b) - case SessionBytes(s, b) => SendBytes(b) - } + val logCipherSuite = Flow[SslTlsInbound].map { + var session: SSLSession = null + def setSession(s: SSLSession) = { + session = s + system.log.debug(s"new session: $session (${session.getId.mkString(",")})") } + { + case SessionTruncated => SendBytes(ByteString("TRUNCATED")) + case SessionBytes(s, b) if s != session => + setSession(s) + SendBytes(ByteString(s.getCipherSuite) ++ b) + case SessionBytes(s, b) => SendBytes(b) + } + } + object SessionRenegotiationFirstOne extends PayloadScenario { override def 
flow = logCipherSuite def inputs = NegotiateNewSession.withCipherSuites("TLS_RSA_WITH_AES_128_CBC_SHA") :: send("hello") :: Nil @@ -339,18 +333,17 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing } val scenarios = - Seq( - SingleBytes, - MediumMessages, - LargeMessages, - EmptyBytesFirst, - EmptyBytesInTheMiddle, - EmptyBytesLast, - CancellingRHS, - SessionRenegotiationBySender, - SessionRenegotiationByReceiver, - SessionRenegotiationFirstOne, - SessionRenegotiationFirstTwo) + Seq(SingleBytes, + MediumMessages, + LargeMessages, + EmptyBytesFirst, + EmptyBytesInTheMiddle, + EmptyBytesLast, + CancellingRHS, + SessionRenegotiationBySender, + SessionRenegotiationByReceiver, + SessionRenegotiationFirstOne, + SessionRenegotiationFirstTwo) for { commPattern <- communicationPatterns @@ -362,17 +355,18 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing Source(scenario.inputs) .via(commPattern.decorateFlow(scenario.leftClosing, scenario.rightClosing, onRHS)) .via(new SimpleLinearGraphStage[SslTlsInbound] { - override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { - setHandlers(in, out, this) + override def createLogic(inheritedAttributes: Attributes) = + new GraphStageLogic(shape) with InHandler with OutHandler { + setHandlers(in, out, this) - override def onPush() = push(out, grab(in)) - override def onPull() = pull(in) + override def onPush() = push(out, grab(in)) + override def onPull() = pull(in) - override def onDownstreamFinish() = { - system.log.debug("me cancelled") - completeStage() + override def onDownstreamFinish() = { + system.log.debug("me cancelled") + completeStage() + } } - } }) .via(debug) .collect { case SessionBytes(_, b) => b } @@ -391,7 +385,8 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing val getError = Flow[SslTlsInbound] .map[Either[SslTlsInbound, SSLException]](i => Left(i)) .recover { 
case e: SSLException => Right(e) } - .collect { case Right(e) => e }.toMat(Sink.head)(Keep.right) + .collect { case Right(e) => e } + .toMat(Sink.head)(Keep.right) val simple = Flow.fromSinkAndSourceMat(getError, Source.maybe[SslTlsOutbound])(Keep.left) @@ -399,13 +394,14 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing // under error conditions, and has the bonus of matching most actual SSL deployments. val (server, serverErr) = Tcp() .bind("localhost", 0) - .mapAsync(1)(c => - c.flow.joinMat(serverTls(IgnoreBoth).reversed.joinMat(simple)(Keep.right))(Keep.right).run() - ) - .toMat(Sink.head)(Keep.both).run() + .mapAsync(1)(c => c.flow.joinMat(serverTls(IgnoreBoth).reversed.joinMat(simple)(Keep.right))(Keep.right).run()) + .toMat(Sink.head)(Keep.both) + .run() - val clientErr = simple.join(badClientTls(IgnoreBoth)) - .join(Tcp().outgoingConnection(Await.result(server, 1.second).localAddress)).run() + val clientErr = simple + .join(badClientTls(IgnoreBoth)) + .join(Tcp().outgoingConnection(Await.result(server, 1.second).localAddress)) + .run() Await.result(serverErr, 1.second).getMessage should include("certificate_unknown") val clientErrText = Await.result(clientErr, 1.second).getMessage @@ -418,12 +414,16 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing "reliably cancel subscriptions when TransportIn fails early" in assertAllStagesStopped { val ex = new Exception("hello") val (sub, out1, out2) = - RunnableGraph.fromGraph(GraphDSL.create(Source.asSubscriber[SslTlsOutbound], Sink.head[ByteString], Sink.head[SslTlsInbound])((_, _, _)) { implicit b => (s, o1, o2) => - val tls = b.add(clientTls(EagerClose)) - s ~> tls.in1; tls.out1 ~> o1 - o2 <~ tls.out2; tls.in2 <~ Source.failed(ex) - ClosedShape - }).run() + RunnableGraph + .fromGraph( + GraphDSL.create(Source.asSubscriber[SslTlsOutbound], Sink.head[ByteString], Sink.head[SslTlsInbound])( + (_, _, _)) { implicit b => (s, o1, o2) => + val tls = 
b.add(clientTls(EagerClose)) + s ~> tls.in1; tls.out1 ~> o1 + o2 <~ tls.out2; tls.in2 <~ Source.failed(ex) + ClosedShape + }) + .run() the[Exception] thrownBy Await.result(out1, 1.second) should be(ex) the[Exception] thrownBy Await.result(out2, 1.second) should be(ex) Thread.sleep(500) @@ -435,12 +435,15 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing "reliably cancel subscriptions when UserIn fails early" in assertAllStagesStopped { val ex = new Exception("hello") val (sub, out1, out2) = - RunnableGraph.fromGraph(GraphDSL.create(Source.asSubscriber[ByteString], Sink.head[ByteString], Sink.head[SslTlsInbound])((_, _, _)) { implicit b => (s, o1, o2) => - val tls = b.add(clientTls(EagerClose)) - Source.failed[SslTlsOutbound](ex) ~> tls.in1; tls.out1 ~> o1 - o2 <~ tls.out2; tls.in2 <~ s - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create(Source.asSubscriber[ByteString], Sink.head[ByteString], Sink.head[SslTlsInbound])( + (_, _, _)) { implicit b => (s, o1, o2) => + val tls = b.add(clientTls(EagerClose)) + Source.failed[SslTlsOutbound](ex) ~> tls.in1; tls.out1 ~> o1 + o2 <~ tls.out2; tls.in2 <~ s + ClosedShape + }) + .run() the[Exception] thrownBy Await.result(out1, 1.second) should be(ex) the[Exception] thrownBy Await.result(out2, 1.second) should be(ex) Thread.sleep(500) @@ -457,7 +460,11 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing val outFlow = { val terminator = BidiFlow.fromFlows(Flow[ByteString], ks.flow[ByteString]) - clientTls(scenario.leftClosing) atop terminator atop serverTls(scenario.rightClosing).reversed join debug.via(scenario.flow) via debug + clientTls(scenario.leftClosing) + .atop(terminator) + .atop(serverTls(scenario.rightClosing).reversed) + .join(debug.via(scenario.flow)) + .via(debug) } val inFlow = Flow[SslTlsInbound] @@ -480,13 +487,12 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing "verify hostname" in 
assertAllStagesStopped { def run(hostName: String): Future[akka.Done] = { - val rhs = Flow[SslTlsInbound] - .map { - case SessionTruncated => SendBytes(ByteString.empty) - case SessionBytes(_, b) => SendBytes(b) - } + val rhs = Flow[SslTlsInbound].map { + case SessionTruncated => SendBytes(ByteString.empty) + case SessionBytes(_, b) => SendBytes(b) + } val clientTls = TLS(sslContext, None, cipherSuites, Client, EagerClose, Some((hostName, 80))) - val flow = clientTls atop serverTls(EagerClose).reversed join rhs + val flow = clientTls.atop(serverTls(EagerClose).reversed).join(rhs) Source.single(SendBytes(ByteString.empty)).via(flow).runWith(Sink.ignore) } @@ -504,7 +510,7 @@ class TlsSpec extends StreamSpec(TlsSpec.configOverrides) with WithLogCapturing "pass through data" in { val f = Source(1 to 3) .map(b => SendBytes(ByteString(b.toByte))) - .via(TLSPlacebo() join Flow.apply) + .via(TLSPlacebo().join(Flow.apply)) .grouped(10) .runWith(Sink.head) val result = Await.result(f, 3.seconds) diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CodecSpecSupport.scala b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CodecSpecSupport.scala index 0f36b678ad..6aee0f6aa3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CodecSpecSupport.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CodecSpecSupport.scala @@ -12,7 +12,8 @@ import org.scalatest.{ BeforeAndAfterAll, Matchers, Suite } trait CodecSpecSupport extends Matchers with BeforeAndAfterAll { self: Suite => - def readAs(string: String, charset: String = "UTF8") = equal(string).matcher[String] compose { (_: ByteString).decodeString(charset) } + def readAs(string: String, charset: String = "UTF8") = + equal(string).matcher[String].compose { (_: ByteString).decodeString(charset) } def hexDump(bytes: ByteString) = bytes.map("%02x".format(_)).mkString def fromHexDump(dump: String) = dump.grouped(2).toArray.map(chars => Integer.parseInt(new 
String(chars), 16).toByte) @@ -67,7 +68,9 @@ invidunt ut labore et dolore magna aliquyam erat. Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus -est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy e""".replace("\r\n", "\n") +est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy e""".replace( + "\r\n", + "\n") implicit val system = ActorSystem(getClass.getSimpleName) implicit val materializer = ActorMaterializer() diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala index f3e3d2314b..33c8b4659c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CoderSpec.scala @@ -24,7 +24,8 @@ abstract class CoderSpec(codecName: String) extends WordSpec with CodecSpecSuppo protected def newCompressor(): Compressor protected def encoderFlow: Flow[ByteString, ByteString, Any] - protected def decoderFlow(maxBytesPerChunk: Int = Compression.MaxBytesPerChunkDefault): Flow[ByteString, ByteString, Any] + protected def decoderFlow( + maxBytesPerChunk: Int = Compression.MaxBytesPerChunkDefault): Flow[ByteString, ByteString, Any] protected def newDecodedInputStream(underlying: InputStream): InputStream protected def newEncodedOutputStream(underlying: OutputStream): OutputStream @@ -86,7 +87,9 @@ abstract class CoderSpec(codecName: String) extends WordSpec with CodecSpecSuppo "support chunked round-trip encoding/decoding" in { val chunks = largeTextBytes.grouped(512).toVector val comp = newCompressor() - val compressedChunks = chunks.map { chunk => comp.compressAndFlush(chunk) } :+ 
comp.finish() + val compressedChunks = chunks.map { chunk => + comp.compressAndFlush(chunk) + } :+ comp.finish() val uncompressed = decodeFromIterator(() => compressedChunks.iterator) uncompressed should readAs(largeText) @@ -103,7 +106,9 @@ abstract class CoderSpec(codecName: String) extends WordSpec with CodecSpecSuppo } "works for chunked compressed data of sizes just above 1024" in { val comp = newCompressor() - val inputBytes = ByteString("""{"baseServiceURL":"http://www.acme.com","endpoints":{"assetSearchURL":"/search","showsURL":"/shows","mediaContainerDetailURL":"/container","featuredTapeURL":"/tape","assetDetailURL":"/asset","moviesURL":"/movies","recentlyAddedURL":"/recent","topicsURL":"/topics","scheduleURL":"/schedule"},"urls":{"aboutAweURL":"www.foobar.com"},"channelName":"Cool Stuff","networkId":"netId","slotProfile":"slot_1","brag":{"launchesUntilPrompt":10,"daysUntilPrompt":5,"launchesUntilReminder":5,"daysUntilReminder":2},"feedbackEmailAddress":"feedback@acme.com","feedbackEmailSubject":"Commends from 
User","splashSponsor":[],"adProvider":{"adProviderProfile":"","adProviderProfileAndroid":"","adProviderNetworkID":0,"adProviderSiteSectionNetworkID":0,"adProviderVideoAssetNetworkID":0,"adProviderSiteSectionCustomID":{},"adProviderServerURL":"","adProviderLiveVideoAssetID":""},"update":[{"forPlatform":"ios","store":{"iTunes":"www.something.com"},"minVer":"1.2.3","notificationVer":"1.2.5"},{"forPlatform":"android","store":{"amazon":"www.something.com","play":"www.something.com"},"minVer":"1.2.3","notificationVer":"1.2.5"}],"tvRatingPolicies":[{"type":"sometype","imageKey":"tv_rating_small","durationMS":15000,"precedence":1},{"type":"someothertype","imageKey":"tv_rating_big","durationMS":15000,"precedence":2}],"exts":{"adConfig":{"globals":{"#{adNetworkID}":"2620","#{ssid}":"usa_tveapp"},"iPad":{"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_ipad/shows","adSize":[{"#{height}":90,"#{width}":728}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_ipad&sz=1x1&t=&c=#{doubleclickrandom}"},"watchwithshowtile":{"adMobAdUnitID":"/2620/usa_tveapp_ipad/watchwithshowtile","adSize":[{"#{height}":120,"#{width}":240}]},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_ipad/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"iPadRetina":{"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_ipad/shows","adSize":[{"#{height}":90,"#{width}":728}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_ipad&sz=1x1&t=&c=#{doubleclickrandom}"},"watchwithshowtile":{"adMobAdUnitID":"/2620/usa_tveapp_ipad/watchwithshowtile","adSize":[{"#{height}":120,"#{width}":240}]},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_ipad/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"iPhone":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/home","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}"
:320}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/shows","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/shows/#{SHOW_NAME}","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_iphone&sz=1x1&t=&c=#{doubleclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_iphone/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"iPhoneRetina":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/home","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/shows","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/shows/#{SHOW_NAME}","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_iphone&sz=1x1&t=&c=#{doubleclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_iphone/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"Tablet":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/home","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/shows","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/shows/#{SHOW_NAME}","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_androidtab&sz=1x1&t=&c=#{doubl
eclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_androidtab/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"TabletHD":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/home","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/shows","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/shows/#{SHOW_NAME}","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_androidtab&sz=1x1&t=&c=#{doubleclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_androidtab/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"Phone":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_android/home","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_android/shows","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_android/shows/#{SHOW_NAME}","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_android&sz=1x1&t=&c=#{doubleclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_android/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"PhoneHD":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_android/home","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_android/shows","adSize":[{"#{height}":50,"#{w
idth}":300},{"#{height}":50,"#{width}":320}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_android/shows/#{SHOW_NAME}","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_android&sz=1x1&t=&c=#{doubleclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_android/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}}}}}""", "utf8") + val inputBytes = ByteString( + """{"baseServiceURL":"http://www.acme.com","endpoints":{"assetSearchURL":"/search","showsURL":"/shows","mediaContainerDetailURL":"/container","featuredTapeURL":"/tape","assetDetailURL":"/asset","moviesURL":"/movies","recentlyAddedURL":"/recent","topicsURL":"/topics","scheduleURL":"/schedule"},"urls":{"aboutAweURL":"www.foobar.com"},"channelName":"Cool Stuff","networkId":"netId","slotProfile":"slot_1","brag":{"launchesUntilPrompt":10,"daysUntilPrompt":5,"launchesUntilReminder":5,"daysUntilReminder":2},"feedbackEmailAddress":"feedback@acme.com","feedbackEmailSubject":"Commends from 
User","splashSponsor":[],"adProvider":{"adProviderProfile":"","adProviderProfileAndroid":"","adProviderNetworkID":0,"adProviderSiteSectionNetworkID":0,"adProviderVideoAssetNetworkID":0,"adProviderSiteSectionCustomID":{},"adProviderServerURL":"","adProviderLiveVideoAssetID":""},"update":[{"forPlatform":"ios","store":{"iTunes":"www.something.com"},"minVer":"1.2.3","notificationVer":"1.2.5"},{"forPlatform":"android","store":{"amazon":"www.something.com","play":"www.something.com"},"minVer":"1.2.3","notificationVer":"1.2.5"}],"tvRatingPolicies":[{"type":"sometype","imageKey":"tv_rating_small","durationMS":15000,"precedence":1},{"type":"someothertype","imageKey":"tv_rating_big","durationMS":15000,"precedence":2}],"exts":{"adConfig":{"globals":{"#{adNetworkID}":"2620","#{ssid}":"usa_tveapp"},"iPad":{"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_ipad/shows","adSize":[{"#{height}":90,"#{width}":728}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_ipad&sz=1x1&t=&c=#{doubleclickrandom}"},"watchwithshowtile":{"adMobAdUnitID":"/2620/usa_tveapp_ipad/watchwithshowtile","adSize":[{"#{height}":120,"#{width}":240}]},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_ipad/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"iPadRetina":{"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_ipad/shows","adSize":[{"#{height}":90,"#{width}":728}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_ipad&sz=1x1&t=&c=#{doubleclickrandom}"},"watchwithshowtile":{"adMobAdUnitID":"/2620/usa_tveapp_ipad/watchwithshowtile","adSize":[{"#{height}":120,"#{width}":240}]},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_ipad/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"iPhone":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/home","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}"
:320}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/shows","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/shows/#{SHOW_NAME}","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_iphone&sz=1x1&t=&c=#{doubleclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_iphone/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"iPhoneRetina":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/home","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/shows","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_iphone/shows/#{SHOW_NAME}","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_iphone&sz=1x1&t=&c=#{doubleclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_iphone/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"Tablet":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/home","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/shows","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/shows/#{SHOW_NAME}","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_androidtab&sz=1x1&t=&c=#{doubl
eclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_androidtab/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"TabletHD":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/home","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/shows","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_androidtab/shows/#{SHOW_NAME}","adSize":[{"#{height}":90,"#{width}":728},{"#{height}":50,"#{width}":320},{"#{height}":50,"#{width}":300}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_androidtab&sz=1x1&t=&c=#{doubleclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_androidtab/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"Phone":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_android/home","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_android/shows","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_android/shows/#{SHOW_NAME}","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_android&sz=1x1&t=&c=#{doubleclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_android/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}},"PhoneHD":{"home":{"adMobAdUnitID":"/2620/usa_tveapp_android/home","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"showlist":{"adMobAdUnitID":"/2620/usa_tveapp_android/shows","adSize":[{"#{height}":50,"#{w
idth}":300},{"#{height}":50,"#{width}":320}]},"episodepage":{"adMobAdUnitID":"/2620/usa_tveapp_android/shows/#{SHOW_NAME}","adSize":[{"#{height}":50,"#{width}":300},{"#{height}":50,"#{width}":320}]},"launch":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_android&sz=1x1&t=&c=#{doubleclickrandom}"},"showpage":{"doubleClickCallbackURL":"http://pubads.g.doubleclick.net/gampad/ad?iu=/2620/usa_tveapp_android/shows/#{SHOW_NAME}&sz=1x1&t=&c=#{doubleclickrandom}"}}}}}""", + "utf8") val compressed = comp.compressAndFinish(inputBytes) ourDecode(compressed) should equal(inputBytes) @@ -114,9 +119,11 @@ abstract class CoderSpec(codecName: String) extends WordSpec with CodecSpecSuppo val compressed = streamEncode(ByteString(array)) val limit = 10000 val resultBs = - Source.single(compressed) + Source + .single(compressed) .via(decoderFlow(maxBytesPerChunk = limit)) - .limit(4200).runWith(Sink.seq) + .limit(4200) + .runWith(Sink.seq) .awaitResult(3.seconds) forAll(resultBs) { bs => @@ -136,7 +143,8 @@ abstract class CoderSpec(codecName: String) extends WordSpec with CodecSpecSuppo ByteString(Array.fill(size)(1.toByte)) val sizesAfterRoundtrip = - Source.fromIterator(() => sizes.toIterator.map(createByteString)) + Source + .fromIterator(() => sizes.toIterator.map(createByteString)) .via(encoderFlow) .via(decoderFlow()) .runFold(Seq.empty[Int])(_ :+ _.size) @@ -150,10 +158,7 @@ abstract class CoderSpec(codecName: String) extends WordSpec with CodecSpecSuppo def encode(s: String) = ourEncode(ByteString(s, "UTF8")) def ourEncode(bytes: ByteString): ByteString = newCompressor().compressAndFinish(bytes) def ourDecode(bytes: ByteString): ByteString = - Source.single(bytes) - .via(decoderFlow()) - .join - .awaitResult(3.seconds) + Source.single(bytes).via(decoderFlow()).join.awaitResult(3.seconds) lazy val corruptContent = { val content = encode(largeText).toArray diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CompressionTestingTools.scala b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CompressionTestingTools.scala index cd9d24b31b..a5a99f8223 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/compression/CompressionTestingTools.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/compression/CompressionTestingTools.scala @@ -17,12 +17,15 @@ import scala.util.{ Failure, Success } // a few useful helpers copied over from akka-http object CompressionTestingTools { implicit class AddFutureAwaitResult[T](val future: Future[T]) extends AnyVal { + /** "Safe" Await.result that doesn't throw away half of the stacktrace */ def awaitResult(atMost: Duration): T = { Await.ready(future, atMost) future.value.get match { - case Success(t) => t - case Failure(ex) => throw new RuntimeException("Trying to await result of failed Future, see the cause for the original problem.", ex) + case Success(t) => t + case Failure(ex) => + throw new RuntimeException("Trying to await result of failed Future, see the cause for the original problem.", + ex) } } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/compression/GzipSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/io/compression/GzipSpec.scala index 06018aeac3..1cbe6af6bc 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/compression/GzipSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/compression/GzipSpec.scala @@ -43,7 +43,7 @@ class GzipSpec extends CoderSpec("gzip") { } "throw early if header is corrupt" in { val cause = (the[RuntimeException] thrownBy ourDecode(ByteString(0, 1, 2, 3, 4))).ultimateCause - cause should (be(a[ZipException]) and have message "Not in GZIP format") + cause should ((be(a[ZipException]) and have).message("Not in GZIP format")) } } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/io/compression/GzipWithCustomCompressionLevelSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/io/compression/GzipWithCustomCompressionLevelSpec.scala index 10dcb9c245..d7f47121c4 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/io/compression/GzipWithCustomCompressionLevelSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/io/compression/GzipWithCustomCompressionLevelSpec.scala @@ -35,7 +35,7 @@ class GzipWithCustomCompressionLevelSpec extends GzipSpec { } "throw early if header is corrupt" in { val cause = (the[RuntimeException] thrownBy ourDecode(ByteString(0, 1, 2, 3, 4))).ultimateCause - cause should (be(a[ZipException]) and have message "Not in GZIP format") + cause should ((be(a[ZipException]) and have).message("Not in GZIP format")) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala index ab07d19c8a..2dbf9182bf 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala @@ -24,12 +24,12 @@ object ActorRefBackpressureSinkSpec { def receive = { case `initMessage` => sender() ! ackMessage - ref forward initMessage + ref.forward(initMessage) case `completeMessage` => - ref forward completeMessage + ref.forward(completeMessage) case msg: Int => sender() ! ackMessage - ref forward msg + ref.forward(msg) } } @@ -43,7 +43,7 @@ object ActorRefBackpressureSinkSpec { actorRef ! 
ackMessage case msg => actorRef = sender() - ref forward msg + ref.forward(msg) } } @@ -60,9 +60,7 @@ class ActorRefBackpressureSinkSpec extends StreamSpec { "send the elements to the ActorRef" in assertAllStagesStopped { val fw = createActor(classOf[Fw]) - Source(List(1, 2, 3)).runWith(Sink.actorRefWithAck( - fw, - initMessage, ackMessage, completeMessage)) + Source(List(1, 2, 3)).runWith(Sink.actorRefWithAck(fw, initMessage, ackMessage, completeMessage)) expectMsg("start") expectMsg(1) expectMsg(2) @@ -72,9 +70,7 @@ class ActorRefBackpressureSinkSpec extends StreamSpec { "send the elements to the ActorRef2" in assertAllStagesStopped { val fw = createActor(classOf[Fw]) - val probe = TestSource.probe[Int].to(Sink.actorRefWithAck( - fw, - initMessage, ackMessage, completeMessage)).run() + val probe = TestSource.probe[Int].to(Sink.actorRefWithAck(fw, initMessage, ackMessage, completeMessage)).run() probe.sendNext(1) expectMsg("start") expectMsg(1) @@ -88,9 +84,8 @@ class ActorRefBackpressureSinkSpec extends StreamSpec { "cancel stream when actor terminates" in assertAllStagesStopped { val fw = createActor(classOf[Fw]) - val publisher = TestSource.probe[Int].to(Sink.actorRefWithAck( - fw, - initMessage, ackMessage, completeMessage)).run().sendNext(1) + val publisher = + TestSource.probe[Int].to(Sink.actorRefWithAck(fw, initMessage, ackMessage, completeMessage)).run().sendNext(1) expectMsg(initMessage) expectMsg(1) system.stop(fw) @@ -99,9 +94,7 @@ class ActorRefBackpressureSinkSpec extends StreamSpec { "send message only when backpressure received" in assertAllStagesStopped { val fw = createActor(classOf[Fw2]) - val publisher = TestSource.probe[Int].to(Sink.actorRefWithAck( - fw, - initMessage, ackMessage, completeMessage)).run() + val publisher = TestSource.probe[Int].to(Sink.actorRefWithAck(fw, initMessage, ackMessage, completeMessage)).run() expectMsg(initMessage) publisher.sendNext(1) @@ -124,7 +117,8 @@ class ActorRefBackpressureSinkSpec extends StreamSpec { val 
bufferSize = 16 val streamElementCount = bufferSize + 4 val fw = createActor(classOf[Fw2]) - val sink = Sink.actorRefWithAck(fw, initMessage, ackMessage, completeMessage) + val sink = Sink + .actorRefWithAck(fw, initMessage, ackMessage, completeMessage) .withAttributes(inputBuffer(bufferSize, bufferSize)) val bufferFullProbe = Promise[akka.Done.type] Source(1 to streamElementCount) @@ -144,10 +138,10 @@ class ActorRefBackpressureSinkSpec extends StreamSpec { "work with one element buffer" in assertAllStagesStopped { val fw = createActor(classOf[Fw2]) val publisher = - TestSource.probe[Int].to(Sink.actorRefWithAck( - fw, - initMessage, ackMessage, completeMessage) - .withAttributes(inputBuffer(1, 1))).run() + TestSource + .probe[Int] + .to(Sink.actorRefWithAck(fw, initMessage, ackMessage, completeMessage).withAttributes(inputBuffer(1, 1))) + .run() expectMsg(initMessage) fw ! TriggerAckMessage @@ -169,9 +163,8 @@ class ActorRefBackpressureSinkSpec extends StreamSpec { "fail to materialize with zero sized input buffer" in { val fw = createActor(classOf[Fw]) an[IllegalArgumentException] shouldBe thrownBy { - val badSink = Sink - .actorRefWithAck(fw, initMessage, ackMessage, completeMessage) - .withAttributes(inputBuffer(0, 0)) + val badSink = + Sink.actorRefWithAck(fw, initMessage, ackMessage, completeMessage).withAttributes(inputBuffer(0, 0)) Source.single(()).runWith(badSink) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSinkSpec.scala index 713009a718..c4406613a7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSinkSpec.scala @@ -15,7 +15,7 @@ import akka.actor.Props object ActorRefSinkSpec { case class Fw(ref: ActorRef) extends Actor { def receive = { - case msg => ref forward msg + case msg => ref.forward(msg) } } } @@ -36,7 +36,8 @@ class 
ActorRefSinkSpec extends StreamSpec { "cancel stream when actor terminates" in assertAllStagesStopped { val fw = system.actorOf(Props(classOf[Fw], testActor).withDispatcher("akka.test.stream-dispatcher")) - val publisher = TestSource.probe[Int].to(Sink.actorRef(fw, onCompleteMessage = "done")).run().sendNext(1).sendNext(2) + val publisher = + TestSource.probe[Int].to(Sink.actorRef(fw, onCompleteMessage = "done")).run().sendNext(1).sendNext(2) expectMsg(1) expectMsg(2) system.stop(fw) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSourceSpec.scala index 3fc945b5bb..9a86faf0a4 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefSourceSpec.scala @@ -5,7 +5,7 @@ package akka.stream.scaladsl import scala.concurrent.duration._ -import akka.stream.{ Attributes, ActorMaterializer, OverflowStrategy } +import akka.stream.{ ActorMaterializer, Attributes, OverflowStrategy } import akka.stream.testkit._ import akka.stream.testkit.scaladsl._ import akka.stream.testkit.Utils._ @@ -142,7 +142,11 @@ class ActorRefSourceSpec extends StreamSpec { "set actor name equal to stage name" in assertAllStagesStopped { val s = TestSubscriber.manualProbe[Int]() val name = "SomeCustomName" - val ref = Source.actorRef(10, OverflowStrategy.fail).withAttributes(Attributes.name(name)).to(Sink.fromSubscriber(s)).run() + val ref = Source + .actorRef(10, OverflowStrategy.fail) + .withAttributes(Attributes.name(name)) + .to(Sink.fromSubscriber(s)) + .run() ref.path.name.contains(name) should ===(true) ref ! 
PoisonPill } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala index c0789314b9..9f2213991b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/AttributesSpec.scala @@ -18,7 +18,8 @@ import com.typesafe.config.ConfigFactory object AttributesSpec { - class AttributesSource(_initialAttributes: Attributes = Attributes.none) extends GraphStageWithMaterializedValue[SourceShape[Any], Attributes] { + class AttributesSource(_initialAttributes: Attributes = Attributes.none) + extends GraphStageWithMaterializedValue[SourceShape[Any], Attributes] { val out = Outlet[Any]("out") override protected def initialAttributes: Attributes = _initialAttributes override val shape = SourceShape.of(out) @@ -26,8 +27,7 @@ object AttributesSpec { override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Attributes) = { val logic = new GraphStageLogic(shape) { setHandler(out, new OutHandler { - def onPull(): Unit = { - } + def onPull(): Unit = {} }) } (logic, inheritedAttributes) @@ -35,7 +35,8 @@ object AttributesSpec { } - class AttributesFlow(_initialAttributes: Attributes = Attributes.none) extends GraphStageWithMaterializedValue[FlowShape[Any, Any], Attributes] { + class AttributesFlow(_initialAttributes: Attributes = Attributes.none) + extends GraphStageWithMaterializedValue[FlowShape[Any, Any], Attributes] { val in = Inlet[Any]("in") val out = Outlet[Any]("out") @@ -55,7 +56,8 @@ object AttributesSpec { } } - class AttributesSink(_initialAttributes: Attributes = Attributes.none) extends GraphStageWithMaterializedValue[SinkShape[Any], Attributes] { + class AttributesSink(_initialAttributes: Attributes = Attributes.none) + extends GraphStageWithMaterializedValue[SinkShape[Any], Attributes] { val in = Inlet[Any]("in") @@ -99,8 +101,10 @@ 
object AttributesSpec { case class WhateverAttribute(label: String) extends Attribute } -class AttributesSpec extends StreamSpec(ConfigFactory.parseString( - """ +class AttributesSpec + extends StreamSpec( + ConfigFactory + .parseString(""" my-dispatcher { type = Dispatcher executor = "thread-pool-executor" @@ -110,14 +114,13 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( throughput = 1 } """) - // we need to revert to the regular mailbox or else the test suite will complain - // about using non-test worthy dispatchers - .withFallback(Utils.UnboundedMailboxConfig)) { + // we need to revert to the regular mailbox or else the test suite will complain + // about using non-test worthy dispatchers + .withFallback(Utils.UnboundedMailboxConfig)) { import AttributesSpec._ - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -145,11 +148,12 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "be appended with addAttributes" in { val attributes = - Source.fromGraph(new AttributesSource() - .addAttributes(Attributes.name("new-name")) - .addAttributes(Attributes.name("re-added")) // adding twice at same level replaces - .addAttributes(whateverAttribute("other-thing")) - ) + Source + .fromGraph( + new AttributesSource() + .addAttributes(Attributes.name("new-name")) + .addAttributes(Attributes.name("re-added")) // adding twice at same level replaces + .addAttributes(whateverAttribute("other-thing"))) .toMat(Sink.head)(Keep.left) .run() @@ -159,10 +163,12 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "be replaced withAttributes directly on a stage" in { val attributes = - Source.fromGraph(new AttributesSource() - .withAttributes(Attributes.name("new-name") and whateverAttribute("other-thing")) - 
.withAttributes(Attributes.name("re-added")) // we loose all previous attributes for same level - ) + Source + .fromGraph( + new AttributesSource() + .withAttributes(Attributes.name("new-name") and whateverAttribute("other-thing")) + .withAttributes(Attributes.name("re-added")) // we loose all previous attributes for same level + ) .toMat(Sink.head)(Keep.left) .run() @@ -172,7 +178,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "be overridable on a module basis" in { val attributes = - Source.fromGraph(new AttributesSource().withAttributes(Attributes.name("new-name"))) + Source + .fromGraph(new AttributesSource().withAttributes(Attributes.name("new-name"))) .toMat(Sink.head)(Keep.left) .run() @@ -180,7 +187,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( } "keep the outermost attribute as the least specific" in { - val attributes = Source.fromGraph(new AttributesSource(Attributes.name("original-name"))) + val attributes = Source + .fromGraph(new AttributesSource(Attributes.name("original-name"))) .map(identity) .addAttributes(Attributes.name("whole-graph")) .toMat(Sink.head)(Keep.left) @@ -198,8 +206,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "make the attributes on fromGraph(single-source-stage) Source behave the same as the stage itself" in { val attributes = - Source.fromGraph( - new AttributesSource(Attributes.name("original-name") and whateverAttribute("whatever")) + Source + .fromGraph(new AttributesSource(Attributes.name("original-name") and whateverAttribute("whatever")) .withAttributes(Attributes.name("new-name"))) .toMat(Sink.head)(Keep.left) .run() @@ -213,9 +221,11 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "make the attributes on Source.fromGraph source behave the same as the stage itself" in { val attributes = - Source.fromGraph(new AttributesSource(Attributes.name("original-name"))) + Source + .fromGraph(new 
AttributesSource(Attributes.name("original-name"))) .withAttributes(Attributes.name("replaced")) // this actually replaces now - .toMat(Sink.head)(Keep.left).withAttributes(Attributes.name("whole-graph")) + .toMat(Sink.head)(Keep.left) + .withAttributes(Attributes.name("whole-graph")) .run() // most specific @@ -228,7 +238,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( } "not replace stage specific attributes with attributes on surrounding composite source" in { - val attributes = Source.fromGraph(new AttributesSource(Attributes.name("original-name"))) + val attributes = Source + .fromGraph(new AttributesSource(Attributes.name("original-name"))) .map(identity) .addAttributes(Attributes.name("composite-graph")) .toMat(Sink.head)(Keep.left) @@ -243,10 +254,12 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "make the attributes on Sink.fromGraph source behave the same as the stage itself" in { val attributes = - Source.maybe.toMat( - Sink.fromGraph(new AttributesSink(Attributes.name("original-name"))) - .withAttributes(Attributes.name("replaced")) // this actually replaces now - )(Keep.right) + Source.maybe + .toMat( + Sink + .fromGraph(new AttributesSink(Attributes.name("original-name"))) + .withAttributes(Attributes.name("replaced")) // this actually replaces now + )(Keep.right) .withAttributes(Attributes.name("whole-graph")) .run() @@ -259,19 +272,18 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "use the initial attributes for dispatcher" in { val dispatcher = - Source.fromGraph(new ThreadNameSnitchingStage("my-dispatcher")) - .runWith(Sink.head) - .futureValue + Source.fromGraph(new ThreadNameSnitchingStage("my-dispatcher")).runWith(Sink.head).futureValue dispatcher should startWith("AttributesSpec-my-dispatcher") } "use an explicit attribute on the stage to select dispatcher" in { val dispatcher = - Source.fromGraph( - // directly on stage - new 
ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher") - .addAttributes(ActorAttributes.dispatcher("my-dispatcher"))) + Source + .fromGraph( + // directly on stage + new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher").addAttributes( + ActorAttributes.dispatcher("my-dispatcher"))) .runWith(Sink.head) .futureValue @@ -280,8 +292,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "use the most specific dispatcher when another one is defined on a surrounding composed graph" in { val dispatcher = - Source.fromGraph( - new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) + Source + .fromGraph(new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) .map(identity) // this is now for the composed source -> flow graph .addAttributes(ActorAttributes.dispatcher("my-dispatcher")) @@ -293,8 +305,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "not change dispatcher from one defined on a surrounding graph" in { val dispatcher = - Source.fromGraph( - new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) + Source + .fromGraph(new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) // this already introduces an async boundary here .map(identity) // this is now just for map since there already is one in-between stage and map @@ -308,8 +320,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "change dispatcher when defined directly on top of the async boundary" in { val dispatcher = - Source.fromGraph( - new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) + Source + .fromGraph(new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) .async .withAttributes(ActorAttributes.dispatcher("my-dispatcher")) .runWith(Sink.head) @@ -320,8 +332,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "change dispatcher when defined on the async call" in 
{ val dispatcher = - Source.fromGraph( - new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) + Source + .fromGraph(new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) .async("my-dispatcher") .runWith(Sink.head) .futureValue @@ -335,9 +347,9 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "make the attributes on fromGraph(flow-stage) Flow behave the same as the stage itself" in { val attributes = Source.empty - .viaMat( - Flow.fromGraph(new AttributesFlow(Attributes.name("original-name"))) - .withAttributes(Attributes.name("replaced")) // this actually replaces now + .viaMat(Flow + .fromGraph(new AttributesFlow(Attributes.name("original-name"))) + .withAttributes(Attributes.name("replaced")) // this actually replaces now )(Keep.right) .withAttributes(Attributes.name("source-flow")) .toMat(Sink.ignore)(Keep.left) @@ -352,13 +364,13 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( val attributes = Source.empty .viaMat( - Flow.fromGraph(new AttributesFlow(Attributes.name("original-name"))) + Flow + .fromGraph(new AttributesFlow(Attributes.name("original-name"))) .map(identity) .withAttributes(Attributes.name("replaced")) .addAttributes(whateverAttribute("whatever")) .withAttributes(Attributes.name("replaced-again")) - .addAttributes(whateverAttribute("replaced")) - )(Keep.right) + .addAttributes(whateverAttribute("replaced")))(Keep.right) .toMat(Sink.ignore)(Keep.left) .run() @@ -376,10 +388,12 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "attributes on a Sink" must { "make the attributes on fromGraph(sink-stage) Sink behave the same as the stage itself" in { val attributes = - Source.empty.toMat( - Sink.fromGraph(new AttributesSink(Attributes.name("original-name"))) - .withAttributes(Attributes.name("replaced")) // this actually replaces now - )(Keep.right) + Source.empty + .toMat( + Sink + .fromGraph(new AttributesSink(Attributes.name("original-name"))) 
+ .withAttributes(Attributes.name("replaced")) // this actually replaces now + )(Keep.right) .withAttributes(Attributes.name("whole-graph")) .run() @@ -396,8 +410,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "not change dispatcher from one defined on a surrounding graph" in { val dispatcherF = - javadsl.Source.fromGraph( - new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) + javadsl.Source + .fromGraph(new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) // this already introduces an async boundary here .detach // this is now just for map since there already is one in-between stage and map @@ -412,8 +426,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "change dispatcher when defined directly on top of the async boundary" in { val dispatcherF = - javadsl.Source.fromGraph( - new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) + javadsl.Source + .fromGraph(new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) .async .withAttributes(ActorAttributes.dispatcher("my-dispatcher")) .runWith(javadsl.Sink.head(), materializer) @@ -425,7 +439,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "make the attributes on Source.fromGraph source behave the same as the stage itself" in { val attributes: Attributes = - javadsl.Source.fromGraph(new AttributesSource(Attributes.name("original-name"))) + javadsl.Source + .fromGraph(new AttributesSource(Attributes.name("original-name"))) .withAttributes(Attributes.name("replaced")) // this actually replaces now .toMat(javadsl.Sink.ignore(), javadsl.Keep.left[Attributes, CompletionStage[Done]]) .withAttributes(Attributes.name("whole-graph")) @@ -440,11 +455,14 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "make the attributes on Flow.fromGraph source behave the same as the stage itself" in { val attributes: Attributes = - javadsl.Source.empty[Any] + 
javadsl.Source + .empty[Any] .viaMat( - javadsl.Flow.fromGraph(new AttributesFlow(Attributes.name("original-name"))) + javadsl.Flow + .fromGraph(new AttributesFlow(Attributes.name("original-name"))) .withAttributes(Attributes.name("replaced")) // this actually replaces now - , javadsl.Keep.right[NotUsed, Attributes]) + , + javadsl.Keep.right[NotUsed, Attributes]) .withAttributes(Attributes.name("source-flow")) .toMat(javadsl.Sink.ignore(), javadsl.Keep.left[Attributes, CompletionStage[Done]]) .withAttributes(Attributes.name("whole-graph")) @@ -459,10 +477,14 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "make the attributes on Sink.fromGraph source behave the same as the stage itself" in { val attributes: Attributes = - javadsl.Source.empty[Any].toMat( - javadsl.Sink.fromGraph(new AttributesSink(Attributes.name("original-name"))) - .withAttributes(Attributes.name("replaced")) // this actually replaces now - , javadsl.Keep.right[NotUsed, Attributes]) + javadsl.Source + .empty[Any] + .toMat( + javadsl.Sink + .fromGraph(new AttributesSink(Attributes.name("original-name"))) + .withAttributes(Attributes.name("replaced")) // this actually replaces now + , + javadsl.Keep.right[NotUsed, Attributes]) .withAttributes(Attributes.name("whole-graph")) .run(materializer) @@ -484,7 +506,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( try { val dispatcher = - Source.fromGraph(new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) + Source + .fromGraph(new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher")) .runWith(Sink.head)(myDispatcherMaterializer) .futureValue @@ -501,7 +524,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "the default dispatcher attributes" must { - val config = ConfigFactory.parseString(s""" + val config = ConfigFactory + .parseString(s""" my-dispatcher { type = Dispatcher executor = "thread-pool-executor" @@ -516,7 +540,8 @@ class AttributesSpec 
extends StreamSpec(ConfigFactory.parseString( """) // we need to revert to the regular mailbox or else the test suite will complain // about using non-test worthy dispatchers - .withFallback(Utils.UnboundedMailboxConfig).resolve() + .withFallback(Utils.UnboundedMailboxConfig) + .resolve() "allow for specifying a custom default dispatcher" in { @@ -538,8 +563,7 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( import ActorAttributes._ val threadName = - Source.fromGraph(new ThreadNameSnitchingStage(None) - .addAttributes(Attributes(IODispatcher))).runWith(Sink.head) + Source.fromGraph(new ThreadNameSnitchingStage(None).addAttributes(Attributes(IODispatcher))).runWith(Sink.head) threadName.futureValue should startWith("AttributesSpec-akka.stream.default-blocking-io-dispatcher") } @@ -552,8 +576,9 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( val mat = ActorMaterializer()(system) val threadName = - Source.fromGraph(new ThreadNameSnitchingStage(None) - .addAttributes(Attributes(IODispatcher))).runWith(Sink.head)(mat) + Source + .fromGraph(new ThreadNameSnitchingStage(None).addAttributes(Attributes(IODispatcher))) + .runWith(Sink.head)(mat) threadName.futureValue should startWith("AttributesSpec-io-dispatcher-override-my-io-dispatcher-") @@ -571,7 +596,8 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString( "resolve the blocking io dispatcher attribute" in { import ActorAttributes._ - Dispatcher.resolve(Attributes(IODispatcher), materializer.settings) should be("akka.stream.default-blocking-io-dispatcher") + Dispatcher.resolve(Attributes(IODispatcher), materializer.settings) should be( + "akka.stream.default-blocking-io-dispatcher") } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala index 874f522660..4a15f3f817 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/BidiFlowSpec.scala @@ -19,13 +19,11 @@ class BidiFlowSpec extends StreamSpec { implicit val materializer = ActorMaterializer() - val bidi = BidiFlow.fromFlows( - Flow[Int].map(x => x.toLong + 2).withAttributes(name("top")), - Flow[ByteString].map(_.decodeString("UTF-8")).withAttributes(name("bottom"))) + val bidi = BidiFlow.fromFlows(Flow[Int].map(x => x.toLong + 2).withAttributes(name("top")), + Flow[ByteString].map(_.decodeString("UTF-8")).withAttributes(name("bottom"))) - val inverse = BidiFlow.fromFlows( - Flow[Long].map(x => x.toInt + 2).withAttributes(name("top")), - Flow[String].map(ByteString(_)).withAttributes(name("bottom"))) + val inverse = BidiFlow.fromFlows(Flow[Long].map(x => x.toInt + 2).withAttributes(name("top")), + Flow[String].map(ByteString(_)).withAttributes(name("bottom"))) val bidiMat = BidiFlow.fromGraph(GraphDSL.create(Sink.head[Int]) { implicit b => s => Source.single(42) ~> s @@ -41,13 +39,15 @@ class BidiFlowSpec extends StreamSpec { "A BidiFlow" must { "work top/bottom in isolation" in { - val (top, bottom) = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Long], Sink.head[String])(Keep.both) { implicit b => (st, sb) => - val s = b.add(bidi) + val (top, bottom) = RunnableGraph + .fromGraph(GraphDSL.create(Sink.head[Long], Sink.head[String])(Keep.both) { implicit b => (st, sb) => + val s = b.add(bidi) - Source.single(1) ~> s.in1; s.out1 ~> st - sb <~ s.out2; s.in2 <~ Source.single(bytes) - ClosedShape - }).run() + Source.single(1) ~> s.in1; s.out1 ~> st + sb <~ s.out2; s.in2 <~ Source.single(bytes) + ClosedShape + }) + .run() Await.result(top, 1.second) should ===(3L) Await.result(bottom, 1.second) should ===(str) @@ -79,10 +79,12 @@ class BidiFlowSpec extends StreamSpec { } "materialize to its value" in { - val f = RunnableGraph.fromGraph(GraphDSL.create(bidiMat) { implicit b => bidi => - Flow[String].map(Integer.valueOf(_).toInt) <~> bidi <~> Flow[Long].map(x => 
ByteString(s"Hello $x")) - ClosedShape - }).run() + val f = RunnableGraph + .fromGraph(GraphDSL.create(bidiMat) { implicit b => bidi => + Flow[String].map(Integer.valueOf(_).toInt) <~> bidi <~> Flow[Long].map(x => ByteString(s"Hello $x")) + ClosedShape + }) + .run() Await.result(f, 1.second) should ===(42) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala index 64a7bd9e68..bd5da0eb8a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CollectionSinkSpec.scala @@ -12,8 +12,7 @@ import scala.concurrent.{ Await, Future } class CollectionSinkSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val mat = ActorMaterializer(settings) @@ -51,7 +50,8 @@ class CollectionSinkSpec extends StreamSpec { "return an empty Vector[T] from an empty Source" in { val input = Nil - val future: Future[immutable.Vector[Int]] = Source.fromIterator(() => input.iterator).runWith(Sink.collection[Int, Vector[Int]]) + val future: Future[immutable.Vector[Int]] = + Source.fromIterator(() => input.iterator).runWith(Sink.collection[Int, Vector[Int]]) val result: immutable.Vector[Int] = Await.result(future, remainingOrDefault) result should be(Vector.empty[Int]) } @@ -66,4 +66,3 @@ class CollectionSinkSpec extends StreamSpec { } } } - diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CompressionSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CompressionSpec.scala index 1f5246c71a..506095a4ad 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CompressionSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CompressionSpec.scala @@ 
-23,9 +23,7 @@ class CompressionSpec extends StreamSpec { "Gzip decompression" must { "be able to decompress a gzipped stream" in { - val source = Source.single(gzip(data)) - .via(Compression.gunzip()) - .map(_.decodeString(StandardCharsets.UTF_8)) + val source = Source.single(gzip(data)).via(Compression.gunzip()).map(_.decodeString(StandardCharsets.UTF_8)) val res = source.runFold("")(_ + _) res.futureValue should ===(data) @@ -34,9 +32,7 @@ class CompressionSpec extends StreamSpec { "Deflate decompression" must { "be able to decompress a deflated stream" in { - val source = Source.single(deflate(data)) - .via(Compression.inflate()) - .map(_.decodeString(StandardCharsets.UTF_8)) + val source = Source.single(deflate(data)).via(Compression.inflate()).map(_.decodeString(StandardCharsets.UTF_8)) val res = source.runFold("")(_ + _) res.futureValue should ===(data) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala index e2c82abe3b..3e5c511088 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/CoupledTerminationFlowSpec.scala @@ -18,8 +18,7 @@ import scala.xml.Node class CoupledTerminationFlowSpec extends StreamSpec with ScriptedTest { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) import system.dispatcher @@ -86,9 +85,7 @@ class CoupledTerminationFlowSpec extends StreamSpec with ScriptedTest { "completed out:Source => complete in:Sink" in { val probe = TestProbe() - val f = Flow.fromSinkAndSourceCoupledMat( - Sink.onComplete(d => probe.ref ! 
"done"), - Source.empty)(Keep.none) // completes right away, should complete the sink as well + val f = Flow.fromSinkAndSourceCoupledMat(Sink.onComplete(d => probe.ref ! "done"), Source.empty)(Keep.none) // completes right away, should complete the sink as well f.runWith(Source.maybe, Sink.ignore) // these do nothing. @@ -97,17 +94,15 @@ class CoupledTerminationFlowSpec extends StreamSpec with ScriptedTest { "cancel in:Sink => cancel out:Source" in { val probe = TestProbe() - val f = Flow.fromSinkAndSourceCoupledMat( - Sink.cancelled, - Source.fromPublisher(new Publisher[String] { - override def subscribe(subscriber: Subscriber[_ >: String]): Unit = { - subscriber.onSubscribe(new Subscription { - override def cancel(): Unit = probe.ref ! "cancelled" + val f = Flow.fromSinkAndSourceCoupledMat(Sink.cancelled, Source.fromPublisher(new Publisher[String] { + override def subscribe(subscriber: Subscriber[_ >: String]): Unit = { + subscriber.onSubscribe(new Subscription { + override def cancel(): Unit = probe.ref ! "cancelled" - override def request(l: Long): Unit = () // do nothing - }) - } - }))(Keep.none) // completes right away, should complete the sink as well + override def request(l: Long): Unit = () // do nothing + }) + } + }))(Keep.none) // completes right away, should complete the sink as well f.runWith(Source.maybe, Sink.ignore) // these do nothing. @@ -147,11 +142,12 @@ class CoupledTerminationFlowSpec extends StreamSpec with ScriptedTest { val downstreamEffect = Sink.onComplete(s => probe.ref ! s) val upstreamEffect = Source.fromPublisher(new Publisher[String] { - override def subscribe(s: Subscriber[_ >: String]): Unit = s.onSubscribe(new Subscription { - override def cancel(): Unit = probe.ref ! "cancel-received" + override def subscribe(s: Subscriber[_ >: String]): Unit = + s.onSubscribe(new Subscription { + override def cancel(): Unit = probe.ref ! 
"cancel-received" - override def request(n: Long): Unit = () - }) + override def request(n: Long): Unit = () + }) }) val assertCancel = () => { val m = probe.expectMsgType[String] @@ -236,10 +232,12 @@ class CoupledTerminationFlowSpec extends StreamSpec with ScriptedTest { val causeComplete = Source.empty[String] val causeError = Source.failed(new Exception("Boom")) - val catchEffect = Source.maybe[String].mapMaterializedValue(p => { - p.future.onComplete(t => probe.ref ! t) - NotUsed - }) + val catchEffect = Source + .maybe[String] + .mapMaterializedValue(p => { + p.future.onComplete(t => probe.ref ! t) + NotUsed + }) val assertCancel = () => { val m = probe.expectMsgType[Try[Option[String]]] m.isFailure should ===(false) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAppendSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAppendSpec.scala index 72d8ec0cdf..3cabaeb96e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAppendSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAppendSpec.scala @@ -30,15 +30,11 @@ class FlowAppendSpec extends StreamSpec with River { "Source" should { "append Flow" in riverOf[String] { subscriber => - Source(elements) - .via(otherFlow) - .to(Sink.fromSubscriber(subscriber)).run() + Source(elements).via(otherFlow).to(Sink.fromSubscriber(subscriber)).run() } "append Sink" in riverOf[String] { subscriber => - Source(elements) - .to(otherFlow.to(Sink.fromSubscriber(subscriber))) - .run() + Source(elements).to(otherFlow.to(Sink.fromSubscriber(subscriber))).run() } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala index e74d6e9135..613a91a76a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowAskSpec.scala @@ -79,19 +79,25 @@ class FlowAskSpec 
extends StreamSpec { implicit val timeout = akka.util.Timeout(10.seconds) - val replyOnInts = system.actorOf(Props(classOf[Replier]).withDispatcher("akka.test.stream-dispatcher"), "replyOnInts") + val replyOnInts = + system.actorOf(Props(classOf[Replier]).withDispatcher("akka.test.stream-dispatcher"), "replyOnInts") val dontReply = system.actorOf(TestActors.blackholeProps.withDispatcher("akka.test.stream-dispatcher"), "dontReply") - val replyRandomDelays = system.actorOf(Props(classOf[RandomDelaysReplier]).withDispatcher("akka.test.stream-dispatcher"), "replyRandomDelays") + val replyRandomDelays = + system.actorOf(Props(classOf[RandomDelaysReplier]).withDispatcher("akka.test.stream-dispatcher"), + "replyRandomDelays") - val statusReplier = system.actorOf(Props(new StatusReplier).withDispatcher("akka.test.stream-dispatcher"), "statusReplier") + val statusReplier = + system.actorOf(Props(new StatusReplier).withDispatcher("akka.test.stream-dispatcher"), "statusReplier") - def replierFailOn(n: Int) = system.actorOf(Props(new FailOn(n)).withDispatcher("akka.test.stream-dispatcher"), s"failureReplier-$n") + def replierFailOn(n: Int) = + system.actorOf(Props(new FailOn(n)).withDispatcher("akka.test.stream-dispatcher"), s"failureReplier-$n") val failsOn1 = replierFailOn(1) val failsOn3 = replierFailOn(3) - def replierFailAllExceptOn(n: Int) = system.actorOf(Props(new FailOnAllExcept(n)).withDispatcher("akka.test.stream-dispatcher"), s"failureReplier-$n") + def replierFailAllExceptOn(n: Int) = + system.actorOf(Props(new FailOnAllExcept(n)).withDispatcher("akka.test.stream-dispatcher"), s"failureReplier-$n") val failAllExcept6 = replierFailAllExceptOn(6) "produce asked elements" in assertAllStagesStopped { @@ -147,9 +153,11 @@ class FlowAskSpec extends StreamSpec { "signal ask timeout failure" in assertAllStagesStopped { val c = TestSubscriber.manualProbe[Reply]() implicit val ec = system.dispatcher - Source(1 to 5).map(_ + " nope") + Source(1 to 5) + .map(_ + " nope") 
.ask[Reply](4)(dontReply)(akka.util.Timeout(10.millis), implicitly[ClassTag[Reply]]) - .to(Sink.fromSubscriber(c)).run() + .to(Sink.fromSubscriber(c)) + .run() c.expectSubscription().request(10) c.expectError().getMessage should startWith("Ask timed out on [Actor[akka://FlowAskSpec/user/dontReply#") } @@ -166,13 +174,13 @@ class FlowAskSpec extends StreamSpec { "signal failure when target actor is terminated" in assertAllStagesStopped { val r = system.actorOf(Props(classOf[Replier]).withDispatcher("akka.test.stream-dispatcher"), "wanna-fail") - val done = Source.maybe[Int] - .ask[Reply](4)(r).runWith(Sink.ignore) + val done = Source.maybe[Int].ask[Reply](4)(r).runWith(Sink.ignore) intercept[RuntimeException] { r ! PoisonPill Await.result(done, remainingOrDefault) - }.getMessage should startWith("Actor watched by [ask()] has terminated! Was: Actor[akka://FlowAskSpec/user/wanna-fail#") + }.getMessage should startWith( + "Actor watched by [ask()] has terminated! Was: Actor[akka://FlowAskSpec/user/wanna-fail#") } "a failure mid-stream must skip element with resume strategy" in assertAllStagesStopped { @@ -180,7 +188,8 @@ class FlowAskSpec extends StreamSpec { val input = "a" :: "b" :: "c" :: "d" :: "e" :: "f" :: Nil - val elements = Source.fromIterator(() => input.iterator) + val elements = Source + .fromIterator(() => input.iterator) .ask[String](5)(p.ref) .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) .runWith(Sink.seq) @@ -216,7 +225,8 @@ class FlowAskSpec extends StreamSpec { val p = Source(1 to 5) .ask[Reply](4)(ref) .withAttributes(supervisionStrategy(resumingDecider)) - .to(Sink.fromSubscriber(c)).run() + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) for (n <- List(1, 2, 4, 5)) c.expectNext(Reply(n)) @@ -224,10 +234,11 @@ class FlowAskSpec extends StreamSpec { } "resume after multiple failures" in assertAllStagesStopped { - Await.result( - Source(1 to 6) - 
.ask[Reply](2)(failAllExcept6).withAttributes(supervisionStrategy(resumingDecider)) - .runWith(Sink.head), 3.seconds) should ===(Reply(6)) + Await.result(Source(1 to 6) + .ask[Reply](2)(failAllExcept6) + .withAttributes(supervisionStrategy(resumingDecider)) + .runWith(Sink.head), + 3.seconds) should ===(Reply(6)) } "should handle cancel properly" in assertAllStagesStopped { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBatchSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBatchSpec.scala index 3362e27fd8..9009973a50 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBatchSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBatchSpec.scala @@ -7,13 +7,12 @@ package akka.stream.scaladsl import scala.concurrent.Await import scala.concurrent.duration._ import java.util.concurrent.ThreadLocalRandom -import akka.stream.{ OverflowStrategy, ActorMaterializer, ActorMaterializerSettings } +import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, OverflowStrategy } import akka.stream.testkit._ class FlowBatchSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val materializer = ActorMaterializer(settings) @@ -23,7 +22,11 @@ class FlowBatchSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).batch(max = 2, seed = i => i)(aggregate = _ + _).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .batch(max = 2, seed = i => i)(aggregate = _ + _) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() for (i <- 1 to 100) { @@ -39,7 +42,11 @@ class FlowBatchSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = 
TestSubscriber.manualProbe[List[Int]]() - Source.fromPublisher(publisher).batch(max = Long.MaxValue, seed = i => List(i))(aggregate = (ints, i) => i :: ints).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .batch(max = Long.MaxValue, seed = i => List(i))(aggregate = (ints, i) => i :: ints) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() for (i <- 1 to 10) { @@ -54,7 +61,9 @@ class FlowBatchSpec extends StreamSpec { "work on a variable rate chain" in { val future = Source(1 to 1000) .batch(max = 100, seed = i => i)(aggregate = (sum, i) => sum + i) - .map { i => if (ThreadLocalRandom.current().nextBoolean()) Thread.sleep(10); i } + .map { i => + if (ThreadLocalRandom.current().nextBoolean()) Thread.sleep(10); i + } .runFold(0)(_ + _) Await.result(future, 10.seconds) should be(500500) } @@ -63,7 +72,11 @@ class FlowBatchSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).batch(max = 2, seed = i => i)(aggregate = _ + _).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .batch(max = 2, seed = i => i)(aggregate = _ + _) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() sub.request(1) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBatchWeightedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBatchWeightedSpec.scala index d2a649a048..663b7fc7ba 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBatchWeightedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBatchWeightedSpec.scala @@ -10,8 +10,7 @@ import scala.concurrent.duration._ class FlowBatchWeightedSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = 
ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val materializer = ActorMaterializer(settings) @@ -20,7 +19,11 @@ class FlowBatchWeightedSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).batchWeighted(max = 3, _ => 4, seed = i => i)(aggregate = _ + _).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .batchWeighted(max = 3, _ => 4, seed = i => i)(aggregate = _ + _) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() publisher.sendNext(1) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBufferSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBufferSpec.scala index 8b15a4ce16..8015d60706 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBufferSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowBufferSpec.scala @@ -7,30 +7,29 @@ package akka.stream.scaladsl import scala.concurrent.Await import scala.concurrent.Future import scala.concurrent.duration._ -import akka.stream.{ BufferOverflowException, ActorMaterializer, ActorMaterializerSettings, OverflowStrategy } +import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, BufferOverflowException, OverflowStrategy } import akka.stream.testkit._ import akka.stream.testkit.scaladsl._ import akka.stream.testkit.scaladsl.StreamTestKit._ class FlowBufferSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 1, maxSize = 1) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 1, maxSize = 1) implicit val materializer = ActorMaterializer(settings) "Buffer" must { "pass elements through normally in backpressured mode" in { - val future: Future[Seq[Int]] = Source(1 to 1000).buffer(100, overflowStrategy = 
OverflowStrategy.backpressure).grouped(1001). - runWith(Sink.head) + val future: Future[Seq[Int]] = + Source(1 to 1000).buffer(100, overflowStrategy = OverflowStrategy.backpressure).grouped(1001).runWith(Sink.head) Await.result(future, 3.seconds) should be(1 to 1000) } "pass elements through normally in backpressured mode with buffer size one" in { val futureSink = Sink.head[Seq[Int]] - val future = Source(1 to 1000).buffer(1, overflowStrategy = OverflowStrategy.backpressure).grouped(1001). - runWith(Sink.head) + val future = + Source(1 to 1000).buffer(1, overflowStrategy = OverflowStrategy.backpressure).grouped(1001).runWith(Sink.head) Await.result(future, 3.seconds) should be(1 to 1000) } @@ -51,7 +50,11 @@ class FlowBufferSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).buffer(100, overflowStrategy = OverflowStrategy.backpressure).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .buffer(100, overflowStrategy = OverflowStrategy.backpressure) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() // Fill up buffer @@ -69,7 +72,11 @@ class FlowBufferSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).buffer(100, overflowStrategy = OverflowStrategy.dropHead).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .buffer(100, overflowStrategy = OverflowStrategy.dropHead) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() // Fill up buffer @@ -98,7 +105,11 @@ class FlowBufferSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).buffer(100, overflowStrategy = OverflowStrategy.dropTail).to(Sink.fromSubscriber(subscriber)).run() + Source + 
.fromPublisher(publisher) + .buffer(100, overflowStrategy = OverflowStrategy.dropTail) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() // Fill up buffer @@ -130,7 +141,11 @@ class FlowBufferSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).buffer(100, overflowStrategy = OverflowStrategy.dropBuffer).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .buffer(100, overflowStrategy = OverflowStrategy.dropBuffer) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() // Fill up buffer @@ -156,7 +171,11 @@ class FlowBufferSpec extends StreamSpec { } "drop new elements if buffer is full and configured so" in { - val (publisher, subscriber) = TestSource.probe[Int].buffer(100, overflowStrategy = OverflowStrategy.dropNew).toMat(TestSink.probe[Int])(Keep.both).run() + val (publisher, subscriber) = TestSource + .probe[Int] + .buffer(100, overflowStrategy = OverflowStrategy.dropNew) + .toMat(TestSink.probe[Int])(Keep.both) + .run() subscriber.ensureSubscription() @@ -184,7 +203,11 @@ class FlowBufferSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).buffer(100, overflowStrategy = OverflowStrategy.fail).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .buffer(100, overflowStrategy = OverflowStrategy.fail) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() // Fill up buffer diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowCollectSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowCollectSpec.scala index 456e24da9c..5570b942b0 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowCollectSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowCollectSpec.scala @@ -12,7 +12,7 @@ import akka.stream.testkit.scaladsl.TestSink import java.util.concurrent.ThreadLocalRandom.{ current => random } import akka.stream.{ ActorMaterializer, ActorMaterializerSettings } -import akka.stream.testkit.{ StreamSpec, ScriptedTest } +import akka.stream.testkit.{ ScriptedTest, StreamSpec } class FlowCollectSpec extends StreamSpec with ScriptedTest { @@ -22,17 +22,20 @@ class FlowCollectSpec extends StreamSpec with ScriptedTest { "A Collect" must { "collect" in { - def script = Script(TestConfig.RandomTestRange map { _ => - val x = random.nextInt(0, 10000) - Seq(x) -> (if ((x & 1) == 0) Seq((x * x).toString) else Seq.empty[String]) - }: _*) - TestConfig.RandomTestRange foreach (_ => runScript(script, settings)(_.collect { case x if x % 2 == 0 => (x * x).toString })) + def script = + Script(TestConfig.RandomTestRange.map { _ => + val x = random.nextInt(0, 10000) + Seq(x) -> (if ((x & 1) == 0) Seq((x * x).toString) else Seq.empty[String]) + }: _*) + TestConfig.RandomTestRange.foreach(_ => + runScript(script, settings)(_.collect { case x if x % 2 == 0 => (x * x).toString })) } "restart when Collect throws" in { - val pf: PartialFunction[Int, Int] = - { case x: Int => if (x == 2) throw TE("") else x } - Source(1 to 3).collect(pf).withAttributes(supervisionStrategy(restartingDecider)) + val pf: PartialFunction[Int, Int] = { case x: Int => if (x == 2) throw TE("") else x } + Source(1 to 3) + .collect(pf) + .withAttributes(supervisionStrategy(restartingDecider)) .runWith(TestSink.probe[Int]) .request(1) .expectNext(1) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllSpec.scala index 4ef0aa2060..5dc9ea7089 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatAllSpec.scala @@ -15,8 +15,7 @@ import akka.stream.testkit.scaladsl.StreamTestKit._ class FlowConcatAllSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val materializer = ActorMaterializer(settings) @@ -63,7 +62,11 @@ class FlowConcatAllSpec extends StreamSpec { "on onError on master stream cancel the current open substream and signal error" in assertAllStagesStopped { val publisher = TestPublisher.manualProbe[Source[Int, NotUsed]]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).flatMapConcat(ConstantFun.scalaIdentityFunction).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .flatMapConcat(ConstantFun.scalaIdentityFunction) + .to(Sink.fromSubscriber(subscriber)) + .run() val upstream = publisher.expectSubscription() val downstream = subscriber.expectSubscription() @@ -83,7 +86,11 @@ class FlowConcatAllSpec extends StreamSpec { "on onError on master stream cancel the currently opening substream and signal error" in assertAllStagesStopped { val publisher = TestPublisher.manualProbe[Source[Int, NotUsed]]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).flatMapConcat(ConstantFun.scalaIdentityFunction).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .flatMapConcat(ConstantFun.scalaIdentityFunction) + .to(Sink.fromSubscriber(subscriber)) + .run() val upstream = publisher.expectSubscription() val downstream = subscriber.expectSubscription() @@ -123,7 +130,11 @@ class FlowConcatAllSpec extends StreamSpec { "on onError on open substream, cancel the master stream and signal error " in assertAllStagesStopped { val publisher = TestPublisher.manualProbe[Source[Int, NotUsed]]() val subscriber = 
TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).flatMapConcat(ConstantFun.scalaIdentityFunction).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .flatMapConcat(ConstantFun.scalaIdentityFunction) + .to(Sink.fromSubscriber(subscriber)) + .run() val upstream = publisher.expectSubscription() val downstream = subscriber.expectSubscription() @@ -143,7 +154,11 @@ class FlowConcatAllSpec extends StreamSpec { "on cancellation cancel the current open substream and the master stream" in assertAllStagesStopped { val publisher = TestPublisher.manualProbe[Source[Int, NotUsed]]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).flatMapConcat(ConstantFun.scalaIdentityFunction).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .flatMapConcat(ConstantFun.scalaIdentityFunction) + .to(Sink.fromSubscriber(subscriber)) + .run() val upstream = publisher.expectSubscription() val downstream = subscriber.expectSubscription() @@ -164,7 +179,11 @@ class FlowConcatAllSpec extends StreamSpec { "on cancellation cancel the currently opening substream and the master stream" in assertAllStagesStopped { val publisher = TestPublisher.manualProbe[Source[Int, NotUsed]]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).flatMapConcat(ConstantFun.scalaIdentityFunction).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .flatMapConcat(ConstantFun.scalaIdentityFunction) + .to(Sink.fromSubscriber(subscriber)) + .run() val upstream = publisher.expectSubscription() val downstream = subscriber.expectSubscription() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala index 26bb7d3944..62001ab9d0 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConcatSpec.scala @@ -142,7 +142,8 @@ class FlowConcatSpec extends BaseTwoStreamsSetup { } "work with Flow DSL" in { - val testFlow: Flow[Int, Seq[Int], (NotUsed, NotUsed)] = Flow[Int].concatMat(Source(6 to 10))(Keep.both).grouped(1000) + val testFlow: Flow[Int, Seq[Int], (NotUsed, NotUsed)] = + Flow[Int].concatMat(Source(6 to 10))(Keep.both).grouped(1000) Await.result(Source(1 to 5).viaMat(testFlow)(Keep.both).runWith(Sink.head), 3.seconds) should ===(1 to 10) val runnable = Source(1 to 5).viaMat(testFlow)(Keep.both).to(Sink.ignore) @@ -172,8 +173,8 @@ class FlowConcatSpec extends BaseTwoStreamsSetup { "subscribe at once to initial source and to one that it's concat to" in { val publisher1 = TestPublisher.probe[Int]() val publisher2 = TestPublisher.probe[Int]() - val probeSink = Source.fromPublisher(publisher1).concat(Source.fromPublisher(publisher2)) - .runWith(TestSink.probe[Int]) + val probeSink = + Source.fromPublisher(publisher1).concat(Source.fromPublisher(publisher2)).runWith(TestSink.probe[Int]) val sub1 = publisher1.expectSubscription() val sub2 = publisher2.expectSubscription() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConflateSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConflateSpec.scala index 8ab9d1bd42..5e0f70ef1c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConflateSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowConflateSpec.scala @@ -6,7 +6,7 @@ package akka.stream.scaladsl import akka.stream.ActorAttributes.supervisionStrategy import akka.stream.Attributes.inputBuffer -import akka.stream.Supervision.{ resumingDecider, restartingDecider } +import akka.stream.Supervision.{ restartingDecider, resumingDecider } import akka.stream.testkit.Utils.TE import akka.testkit.TestLatch import scala.concurrent.Await @@ -17,8 +17,7 @@ import akka.stream.testkit._ class FlowConflateSpec extends 
StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val materializer = ActorMaterializer(settings) @@ -28,7 +27,11 @@ class FlowConflateSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).conflateWithSeed(seed = i => i)(aggregate = (sum, i) => sum + i).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .conflateWithSeed(seed = i => i)(aggregate = (sum, i) => sum + i) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() for (i <- 1 to 100) { @@ -60,7 +63,11 @@ class FlowConflateSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).conflateWithSeed(seed = i => i)(aggregate = (sum, i) => sum + i).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .conflateWithSeed(seed = i => i)(aggregate = (sum, i) => sum + i) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() for (i <- 1 to 100) { @@ -91,7 +98,9 @@ class FlowConflateSpec extends StreamSpec { "work on a variable rate chain" in { val future = Source(1 to 1000) .conflateWithSeed(seed = i => i)(aggregate = (sum, i) => sum + i) - .map { i => if (ThreadLocalRandom.current().nextBoolean()) Thread.sleep(10); i } + .map { i => + if (ThreadLocalRandom.current().nextBoolean()) Thread.sleep(10); i + } .runFold(0)(_ + _) Await.result(future, 10.seconds) should be(500500) } @@ -99,7 +108,9 @@ class FlowConflateSpec extends StreamSpec { "work on a variable rate chain (simple conflate)" in { val future = Source(1 to 1000) .conflate(_ + _) - .map { i => if (ThreadLocalRandom.current().nextBoolean()) Thread.sleep(10); i } + .map { i => + 
if (ThreadLocalRandom.current().nextBoolean()) Thread.sleep(10); i + } .runFold(0)(_ + _) Await.result(future, 10.seconds) should be(500500) } @@ -108,7 +119,11 @@ class FlowConflateSpec extends StreamSpec { val publisher = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(publisher).conflateWithSeed(seed = i => i)(aggregate = (sum, i) => sum + i).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .conflateWithSeed(seed = i => i)(aggregate = (sum, i) => sum + i) + .to(Sink.fromSubscriber(subscriber)) + .run() val sub = subscriber.expectSubscription() sub.request(1) @@ -146,7 +161,8 @@ class FlowConflateSpec extends StreamSpec { val sinkProbe = TestSubscriber.probe[Int]() val exceptionLatch = TestLatch() - val future = Source.fromPublisher(sourceProbe) + val future = Source + .fromPublisher(sourceProbe) .conflateWithSeed { i => if (i % 2 == 0) { exceptionLatch.open() @@ -200,7 +216,8 @@ class FlowConflateSpec extends StreamSpec { val sourceProbe = TestPublisher.probe[String]() val sinkProbe = TestSubscriber.probe[String]() - Source.fromPublisher(sourceProbe) + Source + .fromPublisher(sourceProbe) .via(conflate) .to(Sink.fromSubscriber(sinkProbe)) .withAttributes(inputBuffer(initial = 4, max = 4)) @@ -226,7 +243,8 @@ class FlowConflateSpec extends StreamSpec { val sinkProbe = TestSubscriber.probe[Vector[Int]]() val saw4Latch = TestLatch() - val future = Source.fromPublisher(sourceProbe) + val future = Source + .fromPublisher(sourceProbe) .conflateWithSeed(seed = i => Vector(i))((state, elem) => if (elem == 2) { throw TE("three is a four letter word") diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala index c3cba3179b..ae62add410 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDelaySpec.scala @@ -24,13 +24,14 @@ class FlowDelaySpec extends StreamSpec { "A Delay" must { "deliver elements with some time shift" taggedAs TimingTest in { - Await.result( - Source(1 to 10).delay(1.seconds).grouped(100).runWith(Sink.head), - 1200.millis) should ===(1 to 10) + Await.result(Source(1 to 10).delay(1.seconds).grouped(100).runWith(Sink.head), 1200.millis) should ===(1 to 10) } "add delay to initialDelay if exists upstream" taggedAs TimingTest in { - Source(1 to 10).initialDelay(1.second).delay(1.second).runWith(TestSink.probe[Int]) + Source(1 to 10) + .initialDelay(1.second) + .delay(1.second) + .runWith(TestSink.probe[Int]) .request(10) .expectNoMsg(1800.millis) .expectNext(300.millis, 1) @@ -39,7 +40,9 @@ class FlowDelaySpec extends StreamSpec { } "deliver element after time passed from actual receiving element" in { - Source(1 to 3).delay(300.millis).runWith(TestSink.probe[Int]) + Source(1 to 3) + .delay(300.millis) + .runWith(TestSink.probe[Int]) .request(2) .expectNoMsg(200.millis) //delay .expectNext(200.millis, 1) //delayed element @@ -89,31 +92,37 @@ class FlowDelaySpec extends StreamSpec { } "drop tail for internal buffer if it's full in DropTail mode" in assertAllStagesStopped { - Await.result( - Source(1 to 20).delay(1.seconds, DelayOverflowStrategy.dropTail).withAttributes(inputBuffer(16, 16)) - .grouped(100) - .runWith(Sink.head), - 1200.millis) should ===((1 to 15).toList :+ 20) + Await.result(Source(1 to 20) + .delay(1.seconds, DelayOverflowStrategy.dropTail) + .withAttributes(inputBuffer(16, 16)) + .grouped(100) + .runWith(Sink.head), + 1200.millis) should ===((1 to 15).toList :+ 20) } "drop head for internal buffer if it's full in DropHead mode" in assertAllStagesStopped { - Await.result( - Source(1 to 20).delay(1.seconds, DelayOverflowStrategy.dropHead).withAttributes(inputBuffer(16, 16)) - .grouped(100) - .runWith(Sink.head), - 1200.millis) should ===(5 to 20) + 
Await.result(Source(1 to 20) + .delay(1.seconds, DelayOverflowStrategy.dropHead) + .withAttributes(inputBuffer(16, 16)) + .grouped(100) + .runWith(Sink.head), + 1200.millis) should ===(5 to 20) } "clear all for internal buffer if it's full in DropBuffer mode" in assertAllStagesStopped { - Await.result( - Source(1 to 20).delay(1.seconds, DelayOverflowStrategy.dropBuffer).withAttributes(inputBuffer(16, 16)) - .grouped(100) - .runWith(Sink.head), - 1200.millis) should ===(17 to 20) + Await.result(Source(1 to 20) + .delay(1.seconds, DelayOverflowStrategy.dropBuffer) + .withAttributes(inputBuffer(16, 16)) + .grouped(100) + .runWith(Sink.head), + 1200.millis) should ===(17 to 20) } "pass elements with delay through normally in backpressured mode" in assertAllStagesStopped { - Source(1 to 3).delay(300.millis, DelayOverflowStrategy.backpressure).withAttributes(inputBuffer(1, 1)).runWith(TestSink.probe[Int]) + Source(1 to 3) + .delay(300.millis, DelayOverflowStrategy.backpressure) + .withAttributes(inputBuffer(1, 1)) + .runWith(TestSink.probe[Int]) .request(5) .expectNoMsg(200.millis) .expectNext(200.millis, 1) @@ -124,7 +133,8 @@ class FlowDelaySpec extends StreamSpec { } "fail on overflow in Fail mode" in assertAllStagesStopped { - Source(1 to 20).delay(300.millis, DelayOverflowStrategy.fail) + Source(1 to 20) + .delay(300.millis, DelayOverflowStrategy.fail) .withAttributes(inputBuffer(16, 16)) .runWith(TestSink.probe[Int]) .request(100) @@ -136,7 +146,12 @@ class FlowDelaySpec extends StreamSpec { val c = TestSubscriber.manualProbe[Int]() val p = TestPublisher.manualProbe[Int]() - Source.fromPublisher(p).delay(10.seconds, DelayOverflowStrategy.emitEarly).withAttributes(inputBuffer(16, 16)).to(Sink.fromSubscriber(c)).run() + Source + .fromPublisher(p) + .delay(10.seconds, DelayOverflowStrategy.emitEarly) + .withAttributes(inputBuffer(16, 16)) + .to(Sink.fromSubscriber(c)) + .run() val cSub = c.expectSubscription() val pSub = p.expectSubscription() cSub.request(20) @@ 
-157,7 +172,8 @@ class FlowDelaySpec extends StreamSpec { Source(1 to 5) .delay(500.millis, DelayOverflowStrategy.backpressure) .withAttributes(Attributes.inputBuffer(initial = 1, max = 1)) - .runWith(Sink.ignore).pipeTo(testActor) + .runWith(Sink.ignore) + .pipeTo(testActor) expectNoMsg(2.seconds) expectMsg(Done) @@ -166,39 +182,39 @@ class FlowDelaySpec extends StreamSpec { Source(1 to 100) .delay(1.second, DelayOverflowStrategy.backpressure) .withAttributes(Attributes.inputBuffer(initial = 100, max = 100)) - .runWith(Sink.ignore).pipeTo(testActor) + .runWith(Sink.ignore) + .pipeTo(testActor) expectMsg(Done) // Delays that are already present are preserved when buffer is large enough - Source.tick(100.millis, 100.millis, ()).take(10) + Source + .tick(100.millis, 100.millis, ()) + .take(10) .delay(1.second, DelayOverflowStrategy.backpressure) .withAttributes(Attributes.inputBuffer(initial = 10, max = 10)) - .runWith(Sink.ignore).pipeTo(testActor) + .runWith(Sink.ignore) + .pipeTo(testActor) expectNoMsg(900.millis) expectMsg(Done) } "not overflow buffer when DelayOverflowStrategy.backpressure" in { - val probe = Source(1 to 6).delay(100.millis, DelayOverflowStrategy.backpressure) + val probe = Source(1 to 6) + .delay(100.millis, DelayOverflowStrategy.backpressure) .withAttributes(Attributes.inputBuffer(2, 2)) .throttle(1, 200.millis, 1, ThrottleMode.Shaping) .runWith(TestSink.probe) - probe.request(10) - .expectNextN(1 to 6) - .expectComplete() + probe.request(10).expectNextN(1 to 6).expectComplete() } "not drop messages on overflow when EmitEarly" in { - val probe = Source(1 to 2) - .delay(1.second, EmitEarly).withAttributes(Attributes.inputBuffer(1, 1)) - .runWith(TestSink.probe) + val probe = + Source(1 to 2).delay(1.second, EmitEarly).withAttributes(Attributes.inputBuffer(1, 1)).runWith(TestSink.probe) - probe.request(10) - .expectNextN(1 to 2) - .expectComplete() + probe.request(10).expectNextN(1 to 2).expectComplete() } } } diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDetacherSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDetacherSpec.scala index dba86e0d68..ed3777e110 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDetacherSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDetacherSpec.scala @@ -18,18 +18,12 @@ class FlowDetacherSpec extends StreamSpec { "A Detacher" must { "pass through all elements" in assertAllStagesStopped { - Source(1 to 100) - .detach - .runWith(Sink.seq) - .futureValue should ===(1 to 100) + Source(1 to 100).detach.runWith(Sink.seq).futureValue should ===(1 to 100) } "pass through failure" in assertAllStagesStopped { val ex = new Exception("buh") - val result = Source(1 to 100) - .map(x => if (x == 50) throw ex else x) - .detach - .runWith(Sink.seq) + val result = Source(1 to 100).map(x => if (x == 50) throw ex else x).detach.runWith(Sink.seq) intercept[Exception] { Await.result(result, 2.seconds) } should ===(ex) @@ -37,7 +31,8 @@ class FlowDetacherSpec extends StreamSpec { } "emit the last element when completed without demand" in assertAllStagesStopped { - Source.single(42) + Source + .single(42) .detach .runWith(TestSink.probe) .ensureSubscription() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDispatcherSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDispatcherSpec.scala index fe7ae66003..7e3d4bc77e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDispatcherSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDispatcherSpec.scala @@ -13,15 +13,14 @@ class FlowDispatcherSpec extends StreamSpec(s"my-dispatcher = $${akka.test.strea val defaultSettings = ActorMaterializerSettings(system) - def testDispatcher(settings: ActorMaterializerSettings = defaultSettings, dispatcher: String = "akka.test.stream-dispatcher") = { + def testDispatcher(settings: ActorMaterializerSettings = 
defaultSettings, + dispatcher: String = "akka.test.stream-dispatcher") = { implicit val materializer = ActorMaterializer(settings) val probe = TestProbe() - val p = Source(List(1, 2, 3)).map(i => - { probe.ref ! Thread.currentThread().getName(); i }). - to(Sink.ignore).run() - probe.receiveN(3) foreach { + val p = Source(List(1, 2, 3)).map(i => { probe.ref ! Thread.currentThread().getName(); i }).to(Sink.ignore).run() + probe.receiveN(3).foreach { case s: String => s should startWith(system.name + "-" + dispatcher) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala index 8c3f55c9a1..ac9ec6d45d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropSpec.scala @@ -11,16 +11,18 @@ import akka.stream.testkit._ class FlowDropSpec extends StreamSpec with ScriptedTest { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) "A Drop" must { "drop" in { - def script(d: Int) = Script(TestConfig.RandomTestRange map { n => Seq(n) -> (if (n <= d) Nil else Seq(n)) }: _*) - TestConfig.RandomTestRange foreach { _ => + def script(d: Int) = + Script(TestConfig.RandomTestRange.map { n => + Seq(n) -> (if (n <= d) Nil else Seq(n)) + }: _*) + TestConfig.RandomTestRange.foreach { _ => val d = Math.min(Math.max(random.nextInt(-10, 60), 0), 50) runScript(script(d), settings)(_.drop(d)) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropWhileSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropWhileSpec.scala index 9d9331cb1a..477813a588 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropWhileSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropWhileSpec.scala @@ -22,31 +22,30 @@ class FlowDropWhileSpec extends StreamSpec { "A DropWhile" must { "drop while predicate is true" in assertAllStagesStopped { - Source(1 to 4).dropWhile(_ < 3).runWith(TestSink.probe[Int]) - .request(2) - .expectNext(3, 4) - .expectComplete() + Source(1 to 4).dropWhile(_ < 3).runWith(TestSink.probe[Int]).request(2).expectNext(3, 4).expectComplete() } "complete the future for an empty stream" in assertAllStagesStopped { - Source.empty[Int].dropWhile(_ < 2).runWith(TestSink.probe[Int]) - .request(1) - .expectComplete() + Source.empty[Int].dropWhile(_ < 2).runWith(TestSink.probe[Int]).request(1).expectComplete() } "continue if error" in assertAllStagesStopped { - Source(1 to 4).dropWhile(a => if (a < 3) true else throw TE("")).withAttributes(supervisionStrategy(resumingDecider)) + Source(1 to 4) + .dropWhile(a => if (a < 3) true else throw TE("")) + .withAttributes(supervisionStrategy(resumingDecider)) .runWith(TestSink.probe[Int]) .request(1) .expectComplete() } "restart with strategy" in assertAllStagesStopped { - Source(1 to 4).dropWhile { - case 1 | 3 => true - case 4 => false - case 2 => throw TE("") - }.withAttributes(supervisionStrategy(restartingDecider)) + Source(1 to 4) + .dropWhile { + case 1 | 3 => true + case 4 => false + case 2 => throw TE("") + } + .withAttributes(supervisionStrategy(restartingDecider)) .runWith(TestSink.probe[Int]) .request(1) .expectNext(4) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropWithinSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropWithinSpec.scala index cbf1966946..5d7664099d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropWithinSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowDropWithinSpec.scala @@ -23,13 +23,21 @@ class FlowDropWithinSpec extends StreamSpec { val cSub = c.expectSubscription cSub.request(100) val demand1 
= pSub.expectRequest - (1 to demand1.toInt) foreach { _ => pSub.sendNext(input.next()) } + (1 to demand1.toInt).foreach { _ => + pSub.sendNext(input.next()) + } val demand2 = pSub.expectRequest - (1 to demand2.toInt) foreach { _ => pSub.sendNext(input.next()) } + (1 to demand2.toInt).foreach { _ => + pSub.sendNext(input.next()) + } val demand3 = pSub.expectRequest c.expectNoMsg(1500.millis) - (1 to demand3.toInt) foreach { _ => pSub.sendNext(input.next()) } - ((demand1 + demand2 + 1).toInt to (demand1 + demand2 + demand3).toInt) foreach { n => c.expectNext(n) } + (1 to demand3.toInt).foreach { _ => + pSub.sendNext(input.next()) + } + ((demand1 + demand2 + 1).toInt to (demand1 + demand2 + demand3).toInt).foreach { n => + c.expectNext(n) + } pSub.sendComplete() c.expectComplete c.expectNoMsg(200.millis) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExpandSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExpandSpec.scala index af7090409a..c39ae7c43c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExpandSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExpandSpec.scala @@ -14,8 +14,7 @@ import akka.stream.testkit.scaladsl.TestSink class FlowExpandSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val materializer = ActorMaterializer(settings) @@ -84,7 +83,9 @@ class FlowExpandSpec extends StreamSpec { "work on a variable rate chain" in { val future = Source(1 to 100) - .map { i => if (ThreadLocalRandom.current().nextBoolean()) Thread.sleep(10); i } + .map { i => + if (ThreadLocalRandom.current().nextBoolean()) Thread.sleep(10); i + } .expand(Iterator.continually(_)) .runFold(Set.empty[Int])(_ + _) @@ -129,22 +130,11 @@ class FlowExpandSpec extends StreamSpec { "work properly with finite 
extrapolations" in { val (source, sink) = - TestSource.probe[Int] - .expand(i => Iterator.from(0).map(i -> _).take(3)) - .toMat(TestSink.probe)(Keep.both) - .run() - source - .sendNext(1) - sink - .request(4) - .expectNext(1 -> 0, 1 -> 1, 1 -> 2) - .expectNoMsg(100.millis) - source - .sendNext(2) - .sendComplete() - sink - .expectNext(2 -> 0) - .expectComplete() + TestSource.probe[Int].expand(i => Iterator.from(0).map(i -> _).take(3)).toMat(TestSink.probe)(Keep.both).run() + source.sendNext(1) + sink.request(4).expectNext(1 -> 0, 1 -> 1, 1 -> 2).expectNoMsg(100.millis) + source.sendNext(2).sendComplete() + sink.expectNext(2 -> 0).expectComplete() } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExtrapolateSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExtrapolateSpec.scala index bd4f8f604a..18fef88f98 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExtrapolateSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowExtrapolateSpec.scala @@ -15,8 +15,7 @@ import scala.concurrent.duration._ class FlowExtrapolateSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val materializer = ActorMaterializer(settings) @@ -46,7 +45,11 @@ class FlowExtrapolateSpec extends StreamSpec { val subscriber = TestSubscriber.probe[Int]() // Simply repeat the last element as an extrapolation step - Source.fromPublisher(publisher).extrapolate(e => Iterator.continually(e + 1)).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .extrapolate(e => Iterator.continually(e + 1)) + .to(Sink.fromSubscriber(subscriber)) + .run() publisher.sendNext(42) subscriber.requestNext(42) @@ -71,7 +74,11 @@ class FlowExtrapolateSpec extends StreamSpec { val testInit = 44 // Simply repeat the last element as an 
extrapolation step - Source.fromPublisher(publisher).extrapolate(Iterator.continually(_), initial = Some(testInit)).to(Sink.fromSubscriber(subscriber)).run() + Source + .fromPublisher(publisher) + .extrapolate(Iterator.continually(_), initial = Some(testInit)) + .to(Sink.fromSubscriber(subscriber)) + .run() publisher.sendNext(42) subscriber.requestNext(testInit) @@ -102,7 +109,9 @@ class FlowExtrapolateSpec extends StreamSpec { "work on a variable rate chain" in { val future = Source(1 to 100) - .map { i => if (ThreadLocalRandom.current().nextBoolean()) Thread.sleep(10); i } + .map { i => + if (ThreadLocalRandom.current().nextBoolean()) Thread.sleep(10); i + } .extrapolate(Iterator.continually(_)) .runFold(Set.empty[Int])(_ + _) @@ -147,22 +156,11 @@ class FlowExtrapolateSpec extends StreamSpec { "work properly with finite extrapolations" in { val (source, sink) = - TestSource.probe[Int] - .expand(i => Iterator.from(0).map(i -> _).take(3)) - .toMat(TestSink.probe)(Keep.both) - .run() - source - .sendNext(1) - sink - .request(4) - .expectNext(1 -> 0, 1 -> 1, 1 -> 2) - .expectNoMsg(100.millis) - source - .sendNext(2) - .sendComplete() - sink - .expectNext(2 -> 0) - .expectComplete() + TestSource.probe[Int].expand(i => Iterator.from(0).map(i -> _).take(3)).toMat(TestSink.probe)(Keep.both).run() + source.sendNext(1) + sink.request(4).expectNext(1 -> 0, 1 -> 1, 1 -> 2).expectNoMsg(100.millis) + source.sendNext(2).sendComplete() + sink.expectNext(2 -> 0).expectComplete() } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala index 9078dd60da..b3316f9a63 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFilterSpec.scala @@ -18,21 +18,22 @@ import scala.util.control.NoStackTrace class FlowFilterSpec extends StreamSpec with ScriptedTest { - val settings = 
ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) "A Filter" must { "filter" in { - def script = Script(TestConfig.RandomTestRange map { _ => val x = random.nextInt(); Seq(x) -> (if ((x & 1) == 0) Seq(x) else Seq()) }: _*) - TestConfig.RandomTestRange foreach (_ => runScript(script, settings)(_.filter(_ % 2 == 0))) + def script = + Script(TestConfig.RandomTestRange.map { _ => + val x = random.nextInt(); Seq(x) -> (if ((x & 1) == 0) Seq(x) else Seq()) + }: _*) + TestConfig.RandomTestRange.foreach(_ => runScript(script, settings)(_.filter(_ % 2 == 0))) } "not blow up with high request counts" in { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 1, maxSize = 1) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 1, maxSize = 1) implicit val materializer = ActorMaterializer(settings) val probe = TestSubscriber.manualProbe[Int]() @@ -52,7 +53,9 @@ class FlowFilterSpec extends StreamSpec with ScriptedTest { override def toString = "TE" } - Source(1 to 3).filter((x: Int) => if (x == 2) throw TE else true).withAttributes(supervisionStrategy(resumingDecider)) + Source(1 to 3) + .filter((x: Int) => if (x == 2) throw TE else true) + .withAttributes(supervisionStrategy(resumingDecider)) .runWith(TestSink.probe[Int]) .request(3) .expectNext(1, 3) @@ -63,12 +66,12 @@ class FlowFilterSpec extends StreamSpec with ScriptedTest { "A FilterNot" must { "filter based on inverted predicate" in { - def script = Script(TestConfig.RandomTestRange map - { _ => + def script = + Script(TestConfig.RandomTestRange.map { _ => val x = random.nextInt() Seq(x) -> (if ((x & 1) == 1) Seq(x) else Seq()) }: _*) - TestConfig.RandomTestRange foreach (_ => runScript(script, settings)(_.filterNot(_ % 2 == 0))) + TestConfig.RandomTestRange.foreach(_ => 
runScript(script, settings)(_.filterNot(_ % 2 == 0))) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala index 2786f7320f..318248468d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFlattenMergeSpec.scala @@ -62,41 +62,29 @@ class FlowFlattenMergeSpec extends StreamSpec { "propagate early failure from main stream" in assertAllStagesStopped { val ex = new Exception("buh") intercept[TestFailedException] { - Source.failed(ex) - .flatMapMerge(1, identity) - .runWith(Sink.head) - .futureValue + Source.failed(ex).flatMapMerge(1, identity).runWith(Sink.head).futureValue }.cause.get should ===(ex) } "propagate late failure from main stream" in assertAllStagesStopped { val ex = new Exception("buh") intercept[TestFailedException] { - (Source(List(blocked, blocked)) ++ Source.failed(ex)) - .flatMapMerge(10, identity) - .runWith(Sink.head) - .futureValue + (Source(List(blocked, blocked)) ++ Source.failed(ex)).flatMapMerge(10, identity).runWith(Sink.head).futureValue }.cause.get should ===(ex) } "propagate failure from map function" in assertAllStagesStopped { val ex = new Exception("buh") intercept[TestFailedException] { - Source(1 to 3) - .flatMapMerge(10, i => if (i == 3) throw ex else blocked) - .runWith(Sink.head) - .futureValue + Source(1 to 3).flatMapMerge(10, i => if (i == 3) throw ex else blocked).runWith(Sink.head).futureValue }.cause.get should ===(ex) } "bubble up substream exceptions" in assertAllStagesStopped { val ex = new Exception("buh") val result = intercept[TestFailedException] { - Source(List(blocked, blocked, Source.failed(ex))) - .flatMapMerge(10, identity) - .runWith(Sink.head) - .futureValue - }.cause.get should ===(ex) + Source(List(blocked, blocked, Source.failed(ex))).flatMapMerge(10, 
identity).runWith(Sink.head).futureValue + }.cause.get should ===(ex) } "bubble up substream materialization exception" in assertAllStagesStopped { @@ -177,9 +165,7 @@ class FlowFlattenMergeSpec extends StreamSpec { } "work with many concurrently queued events" in assertAllStagesStopped { - val p = Source((0 until 100).map(i => src10(10 * i))) - .flatMapMerge(Int.MaxValue, identity) - .runWith(TestSink.probe) + val p = Source((0 until 100).map(i => src10(10 * i))).flatMapMerge(Int.MaxValue, identity).runWith(TestSink.probe) p.within(1.second) { p.ensureSubscription() p.expectNoMsg() @@ -189,21 +175,22 @@ class FlowFlattenMergeSpec extends StreamSpec { elems should ===((0 until 1000).toSet) } - val attributesSource = Source.fromGraph( - new GraphStage[SourceShape[Attributes]] { - val out = Outlet[Attributes]("AttributesSource.out") - override val shape: SourceShape[Attributes] = SourceShape(out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { + val attributesSource = Source.fromGraph(new GraphStage[SourceShape[Attributes]] { + val out = Outlet[Attributes]("AttributesSource.out") + override val shape: SourceShape[Attributes] = SourceShape(out) + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with OutHandler { override def onPull(): Unit = { push(out, inheritedAttributes) completeStage() } setHandler(out, this) } - }) + }) "propagate attributes to inner streams" in assertAllStagesStopped { - val f = Source.single(attributesSource.addAttributes(Attributes.name("inner"))) + val f = Source + .single(attributesSource.addAttributes(Attributes.name("inner"))) .flatMapMerge(1, identity) .addAttributes(Attributes.name("outer")) .runWith(Sink.head) @@ -215,16 +202,11 @@ class FlowFlattenMergeSpec extends StreamSpec { } "work with optimized Source.single" in assertAllStagesStopped { - Source(0 to 3) - .flatMapConcat(Source.single) - 
.runWith(toSeq) - .futureValue should ===(0 to 3) + Source(0 to 3).flatMapConcat(Source.single).runWith(toSeq).futureValue should ===(0 to 3) } "work with optimized Source.single when slow demand" in assertAllStagesStopped { - val probe = Source(0 to 4) - .flatMapConcat(Source.single) - .runWith(TestSink.probe) + val probe = Source(0 to 4).flatMapConcat(Source.single).runWith(TestSink.probe) probe.request(3) probe.expectNext(0) @@ -239,21 +221,18 @@ class FlowFlattenMergeSpec extends StreamSpec { } "work with mix of Source.single and other sources when slow demand" in assertAllStagesStopped { - val sources: Source[Source[Int, NotUsed], NotUsed] = Source(List( - Source.single(0), - Source.single(1), - Source(2 to 4), - Source.single(5), - Source(6 to 6), - Source.single(7), - Source(8 to 10), - Source.single(11) - )) + val sources: Source[Source[Int, NotUsed], NotUsed] = Source( + List(Source.single(0), + Source.single(1), + Source(2 to 4), + Source.single(5), + Source(6 to 6), + Source.single(7), + Source(8 to 10), + Source.single(11))) val probe = - sources - .flatMapConcat(identity) - .runWith(TestSink.probe) + sources.flatMapConcat(identity).runWith(TestSink.probe) probe.request(3) probe.expectNext(0) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala index edb6a41d35..9d6f303ac7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldAsyncSpec.scala @@ -32,10 +32,12 @@ class FlowFoldAsyncSpec extends StreamSpec { Future(a + b) } val flowDelayMS = 100L - val foldFlow = Flow[Int].foldAsync(0) { - (a, b) => Future { Thread.sleep(flowDelayMS); a + b } + val foldFlow = Flow[Int].foldAsync(0) { (a, b) => + Future { Thread.sleep(flowDelayMS); a + b } + } + val foldSink = Sink.foldAsync[Int, Int](0) { (a, b) => + Future(a + b) } - val foldSink = 
Sink.foldAsync[Int, Int](0) { (a, b) => Future(a + b) } "work when using Source.foldAsync" in assertAllStagesStopped { foldSource.runWith(Sink.head).futureValue(timeout) should ===(expected) @@ -49,13 +51,11 @@ class FlowFoldAsyncSpec extends StreamSpec { val flowTimeout = Timeout((flowDelayMS * input.size).milliseconds + 3.seconds) - inputSource.via(foldFlow).runWith(Sink.head). - futureValue(flowTimeout) should ===(expected) + inputSource.via(foldFlow).runWith(Sink.head).futureValue(flowTimeout) should ===(expected) } "work when using Source.foldAsync + Flow.foldAsync + Sink.foldAsync" in assertAllStagesStopped { - foldSource.via(foldFlow).runWith(foldSink). - futureValue(timeout) should ===(expected) + foldSource.via(foldFlow).runWith(foldSink).futureValue(timeout) should ===(expected) } "propagate an error" in assertAllStagesStopped { @@ -77,9 +77,13 @@ class FlowFoldAsyncSpec extends StreamSpec { val probe = TestSubscriber.manualProbe[Long]() var i = 0 - Source.fromIterator(() => Iterator.fill[Int](10000) { i += 1; i }). - foldAsync(1L) { (a, b) => Future(a + b) }. 
- runWith(Sink.asPublisher(true)).subscribe(probe) + Source + .fromIterator(() => Iterator.fill[Int](10000) { i += 1; i }) + .foldAsync(1L) { (a, b) => + Future(a + b) + } + .runWith(Sink.asPublisher(true)) + .subscribe(probe) val subscription = probe.expectSubscription() subscription.request(Int.MaxValue) @@ -91,9 +95,12 @@ class FlowFoldAsyncSpec extends StreamSpec { "signal future failure" in assertAllStagesStopped { val probe = TestSubscriber.probe[Int]() implicit val ec = system.dispatcher - Source(1 to 5).foldAsync(0) { (_, n) => - Future(if (n == 3) throw TE("err1") else n) - }.to(Sink.fromSubscriber(probe)).run() + Source(1 to 5) + .foldAsync(0) { (_, n) => + Future(if (n == 3) throw TE("err1") else n) + } + .to(Sink.fromSubscriber(probe)) + .run() val sub = probe.expectSubscription() sub.request(10) @@ -103,10 +110,13 @@ class FlowFoldAsyncSpec extends StreamSpec { "signal error from foldAsync" in assertAllStagesStopped { val probe = TestSubscriber.probe[Int]() implicit val ec = system.dispatcher - Source(1 to 5).foldAsync(0) { (_, n) => - if (n == 3) throw new RuntimeException("err2") with NoStackTrace - Future(n + 1) - }.to(Sink.fromSubscriber(probe)).run() + Source(1 to 5) + .foldAsync(0) { (_, n) => + if (n == 3) throw new RuntimeException("err2") with NoStackTrace + Future(n + 1) + } + .to(Sink.fromSubscriber(probe)) + .run() val sub = probe.expectSubscription() sub.request(10) @@ -116,14 +126,17 @@ class FlowFoldAsyncSpec extends StreamSpec { "resume after future failure" in assertAllStagesStopped { val probe = TestSubscriber.probe[(Int, Int)]() implicit val ec = system.dispatcher - Source(1 to 5).foldAsync(0 -> 1) { - case ((i, res), n) => - Future { - if (n == 3) throw new RuntimeException("err3") with NoStackTrace - else n -> (i + (res * n)) - } - }.withAttributes(supervisionStrategy(resumingDecider)). 
- to(Sink.fromSubscriber(probe)).run() + Source(1 to 5) + .foldAsync(0 -> 1) { + case ((i, res), n) => + Future { + if (n == 3) throw new RuntimeException("err3") with NoStackTrace + else n -> (i + (res * n)) + } + } + .withAttributes(supervisionStrategy(resumingDecider)) + .to(Sink.fromSubscriber(probe)) + .run() val sub = probe.expectSubscription() sub.request(10) @@ -134,14 +147,17 @@ class FlowFoldAsyncSpec extends StreamSpec { "restart after future failure" in assertAllStagesStopped { val probe = TestSubscriber.probe[(Int, Int)]() implicit val ec = system.dispatcher - Source(1 to 5).foldAsync(0 -> 1) { - case ((i, res), n) => - Future { - if (n == 3) throw new RuntimeException("err3") with NoStackTrace - else n -> (i + (res * n)) - } - }.withAttributes(supervisionStrategy(restartingDecider)). - to(Sink.fromSubscriber(probe)).run() + Source(1 to 5) + .foldAsync(0 -> 1) { + case ((i, res), n) => + Future { + if (n == 3) throw new RuntimeException("err3") with NoStackTrace + else n -> (i + (res * n)) + } + } + .withAttributes(supervisionStrategy(restartingDecider)) + .to(Sink.fromSubscriber(probe)) + .run() val sub = probe.expectSubscription() sub.request(10) @@ -150,39 +166,48 @@ class FlowFoldAsyncSpec extends StreamSpec { } "resume after multiple failures" in assertAllStagesStopped { - val futures: List[Future[String]] = List( - Future.failed(Utils.TE("failure1")), - Future.failed(Utils.TE("failure2")), - Future.failed(Utils.TE("failure3")), - Future.failed(Utils.TE("failure4")), - Future.failed(Utils.TE("failure5")), - Future.successful("happy!")) + val futures: List[Future[String]] = List(Future.failed(Utils.TE("failure1")), + Future.failed(Utils.TE("failure2")), + Future.failed(Utils.TE("failure3")), + Future.failed(Utils.TE("failure4")), + Future.failed(Utils.TE("failure5")), + Future.successful("happy!")) - Source(futures).foldAsync("") { (_, s) => s }. - withAttributes(supervisionStrategy(resumingDecider)).runWith(Sink.head). 
- futureValue(timeout) should ===("happy!") + Source(futures) + .foldAsync("") { (_, s) => + s + } + .withAttributes(supervisionStrategy(resumingDecider)) + .runWith(Sink.head) + .futureValue(timeout) should ===("happy!") } "finish after future failure" in assertAllStagesStopped { - Source(1 to 3).foldAsync(1) { (_, n) => - Future { - if (n == 3) throw new RuntimeException("err3b") with NoStackTrace - else n + Source(1 to 3) + .foldAsync(1) { (_, n) => + Future { + if (n == 3) throw new RuntimeException("err3b") with NoStackTrace + else n + } } - }.withAttributes(supervisionStrategy(resumingDecider)) - .grouped(10).runWith(Sink.head). - futureValue(Timeout(1.second)) should ===(Seq(2)) + .withAttributes(supervisionStrategy(resumingDecider)) + .grouped(10) + .runWith(Sink.head) + .futureValue(Timeout(1.second)) should ===(Seq(2)) } "resume when foldAsync throws" in { val c = TestSubscriber.manualProbe[(Int, Int)]() implicit val ec = system.dispatcher - val p = Source(1 to 5).foldAsync(0 -> 1) { - case ((i, res), n) => - if (n == 3) throw new RuntimeException("err4") with NoStackTrace - else Future(n -> (i + (res * n))) - }.withAttributes(supervisionStrategy(resumingDecider)). - to(Sink.fromSubscriber(c)).run() + val p = Source(1 to 5) + .foldAsync(0 -> 1) { + case ((i, res), n) => + if (n == 3) throw new RuntimeException("err4") with NoStackTrace + else Future(n -> (i + (res * n))) + } + .withAttributes(supervisionStrategy(resumingDecider)) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) c.expectNext(5 -> 74) @@ -192,12 +217,15 @@ class FlowFoldAsyncSpec extends StreamSpec { "restart when foldAsync throws" in { val c = TestSubscriber.manualProbe[(Int, Int)]() implicit val ec = system.dispatcher - val p = Source(1 to 5).foldAsync(0 -> 1) { - case ((i, res), n) => - if (n == 3) throw new RuntimeException("err4") with NoStackTrace - else Future(n -> (i + (res * n))) - }.withAttributes(supervisionStrategy(restartingDecider)). 
- to(Sink.fromSubscriber(c)).run() + val p = Source(1 to 5) + .foldAsync(0 -> 1) { + case ((i, res), n) => + if (n == 3) throw new RuntimeException("err4") with NoStackTrace + else Future(n -> (i + (res * n))) + } + .withAttributes(supervisionStrategy(restartingDecider)) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) c.expectNext(5 -> 24) @@ -206,9 +234,12 @@ class FlowFoldAsyncSpec extends StreamSpec { "signal NPE when future is completed with null" in { val c = TestSubscriber.manualProbe[String]() - val p = Source(List("a", "b")).foldAsync("") { (_, elem) => - Future.successful(null.asInstanceOf[String]) - }.to(Sink.fromSubscriber(c)).run() + val p = Source(List("a", "b")) + .foldAsync("") { (_, elem) => + Future.successful(null.asInstanceOf[String]) + } + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) c.expectError().getMessage should be(ReactiveStreamsCompliance.ElementMustNotBeNullMsg) @@ -216,11 +247,14 @@ class FlowFoldAsyncSpec extends StreamSpec { "resume when future is completed with null" in { val c = TestSubscriber.manualProbe[String]() - val p = Source(List("a", "b", "c")).foldAsync("") { (str, elem) => - if (elem == "b") Future.successful(null.asInstanceOf[String]) - else Future.successful(str + elem) - }.withAttributes(supervisionStrategy(resumingDecider)). 
- to(Sink.fromSubscriber(c)).run() + val p = Source(List("a", "b", "c")) + .foldAsync("") { (str, elem) => + if (elem == "b") Future.successful(null.asInstanceOf[String]) + else Future.successful(str + elem) + } + .withAttributes(supervisionStrategy(resumingDecider)) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) c.expectNext("ac") // 1: "" + "a"; 2: null => resume "a"; 3: "a" + "c" @@ -229,11 +263,14 @@ class FlowFoldAsyncSpec extends StreamSpec { "restart when future is completed with null" in { val c = TestSubscriber.manualProbe[String]() - val p = Source(List("a", "b", "c")).foldAsync("") { (str, elem) => - if (elem == "b") Future.successful(null.asInstanceOf[String]) - else Future.successful(str + elem) - }.withAttributes(supervisionStrategy(restartingDecider)). - to(Sink.fromSubscriber(c)).run() + val p = Source(List("a", "b", "c")) + .foldAsync("") { (str, elem) => + if (elem == "b") Future.successful(null.asInstanceOf[String]) + else Future.successful(str + elem) + } + .withAttributes(supervisionStrategy(restartingDecider)) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) c.expectNext("c") // 1: "" + "a"; 2: null => restart ""; 3: "" + "c" @@ -244,9 +281,12 @@ class FlowFoldAsyncSpec extends StreamSpec { val pub = TestPublisher.manualProbe[Int]() val sub = TestSubscriber.manualProbe[Int]() - Source.fromPublisher(pub). - foldAsync(0) { (_, n) => Future.successful(n) }. 
- runWith(Sink.fromSubscriber(sub)) + Source + .fromPublisher(pub) + .foldAsync(0) { (_, n) => + Future.successful(n) + } + .runWith(Sink.fromSubscriber(sub)) val upstream = pub.expectSubscription() upstream.expectRequest() @@ -258,16 +298,14 @@ class FlowFoldAsyncSpec extends StreamSpec { "complete future and return zero given an empty stream" in assertAllStagesStopped { val futureValue = - Source.fromIterator[Int](() => Iterator.empty) - .runFoldAsync(0)((acc, elem) => Future.successful(acc + elem)) + Source.fromIterator[Int](() => Iterator.empty).runFoldAsync(0)((acc, elem) => Future.successful(acc + elem)) Await.result(futureValue, remainingOrDefault) should be(0) } "complete future and return zero + item given a stream of one item" in assertAllStagesStopped { val futureValue = - Source.single(100) - .runFoldAsync(5)((acc, elem) => Future.successful(acc + elem)) + Source.single(100).runFoldAsync(5)((acc, elem) => Future.successful(acc + elem)) Await.result(futureValue, remainingOrDefault) should be(105) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldSpec.scala index 252879f2a1..12bf4a842e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowFoldSpec.scala @@ -30,19 +30,19 @@ class FlowFoldSpec extends StreamSpec { } "work when using Source.fold" in assertAllStagesStopped { - Await.result(foldSource runWith Sink.head, 3.seconds) should be(expected) + Await.result(foldSource.runWith(Sink.head), 3.seconds) should be(expected) } "work when using Sink.fold" in assertAllStagesStopped { - Await.result(inputSource runWith foldSink, 3.seconds) should be(expected) + Await.result(inputSource.runWith(foldSink), 3.seconds) should be(expected) } "work when using Flow.fold" in assertAllStagesStopped { - Await.result(inputSource via foldFlow runWith Sink.head, 3.seconds) should 
be(expected) + Await.result(inputSource.via(foldFlow).runWith(Sink.head), 3.seconds) should be(expected) } "work when using Source.fold + Flow.fold + Sink.fold" in assertAllStagesStopped { - Await.result(foldSource via foldFlow runWith foldSink, 3.seconds) should be(expected) + Await.result(foldSource.via(foldFlow).runWith(foldSink), 3.seconds) should be(expected) } "propagate an error" in assertAllStagesStopped { @@ -60,7 +60,8 @@ class FlowFoldSpec extends StreamSpec { "resume with the accumulated state when the folding function throws and the supervisor strategy decides to resume" in assertAllStagesStopped { val error = TE("Boom!") val fold = Sink.fold[Int, Int](0)((x, y) => if (y == 50) throw error else x + y) - val future = inputSource.runWith(fold.withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))) + val future = + inputSource.runWith(fold.withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))) Await.result(future, 3.seconds) should be(expected - 50) } @@ -68,15 +69,15 @@ class FlowFoldSpec extends StreamSpec { "resume and reset the state when the folding function throws when the supervisor strategy decides to restart" in assertAllStagesStopped { val error = TE("Boom!") val fold = Sink.fold[Int, Int](0)((x, y) => if (y == 50) throw error else x + y) - val future = inputSource.runWith(fold.withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider))) + val future = + inputSource.runWith(fold.withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider))) Await.result(future, 3.seconds) should be((51 to 100).sum) } "complete future and return zero given an empty stream" in assertAllStagesStopped { val futureValue = - Source.fromIterator[Int](() => Iterator.empty) - .runFold(0)(_ + _) + Source.fromIterator[Int](() => Iterator.empty).runFold(0)(_ + _) Await.result(futureValue, 3.seconds) should be(0) } diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowForeachSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowForeachSpec.scala index 59970cb6eb..25ff4e8dcb 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowForeachSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowForeachSpec.scala @@ -20,8 +20,8 @@ class FlowForeachSpec extends StreamSpec { "A runForeach" must { "call the procedure for each element" in assertAllStagesStopped { - Source(1 to 3).runForeach(testActor ! _) foreach { - _ => testActor ! "done" + Source(1 to 3).runForeach(testActor ! _).foreach { _ => + testActor ! "done" } expectMsg(1) expectMsg(2) @@ -30,16 +30,16 @@ class FlowForeachSpec extends StreamSpec { } "complete the future for an empty stream" in assertAllStagesStopped { - Source.empty[String].runForeach(testActor ! _) foreach { - _ => testActor ! "done" + Source.empty[String].runForeach(testActor ! _).foreach { _ => + testActor ! "done" } expectMsg("done") } "yield the first error" in assertAllStagesStopped { val p = TestPublisher.manualProbe[Int]() - Source.fromPublisher(p).runForeach(testActor ! _).failed foreach { - ex => testActor ! ex + Source.fromPublisher(p).runForeach(testActor ! _).failed.foreach { ex => + testActor ! 
ex } val proc = p.expectSubscription() proc.expectRequest() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala index 0d7c0d3e66..4e8c0979d3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupBySpec.scala @@ -32,7 +32,8 @@ import akka.testkit.TestLatch object FlowGroupBySpec { implicit class Lift[M](val f: SubFlow[Int, M, Source[Int, M]#Repr, RunnableGraph[M]]) extends AnyVal { - def lift(key: Int => Int) = f.prefixAndTail(1).map(p => key(p._1.head) -> (Source.single(p._1.head) ++ p._2)).concatSubstreams + def lift(key: Int => Int) = + f.prefixAndTail(1).map(p => key(p._1.head) -> (Source.single(p._1.head) ++ p._2)).concatSubstreams } } @@ -40,8 +41,7 @@ object FlowGroupBySpec { class FlowGroupBySpec extends StreamSpec { import FlowGroupBySpec._ - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val materializer = ActorMaterializer(settings) @@ -61,7 +61,8 @@ class FlowGroupBySpec extends StreamSpec { class SubstreamsSupport(groupCount: Int = 2, elementCount: Int = 6, maxSubstreams: Int = -1) { val source = Source(1 to elementCount).runWith(Sink.asPublisher(false)) val max = if (maxSubstreams > 0) maxSubstreams else groupCount - val groupStream = Source.fromPublisher(source).groupBy(max, _ % groupCount).lift(_ % groupCount).runWith(Sink.asPublisher(false)) + val groupStream = + Source.fromPublisher(source).groupBy(max, _ % groupCount).lift(_ % groupCount).runWith(Sink.asPublisher(false)) val masterSubscriber = TestSubscriber.manualProbe[(Int, Source[Int, _])]() groupStream.subscribe(masterSubscriber) @@ -169,7 +170,8 @@ class FlowGroupBySpec extends StreamSpec { "accept cancellation of master 
stream when not consumed anything" in assertAllStagesStopped { val publisherProbeProbe = TestPublisher.manualProbe[Int]() - val publisher = Source.fromPublisher(publisherProbeProbe).groupBy(2, _ % 2).lift(_ % 2).runWith(Sink.asPublisher(false)) + val publisher = + Source.fromPublisher(publisherProbeProbe).groupBy(2, _ % 2).lift(_ % 2).runWith(Sink.asPublisher(false)) val subscriber = TestSubscriber.manualProbe[(Int, Source[Int, _])]() publisher.subscribe(subscriber) @@ -189,7 +191,8 @@ class FlowGroupBySpec extends StreamSpec { "abort on onError from upstream" in assertAllStagesStopped { val publisherProbeProbe = TestPublisher.manualProbe[Int]() - val publisher = Source.fromPublisher(publisherProbeProbe).groupBy(2, _ % 2).lift(_ % 2).runWith(Sink.asPublisher(false)) + val publisher = + Source.fromPublisher(publisherProbeProbe).groupBy(2, _ % 2).lift(_ % 2).runWith(Sink.asPublisher(false)) val subscriber = TestSubscriber.manualProbe[(Int, Source[Int, _])]() publisher.subscribe(subscriber) @@ -206,7 +209,8 @@ class FlowGroupBySpec extends StreamSpec { "abort on onError from upstream when substreams are running" in assertAllStagesStopped { val publisherProbeProbe = TestPublisher.manualProbe[Int]() - val publisher = Source.fromPublisher(publisherProbeProbe).groupBy(2, _ % 2).lift(_ % 2).runWith(Sink.asPublisher(false)) + val publisher = + Source.fromPublisher(publisherProbeProbe).groupBy(2, _ % 2).lift(_ % 2).runWith(Sink.asPublisher(false)) val subscriber = TestSubscriber.manualProbe[(Int, Source[Int, _])]() publisher.subscribe(subscriber) @@ -234,7 +238,8 @@ class FlowGroupBySpec extends StreamSpec { "fail stream when groupBy function throws" in assertAllStagesStopped { val publisherProbeProbe = TestPublisher.manualProbe[Int]() val exc = TE("test") - val publisher = Source.fromPublisher(publisherProbeProbe) + val publisher = Source + .fromPublisher(publisherProbeProbe) .groupBy(2, elem => if (elem == 2) throw exc else elem % 2) .lift(_ % 2) 
.runWith(Sink.asPublisher(false)) @@ -264,7 +269,8 @@ class FlowGroupBySpec extends StreamSpec { "resume stream when groupBy function throws" in { val publisherProbeProbe = TestPublisher.manualProbe[Int]() val exc = TE("test") - val publisher = Source.fromPublisher(publisherProbeProbe) + val publisher = Source + .fromPublisher(publisherProbeProbe) .groupBy(2, elem => if (elem == 2) throw exc else elem % 2) .lift(_ % 2) .withAttributes(ActorAttributes.supervisionStrategy(resumingDecider)) @@ -318,9 +324,8 @@ class FlowGroupBySpec extends StreamSpec { } "fail when exceeding maxSubstreams" in assertAllStagesStopped { - val (up, down) = Flow[Int] - .groupBy(1, _ % 2).prefixAndTail(0).mergeSubstreams - .runWith(TestSource.probe[Int], TestSink.probe) + val (up, down) = + Flow[Int].groupBy(1, _ % 2).prefixAndTail(0).mergeSubstreams.runWith(TestSource.probe[Int], TestSink.probe) down.request(2) @@ -339,7 +344,8 @@ class FlowGroupBySpec extends StreamSpec { "resume when exceeding maxSubstreams" in { val (up, down) = Flow[Int] - .groupBy(0, identity).mergeSubstreams + .groupBy(0, identity) + .mergeSubstreams .withAttributes(ActorAttributes.supervisionStrategy(resumingDecider)) .runWith(TestSource.probe[Int], TestSink.probe) @@ -351,12 +357,7 @@ class FlowGroupBySpec extends StreamSpec { "emit subscribe before completed" in assertAllStagesStopped { val futureGroupSource = - Source.single(0) - .groupBy(1, elem => "all") - .prefixAndTail(0) - .map(_._2) - .concatSubstreams - .runWith(Sink.head) + Source.single(0).groupBy(1, elem => "all").prefixAndTail(0).map(_._2).concatSubstreams.runWith(Sink.head) val pub: Publisher[Int] = Await.result(futureGroupSource, 3.seconds).runWith(Sink.asPublisher(false)) val probe = TestSubscriber.manualProbe[Int]() pub.subscribe(probe) @@ -371,9 +372,14 @@ class FlowGroupBySpec extends StreamSpec { val publisherProbe = TestPublisher.manualProbe[ByteString]() val subscriber = TestSubscriber.manualProbe[ByteString]() - val publisher = 
Source.fromPublisher[ByteString](publisherProbe) - .groupBy(256, elem => elem.head).map(_.reverse).mergeSubstreams - .groupBy(256, elem => elem.head).map(_.reverse).mergeSubstreams + val publisher = Source + .fromPublisher[ByteString](publisherProbe) + .groupBy(256, elem => elem.head) + .map(_.reverse) + .mergeSubstreams + .groupBy(256, elem => elem.head) + .map(_.reverse) + .mergeSubstreams .runWith(Sink.asPublisher(false)) publisher.subscribe(subscriber) @@ -543,8 +549,7 @@ class FlowGroupBySpec extends StreamSpec { } "work with random demand" in assertAllStagesStopped { - val mat = ActorMaterializer(ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 1, maxSize = 1)) + val mat = ActorMaterializer(ActorMaterializerSettings(system).withInputBuffer(initialSize = 1, maxSize = 1)) var blockingNextElement: ByteString = null.asInstanceOf[ByteString] @@ -557,7 +562,8 @@ class FlowGroupBySpec extends StreamSpec { case class SubFlowState(probe: TestSubscriber.Probe[ByteString], hasDemand: Boolean, firstElement: ByteString) val map = new util.HashMap[Int, SubFlowState]() - final class ProbeSink(val attributes: Attributes, shape: SinkShape[ByteString])(implicit system: ActorSystem) extends SinkModule[ByteString, TestSubscriber.Probe[ByteString]](shape) { + final class ProbeSink(val attributes: Attributes, shape: SinkShape[ByteString])(implicit system: ActorSystem) + extends SinkModule[ByteString, TestSubscriber.Probe[ByteString]](shape) { override def create(context: MaterializationContext) = { val promise = probes.get(probesWriterTop) val probe = TestSubscriber.probe[ByteString]() @@ -565,8 +571,11 @@ class FlowGroupBySpec extends StreamSpec { probesWriterTop += 1 (probe, probe) } - override protected def newInstance(shape: SinkShape[ByteString]): SinkModule[ByteString, TestSubscriber.Probe[ByteString]] = new ProbeSink(attributes, shape) - override def withAttributes(attr: Attributes): SinkModule[ByteString, TestSubscriber.Probe[ByteString]] = new 
ProbeSink(attr, amendShape(attr)) + override protected def newInstance( + shape: SinkShape[ByteString]): SinkModule[ByteString, TestSubscriber.Probe[ByteString]] = + new ProbeSink(attributes, shape) + override def withAttributes(attr: Attributes): SinkModule[ByteString, TestSubscriber.Probe[ByteString]] = + new ProbeSink(attr, amendShape(attr)) } @tailrec @@ -595,8 +604,11 @@ class FlowGroupBySpec extends StreamSpec { } val publisherProbe = TestPublisher.manualProbe[ByteString]() - Source.fromPublisher[ByteString](publisherProbe) - .groupBy(100, elem => Math.abs(elem.head % 100)).to(Sink.fromGraph(new ProbeSink(none, SinkShape(Inlet("ProbeSink.in"))))).run()(mat) + Source + .fromPublisher[ByteString](publisherProbe) + .groupBy(100, elem => Math.abs(elem.head % 100)) + .to(Sink.fromGraph(new ProbeSink(none, SinkShape(Inlet("ProbeSink.in"))))) + .run()(mat) val upstreamSubscription = publisherProbe.expectSubscription() @@ -634,26 +646,25 @@ class FlowGroupBySpec extends StreamSpec { "not block all substreams when one is blocked but has a buffer in front" in assertAllStagesStopped { case class Elem(id: Int, substream: Int, f: () => Any) - val queue = Source.queue[Elem](3, OverflowStrategy.backpressure) + val queue = Source + .queue[Elem](3, OverflowStrategy.backpressure) .groupBy(2, _.substream) .buffer(2, OverflowStrategy.backpressure) - .map { _.f() }.async + .map { _.f() } + .async .to(Sink.ignore) .run() val threeProcessed = Promise[Done]() val blockSubStream1 = TestLatch() - List( - Elem(1, 1, () => { - // timeout just to not wait forever if something is wrong, not really relevant for test - Await.result(blockSubStream1, 10.seconds) - 1 - }), - Elem(2, 1, () => 2), - Elem(3, 2, () => { - threeProcessed.success(Done) - 3 - })).foreach(queue.offer) + List(Elem(1, 1, () => { + // timeout just to not wait forever if something is wrong, not really relevant for test + Await.result(blockSubStream1, 10.seconds) + 1 + }), Elem(2, 1, () => 2), Elem(3, 2, () => { + 
threeProcessed.success(Done) + 3 + })).foreach(queue.offer) // two and three are processed as fast as possible, not blocked by substream 1 being clogged threeProcessed.future.futureValue should ===(Done) // let 1 pass so stream can complete diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala index f6cb4128a8..3e940977df 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedSpec.scala @@ -8,12 +8,11 @@ import scala.collection.immutable import java.util.concurrent.ThreadLocalRandom.{ current => random } import akka.stream.ActorMaterializerSettings -import akka.stream.testkit.{ StreamSpec, ScriptedTest } +import akka.stream.testkit.{ ScriptedTest, StreamSpec } class FlowGroupedSpec extends StreamSpec with ScriptedTest { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) "A Grouped" must { @@ -22,14 +21,20 @@ class FlowGroupedSpec extends StreamSpec with ScriptedTest { "group evenly" in { val testLen = random.nextInt(1, 16) - def script = Script(TestConfig.RandomTestRange map { _ => randomTest(testLen) }: _*) - TestConfig.RandomTestRange foreach (_ => runScript(script, settings)(_.grouped(testLen))) + def script = + Script(TestConfig.RandomTestRange.map { _ => + randomTest(testLen) + }: _*) + TestConfig.RandomTestRange.foreach(_ => runScript(script, settings)(_.grouped(testLen))) } "group with rest" in { val testLen = random.nextInt(1, 16) - def script = Script(TestConfig.RandomTestRange.map { _ => randomTest(testLen) } :+ randomTest(1): _*) - TestConfig.RandomTestRange foreach (_ => runScript(script, settings)(_.grouped(testLen))) + def script = + Script(TestConfig.RandomTestRange.map { _ => + 
randomTest(testLen) + } :+ randomTest(1): _*) + TestConfig.RandomTestRange.foreach(_ => runScript(script, settings)(_.grouped(testLen))) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala index 772d4f8405..d88ad35514 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowGroupedWithinSpec.scala @@ -32,12 +32,18 @@ class FlowGroupedWithinSpec extends StreamSpec with ScriptedTest { val cSub = c.expectSubscription cSub.request(100) val demand1 = pSub.expectRequest.toInt - (1 to demand1) foreach { _ => pSub.sendNext(input.next()) } + (1 to demand1).foreach { _ => + pSub.sendNext(input.next()) + } val demand2 = pSub.expectRequest.toInt - (1 to demand2) foreach { _ => pSub.sendNext(input.next()) } + (1 to demand2).foreach { _ => + pSub.sendNext(input.next()) + } val demand3 = pSub.expectRequest.toInt c.expectNext((1 to (demand1 + demand2).toInt).toVector) - (1 to demand3) foreach { _ => pSub.sendNext(input.next()) } + (1 to demand3).foreach { _ => + pSub.sendNext(input.next()) + } c.expectNoMsg(300.millis) c.expectNext(((demand1 + demand2 + 1).toInt to (demand1 + demand2 + demand3).toInt).toVector) c.expectNoMsg(300.millis) @@ -69,10 +75,14 @@ class FlowGroupedWithinSpec extends StreamSpec with ScriptedTest { val cSub = c.expectSubscription cSub.request(1) val demand1 = pSub.expectRequest.toInt - (1 to demand1) foreach { _ => pSub.sendNext(input.next()) } + (1 to demand1).foreach { _ => + pSub.sendNext(input.next()) + } c.expectNext((1 to demand1).toVector) val demand2 = pSub.expectRequest.toInt - (1 to demand2) foreach { _ => pSub.sendNext(input.next()) } + (1 to demand2).foreach { _ => + pSub.sendNext(input.next()) + } c.expectNoMsg(300.millis) cSub.request(1) c.expectNext(((demand1 + 1) to (demand1 + demand2)).toVector) @@ -154,21 
+164,28 @@ class FlowGroupedWithinSpec extends StreamSpec with ScriptedTest { } "group evenly" taggedAs TimingTest in { - def script = Script(TestConfig.RandomTestRange map { _ => val x, y, z = random.nextInt(); Seq(x, y, z) -> Seq(immutable.Seq(x, y, z)) }: _*) - TestConfig.RandomTestRange foreach (_ => runScript(script, settings)(_.groupedWithin(3, 10.minutes))) + def script = + Script(TestConfig.RandomTestRange.map { _ => + val x, y, z = random.nextInt(); Seq(x, y, z) -> Seq(immutable.Seq(x, y, z)) + }: _*) + TestConfig.RandomTestRange.foreach(_ => runScript(script, settings)(_.groupedWithin(3, 10.minutes))) } "group with rest" taggedAs TimingTest in { - def script = Script((TestConfig.RandomTestRange.map { _ => val x, y, z = random.nextInt(); Seq(x, y, z) -> Seq(immutable.Seq(x, y, z)) } + def script = + Script((TestConfig.RandomTestRange.map { _ => + val x, y, z = random.nextInt(); Seq(x, y, z) -> Seq(immutable.Seq(x, y, z)) + } :+ { val x = random.nextInt(); Seq(x) -> Seq(immutable.Seq(x)) }): _*) - TestConfig.RandomTestRange foreach (_ => runScript(script, settings)(_.groupedWithin(3, 10.minutes))) + TestConfig.RandomTestRange.foreach(_ => runScript(script, settings)(_.groupedWithin(3, 10.minutes))) } "group with small groups with backpressure" taggedAs TimingTest in { Source(1 to 10) .groupedWithin(1, 1.day) .throttle(1, 110.millis, 0, ThrottleMode.Shaping) - .runWith(Sink.seq).futureValue should ===((1 to 10).map(List(_))) + .runWith(Sink.seq) + .futureValue should ===((1 to 10).map(List(_))) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala index 5e584adde3..b33923efda 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIdleInjectSpec.scala @@ -5,32 +5,29 @@ package akka.stream.scaladsl import akka.stream.{ ActorMaterializer, 
ActorMaterializerSettings } -import akka.stream.testkit.{ StreamSpec, TestSubscriber, TestPublisher } +import akka.stream.testkit.{ StreamSpec, TestPublisher, TestSubscriber } import akka.stream.testkit.scaladsl.StreamTestKit._ import scala.concurrent.Await import scala.concurrent.duration._ class FlowIdleInjectSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) "keepAlive" must { "not emit additional elements if upstream is fast enough" in assertAllStagesStopped { - Await.result( - Source(1 to 10).keepAlive(1.second, () => 0).grouped(1000).runWith(Sink.head), - 3.seconds) should ===(1 to 10) + Await.result(Source(1 to 10).keepAlive(1.second, () => 0).grouped(1000).runWith(Sink.head), 3.seconds) should ===( + 1 to 10) } "emit elements periodically after silent periods" in assertAllStagesStopped { val sourceWithIdleGap = Source(1 to 5) ++ Source(6 to 10).initialDelay(2.second) - val result = Await.result( - sourceWithIdleGap.keepAlive(0.6.seconds, () => 0).grouped(1000).runWith(Sink.head), - 3.seconds) should ===(List(1, 2, 3, 4, 5, 0, 0, 0, 6, 7, 8, 9, 10)) + val result = Await.result(sourceWithIdleGap.keepAlive(0.6.seconds, () => 0).grouped(1000).runWith(Sink.head), + 3.seconds) should ===(List(1, 2, 3, 4, 5, 0, 0, 0, 6, 7, 8, 9, 10)) } "immediately pull upstream" in { @@ -52,7 +49,9 @@ class FlowIdleInjectSpec extends StreamSpec { val upstream = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[Int]() - (Source(1 to 10) ++ Source.fromPublisher(upstream)).keepAlive(1.second, () => 0).runWith(Sink.fromSubscriber(downstream)) + (Source(1 to 10) ++ Source.fromPublisher(upstream)) + .keepAlive(1.second, () => 0) + .runWith(Sink.fromSubscriber(downstream)) downstream.request(10) downstream.expectNextN(1 to 10) @@ -85,7 +84,9 @@ 
class FlowIdleInjectSpec extends StreamSpec { val upstream = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[Int]() - (Source(1 to 10) ++ Source.fromPublisher(upstream)).keepAlive(1.second, () => 0).runWith(Sink.fromSubscriber(downstream)) + (Source(1 to 10) ++ Source.fromPublisher(upstream)) + .keepAlive(1.second, () => 0) + .runWith(Sink.fromSubscriber(downstream)) downstream.request(10) downstream.expectNextN(1 to 10) @@ -119,7 +120,9 @@ class FlowIdleInjectSpec extends StreamSpec { val upstream = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[Int]() - (Source(1 to 10) ++ Source.fromPublisher(upstream)).keepAlive(1.second, () => 0).runWith(Sink.fromSubscriber(downstream)) + (Source(1 to 10) ++ Source.fromPublisher(upstream)) + .keepAlive(1.second, () => 0) + .runWith(Sink.fromSubscriber(downstream)) downstream.request(10) downstream.expectNextN(1 to 10) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInitialDelaySpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInitialDelaySpec.scala index 9174bd1d0e..d8214f33c2 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInitialDelaySpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInitialDelaySpec.scala @@ -13,29 +13,23 @@ import scala.concurrent.duration._ class FlowInitialDelaySpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) "Flow initialDelay" must { "work with zero delay" in assertAllStagesStopped { - Await.result( - Source(1 to 10).initialDelay(Duration.Zero).grouped(100).runWith(Sink.head), - 1.second) should ===(1 to 10) + Await.result(Source(1 to 10).initialDelay(Duration.Zero).grouped(100).runWith(Sink.head), 1.second) should ===( + 1 to 10) } "delay elements by 
the specified time but not more" in assertAllStagesStopped { a[TimeoutException] shouldBe thrownBy { - Await.result( - Source(1 to 10).initialDelay(2.seconds).initialTimeout(1.second).runWith(Sink.ignore), - 2.seconds) + Await.result(Source(1 to 10).initialDelay(2.seconds).initialTimeout(1.second).runWith(Sink.ignore), 2.seconds) } - Await.ready( - Source(1 to 10).initialDelay(1.seconds).initialTimeout(2.second).runWith(Sink.ignore), - 2.seconds) + Await.ready(Source(1 to 10).initialDelay(1.seconds).initialTimeout(2.second).runWith(Sink.ignore), 2.seconds) } "properly ignore timer while backpressured" in assertAllStagesStopped { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala index ddaf738576..91e18ed8df 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowInterleaveSpec.scala @@ -51,7 +51,10 @@ class FlowInterleaveSpec extends BaseTwoStreamsSetup { val source1 = TestPublisher.probe[Int]() val source2 = TestPublisher.probe[Int]() - Source.fromPublisher(source1).interleave(Source.fromPublisher(source2), 2, eagerClose = true).runWith(Sink.fromSubscriber(probe)) + Source + .fromPublisher(source1) + .interleave(Source.fromPublisher(source2), 2, eagerClose = true) + .runWith(Sink.fromSubscriber(probe)) probe.expectSubscription().request(10) // just to make it extra clear that it eagerly pulls all inputs @@ -82,7 +85,10 @@ class FlowInterleaveSpec extends BaseTwoStreamsSetup { val source1 = TestPublisher.probe[Int]() val source2 = TestPublisher.probe[Int]() - Source.fromPublisher(source1).interleave(Source.fromPublisher(source2), 2, eagerClose = true).runWith(Sink.fromSubscriber(probe)) + Source + .fromPublisher(source1) + .interleave(Source.fromPublisher(source2), 2, eagerClose = true) + .runWith(Sink.fromSubscriber(probe)) 
probe.expectSubscription().request(10) // just to make it extra clear that it eagerly pulls all inputs @@ -110,12 +116,10 @@ class FlowInterleaveSpec extends BaseTwoStreamsSetup { val source1 = TestPublisher.probe[Int]() val source2 = TestPublisher.probe[Int]() - Source.fromPublisher(source1) - .interleave( - Source.fromPublisher(source2), - 2, - eagerClose = true - ).runWith(Sink.fromSubscriber(probe)) + Source + .fromPublisher(source1) + .interleave(Source.fromPublisher(source2), 2, eagerClose = true) + .runWith(Sink.fromSubscriber(probe)) probe.expectSubscription().request(10) @@ -207,8 +211,7 @@ class FlowInterleaveSpec extends BaseTwoStreamsSetup { val subscription2 = subscriber2.expectSubscription() subscription2.request(4) subscriber2.expectNextOrError(1, TestException).isLeft || - subscriber2.expectNextOrError(2, TestException).isLeft || - { subscriber2.expectError(TestException); true } + subscriber2.expectNextOrError(2, TestException).isLeft || { subscriber2.expectError(TestException); true } } "work with one delayed failed and one nonempty publisher" in { @@ -221,8 +224,11 @@ class FlowInterleaveSpec extends BaseTwoStreamsSetup { val up2 = TestPublisher.manualProbe[Int]() val down = TestSubscriber.manualProbe[Int]() - val (graphSubscriber1, graphSubscriber2) = Source.asSubscriber[Int] - .interleaveMat(Source.asSubscriber[Int], 2)((_, _)).toMat(Sink.fromSubscriber(down))(Keep.left).run + val (graphSubscriber1, graphSubscriber2) = Source + .asSubscriber[Int] + .interleaveMat(Source.asSubscriber[Int], 2)((_, _)) + .toMat(Sink.fromSubscriber(down))(Keep.left) + .run val downstream = down.expectSubscription() downstream.cancel() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIntersperseSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIntersperseSpec.scala index 99bb44e7eb..c5d549e761 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIntersperseSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIntersperseSpec.scala @@ -5,69 +5,51 @@ package akka.stream.scaladsl import akka.stream.testkit._ -import akka.stream.testkit.scaladsl.{ TestSource, TestSink } +import akka.stream.testkit.scaladsl.{ TestSink, TestSource } import akka.stream.{ ActorMaterializer, ActorMaterializerSettings } import scala.concurrent.duration._ class FlowIntersperseSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) "A Intersperse" must { "inject element between existing elements" in { - val probe = Source(List(1, 2, 3)) - .map(_.toString) - .intersperse(",") - .runWith(TestSink.probe) + val probe = Source(List(1, 2, 3)).map(_.toString).intersperse(",").runWith(TestSink.probe) probe.expectSubscription() probe.toStrict(1.second).mkString("") should ===(List(1, 2, 3).mkString(",")) } "inject element between existing elements, when downstream is fold" in { - val concated = Source(List(1, 2, 3)) - .map(_.toString) - .intersperse(",") - .runFold("")(_ + _) + val concated = Source(List(1, 2, 3)).map(_.toString).intersperse(",").runFold("")(_ + _) concated.futureValue should ===("1,2,3") } "inject element between existing elements, and surround with []" in { - val probe = Source(List(1, 2, 3)) - .map(_.toString) - .intersperse("[", ",", "]") - .runWith(TestSink.probe) + val probe = Source(List(1, 2, 3)).map(_.toString).intersperse("[", ",", "]").runWith(TestSink.probe) probe.toStrict(1.second).mkString("") should ===(List(1, 2, 3).mkString("[", ",", "]")) } "demonstrate how to prepend only" in { - val probe = ( - Source.single(">> ") ++ Source(List("1", "2", "3")).intersperse(",")) - .runWith(TestSink.probe) + val probe = (Source.single(">> ") ++ Source(List("1", "2", 
"3")).intersperse(",")).runWith(TestSink.probe) probe.toStrict(1.second).mkString("") should ===(List(1, 2, 3).mkString(">> ", ",", "")) } "surround empty stream with []" in { - val probe = Source(List()) - .map(_.toString) - .intersperse("[", ",", "]") - .runWith(TestSink.probe) + val probe = Source(List()).map(_.toString).intersperse("[", ",", "]").runWith(TestSink.probe) probe.expectSubscription() probe.toStrict(1.second).mkString("") should ===(List().mkString("[", ",", "]")) } "surround single element stream with []" in { - val probe = Source(List(1)) - .map(_.toString) - .intersperse("[", ",", "]") - .runWith(TestSink.probe) + val probe = Source(List(1)).map(_.toString).intersperse("[", ",", "]").runWith(TestSink.probe) probe.expectSubscription() probe.toStrict(1.second).mkString("") should ===(List(1).mkString("[", ",", "]")) @@ -76,23 +58,15 @@ class FlowIntersperseSpec extends StreamSpec { "complete the stage when the Source has been completed" in { val (p1, p2) = TestSource.probe[String].intersperse(",").toMat(TestSink.probe[String])(Keep.both).run p2.request(10) - p1.sendNext("a") - .sendNext("b") - .sendComplete() - p2.expectNext("a") - .expectNext(",") - .expectNext("b") - .expectComplete() + p1.sendNext("a").sendNext("b").sendComplete() + p2.expectNext("a").expectNext(",").expectNext("b").expectComplete() } "complete the stage when the Sink has been cancelled" in { val (p1, p2) = TestSource.probe[String].intersperse(",").toMat(TestSink.probe[String])(Keep.both).run p2.request(10) - p1.sendNext("a") - .sendNext("b") - p2.expectNext("a") - .expectNext(",") - .cancel() + p1.sendNext("a").sendNext("b") + p2.expectNext("a").expectNext(",").cancel() p1.expectCancellation() } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIteratorSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIteratorSpec.scala index 69e67d5a05..d0a60c0837 100644 --- 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIteratorSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowIteratorSpec.scala @@ -71,8 +71,7 @@ class FlowIterableSpec extends AbstractFlowIteratorSpec { abstract class AbstractFlowIteratorSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) private val m = ActorMaterializer(settings) implicit final def materializer = m diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala index ebc6e2f50d..1aa0b81c89 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowJoinSpec.scala @@ -15,8 +15,7 @@ import scala.collection.immutable class FlowJoinSpec extends StreamSpec(ConfigFactory.parseString("akka.loglevel=INFO")) { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -121,15 +120,16 @@ class FlowJoinSpec extends StreamSpec(ConfigFactory.parseString("akka.loglevel=I } "allow for concat cycle" in assertAllStagesStopped { - val flow = Flow.fromGraph(GraphDSL.create(TestSource.probe[String](system), Sink.head[String])(Keep.both) { implicit b => (source, sink) => - import GraphDSL.Implicits._ - val concat = b.add(Concat[String](2)) - val broadcast = b.add(Broadcast[String](2, eagerCancel = true)) - source ~> concat.in(0) - concat.out ~> broadcast.in - broadcast.out(0) ~> sink + val flow = Flow.fromGraph(GraphDSL.create(TestSource.probe[String](system), Sink.head[String])(Keep.both) { + implicit b => (source, sink) => + import 
GraphDSL.Implicits._ + val concat = b.add(Concat[String](2)) + val broadcast = b.add(Broadcast[String](2, eagerCancel = true)) + source ~> concat.in(0) + concat.out ~> broadcast.in + broadcast.out(0) ~> sink - FlowShape(concat.in(1), broadcast.out(1)) + FlowShape(concat.in(1), broadcast.out(1)) }) val (probe, result) = flow.join(Flow[String]).run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowKillSwitchSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowKillSwitchSpec.scala index a5982bcf5a..fe53d8eaec 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowKillSwitchSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowKillSwitchSpec.scala @@ -47,11 +47,13 @@ class FlowKillSwitchSpec extends StreamSpec { "work if used multiple times in a flow" in { val (((upstream, switch1), switch2), downstream) = - TestSource.probe[Int] + TestSource + .probe[Int] .viaMat(KillSwitches.single)(Keep.both) .recover { case TE(_) => -1 } .viaMat(KillSwitches.single)(Keep.both) - .toMat(TestSink.probe)(Keep.both).run() + .toMat(TestSink.probe)(Keep.both) + .run() downstream.request(1) upstream.sendNext(1) @@ -274,15 +276,17 @@ class FlowKillSwitchSpec extends StreamSpec { val switch1 = KillSwitches.shared("switch") val switch2 = KillSwitches.shared("switch") - val downstream = RunnableGraph.fromGraph(GraphDSL.create(TestSink.probe[Int]) { implicit b => snk => - import GraphDSL.Implicits._ - val merge = b.add(Merge[Int](2)) + val downstream = RunnableGraph + .fromGraph(GraphDSL.create(TestSink.probe[Int]) { implicit b => snk => + import GraphDSL.Implicits._ + val merge = b.add(Merge[Int](2)) - Source.maybe[Int].via(switch1.flow) ~> merge ~> snk - Source.maybe[Int].via(switch2.flow) ~> merge + Source.maybe[Int].via(switch1.flow) ~> merge ~> snk + Source.maybe[Int].via(switch2.flow) ~> merge - ClosedShape - }).run() + ClosedShape + }) + .run() downstream.ensureSubscription() downstream.expectNoMsg(100.millis) 
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala index 47e417ee7c..16a2d6f355 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitSpec.scala @@ -5,13 +5,12 @@ package akka.stream.scaladsl import akka.stream.testkit.StreamSpec -import akka.stream.{ StreamLimitReachedException, ActorMaterializer, ActorMaterializerSettings } +import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, StreamLimitReachedException } import scala.concurrent.Await class FlowLimitSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val mat = ActorMaterializer(settings) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala index ca31a73b76..cf25630ae2 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLimitWeightedSpec.scala @@ -5,13 +5,12 @@ package akka.stream.scaladsl import akka.stream.testkit.StreamSpec -import akka.stream.{ StreamLimitReachedException, ActorMaterializer, ActorMaterializerSettings } +import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, StreamLimitReachedException } import scala.concurrent.Await class FlowLimitWeightedSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val mat = ActorMaterializer(settings) diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala index df2e75ad34..5aa1b8454e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowLogSpec.scala @@ -9,7 +9,7 @@ import akka.event.{ DummyClassForStringSources, Logging } import akka.stream.ActorAttributes._ import akka.stream.Attributes.LogLevels import akka.stream.Supervision._ -import akka.stream.testkit.{ StreamSpec, ScriptedTest } +import akka.stream.testkit.{ ScriptedTest, StreamSpec } import akka.stream._ import akka.testkit.TestProbe @@ -48,16 +48,11 @@ class FlowLogSpec extends StreamSpec(""" } "allow disabling element logging" in { - val disableElementLogging = Attributes.logLevels( - onElement = LogLevels.Off, - onFinish = Logging.DebugLevel, - onFailure = Logging.DebugLevel) + val disableElementLogging = + Attributes.logLevels(onElement = LogLevels.Off, onFinish = Logging.DebugLevel, onFailure = Logging.DebugLevel) val debugging = Flow[Int].log("my-debug") - Source(1 to 2) - .via(debugging) - .withAttributes(disableElementLogging) - .runWith(Sink.ignore) + Source(1 to 2).via(debugging).withAttributes(disableElementLogging).runWith(Sink.ignore) logProbe.expectMsg(Logging.Debug(LogSrc, LogClazz, "[my-debug] Upstream finished.")) } @@ -68,7 +63,8 @@ class FlowLogSpec extends StreamSpec(""" "debug each element" in { val log = Logging(system, "com.example.ImportantLogger") - val debugging: javadsl.Flow[Integer, Integer, NotUsed] = javadsl.Flow.of(classOf[Integer]) + val debugging: javadsl.Flow[Integer, Integer, NotUsed] = javadsl.Flow + .of(classOf[Integer]) .log("log-1") .log("log-2", new akka.japi.function.Function[Integer, Integer] { def apply(i: Integer) = i }) .log("log-3", new akka.japi.function.Function[Integer, Integer] { def apply(i: Integer) = i }, log) @@ -126,34 +122,32 @@ class FlowLogSpec extends 
StreamSpec(""" } "allow configuring log levels via Attributes" in { - val logAttrs = Attributes.logLevels( - onElement = Logging.WarningLevel, - onFinish = Logging.InfoLevel, - onFailure = Logging.DebugLevel) + val logAttrs = Attributes.logLevels(onElement = Logging.WarningLevel, + onFinish = Logging.InfoLevel, + onFailure = Logging.DebugLevel) - Source.single(42) + Source + .single(42) .log("flow-6") - .withAttributes(Attributes.logLevels( - onElement = Logging.WarningLevel, - onFinish = Logging.InfoLevel, - onFailure = Logging.DebugLevel)) + .withAttributes(Attributes + .logLevels(onElement = Logging.WarningLevel, onFinish = Logging.InfoLevel, onFailure = Logging.DebugLevel)) .runWith(Sink.ignore) logProbe.expectMsg(Logging.Warning(LogSrc, LogClazz, "[flow-6] Element: 42")) logProbe.expectMsg(Logging.Info(LogSrc, LogClazz, "[flow-6] Upstream finished.")) val cause = new TestException - Source.failed(cause) - .log("flow-6e") - .withAttributes(logAttrs) - .runWith(Sink.ignore) - logProbe.expectMsg(Logging.Debug(LogSrc, LogClazz, "[flow-6e] Upstream failed, cause: FlowLogSpec$TestException: Boom!")) + Source.failed(cause).log("flow-6e").withAttributes(logAttrs).runWith(Sink.ignore) + logProbe.expectMsg( + Logging.Debug(LogSrc, LogClazz, "[flow-6e] Upstream failed, cause: FlowLogSpec$TestException: Boom!")) } "follow supervision strategy when exception thrown" in { val ex = new RuntimeException() with NoStackTrace - val future = Source(1 to 5).log("hi", n => throw ex) - .withAttributes(supervisionStrategy(resumingDecider)).runWith(Sink.fold(0)(_ + _)) + val future = Source(1 to 5) + .log("hi", n => throw ex) + .withAttributes(supervisionStrategy(resumingDecider)) + .runWith(Sink.fold(0)(_ + _)) Await.result(future, 500.millis) shouldEqual 0 } } @@ -162,7 +156,8 @@ class FlowLogSpec extends StreamSpec(""" "debug each element" in { val log = Logging(system, "com.example.ImportantLogger") - javadsl.Source.single[Integer](1) + javadsl.Source + .single[Integer](1) 
.log("log-1") .log("log-2", new akka.japi.function.Function[Integer, Integer] { def apply(i: Integer) = i }) .log("log-3", new akka.japi.function.Function[Integer, Integer] { def apply(i: Integer) = i }, log) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala index 50683640c1..bde1750c82 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncSpec.scala @@ -46,12 +46,17 @@ class FlowMapAsyncSpec extends StreamSpec { "produce future elements in order" in { val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher - val p = Source(1 to 50).mapAsync(4)(n => - if (n % 3 == 0) Future.successful(n) - else Future { - Thread.sleep(ThreadLocalRandom.current().nextInt(1, 10)) - n - }).to(Sink.fromSubscriber(c)).run() + val p = Source(1 to 50) + .mapAsync(4)( + n => + if (n % 3 == 0) Future.successful(n) + else + Future { + Thread.sleep(ThreadLocalRandom.current().nextInt(1, 10)) + n + }) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(1000) for (n <- 1 to 50) c.expectNext(n) @@ -62,10 +67,14 @@ class FlowMapAsyncSpec extends StreamSpec { val probe = TestProbe() val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher - val p = Source(1 to 20).mapAsync(8)(n => Future { - probe.ref ! n - n - }).to(Sink.fromSubscriber(c)).run() + val p = Source(1 to 20) + .mapAsync(8)(n => + Future { + probe.ref ! 
n + n + }) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() probe.expectNoMsg(500.millis) sub.request(1) @@ -86,13 +95,17 @@ class FlowMapAsyncSpec extends StreamSpec { val latch = TestLatch(1) val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher - val p = Source(1 to 5).mapAsync(4)(n => - if (n == 3) Future.failed[Int](new TE("err1")) - else Future { - Await.ready(latch, 10.seconds) - n - } - ).to(Sink.fromSubscriber(c)).run() + val p = Source(1 to 5) + .mapAsync(4)( + n => + if (n == 3) Future.failed[Int](new TE("err1")) + else + Future { + Await.ready(latch, 10.seconds) + n + }) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) c.expectError().getMessage should be("err1") @@ -103,13 +116,17 @@ class FlowMapAsyncSpec extends StreamSpec { val latch = TestLatch(1) val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher - val p = Source(1 to 5).mapAsync(4)(n => Future { - if (n == 3) throw new RuntimeException("err1") with NoStackTrace - else { - Await.ready(latch, 10.seconds) - n - } - }).to(Sink.fromSubscriber(c)).run() + val p = Source(1 to 5) + .mapAsync(4)(n => + Future { + if (n == 3) throw new RuntimeException("err1") with NoStackTrace + else { + Await.ready(latch, 10.seconds) + n + } + }) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) c.expectError().getMessage should be("err1") @@ -130,7 +147,8 @@ class FlowMapAsyncSpec extends StreamSpec { .mapAsync(4) { n => if (n == 1) Future.failed(new RuntimeException("err1") with NoStackTrace) else Future.successful(n) - }.runWith(Sink.ignore) + } + .runWith(Sink.ignore) intercept[RuntimeException] { Await.result(done, remainingOrDefault) }.getMessage should be("err1") @@ -148,9 +166,8 @@ class FlowMapAsyncSpec extends StreamSpec { val input = pa :: pb :: pc :: pd :: pe :: pf :: Nil - val probe = Source.fromIterator(() => input.iterator) - .mapAsync(5)(p => 
p.future.map(_.toUpperCase)) - .runWith(TestSink.probe) + val probe = + Source.fromIterator(() => input.iterator).mapAsync(5)(p => p.future.map(_.toUpperCase)).runWith(TestSink.probe) import TestSubscriber._ var gotErrorAlready = false @@ -197,7 +214,8 @@ class FlowMapAsyncSpec extends StreamSpec { val input = pa :: pb :: pc :: pd :: pe :: pf :: Nil - val elements = Source.fromIterator(() => input.iterator) + val elements = Source + .fromIterator(() => input.iterator) .mapAsync(5)(p => p.future) .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) .runWith(Sink.seq) @@ -217,15 +235,17 @@ class FlowMapAsyncSpec extends StreamSpec { val latch = TestLatch(1) val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher - val p = Source(1 to 5).mapAsync(4)(n => - if (n == 3) throw new RuntimeException("err2") with NoStackTrace - else { - Future { - Await.ready(latch, 10.seconds) - n - } - }). - to(Sink.fromSubscriber(c)).run() + val p = Source(1 to 5) + .mapAsync(4)(n => + if (n == 3) throw new RuntimeException("err2") with NoStackTrace + else { + Future { + Await.ready(latch, 10.seconds) + n + } + }) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) c.expectError().getMessage should be("err2") @@ -236,12 +256,14 @@ class FlowMapAsyncSpec extends StreamSpec { val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher val p = Source(1 to 5) - .mapAsync(4)(n => Future { - if (n == 3) throw new RuntimeException("err3") with NoStackTrace - else n - }) + .mapAsync(4)(n => + Future { + if (n == 3) throw new RuntimeException("err3") with NoStackTrace + else n + }) .withAttributes(supervisionStrategy(resumingDecider)) - .to(Sink.fromSubscriber(c)).run() + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) for (n <- List(1, 2, 4, 5)) c.expectNext(n) @@ -254,10 +276,10 @@ class FlowMapAsyncSpec extends StreamSpec { val p = Source(1 to 5) 
.mapAsync(4)(n => if (n == 3) Future.failed(new TE("err3")) - else Future.successful(n) - ) + else Future.successful(n)) .withAttributes(supervisionStrategy(resumingDecider)) - .to(Sink.fromSubscriber(c)).run() + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) for (n <- List(1, 2, 4, 5)) c.expectNext(n) @@ -265,28 +287,31 @@ class FlowMapAsyncSpec extends StreamSpec { } "resume after multiple failures" in assertAllStagesStopped { - val futures: List[Future[String]] = List( - Future.failed(Utils.TE("failure1")), - Future.failed(Utils.TE("failure2")), - Future.failed(Utils.TE("failure3")), - Future.failed(Utils.TE("failure4")), - Future.failed(Utils.TE("failure5")), - Future.successful("happy!")) + val futures: List[Future[String]] = List(Future.failed(Utils.TE("failure1")), + Future.failed(Utils.TE("failure2")), + Future.failed(Utils.TE("failure3")), + Future.failed(Utils.TE("failure4")), + Future.failed(Utils.TE("failure5")), + Future.successful("happy!")) Await.result( - Source(futures) - .mapAsync(2)(identity).withAttributes(supervisionStrategy(resumingDecider)) - .runWith(Sink.head), 3.seconds) should ===("happy!") + Source(futures).mapAsync(2)(identity).withAttributes(supervisionStrategy(resumingDecider)).runWith(Sink.head), + 3.seconds) should ===("happy!") } "finish after future failure" in assertAllStagesStopped { import system.dispatcher - Await.result(Source(1 to 3).mapAsync(1)(n => Future { - if (n == 3) throw new RuntimeException("err3b") with NoStackTrace - else n - }).withAttributes(supervisionStrategy(resumingDecider)) - .grouped(10) - .runWith(Sink.head), 1.second) should be(Seq(1, 2)) + Await.result( + Source(1 to 3) + .mapAsync(1)(n => + Future { + if (n == 3) throw new RuntimeException("err3b") with NoStackTrace + else n + }) + .withAttributes(supervisionStrategy(resumingDecider)) + .grouped(10) + .runWith(Sink.head), + 1.second) should be(Seq(1, 2)) } "resume when mapAsync throws" in { @@ -297,7 +322,8 @@ 
class FlowMapAsyncSpec extends StreamSpec { if (n == 3) throw new RuntimeException("err4") with NoStackTrace else Future(n)) .withAttributes(supervisionStrategy(resumingDecider)) - .to(Sink.fromSubscriber(c)).run() + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) for (n <- List(1, 2, 4, 5)) c.expectNext(n) @@ -317,7 +343,8 @@ class FlowMapAsyncSpec extends StreamSpec { val p = Source(List("a", "b", "c")) .mapAsync(4)(elem => if (elem == "b") Future.successful(null) else Future.successful(elem)) .withAttributes(supervisionStrategy(resumingDecider)) - .to(Sink.fromSubscriber(c)).run() + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) for (elem <- List("a", "c")) c.expectNext(elem) @@ -393,8 +420,8 @@ class FlowMapAsyncSpec extends StreamSpec { Future { if (elem) throw TE("this has gone too far") else elem - } - ).addAttributes(supervisionStrategy { + }) + .addAttributes(supervisionStrategy { case TE("this has gone too far") => failCount.incrementAndGet() Supervision.resume @@ -411,8 +438,8 @@ class FlowMapAsyncSpec extends StreamSpec { val result = Source(List(true, false)) .mapAsync(1)(elem => if (elem) Future.failed(TE("this has gone too far")) - else Future.successful(elem) - ).addAttributes(supervisionStrategy { + else Future.successful(elem)) + .addAttributes(supervisionStrategy { case TE("this has gone too far") => failCount.incrementAndGet() Supervision.resume @@ -428,12 +455,14 @@ class FlowMapAsyncSpec extends StreamSpec { import system.dispatcher val failCount = new AtomicInteger(0) val result = Source(List(true, false)) - .mapAsync(1)(elem => - if (elem) throw TE("this has gone too far") - else Future { - elem - } - ).addAttributes(supervisionStrategy { + .mapAsync(1)( + elem => + if (elem) throw TE("this has gone too far") + else + Future { + elem + }) + .addAttributes(supervisionStrategy { case TE("this has gone too far") => failCount.incrementAndGet() Supervision.resume diff 
--git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala index 85c73d0802..3cdf3ed0c8 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapAsyncUnorderedSpec.scala @@ -35,10 +35,14 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec { val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher val latch = (1 to 4).map(_ -> TestLatch(1)).toMap - val p = Source(1 to 4).mapAsyncUnordered(4)(n => Future { - Await.ready(latch(n), 5.seconds) - n - }).to(Sink.fromSubscriber(c)).run() + val p = Source(1 to 4) + .mapAsyncUnordered(4)(n => + Future { + Await.ready(latch(n), 5.seconds) + n + }) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(5) latch(2).countDown() @@ -56,15 +60,18 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec { val probe = TestProbe() val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher - val p = Source(1 to 20).mapAsyncUnordered(4)(n => - if (n % 3 == 0) { - probe.ref ! n - Future.successful(n) - } else - Future { + val p = Source(1 to 20) + .mapAsyncUnordered(4)(n => + if (n % 3 == 0) { probe.ref ! n - n - }).to(Sink.fromSubscriber(c)).run() + Future.successful(n) + } else + Future { + probe.ref ! 
n + n + }) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() c.expectNoMsg(200.millis) probe.expectNoMsg(Duration.Zero) @@ -86,13 +93,17 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec { val latch = TestLatch(1) val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher - val p = Source(1 to 5).mapAsyncUnordered(4)(n => Future { - if (n == 3) throw new RuntimeException("err1") with NoStackTrace - else { - Await.ready(latch, 10.seconds) - n - } - }).to(Sink.fromSubscriber(c)).run() + val p = Source(1 to 5) + .mapAsyncUnordered(4)(n => + Future { + if (n == 3) throw new RuntimeException("err1") with NoStackTrace + else { + Await.ready(latch, 10.seconds) + n + } + }) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) c.expectError.getMessage should be("err1") @@ -113,7 +124,8 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec { .mapAsyncUnordered(4) { n => if (n == 1) Future.failed(new RuntimeException("err1") with NoStackTrace) else Future.successful(n) - }.runWith(Sink.ignore) + } + .runWith(Sink.ignore) intercept[RuntimeException] { Await.result(done, remainingOrDefault) }.getMessage should be("err1") @@ -124,15 +136,17 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec { val latch = TestLatch(1) val c = TestSubscriber.manualProbe[Int]() implicit val ec = system.dispatcher - val p = Source(1 to 5).mapAsyncUnordered(4)(n => - if (n == 3) throw new RuntimeException("err2") with NoStackTrace - else { - Future { - Await.ready(latch, 10.seconds) - n - } - }). 
- to(Sink.fromSubscriber(c)).run() + val p = Source(1 to 5) + .mapAsyncUnordered(4)(n => + if (n == 3) throw new RuntimeException("err2") with NoStackTrace + else { + Future { + Await.ready(latch, 10.seconds) + n + } + }) + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) c.expectError.getMessage should be("err2") @@ -142,10 +156,11 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec { "resume after future failure" in { implicit val ec = system.dispatcher Source(1 to 5) - .mapAsyncUnordered(4)(n => Future { - if (n == 3) throw new RuntimeException("err3") with NoStackTrace - else n - }) + .mapAsyncUnordered(4)(n => + Future { + if (n == 3) throw new RuntimeException("err3") with NoStackTrace + else n + }) .withAttributes(supervisionStrategy(resumingDecider)) .runWith(TestSink.probe[Int]) .request(10) @@ -154,28 +169,33 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec { } "resume after multiple failures" in assertAllStagesStopped { - val futures: List[Future[String]] = List( - Future.failed(Utils.TE("failure1")), - Future.failed(Utils.TE("failure2")), - Future.failed(Utils.TE("failure3")), - Future.failed(Utils.TE("failure4")), - Future.failed(Utils.TE("failure5")), - Future.successful("happy!")) + val futures: List[Future[String]] = List(Future.failed(Utils.TE("failure1")), + Future.failed(Utils.TE("failure2")), + Future.failed(Utils.TE("failure3")), + Future.failed(Utils.TE("failure4")), + Future.failed(Utils.TE("failure5")), + Future.successful("happy!")) - Await.result( - Source(futures) - .mapAsyncUnordered(2)(identity).withAttributes(supervisionStrategy(resumingDecider)) - .runWith(Sink.head), 3.seconds) should ===("happy!") + Await.result(Source(futures) + .mapAsyncUnordered(2)(identity) + .withAttributes(supervisionStrategy(resumingDecider)) + .runWith(Sink.head), + 3.seconds) should ===("happy!") } "finish after future failure" in assertAllStagesStopped { import system.dispatcher - Await.result(Source(1 to 
3).mapAsyncUnordered(1)(n => Future { - if (n == 3) throw new RuntimeException("err3b") with NoStackTrace - else n - }).withAttributes(supervisionStrategy(resumingDecider)) - .grouped(10) - .runWith(Sink.head), 1.second) should be(Seq(1, 2)) + Await.result( + Source(1 to 3) + .mapAsyncUnordered(1)(n => + Future { + if (n == 3) throw new RuntimeException("err3b") with NoStackTrace + else n + }) + .withAttributes(supervisionStrategy(resumingDecider)) + .grouped(10) + .runWith(Sink.head), + 1.second) should be(Seq(1, 2)) } "resume when mapAsyncUnordered throws" in { @@ -193,7 +213,8 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec { "signal NPE when future is completed with null" in { val c = TestSubscriber.manualProbe[String]() - val p = Source(List("a", "b")).mapAsyncUnordered(4)(elem => Future.successful(null)).to(Sink.fromSubscriber(c)).run() + val p = + Source(List("a", "b")).mapAsyncUnordered(4)(elem => Future.successful(null)).to(Sink.fromSubscriber(c)).run() val sub = c.expectSubscription() sub.request(10) c.expectError.getMessage should be(ReactiveStreamsCompliance.ElementMustNotBeNullMsg) @@ -204,7 +225,8 @@ class FlowMapAsyncUnorderedSpec extends StreamSpec { val p = Source(List("a", "b", "c")) .mapAsyncUnordered(4)(elem => if (elem == "b") Future.successful(null) else Future.successful(elem)) .withAttributes(supervisionStrategy(resumingDecider)) - .to(Sink.fromSubscriber(c)).run() + .to(Sink.fromSubscriber(c)) + .run() val sub = c.expectSubscription() sub.request(10) c.expectNextUnordered("a", "c") diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala index 53402e012e..bf7062c230 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapConcatSpec.scala @@ -5,28 +5,26 @@ package akka.stream.scaladsl import 
akka.stream.testkit.scaladsl.TestSink -import akka.stream.{ Supervision, ActorAttributes, ActorMaterializer, ActorMaterializerSettings } +import akka.stream.{ ActorAttributes, ActorMaterializer, ActorMaterializerSettings, Supervision } import akka.stream.testkit.scaladsl.StreamTestKit._ import akka.stream.testkit._ import scala.util.control.NoStackTrace class FlowMapConcatSpec extends StreamSpec with ScriptedTest { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) "A MapConcat" must { "map and concat" in { - val script = Script( - Seq(0) -> Seq(), - Seq(1) -> Seq(1), - Seq(2) -> Seq(2, 2), - Seq(3) -> Seq(3, 3, 3), - Seq(2) -> Seq(2, 2), - Seq(1) -> Seq(1)) - TestConfig.RandomTestRange foreach (_ => runScript(script, settings)(_.mapConcat(x => (1 to x) map (_ => x)))) + val script = Script(Seq(0) -> Seq(), + Seq(1) -> Seq(1), + Seq(2) -> Seq(2, 2), + Seq(3) -> Seq(3, 3, 3), + Seq(2) -> Seq(2, 2), + Seq(1) -> Seq(1)) + TestConfig.RandomTestRange.foreach(_ => runScript(script, settings)(_.mapConcat(x => (1 to x).map(_ => x)))) } "map and concat grouping with slow downstream" in assertAllStagesStopped { @@ -42,10 +40,12 @@ class FlowMapConcatSpec extends StreamSpec with ScriptedTest { "be able to resume" in assertAllStagesStopped { val ex = new Exception("TEST") with NoStackTrace - Source(1 to 5).mapConcat(x => if (x == 3) throw ex else List(x)) + Source(1 to 5) + .mapConcat(x => if (x == 3) throw ex else List(x)) .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) .runWith(TestSink.probe[Int]) - .request(4).expectNext(1, 2, 4, 5) + .request(4) + .expectNext(1, 2, 4, 5) .expectComplete() } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapErrorSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapErrorSpec.scala index 8a1bf124d7..15edd66e2f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapErrorSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapErrorSpec.scala @@ -22,7 +22,10 @@ class FlowMapErrorSpec extends StreamSpec { "A MapError" must { "mapError when there is a handler" in assertAllStagesStopped { - Source(1 to 4).map { a => if (a == 3) throw ex else a } + Source(1 to 4) + .map { a => + if (a == 3) throw ex else a + } .mapError { case t: Throwable => boom } .runWith(TestSink.probe[Int]) .request(3) @@ -32,7 +35,10 @@ class FlowMapErrorSpec extends StreamSpec { } "fail the stream with exception thrown in handler (and log it)" in assertAllStagesStopped { - Source(1 to 3).map { a => if (a == 2) throw ex else a } + Source(1 to 3) + .map { a => + if (a == 2) throw ex else a + } .mapError { case t: Exception => throw boom } .runWith(TestSink.probe[Int]) .requestNext(1) @@ -41,7 +47,10 @@ class FlowMapErrorSpec extends StreamSpec { } "pass through the original exception if partial function does not handle it" in assertAllStagesStopped { - Source(1 to 3).map { a => if (a == 2) throw ex else a } + Source(1 to 3) + .map { a => + if (a == 2) throw ex else a + } .mapError { case t: IndexOutOfBoundsException => boom } .runWith(TestSink.probe[Int]) .requestNext(1) @@ -50,7 +59,8 @@ class FlowMapErrorSpec extends StreamSpec { } "not influence stream when there is no exceptions" in assertAllStagesStopped { - Source(1 to 3).map(identity) + Source(1 to 3) + .map(identity) .mapError { case t: Throwable => boom } .runWith(TestSink.probe[Int]) .request(3) @@ -59,7 +69,8 @@ class FlowMapErrorSpec extends StreamSpec { } "finish stream if it's empty" in assertAllStagesStopped { - Source.empty.map(identity) + Source.empty + .map(identity) .mapError { case t: Throwable => boom } .runWith(TestSink.probe[Int]) .request(1) diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala index 4e74e8bb87..1485c64bd7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMapSpec.scala @@ -11,23 +11,30 @@ import akka.stream.testkit._ class FlowMapSpec extends StreamSpec with ScriptedTest { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) "A Map" must { "map" in { - def script = Script(TestConfig.RandomTestRange map { _ => val x = random.nextInt(); Seq(x) -> Seq(x.toString) }: _*) - TestConfig.RandomTestRange foreach (_ => runScript(script, settings)(_.map(_.toString))) + def script = + Script(TestConfig.RandomTestRange.map { _ => + val x = random.nextInt(); Seq(x) -> Seq(x.toString) + }: _*) + TestConfig.RandomTestRange.foreach(_ => runScript(script, settings)(_.map(_.toString))) } "not blow up with high request counts" in { val probe = TestSubscriber.manualProbe[Int]() - Source(List(1)). - map(_ + 1).map(_ + 1).map(_ + 1).map(_ + 1).map(_ + 1). 
- runWith(Sink.asPublisher(false)).subscribe(probe) + Source(List(1)) + .map(_ + 1) + .map(_ + 1) + .map(_ + 1) + .map(_ + 1) + .map(_ + 1) + .runWith(Sink.asPublisher(false)) + .subscribe(probe) val subscription = probe.expectSubscription() for (_ <- 1 to 10000) { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala index 7485404055..c1e47bf1a1 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMergeSpec.scala @@ -27,8 +27,7 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { val source3 = Source(4 to 9) val probe = TestSubscriber.manualProbe[Int]() - source1.merge(source2).merge(source3) - .map(_ * 2).map(_ / 2).map(_ + 1).runWith(Sink.fromSubscriber(probe)) + source1.merge(source2).merge(source3).map(_ * 2).map(_ / 2).map(_ + 1).runWith(Sink.fromSubscriber(probe)) val subscription = probe.expectSubscription() @@ -87,8 +86,11 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { val up2 = TestPublisher.manualProbe[Int]() val down = TestSubscriber.manualProbe[Int]() - val (graphSubscriber1, graphSubscriber2) = Source.asSubscriber[Int] - .mergeMat(Source.asSubscriber[Int])((_, _)).toMat(Sink.fromSubscriber(down))(Keep.left).run + val (graphSubscriber1, graphSubscriber2) = Source + .asSubscriber[Int] + .mergeMat(Source.asSubscriber[Int])((_, _)) + .toMat(Sink.fromSubscriber(down))(Keep.left) + .run val downstream = down.expectSubscription() downstream.cancel() @@ -104,10 +106,7 @@ class FlowMergeSpec extends BaseTwoStreamsSetup { val up2 = TestPublisher.probe[Int]() val down = TestSubscriber.probe[Int]() - Source.fromPublisher(up1) - .merge(Source.fromPublisher(up2), eagerComplete = true) - .to(Sink.fromSubscriber(down)) - .run + Source.fromPublisher(up1).merge(Source.fromPublisher(up2), eagerComplete = true).to(Sink.fromSubscriber(down)).run 
up1.ensureSubscription() up2.ensureSubscription() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMonitorSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMonitorSpec.scala index f56115e7ed..194fbed015 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMonitorSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowMonitorSpec.scala @@ -76,8 +76,7 @@ class FlowMonitorSpec extends StreamSpec { TestSource.probe[Any].monitor.to(Sink.ignore).run()(mat) mat.shutdown() - awaitAssert( - monitor.state shouldBe a[FlowMonitorState.Failed], remainingOrDefault) + awaitAssert(monitor.state shouldBe a[FlowMonitorState.Failed], remainingOrDefault) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOnCompleteSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOnCompleteSpec.scala index 78190872cd..74352d6d3b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOnCompleteSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowOnCompleteSpec.scala @@ -16,8 +16,7 @@ import akka.testkit.TestProbe class FlowOnCompleteSpec extends StreamSpec with ScriptedTest { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -62,14 +61,17 @@ class FlowOnCompleteSpec extends StreamSpec with ScriptedTest { val onCompleteProbe = TestProbe() val p = TestPublisher.manualProbe[Int]() import system.dispatcher // for the Future.onComplete - val foreachSink = Sink.foreach[Int] { - x => onCompleteProbe.ref ! ("foreach-" + x) + val foreachSink = Sink.foreach[Int] { x => + onCompleteProbe.ref ! ("foreach-" + x) } - val future = Source.fromPublisher(p).map { x => - onCompleteProbe.ref ! 
("map-" + x) - x - }.runWith(foreachSink) - future onComplete { onCompleteProbe.ref ! _ } + val future = Source + .fromPublisher(p) + .map { x => + onCompleteProbe.ref ! ("map-" + x) + x + } + .runWith(foreachSink) + future.onComplete { onCompleteProbe.ref ! _ } val proc = p.expectSubscription proc.expectRequest() proc.sendNext(42) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrefixAndTailSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrefixAndTailSpec.scala index 9d62396d35..d6d93d6332 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrefixAndTailSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowPrefixAndTailSpec.scala @@ -14,8 +14,7 @@ import akka.stream.testkit.scaladsl.StreamTestKit._ class FlowPrefixAndTailSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val materializer = ActorMaterializer(settings) @@ -100,7 +99,8 @@ class FlowPrefixAndTailSpec extends StreamSpec { val subscriber2 = TestSubscriber.probe[Int]() tail.to(Sink.fromSubscriber(subscriber2)).run() - subscriber2.expectSubscriptionAndError().getMessage should ===("Substream Source cannot be materialized more than once") + subscriber2.expectSubscriptionAndError().getMessage should ===( + "Substream Source cannot be materialized more than once") subscriber1.requestNext(2).expectComplete() @@ -110,8 +110,8 @@ class FlowPrefixAndTailSpec extends StreamSpec { val ms = 300 val tightTimeoutMaterializer = - ActorMaterializer(ActorMaterializerSettings(system) - .withSubscriptionTimeoutSettings( + ActorMaterializer( + ActorMaterializerSettings(system).withSubscriptionTimeoutSettings( StreamSubscriptionTimeoutSettings(StreamSubscriptionTimeoutTerminationMode.cancel, ms.millisecond))) val futureSink = newHeadSink @@ -123,12 +123,13 @@ 
class FlowPrefixAndTailSpec extends StreamSpec { Thread.sleep(1000) tail.to(Sink.fromSubscriber(subscriber)).run()(tightTimeoutMaterializer) - subscriber.expectSubscriptionAndError().getMessage should ===(s"Substream Source has not been materialized in ${ms} milliseconds") + subscriber.expectSubscriptionAndError().getMessage should ===( + s"Substream Source has not been materialized in ${ms} milliseconds") } "not fail the stream if substream has not been subscribed in time and configured subscription timeout is noop" in assertAllStagesStopped { val tightTimeoutMaterializer = - ActorMaterializer(ActorMaterializerSettings(system) - .withSubscriptionTimeoutSettings( + ActorMaterializer( + ActorMaterializerSettings(system).withSubscriptionTimeoutSettings( StreamSubscriptionTimeoutSettings(StreamSubscriptionTimeoutTerminationMode.noop, 1.millisecond))) val futureSink = newHeadSink diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverSpec.scala index c42400fec1..5dc1aab051 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverSpec.scala @@ -21,7 +21,10 @@ class FlowRecoverSpec extends StreamSpec { "A Recover" must { "recover when there is a handler" in assertAllStagesStopped { - Source(1 to 4).map { a => if (a == 3) throw ex else a } + Source(1 to 4) + .map { a => + if (a == 3) throw ex else a + } .recover { case t: Throwable => 0 } .runWith(TestSink.probe[Int]) .requestNext(1) @@ -32,7 +35,10 @@ class FlowRecoverSpec extends StreamSpec { } "failed stream if handler is not for such exception type" in assertAllStagesStopped { - Source(1 to 3).map { a => if (a == 2) throw ex else a } + Source(1 to 3) + .map { a => + if (a == 2) throw ex else a + } .recover { case t: IndexOutOfBoundsException => 0 } .runWith(TestSink.probe[Int]) .requestNext(1) @@ -41,7 +47,8 @@ class 
FlowRecoverSpec extends StreamSpec { } "not influence stream when there is no exceptions" in assertAllStagesStopped { - Source(1 to 3).map(identity) + Source(1 to 3) + .map(identity) .recover { case t: Throwable => 0 } .runWith(TestSink.probe[Int]) .request(3) @@ -50,7 +57,8 @@ class FlowRecoverSpec extends StreamSpec { } "finish stream if it's empty" in assertAllStagesStopped { - Source.empty.map(identity) + Source.empty + .map(identity) .recover { case t: Throwable => 0 } .runWith(TestSink.probe[Int]) .request(1) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala index dc37ceb682..5747ac118b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowRecoverWithSpec.scala @@ -23,7 +23,10 @@ class FlowRecoverWithSpec extends StreamSpec { "A RecoverWith" must { "recover when there is a handler" in assertAllStagesStopped { - Source(1 to 4).map { a => if (a == 3) throw ex else a } + Source(1 to 4) + .map { a => + if (a == 3) throw ex else a + } .recoverWith { case t: Throwable => Source(List(0, -1)) } .runWith(TestSink.probe[Int]) .request(2) @@ -36,7 +39,10 @@ class FlowRecoverWithSpec extends StreamSpec { } "cancel substream if parent is terminated when there is a handler" in assertAllStagesStopped { - Source(1 to 4).map { a => if (a == 3) throw ex else a } + Source(1 to 4) + .map { a => + if (a == 3) throw ex else a + } .recoverWith { case t: Throwable => Source(List(0, -1)) } .runWith(TestSink.probe[Int]) .request(2) @@ -47,7 +53,10 @@ class FlowRecoverWithSpec extends StreamSpec { } "failed stream if handler is not for such exception type" in assertAllStagesStopped { - Source(1 to 3).map { a => if (a == 2) throw ex else a } + Source(1 to 3) + .map { a => + if (a == 2) throw ex else a + } .recoverWith { case t: IndexOutOfBoundsException => 
Source.single(0) } .runWith(TestSink.probe[Int]) .request(1) @@ -57,8 +66,11 @@ class FlowRecoverWithSpec extends StreamSpec { } "be able to recover with the same unmaterialized source if configured" in assertAllStagesStopped { - val src = Source(1 to 3).map { a => if (a == 3) throw ex else a } - src.recoverWith { case t: Throwable => src } + val src = Source(1 to 3).map { a => + if (a == 3) throw ex else a + } + src + .recoverWith { case t: Throwable => src } .runWith(TestSink.probe[Int]) .request(2) .expectNextN(1 to 2) @@ -70,7 +82,8 @@ class FlowRecoverWithSpec extends StreamSpec { } "not influence stream when there is no exceptions" in assertAllStagesStopped { - Source(1 to 3).map(identity) + Source(1 to 3) + .map(identity) .recoverWith { case t: Throwable => Source.single(0) } .runWith(TestSink.probe[Int]) .request(3) @@ -79,7 +92,8 @@ class FlowRecoverWithSpec extends StreamSpec { } "finish stream if it's empty" in assertAllStagesStopped { - Source.empty.map(identity) + Source.empty + .map(identity) .recoverWith { case t: Throwable => Source.single(0) } .runWith(TestSink.probe[Int]) .request(3) @@ -87,12 +101,16 @@ class FlowRecoverWithSpec extends StreamSpec { } "switch the second time if alternative source throws exception" in assertAllStagesStopped { - val k = Source(1 to 3).map { a => if (a == 3) throw new IndexOutOfBoundsException() else a } + val k = Source(1 to 3) + .map { a => + if (a == 3) throw new IndexOutOfBoundsException() else a + } .recoverWith { case t: IndexOutOfBoundsException => Source(List(11, 22)).map(m => if (m == 22) throw new IllegalArgumentException() else m) case t: IllegalArgumentException => Source(List(33, 44)) - }.runWith(TestSink.probe[Int]) + } + .runWith(TestSink.probe[Int]) .request(2) .expectNextN(List(1, 2)) .request(2) @@ -103,11 +121,15 @@ class FlowRecoverWithSpec extends StreamSpec { } "terminate with exception if partial function fails to match after an alternative source failure" in assertAllStagesStopped { - 
Source(1 to 3).map { a => if (a == 3) throw new IndexOutOfBoundsException() else a } + Source(1 to 3) + .map { a => + if (a == 3) throw new IndexOutOfBoundsException() else a + } .recoverWith { case t: IndexOutOfBoundsException => Source(List(11, 22)).map(m => if (m == 22) throw ex else m) - }.runWith(TestSink.probe[Int]) + } + .runWith(TestSink.probe[Int]) .request(2) .expectNextN(List(1, 2)) .request(1) @@ -117,11 +139,15 @@ class FlowRecoverWithSpec extends StreamSpec { } "terminate with exception after set number of retries" in assertAllStagesStopped { - Source(1 to 3).map { a => if (a == 3) throw new IndexOutOfBoundsException() else a } + Source(1 to 3) + .map { a => + if (a == 3) throw new IndexOutOfBoundsException() else a + } .recoverWithRetries(3, { case t: Throwable => Source(List(11, 22, 33)).map(m => if (m == 33) throw ex else m) - }).runWith(TestSink.probe[Int]) + }) + .runWith(TestSink.probe[Int]) .request(100) .expectNextN(List(1, 2)) .expectNextN(List(11, 22)) @@ -131,7 +157,10 @@ class FlowRecoverWithSpec extends StreamSpec { } "not attempt recovering when attempts is zero" in assertAllStagesStopped { - Source(1 to 3).map { a => if (a == 3) throw ex else a } + Source(1 to 3) + .map { a => + if (a == 3) throw ex else a + } .recoverWithRetries(0, { case t: Throwable => Source(List(22, 33)) }) .runWith(TestSink.probe[Int]) .request(100) @@ -140,7 +169,9 @@ class FlowRecoverWithSpec extends StreamSpec { } "recover infinitely when negative (-1) number of attempts given" in assertAllStagesStopped { - val oneThenBoom = Source(1 to 2).map { a => if (a == 2) throw ex else a } + val oneThenBoom = Source(1 to 2).map { a => + if (a == 2) throw ex else a + } oneThenBoom .recoverWithRetries(-1, { case t: Throwable => oneThenBoom }) @@ -151,7 +182,9 @@ class FlowRecoverWithSpec extends StreamSpec { } "recover infinitely when negative (smaller than -1) number of attempts given" in assertAllStagesStopped { - val oneThenBoom = Source(1 to 2).map { a => if (a == 2) 
throw ex else a } + val oneThenBoom = Source(1 to 2).map { a => + if (a == 2) throw ex else a + } oneThenBoom .recoverWithRetries(-10, { case t: Throwable => oneThenBoom }) @@ -171,9 +204,12 @@ class FlowRecoverWithSpec extends StreamSpec { } } - val result = Source.failed(TE("trigger")).recoverWithRetries(1, { - case _: TE => Source.fromGraph(FailingInnerMat) - }).runWith(Sink.ignore) + val result = Source + .failed(TE("trigger")) + .recoverWithRetries(1, { + case _: TE => Source.fromGraph(FailingInnerMat) + }) + .runWith(Sink.ignore) result.failed.futureValue should ===(matFail) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowReduceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowReduceSpec.scala index 7a4bad862a..777231962d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowReduceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowReduceSpec.scala @@ -29,19 +29,19 @@ class FlowReduceSpec extends StreamSpec { } "work when using Source.reduce" in assertAllStagesStopped { - Await.result(reduceSource runWith Sink.head, 3.seconds) should be(expected) + Await.result(reduceSource.runWith(Sink.head), 3.seconds) should be(expected) } "work when using Sink.reduce" in assertAllStagesStopped { - Await.result(inputSource runWith reduceSink, 3.seconds) should be(expected) + Await.result(inputSource.runWith(reduceSink), 3.seconds) should be(expected) } "work when using Flow.reduce" in assertAllStagesStopped { - Await.result(inputSource via reduceFlow runWith Sink.head, 3.seconds) should be(expected) + Await.result(inputSource.via(reduceFlow).runWith(Sink.head), 3.seconds) should be(expected) } "work when using Source.reduce + Flow.reduce + Sink.reduce" in assertAllStagesStopped { - Await.result(reduceSource via reduceFlow runWith reduceSink, 3.seconds) should be(expected) + Await.result(reduceSource.via(reduceFlow).runWith(reduceSink), 3.seconds) should be(expected) } "propagate an error" 
in assertAllStagesStopped { @@ -59,14 +59,16 @@ class FlowReduceSpec extends StreamSpec { "resume with the accumulated state when the folding function throws and the supervisor strategy decides to resume" in assertAllStagesStopped { val error = TE("Boom!") val reduce = Sink.reduce[Int]((x, y) => if (y == 50) throw error else x + y) - val future = inputSource.runWith(reduce.withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))) + val future = + inputSource.runWith(reduce.withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))) Await.result(future, 3.seconds) should be(expected - 50) } "resume and reset the state when the folding function throws when the supervisor strategy decides to restart" in assertAllStagesStopped { val error = TE("Boom!") val reduce = Sink.reduce[Int]((x, y) => if (y == 50) throw error else x + y) - val future = inputSource.runWith(reduce.withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider))) + val future = + inputSource.runWith(reduce.withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider))) Await.result(future, 3.seconds) should be((51 to 100).sum) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanAsyncSpec.scala index 86ee221e8f..6fe3a3a742 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanAsyncSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanAsyncSpec.scala @@ -30,16 +30,13 @@ class FlowScanAsyncSpec extends StreamSpec { } "work with a empty source" in { - Source.empty[Int] - .via(sumScanFlow) - .runWith(TestSink.probe[Int]) - .request(1) - .expectNextOrComplete(0) + Source.empty[Int].via(sumScanFlow).runWith(TestSink.probe[Int]).request(1).expectNextOrComplete(0) } "complete after zero-element has been consumed" in { val (pub, sub) = - TestSource.probe[Int] + TestSource + 
.probe[Int] .via(Flow[Int].scanAsync(0)((acc, in) => Future.successful(acc + in))) .toMat(TestSink.probe)(Keep.both) .run() @@ -52,7 +49,8 @@ class FlowScanAsyncSpec extends StreamSpec { "complete after stream has been consumed and pending futures resolved" in { val (pub, sub) = - TestSource.probe[Int] + TestSource + .probe[Int] .via(Flow[Int].scanAsync(0)((acc, in) => Future.successful(acc + in))) .toMat(TestSink.probe)(Keep.both) .run() @@ -67,7 +65,8 @@ class FlowScanAsyncSpec extends StreamSpec { "fail after zero-element has been consumed" in { val (pub, sub) = - TestSource.probe[Int] + TestSource + .probe[Int] .via(Flow[Int].scanAsync(0)((acc, in) => Future.successful(acc + in))) .toMat(TestSink.probe)(Keep.both) .run() @@ -79,21 +78,16 @@ class FlowScanAsyncSpec extends StreamSpec { } "work with a single source" in { - Source.single(1) - .via(sumScanFlow) - .runWith(TestSink.probe[Int]) - .request(2) - .expectNext(0, 1) - .expectComplete() + Source.single(1).via(sumScanFlow).runWith(TestSink.probe[Int]).request(2).expectNext(0, 1).expectComplete() } "work with a large source" in { val elements = 1 to 100000 val expectedSum = elements.sum - val eventualActual: Future[Int] = Source(elements) - .via(sumScanFlow) - .runWith(Sink.last) - whenReady(eventualActual) { actual => assert(actual === expectedSum) } + val eventualActual: Future[Int] = Source(elements).via(sumScanFlow).runWith(Sink.last) + whenReady(eventualActual) { actual => + assert(actual === expectedSum) + } } "work with slow futures" in { @@ -114,7 +108,8 @@ class FlowScanAsyncSpec extends StreamSpec { "throw error with a failed source" in { val expected = Utils.TE("failed source") - Source.failed[Int](expected) + Source + .failed[Int](expected) .via(sumScanFlow) .runWith(TestSink.probe[Int]) .request(2) @@ -124,16 +119,12 @@ class FlowScanAsyncSpec extends StreamSpec { "with the restarting decider" should { "skip error values with a failed scan" in { val elements = 1 :: -1 :: 1 :: Nil - 
whenFailedScan(elements, 0, decider = Supervision.restartingDecider) - .expectNext(1, 1) - .expectComplete() + whenFailedScan(elements, 0, decider = Supervision.restartingDecider).expectNext(1, 1).expectComplete() } "emit zero with a failed future" in { val elements = 1 :: -1 :: 1 :: Nil - whenFailedFuture(elements, 0, decider = Supervision.restartingDecider) - .expectNext(1, 1) - .expectComplete() + whenFailedFuture(elements, 0, decider = Supervision.restartingDecider).expectNext(1, 1).expectComplete() } "skip error values and handle stage completion after future get resolved" in { @@ -162,16 +153,12 @@ class FlowScanAsyncSpec extends StreamSpec { "with the resuming decider" should { "skip values with a failed scan" in { val elements = 1 :: -1 :: 1 :: Nil - whenFailedScan(elements, 0, decider = Supervision.resumingDecider) - .expectNext(1, 2) - .expectComplete() + whenFailedScan(elements, 0, decider = Supervision.resumingDecider).expectNext(1, 2).expectComplete() } "skip values with a failed future" in { val elements = 1 :: -1 :: 1 :: Nil - whenFailedFuture(elements, 0, decider = Supervision.resumingDecider) - .expectNext(1, 2) - .expectComplete() + whenFailedFuture(elements, 0, decider = Supervision.resumingDecider).expectNext(1, 2).expectComplete() } "skip error values and handle stage completion after future get resolved" in { @@ -201,32 +188,28 @@ class FlowScanAsyncSpec extends StreamSpec { "throw error with a failed scan function" in { val expected = Utils.TE("failed scan function") val elements = -1 :: Nil - whenFailedScan(elements, 0, expected) - .expectError(expected) + whenFailedScan(elements, 0, expected).expectError(expected) } "throw error with a failed future" in { val expected = Utils.TE("failed future generated from scan function") val elements = -1 :: Nil - whenFailedFuture(elements, 0, expected) - .expectError(expected) + whenFailedFuture(elements, 0, expected).expectError(expected) } "throw error with a null element" in { val expectedMessage = 
ReactiveStreamsCompliance.ElementMustNotBeNullMsg val elements = "null" :: Nil - val actual = whenNullElement(elements, "") - .expectError() + val actual = whenNullElement(elements, "").expectError() assert(actual.getClass === classOf[NullPointerException]) assert(actual.getMessage === expectedMessage) } } - def whenFailedScan( - elements: immutable.Seq[Int], - zero: Int, - throwable: Throwable = new Exception("non fatal exception"), - decider: Supervision.Decider = Supervision.stoppingDecider): Probe[Int] = { + def whenFailedScan(elements: immutable.Seq[Int], + zero: Int, + throwable: Throwable = new Exception("non fatal exception"), + decider: Supervision.Decider = Supervision.stoppingDecider): Probe[Int] = { val failedScanFlow = Flow[Int].scanAsync(zero) { (accumulator: Int, next: Int) => if (next >= 0) Future(accumulator + next) else throw throwable @@ -239,17 +222,17 @@ class FlowScanAsyncSpec extends StreamSpec { .expectNext(zero) } - def whenEventualFuture( - promises: immutable.Seq[Promise[Int]], - zero: Int, - decider: Supervision.Decider = Supervision.stoppingDecider - ): (TestPublisher.Probe[Int], TestSubscriber.Probe[Int]) = { + def whenEventualFuture(promises: immutable.Seq[Promise[Int]], + zero: Int, + decider: Supervision.Decider = Supervision.stoppingDecider) + : (TestPublisher.Probe[Int], TestSubscriber.Probe[Int]) = { require(promises.nonEmpty, "must be at least one promise") val promiseScanFlow = Flow[Int].scanAsync(zero) { (accumulator: Int, next: Int) => promises(next).future } - val (pub, sub) = TestSource.probe[Int] + val (pub, sub) = TestSource + .probe[Int] .via(promiseScanFlow) .withAttributes(ActorAttributes.supervisionStrategy(decider)) .toMat(TestSink.probe)(Keep.both) @@ -260,11 +243,10 @@ class FlowScanAsyncSpec extends StreamSpec { (pub, sub) } - def whenFailedFuture( - elements: immutable.Seq[Int], - zero: Int, - throwable: Throwable = new Exception("non fatal exception"), - decider: Supervision.Decider = 
Supervision.stoppingDecider): Probe[Int] = { + def whenFailedFuture(elements: immutable.Seq[Int], + zero: Int, + throwable: Throwable = new Exception("non fatal exception"), + decider: Supervision.Decider = Supervision.stoppingDecider): Probe[Int] = { val failedFutureScanFlow = Flow[Int].scanAsync(zero) { (accumulator: Int, next: Int) => if (next >= 0) Future(accumulator + next) else Future.failed(throwable) @@ -277,10 +259,9 @@ class FlowScanAsyncSpec extends StreamSpec { .expectNext(zero) } - def whenNullElement( - elements: immutable.Seq[String], - zero: String, - decider: Supervision.Decider = Supervision.stoppingDecider): Probe[String] = { + def whenNullElement(elements: immutable.Seq[String], + zero: String, + decider: Supervision.Decider = Supervision.stoppingDecider): Probe[String] = { val nullFutureScanFlow: Flow[String, String, _] = Flow[String].scanAsync(zero) { (_: String, next: String) => if (next != "null") Future(next) else Future(null) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanSpec.scala index 7d3abd8f38..4793b35380 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowScanSpec.scala @@ -18,8 +18,7 @@ import java.util.concurrent.ThreadLocalRandom.{ current => random } class FlowScanSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -35,7 +34,7 @@ class FlowScanSpec extends StreamSpec { "Scan empty failed" in assertAllStagesStopped { val e = new Exception("fail!") - intercept[Exception](scan(Source.failed[Int](e))) should be theSameInstanceAs (e) + (intercept[Exception](scan(Source.failed[Int](e))) should 
be).theSameInstanceAs(e) } "Scan empty" in assertAllStagesStopped { @@ -49,26 +48,32 @@ class FlowScanSpec extends StreamSpec { "restart properly" in { import ActorAttributes._ - val scan = Flow[Int].scan(0) { (old, current) => - require(current > 0) - old + current - }.withAttributes(supervisionStrategy(Supervision.restartingDecider)) - Source(List(1, 3, -1, 5, 7)).via(scan).runWith(TestSink.probe) - .toStrict(1.second) should ===(Seq(0, 1, 4, 0, 5, 12)) + val scan = Flow[Int] + .scan(0) { (old, current) => + require(current > 0) + old + current + } + .withAttributes(supervisionStrategy(Supervision.restartingDecider)) + Source(List(1, 3, -1, 5, 7)).via(scan).runWith(TestSink.probe).toStrict(1.second) should ===( + Seq(0, 1, 4, 0, 5, 12)) } "resume properly" in { import ActorAttributes._ - val scan = Flow[Int].scan(0) { (old, current) => - require(current > 0) - old + current - }.withAttributes(supervisionStrategy(Supervision.resumingDecider)) - Source(List(1, 3, -1, 5, 7)).via(scan).runWith(TestSink.probe) - .toStrict(1.second) should ===(Seq(0, 1, 4, 9, 16)) + val scan = Flow[Int] + .scan(0) { (old, current) => + require(current > 0) + old + current + } + .withAttributes(supervisionStrategy(Supervision.resumingDecider)) + Source(List(1, 3, -1, 5, 7)).via(scan).runWith(TestSink.probe).toStrict(1.second) should ===(Seq(0, 1, 4, 9, 16)) } "scan normally for empty source" in { - Source.empty[Int].scan(0) { case (a, b) => a + b }.runWith(TestSink.probe[Int]) + Source + .empty[Int] + .scan(0) { case (a, b) => a + b } + .runWith(TestSink.probe[Int]) .request(2) .expectNext(0) .expectComplete() @@ -76,7 +81,8 @@ class FlowScanSpec extends StreamSpec { "fail when upstream failed" in { val ex = TE("") - Source.failed[Int](ex) + Source + .failed[Int](ex) .scan(0) { case (a, b) => a + b } .runWith(TestSink.probe[Int]) .request(2) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala index 9b36fd472f..01b15edfe2 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSectionSpec.scala @@ -23,7 +23,7 @@ class FlowSectionSpec extends StreamSpec(FlowSectionSpec.config) { implicit val materializer = ActorMaterializer() - "A flow" can { + "A flow".can { "have an op with a different dispatcher" in { val flow = Flow[Int].map(sendThreadNameTo(testActor)).withAttributes(dispatcher("my-dispatcher1")) @@ -34,8 +34,11 @@ class FlowSectionSpec extends StreamSpec(FlowSectionSpec.config) { } "have a nested flow with a different dispatcher" in { - Source.single(1).via( - Flow[Int].map(sendThreadNameTo(testActor)).withAttributes(dispatcher("my-dispatcher1"))).to(Sink.ignore).run() + Source + .single(1) + .via(Flow[Int].map(sendThreadNameTo(testActor)).withAttributes(dispatcher("my-dispatcher1"))) + .to(Sink.ignore) + .run() expectMsgType[String] should include("my-dispatcher1") } @@ -70,7 +73,9 @@ class FlowSectionSpec extends StreamSpec(FlowSectionSpec.config) { val customDispatcher = TestProbe() val f1 = Flow[Int].map(sendThreadNameTo(defaultDispatcher.ref)) - val f2 = Flow[Int].map(sendThreadNameTo(customDispatcher.ref)).map(x => x) + val f2 = Flow[Int] + .map(sendThreadNameTo(customDispatcher.ref)) + .map(x => x) .withAttributes(dispatcher("my-dispatcher1") and name("separate-disptacher")) Source(0 to 2).via(f1).via(f2).runWith(Sink.ignore) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSlidingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSlidingSpec.scala index 458fa5eb35..08f6c48d40 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSlidingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSlidingSpec.scala @@ -13,8 +13,7 @@ import akka.pattern.pipe class FlowSlidingSpec extends StreamSpec 
with GeneratorDrivenPropertyChecks { import system.dispatcher - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -23,8 +22,13 @@ class FlowSlidingSpec extends StreamSpec with GeneratorDrivenPropertyChecks { def check(gen: Gen[(Int, Int, Int)]): Unit = forAll(gen, MinSize(1000), MaxSize(1000)) { case (len, win, step) => - val af = Source.fromIterator(() => Iterator.from(0).take(len)).sliding(win, step).runFold(Seq.empty[Seq[Int]])(_ :+ _) - val cf = Source.fromIterator(() => Iterator.from(0).take(len).sliding(win, step)).runFold(Seq.empty[Seq[Int]])(_ :+ _) + val af = Source + .fromIterator(() => Iterator.from(0).take(len)) + .sliding(win, step) + .runFold(Seq.empty[Seq[Int]])(_ :+ _) + val cf = Source + .fromIterator(() => Iterator.from(0).take(len).sliding(win, step)) + .runFold(Seq.empty[Seq[Int]])(_ :+ _) af.futureValue should be(cf.futureValue) } @@ -53,7 +57,7 @@ class FlowSlidingSpec extends StreamSpec with GeneratorDrivenPropertyChecks { } "work with empty sources" in assertAllStagesStopped { - Source.empty.sliding(1).runForeach(testActor ! _).map(_ => "done") pipeTo testActor + Source.empty.sliding(1).runForeach(testActor ! 
_).map(_ => "done").pipeTo(testActor) expectMsg("done") } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala index 585a3837aa..7c4abe06c6 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSpec.scala @@ -25,18 +25,18 @@ object FlowSpec { class Fruit class Apple extends Fruit class Orange extends Fruit - val fruits = () => new Iterator[Fruit] { - override def hasNext: Boolean = true - override def next(): Fruit = if (ThreadLocalRandom.current().nextBoolean()) new Apple else new Orange - } + val fruits = () => + new Iterator[Fruit] { + override def hasNext: Boolean = true + override def next(): Fruit = if (ThreadLocalRandom.current().nextBoolean()) new Apple else new Orange + } } class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.receive=off\nakka.loglevel=INFO")) { import FlowSpec._ - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val materializer = ActorMaterializer(settings) @@ -257,25 +257,26 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re "be covariant" in { val f1: Source[Fruit, _] = Source.fromIterator[Fruit](fruits) val p1: Publisher[Fruit] = Source.fromIterator[Fruit](fruits).runWith(Sink.asPublisher(false)) - val f2: SubFlow[Fruit, _, Source[Fruit, NotUsed]#Repr, _] = Source.fromIterator[Fruit](fruits).splitWhen(_ => true) - val f3: SubFlow[Fruit, _, Source[Fruit, NotUsed]#Repr, _] = Source.fromIterator[Fruit](fruits).groupBy(2, _ => true) + val f2: SubFlow[Fruit, _, Source[Fruit, NotUsed]#Repr, _] = + Source.fromIterator[Fruit](fruits).splitWhen(_ => true) + val f3: SubFlow[Fruit, _, Source[Fruit, NotUsed]#Repr, _] = + 
Source.fromIterator[Fruit](fruits).groupBy(2, _ => true) val f4: Source[(immutable.Seq[Fruit], Source[Fruit, _]), _] = Source.fromIterator[Fruit](fruits).prefixAndTail(1) - val d1: SubFlow[Fruit, _, Flow[String, Fruit, NotUsed]#Repr, _] = Flow[String].map(_ => new Apple).splitWhen(_ => true) - val d2: SubFlow[Fruit, _, Flow[String, Fruit, NotUsed]#Repr, _] = Flow[String].map(_ => new Apple).groupBy(2, _ => true) - val d3: Flow[String, (immutable.Seq[Apple], Source[Fruit, _]), _] = Flow[String].map(_ => new Apple).prefixAndTail(1) + val d1: SubFlow[Fruit, _, Flow[String, Fruit, NotUsed]#Repr, _] = + Flow[String].map(_ => new Apple).splitWhen(_ => true) + val d2: SubFlow[Fruit, _, Flow[String, Fruit, NotUsed]#Repr, _] = + Flow[String].map(_ => new Apple).groupBy(2, _ => true) + val d3: Flow[String, (immutable.Seq[Apple], Source[Fruit, _]), _] = + Flow[String].map(_ => new Apple).prefixAndTail(1) } "be possible to convert to a processor, and should be able to take a Processor" in { val identity1 = Flow[Int].toProcessor val identity2 = Flow.fromProcessor(() => identity1.run()) - Await.result( - Source(1 to 10).via(identity2).limit(100).runWith(Sink.seq), - 3.seconds) should ===(1 to 10) + Await.result(Source(1 to 10).via(identity2).limit(100).runWith(Sink.seq), 3.seconds) should ===(1 to 10) // Reusable: - Await.result( - Source(1 to 10).via(identity2).limit(100).runWith(Sink.seq), - 3.seconds) should ===(1 to 10) + Await.result(Source(1 to 10).via(identity2).limit(100).runWith(Sink.seq), 3.seconds) should ===(1 to 10) } "eliminate passed in when matval from passed in not used" in { @@ -287,7 +288,7 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re "not eliminate passed in when matval from passed in is used" in { val map = Flow.fromFunction((n: Int) => n + 1) val result = map.viaMat(Flow[Int])(Keep.right) - result shouldNot be theSameInstanceAs (map) + (result shouldNot be).theSameInstanceAs(map) } "eliminate itself if identity" in { 
@@ -299,15 +300,14 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re "not eliminate itself if identity but matval is used" in { val map = Flow.fromFunction((n: Int) => n + 1) val result = Flow[Int].viaMat(map)(Keep.left) - result shouldNot be theSameInstanceAs (map) + (result shouldNot be).theSameInstanceAs(map) } } "A Flow with multiple subscribers (FanOutBox)" must { "adapt speed to the currently slowest subscriber" in { - new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), - toFanoutPublisher(1)) { + new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), toFanoutPublisher(1)) { val downstream2 = TestSubscriber.manualProbe[Any]() publisher.subscribe(downstream2) val downstream2Subscription = downstream2.expectSubscription() @@ -333,8 +333,7 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re } "support slow subscriber with fan-out 2" in { - new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), - toFanoutPublisher(2)) { + new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), toFanoutPublisher(2)) { val downstream2 = TestSubscriber.manualProbe[Any]() publisher.subscribe(downstream2) val downstream2Subscription = downstream2.expectSubscription() @@ -373,8 +372,7 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re } "support incoming subscriber while elements were requested before" in { - new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), - toFanoutPublisher(1)) { + new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), toFanoutPublisher(1)) { downstreamSubscription.request(5) upstream.expectRequest(upstreamSubscription, 1) upstreamSubscription.sendNext("a1") @@ -411,8 +409,7 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re } "be unblocked when blocking subscriber cancels 
subscription" in { - new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), - toFanoutPublisher(1)) { + new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), toFanoutPublisher(1)) { val downstream2 = TestSubscriber.manualProbe[Any]() publisher.subscribe(downstream2) val downstream2Subscription = downstream2.expectSubscription() @@ -448,8 +445,7 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re } "call future subscribers' onError after onSubscribe if initial upstream was completed" in { - new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), - toFanoutPublisher(1)) { + new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), toFanoutPublisher(1)) { val downstream2 = TestSubscriber.manualProbe[Any]() // don't link it just yet @@ -487,8 +483,9 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re } "call future subscribers' onError should be called instead of onSubscribed after initial upstream reported an error" in { - new ChainSetup[Int, String, NotUsed](_.map(_ => throw TestException), settings.withInputBuffer(initialSize = 1, maxSize = 1), - toFanoutPublisher(1)) { + new ChainSetup[Int, String, NotUsed](_.map(_ => throw TestException), + settings.withInputBuffer(initialSize = 1, maxSize = 1), + toFanoutPublisher(1)) { downstreamSubscription.request(1) upstreamSubscription.expectRequest(1) @@ -504,8 +501,7 @@ class FlowSpec extends StreamSpec(ConfigFactory.parseString("akka.actor.debug.re } "call future subscribers' onError when all subscriptions were cancelled" in { - new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), - toFanoutPublisher(16)) { + new ChainSetup(identity, settings.withInputBuffer(initialSize = 1, maxSize = 1), toFanoutPublisher(16)) { upstreamSubscription.expectRequest(1) downstreamSubscription.cancel() upstreamSubscription.expectCancellation() diff 
--git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala index 852e8b8d61..7702b46f73 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitAfterSpec.scala @@ -30,7 +30,8 @@ class FlowSplitAfterSpec extends StreamSpec { val settings = ActorMaterializerSettings(system) .withInputBuffer(initialSize = 2, maxSize = 2) - .withSubscriptionTimeoutSettings(StreamSubscriptionTimeoutSettings(StreamSubscriptionTimeoutTerminationMode.cancel, 1.second)) + .withSubscriptionTimeoutSettings( + StreamSubscriptionTimeoutSettings(StreamSubscriptionTimeoutTerminationMode.cancel, 1.second)) implicit val materializer = ActorMaterializer(settings) @@ -47,10 +48,9 @@ class FlowSplitAfterSpec extends StreamSpec { def cancel(): Unit = subscription.cancel() } - class SubstreamsSupport( - splitAfter: Int = 3, - elementCount: Int = 6, - substreamCancelStrategy: SubstreamCancelStrategy = SubstreamCancelStrategy.drain) { + class SubstreamsSupport(splitAfter: Int = 3, + elementCount: Int = 6, + substreamCancelStrategy: SubstreamCancelStrategy = SubstreamCancelStrategy.drain) { val source = Source(1 to elementCount) val groupStream = source.splitAfter(substreamCancelStrategy)(_ == splitAfter).lift.runWith(Sink.asPublisher(false)) @@ -121,9 +121,12 @@ class FlowSplitAfterSpec extends StreamSpec { "work with single elem splits" in assertAllStagesStopped { Await.result( - Source(1 to 10).splitAfter(_ => true).lift + Source(1 to 10) + .splitAfter(_ => true) + .lift .mapAsync(1)(_.runWith(Sink.head)) // Please note that this line *also* implicitly asserts nonempty substreams - .grouped(10).runWith(Sink.head), + .grouped(10) + .runWith(Sink.head), 3.second) should ===(1 to 10) } @@ -162,7 +165,8 @@ class FlowSplitAfterSpec extends StreamSpec { "fail stream when splitAfter function throws" 
in assertAllStagesStopped { val publisherProbeProbe = TestPublisher.manualProbe[Int]() val exc = TE("test") - val publisher = Source.fromPublisher(publisherProbeProbe) + val publisher = Source + .fromPublisher(publisherProbeProbe) .splitAfter(elem => if (elem == 3) throw exc else elem % 3 == 0) .lift .runWith(Sink.asPublisher(false)) @@ -197,7 +201,8 @@ class FlowSplitAfterSpec extends StreamSpec { pending val publisherProbeProbe = TestPublisher.manualProbe[Int]() val exc = TE("test") - val publisher = Source.fromPublisher(publisherProbeProbe) + val publisher = Source + .fromPublisher(publisherProbeProbe) .splitAfter(elem => if (elem == 3) throw exc else elem % 3 == 0) .lift .withAttributes(ActorAttributes.supervisionStrategy(resumingDecider)) @@ -281,18 +286,15 @@ class FlowSplitAfterSpec extends StreamSpec { "fail stream if substream not materialized in time" in assertAllStagesStopped { val tightTimeoutMaterializer = - ActorMaterializer(ActorMaterializerSettings(system) - .withSubscriptionTimeoutSettings( + ActorMaterializer( + ActorMaterializerSettings(system).withSubscriptionTimeoutSettings( StreamSubscriptionTimeoutSettings(StreamSubscriptionTimeoutTerminationMode.cancel, 500.millisecond))) val testSource = Source.single(1).concat(Source.maybe).splitAfter(_ => true) a[SubscriptionTimeoutException] mustBe thrownBy { Await.result( - testSource.lift - .delay(1.second) - .flatMapConcat(identity) - .runWith(Sink.ignore)(tightTimeoutMaterializer), + testSource.lift.delay(1.second).flatMapConcat(identity).runWith(Sink.ignore)(tightTimeoutMaterializer), 3.seconds) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala index adeb0459c5..20e06a8551 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSplitWhenSpec.scala @@ -22,7 +22,8 @@ class 
FlowSplitWhenSpec extends StreamSpec { val settings = ActorMaterializerSettings(system) .withInputBuffer(initialSize = 2, maxSize = 2) - .withSubscriptionTimeoutSettings(StreamSubscriptionTimeoutSettings(StreamSubscriptionTimeoutTerminationMode.cancel, 1.second)) + .withSubscriptionTimeoutSettings( + StreamSubscriptionTimeoutSettings(StreamSubscriptionTimeoutTerminationMode.cancel, 1.second)) implicit val materializer = ActorMaterializer(settings) @@ -39,10 +40,9 @@ class FlowSplitWhenSpec extends StreamSpec { def cancel(): Unit = subscription.cancel() } - class SubstreamsSupport( - splitWhen: Int = 3, - elementCount: Int = 6, - substreamCancelStrategy: SubstreamCancelStrategy = SubstreamCancelStrategy.drain) { + class SubstreamsSupport(splitWhen: Int = 3, + elementCount: Int = 6, + substreamCancelStrategy: SubstreamCancelStrategy = SubstreamCancelStrategy.drain) { val source = Source(1 to elementCount) val groupStream = source.splitWhen(substreamCancelStrategy)(_ == splitWhen).lift.runWith(Sink.asPublisher(false)) @@ -95,9 +95,13 @@ class FlowSplitWhenSpec extends StreamSpec { "not emit substreams if the parent stream is empty" in assertAllStagesStopped { Await.result( - Source.empty[Int] - .splitWhen(_ => true).lift - .mapAsync(1)(_.runWith(Sink.headOption)).grouped(10).runWith(Sink.headOption), + Source + .empty[Int] + .splitWhen(_ => true) + .lift + .mapAsync(1)(_.runWith(Sink.headOption)) + .grouped(10) + .runWith(Sink.headOption), 3.seconds) should ===(None) // rather tricky way of saying that no empty substream should be emitted (vs. 
Some(None)) } @@ -142,7 +146,8 @@ class FlowSplitWhenSpec extends StreamSpec { val substream = TestSubscriber.probe[Int]() val masterStream = TestSubscriber.probe[Any]() - Source.fromPublisher(inputs) + Source + .fromPublisher(inputs) .splitWhen(_ == 2) .lift .map(_.runWith(Sink.fromSubscriber(substream))) @@ -159,11 +164,7 @@ class FlowSplitWhenSpec extends StreamSpec { inputs.expectCancellation() val inputs2 = TestPublisher.probe[Int]() - Source.fromPublisher(inputs2) - .splitWhen(_ == 2) - .lift - .map(_.runWith(Sink.cancelled)) - .runWith(Sink.cancelled) + Source.fromPublisher(inputs2).splitWhen(_ == 2).lift.map(_.runWith(Sink.cancelled)).runWith(Sink.cancelled) inputs2.expectCancellation() @@ -172,10 +173,7 @@ class FlowSplitWhenSpec extends StreamSpec { val substream3 = TestSubscriber.probe[Int]() val masterStream3 = TestSubscriber.probe[Source[Int, Any]]() - Source.fromPublisher(inputs3) - .splitWhen(_ == 2) - .lift - .runWith(Sink.fromSubscriber(masterStream3)) + Source.fromPublisher(inputs3).splitWhen(_ == 2).lift.runWith(Sink.fromSubscriber(masterStream3)) masterStream3.request(1) inputs3.sendNext(1) @@ -222,7 +220,8 @@ class FlowSplitWhenSpec extends StreamSpec { "fail stream when splitWhen function throws" in assertAllStagesStopped { val publisherProbeProbe = TestPublisher.manualProbe[Int]() val exc = TE("test") - val publisher = Source.fromPublisher(publisherProbeProbe) + val publisher = Source + .fromPublisher(publisherProbeProbe) .splitWhen(elem => if (elem == 3) throw exc else elem % 3 == 0) .lift .runWith(Sink.asPublisher(false)) @@ -254,19 +253,26 @@ class FlowSplitWhenSpec extends StreamSpec { "work with single elem splits" in assertAllStagesStopped { Await.result( - Source(1 to 100).splitWhen(_ => true).lift + Source(1 to 100) + .splitWhen(_ => true) + .lift .mapAsync(1)(_.runWith(Sink.head)) // Please note that this line *also* implicitly asserts nonempty substreams - .grouped(200).runWith(Sink.head), + .grouped(200) + .runWith(Sink.head), 
3.second) should ===(1 to 100) } "fail substream if materialized twice" in assertAllStagesStopped { - implicit val mat = ActorMaterializer(ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 1, maxSize = 1)) + implicit val mat = + ActorMaterializer(ActorMaterializerSettings(system).withInputBuffer(initialSize = 1, maxSize = 1)) import system.dispatcher - val probe = Source(1 to 5).splitWhen(_ => true).lift - .map { src => src.runWith(Sink.ignore)(mat).flatMap(_ => src.runWith(Sink.ignore)(mat)) } + val probe = Source(1 to 5) + .splitWhen(_ => true) + .lift + .map { src => + src.runWith(Sink.ignore)(mat).flatMap(_ => src.runWith(Sink.ignore)(mat)) + } .runWith(TestSink.probe[Future[Done]])(mat) probe.request(1) val future = probe.requestNext() @@ -278,18 +284,15 @@ class FlowSplitWhenSpec extends StreamSpec { "fail stream if substream not materialized in time" in assertAllStagesStopped { val tightTimeoutMaterializer = - ActorMaterializer(ActorMaterializerSettings(system) - .withSubscriptionTimeoutSettings( + ActorMaterializer( + ActorMaterializerSettings(system).withSubscriptionTimeoutSettings( StreamSubscriptionTimeoutSettings(StreamSubscriptionTimeoutTerminationMode.cancel, 500.millisecond))) val testSource = Source.single(1).concat(Source.maybe).splitWhen(_ => true) a[SubscriptionTimeoutException] mustBe thrownBy { Await.result( - testSource.lift - .delay(1.second) - .flatMapConcat(identity) - .runWith(Sink.ignore)(tightTimeoutMaterializer), + testSource.lift.delay(1.second).flatMapConcat(identity).runWith(Sink.ignore)(tightTimeoutMaterializer), 3.seconds) } } @@ -300,7 +303,8 @@ class FlowSplitWhenSpec extends StreamSpec { val publisherProbeProbe = TestPublisher.manualProbe[Int]() val exc = TE("test") - val publisher = Source.fromPublisher(publisherProbeProbe) + val publisher = Source + .fromPublisher(publisherProbeProbe) .splitWhen(elem => if (elem == 3) throw exc else elem % 3 == 0) .lift 
.withAttributes(ActorAttributes.supervisionStrategy(resumingDecider)) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala index a33e16f08e..8ed7ed844d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowStatefulMapConcatSpec.scala @@ -5,78 +5,83 @@ package akka.stream.scaladsl import akka.stream.testkit.scaladsl.TestSink -import akka.stream.{ ActorMaterializer, ActorAttributes, Supervision, ActorMaterializerSettings } +import akka.stream.{ ActorAttributes, ActorMaterializer, ActorMaterializerSettings, Supervision } import akka.stream.testkit._ import scala.util.control.NoStackTrace class FlowStatefulMapConcatSpec extends StreamSpec with ScriptedTest { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) val ex = new Exception("TEST") with NoStackTrace "A StatefulMapConcat" must { "work in happy case" in { - val script = Script( - Seq(2) -> Seq(), - Seq(1) -> Seq(1, 1), - Seq(3) -> Seq(3), - Seq(6) -> Seq(6, 6, 6)) - TestConfig.RandomTestRange foreach (_ => runScript(script, settings)(_.statefulMapConcat(() => { - var prev: Option[Int] = None - x => prev match { - case Some(e) => - prev = Some(x) - (1 to e) map (_ => x) - case None => - prev = Some(x) - List.empty[Int] - } - }))) + val script = Script(Seq(2) -> Seq(), Seq(1) -> Seq(1, 1), Seq(3) -> Seq(3), Seq(6) -> Seq(6, 6, 6)) + TestConfig.RandomTestRange.foreach(_ => + runScript(script, settings)(_.statefulMapConcat(() => { + var prev: Option[Int] = None + x => + prev match { + case Some(e) => + prev = Some(x) + (1 to e).map(_ => x) + case None => + prev = Some(x) + 
List.empty[Int] + } + }))) } "be able to restart" in { - Source(List(2, 1, 3, 4, 1)).statefulMapConcat(() => { - var prev: Option[Int] = None - x => { - if (x % 3 == 0) throw ex - prev match { - case Some(e) => - prev = Some(x) - (1 to e) map (_ => x) - case None => - prev = Some(x) - List.empty[Int] + Source(List(2, 1, 3, 4, 1)) + .statefulMapConcat(() => { + var prev: Option[Int] = None + x => { + if (x % 3 == 0) throw ex + prev match { + case Some(e) => + prev = Some(x) + (1 to e).map(_ => x) + case None => + prev = Some(x) + List.empty[Int] + } } - } - }).withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) + }) + .withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) .runWith(TestSink.probe[Int]) - .request(2).expectNext(1, 1) - .request(4).expectNext(1, 1, 1, 1) + .request(2) + .expectNext(1, 1) + .request(4) + .expectNext(1, 1, 1, 1) .expectComplete() } "be able to resume" in { - Source(List(2, 1, 3, 4, 1)).statefulMapConcat(() => { - var prev: Option[Int] = None - x => { - if (x % 3 == 0) throw ex - prev match { - case Some(e) => - prev = Some(x) - (1 to e) map (_ => x) - case None => - prev = Some(x) - List.empty[Int] + Source(List(2, 1, 3, 4, 1)) + .statefulMapConcat(() => { + var prev: Option[Int] = None + x => { + if (x % 3 == 0) throw ex + prev match { + case Some(e) => + prev = Some(x) + (1 to e).map(_ => x) + case None => + prev = Some(x) + List.empty[Int] + } } - } - }).withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) + }) + .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) .runWith(TestSink.probe[Int]) - .request(2).expectNext(1, 1) + .request(2) + .expectNext(1, 1) .requestNext(4) - .request(4).expectNext(1, 1, 1, 1) + .request(4) + .expectNext(1, 1, 1, 1) .expectComplete() } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSupervisionSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSupervisionSpec.scala index 4582b62bd5..c9cc7cb309 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSupervisionSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowSupervisionSpec.scala @@ -52,10 +52,10 @@ class FlowSupervisionSpec extends StreamSpec { } "resume stream when null is emitted" in { - val nullMap = Flow[String].map(elem => if (elem == "b") null else elem) + val nullMap = Flow[String] + .map(elem => if (elem == "b") null else elem) .withAttributes(supervisionStrategy(Supervision.resumingDecider)) - val result = Await.result(Source(List("a", "b", "c")).via(nullMap) - .limit(1000).runWith(Sink.seq), 3.seconds) + val result = Await.result(Source(List("a", "b", "c")).via(nullMap).limit(1000).runWith(Sink.seq), 3.seconds) result should be(List("a", "c")) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala index 4c573d7086..644de1b8e8 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeSpec.scala @@ -16,8 +16,7 @@ import akka.stream.testkit._ class FlowTakeSpec extends StreamSpec with ScriptedTest { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -26,8 +25,11 @@ class FlowTakeSpec extends StreamSpec with ScriptedTest { "A Take" must { "take" in { - def script(d: Int) = Script(TestConfig.RandomTestRange map { n => Seq(n) -> (if (n > d) Nil else Seq(n)) }: _*) - TestConfig.RandomTestRange foreach { _ => + def script(d: Int) = + Script(TestConfig.RandomTestRange.map { n => + Seq(n) -> (if (n > d) Nil else Seq(n)) + }: _*) + 
TestConfig.RandomTestRange.foreach { _ => val d = Math.min(Math.max(random.nextInt(-10, 60), 0), 50) runScript(script(d), settings)(_.take(d)) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeWhileSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeWhileSpec.scala index bc6186703e..481969ac5f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeWhileSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeWhileSpec.scala @@ -22,22 +22,19 @@ class FlowTakeWhileSpec extends StreamSpec { "A TakeWhile" must { "take while predicate is true" in assertAllStagesStopped { - Source(1 to 4).takeWhile(_ < 3).runWith(TestSink.probe[Int]) - .request(3) - .expectNext(1, 2) - .expectComplete() + Source(1 to 4).takeWhile(_ < 3).runWith(TestSink.probe[Int]).request(3).expectNext(1, 2).expectComplete() } "complete the future for an empty stream" in assertAllStagesStopped { - Source.empty[Int].takeWhile(_ < 2).runWith(TestSink.probe[Int]) - .request(1) - .expectComplete() + Source.empty[Int].takeWhile(_ < 2).runWith(TestSink.probe[Int]).request(1).expectComplete() } "continue if error" in assertAllStagesStopped { val testException = new Exception("test") with NoStackTrace - val p = Source(1 to 4).takeWhile(a => if (a == 3) throw testException else true).withAttributes(supervisionStrategy(resumingDecider)) + val p = Source(1 to 4) + .takeWhile(a => if (a == 3) throw testException else true) + .withAttributes(supervisionStrategy(resumingDecider)) .runWith(TestSink.probe[Int]) .request(4) .expectNext(1, 2, 4) @@ -45,7 +42,9 @@ class FlowTakeWhileSpec extends StreamSpec { } "emit the element that caused the predicate to return false and then no more with inclusive set" in assertAllStagesStopped { - Source(1 to 10).takeWhile(_ < 3, true).runWith(TestSink.probe[Int]) + Source(1 to 10) + .takeWhile(_ < 3, true) + .runWith(TestSink.probe[Int]) .request(4) .expectNext(1, 2, 3) .expectComplete() diff 
--git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeWithinSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeWithinSpec.scala index 066db00abe..5debd09f4c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeWithinSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowTakeWithinSpec.scala @@ -24,16 +24,24 @@ class FlowTakeWithinSpec extends StreamSpec { val cSub = c.expectSubscription() cSub.request(100) val demand1 = pSub.expectRequest().toInt - (1 to demand1) foreach { _ => pSub.sendNext(input.next()) } + (1 to demand1).foreach { _ => + pSub.sendNext(input.next()) + } val demand2 = pSub.expectRequest().toInt - (1 to demand2) foreach { _ => pSub.sendNext(input.next()) } + (1 to demand2).foreach { _ => + pSub.sendNext(input.next()) + } val demand3 = pSub.expectRequest().toInt val sentN = demand1 + demand2 - (1 to sentN) foreach { n => c.expectNext(n) } + (1 to sentN).foreach { n => + c.expectNext(n) + } within(2.seconds) { c.expectComplete() } - (1 to demand3) foreach { _ => pSub.sendNext(input.next()) } + (1 to demand3).foreach { _ => + pSub.sendNext(input.next()) + } c.expectNoMsg(200.millis) } @@ -43,7 +51,9 @@ class FlowTakeWithinSpec extends StreamSpec { val cSub = c.expectSubscription() c.expectNoMsg(200.millis) cSub.request(100) - (1 to 3) foreach { n => c.expectNext(n) } + (1 to 3).foreach { n => + c.expectNext(n) + } c.expectComplete() c.expectNoMsg(200.millis) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala index 893e98a9b8..40854e5bf7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowThrottleSpec.scala @@ -30,7 +30,8 @@ class FlowThrottleSpec extends StreamSpec { "Throttle for single cost elements" must { "work for the happy case" in 
assertAllStagesStopped { //Source(1 to 5).throttle(1, 100.millis, 0, Shaping) - Source(1 to 5).throttle(19, 1000.millis, -1, Shaping) + Source(1 to 5) + .throttle(19, 1000.millis, -1, Shaping) .runWith(TestSink.probe[Int]) .request(5) .expectNext(1, 2, 3, 4, 5) @@ -38,7 +39,8 @@ class FlowThrottleSpec extends StreamSpec { } "accept very high rates" in assertAllStagesStopped { - Source(1 to 5).throttle(1, 1.nanos, 0, Shaping) + Source(1 to 5) + .throttle(1, 1.nanos, 0, Shaping) .runWith(TestSink.probe[Int]) .request(5) .expectNext(1, 2, 3, 4, 5) @@ -46,7 +48,8 @@ class FlowThrottleSpec extends StreamSpec { } "accept very low rates" in assertAllStagesStopped { - Source(1 to 5).throttle(1, 100.days, 1, Shaping) + Source(1 to 5) + .throttle(1, 100.days, 1, Shaping) .runWith(TestSink.probe[Int]) .request(5) .expectNext(1) @@ -58,16 +61,10 @@ class FlowThrottleSpec extends StreamSpec { val sharedThrottle = Flow[Int].throttle(1, 1.day, 1, Enforcing) // If there is accidental shared state then we would not be able to pass through the single element - Source.single(1) - .via(sharedThrottle) - .via(sharedThrottle) - .runWith(Sink.seq).futureValue should ===(Seq(1)) + Source.single(1).via(sharedThrottle).via(sharedThrottle).runWith(Sink.seq).futureValue should ===(Seq(1)) // It works with a new stream, too - Source.single(2) - .via(sharedThrottle) - .via(sharedThrottle) - .runWith(Sink.seq).futureValue should ===(Seq(2)) + Source.single(2).via(sharedThrottle).via(sharedThrottle).runWith(Sink.seq).futureValue should ===(Seq(2)) } @@ -113,14 +110,9 @@ class FlowThrottleSpec extends StreamSpec { } "send elements downstream as soon as time comes" in assertAllStagesStopped { - val probe = Source(1 to 10).throttle(2, 750.millis, 0, Shaping).runWith(TestSink.probe[Int]) - .request(5) + val probe = Source(1 to 10).throttle(2, 750.millis, 0, Shaping).runWith(TestSink.probe[Int]).request(5) probe.receiveWithin(900.millis) should be(Seq(1, 2)) - probe.expectNoMsg(150.millis) - 
.expectNext(3) - .expectNoMsg(150.millis) - .expectNext(4) - .cancel() + probe.expectNoMsg(150.millis).expectNext(3).expectNoMsg(150.millis).expectNext(4).cancel() } "burst according to its maximum if enough time passed" in assertAllStagesStopped { @@ -130,7 +122,7 @@ class FlowThrottleSpec extends StreamSpec { // Exhaust bucket first downstream.request(5) - (1 to 5) foreach upstream.sendNext + (1 to 5).foreach(upstream.sendNext) downstream.receiveWithin(300.millis, 5) should be(1 to 5) downstream.request(5) @@ -147,7 +139,7 @@ class FlowThrottleSpec extends StreamSpec { // Exhaust bucket first downstream.request(5) - (1 to 5) foreach upstream.sendNext + (1 to 5).foreach(upstream.sendNext) downstream.receiveWithin(300.millis, 5) should be(1 to 5) downstream.request(1) @@ -162,19 +154,16 @@ class FlowThrottleSpec extends StreamSpec { } "throw exception when exceeding throughput in enforced mode" in assertAllStagesStopped { - Await.result( - Source(1 to 5).throttle(1, 200.millis, 5, Enforcing).runWith(Sink.seq), - 2.seconds) should ===(1 to 5) // Burst is 5 so this will not fail + Await.result(Source(1 to 5).throttle(1, 200.millis, 5, Enforcing).runWith(Sink.seq), 2.seconds) should ===(1 to 5) // Burst is 5 so this will not fail an[RateExceededException] shouldBe thrownBy { - Await.result( - Source(1 to 6).throttle(1, 200.millis, 5, Enforcing).runWith(Sink.ignore), - 2.seconds) + Await.result(Source(1 to 6).throttle(1, 200.millis, 5, Enforcing).runWith(Sink.ignore), 2.seconds) } } "properly combine shape and throttle modes" in assertAllStagesStopped { - Source(1 to 5).throttle(1, 100.millis, 5, Shaping) + Source(1 to 5) + .throttle(1, 100.millis, 5, Shaping) .throttle(1, 100.millis, 5, Enforcing) .runWith(TestSink.probe[Int]) .request(5) @@ -185,7 +174,8 @@ class FlowThrottleSpec extends StreamSpec { "Throttle for various cost elements" must { "work for happy case" in assertAllStagesStopped { - Source(1 to 5).throttle(1, 100.millis, 0, (_) => 1, Shaping) + Source(1 
to 5) + .throttle(1, 100.millis, 0, (_) => 1, Shaping) .runWith(TestSink.probe[Int]) .request(5) .expectNext(1, 2, 3, 4, 5) @@ -194,7 +184,8 @@ class FlowThrottleSpec extends StreamSpec { "emit elements according to cost" in assertAllStagesStopped { val list = (1 to 4).map(_ * 2).map(genByteString) - Source(list).throttle(2, 200.millis, 0, _.length, Shaping) + Source(list) + .throttle(2, 200.millis, 0, _.length, Shaping) .runWith(TestSink.probe[ByteString]) .request(4) .expectNext(list(0)) @@ -210,7 +201,10 @@ class FlowThrottleSpec extends StreamSpec { "not send downstream if upstream does not emit element" in assertAllStagesStopped { val upstream = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[Int]() - Source.fromPublisher(upstream).throttle(2, 300.millis, 0, identity, Shaping).runWith(Sink.fromSubscriber(downstream)) + Source + .fromPublisher(upstream) + .throttle(2, 300.millis, 0, identity, Shaping) + .runWith(Sink.fromSubscriber(downstream)) downstream.request(2) upstream.sendNext(1) @@ -230,24 +224,22 @@ class FlowThrottleSpec extends StreamSpec { } "send elements downstream as soon as time comes" in assertAllStagesStopped { - val probe = Source(1 to 10).throttle(4, 500.millis, 0, _ => 2, Shaping).runWith(TestSink.probe[Int]) - .request(5) + val probe = Source(1 to 10).throttle(4, 500.millis, 0, _ => 2, Shaping).runWith(TestSink.probe[Int]).request(5) probe.receiveWithin(600.millis) should be(Seq(1, 2)) - probe.expectNoMsg(100.millis) - .expectNext(3) - .expectNoMsg(100.millis) - .expectNext(4) - .cancel() + probe.expectNoMsg(100.millis).expectNext(3).expectNoMsg(100.millis).expectNext(4).cancel() } "burst according to its maximum if enough time passed" in assertAllStagesStopped { val upstream = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[Int]() - Source.fromPublisher(upstream).throttle(2, 400.millis, 5, (_) => 1, Shaping).runWith(Sink.fromSubscriber(downstream)) + Source + .fromPublisher(upstream) + .throttle(2, 
400.millis, 5, (_) => 1, Shaping) + .runWith(Sink.fromSubscriber(downstream)) // Exhaust bucket first downstream.request(5) - (1 to 5) foreach upstream.sendNext + (1 to 5).foreach(upstream.sendNext) downstream.receiveWithin(300.millis, 5) should be(1 to 5) downstream.request(1) @@ -264,11 +256,14 @@ class FlowThrottleSpec extends StreamSpec { "burst some elements if have enough time" in assertAllStagesStopped { val upstream = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[Int]() - Source.fromPublisher(upstream).throttle(2, 400.millis, 5, (e) => if (e < 9) 1 else 20, Shaping).runWith(Sink.fromSubscriber(downstream)) + Source + .fromPublisher(upstream) + .throttle(2, 400.millis, 5, (e) => if (e < 9) 1 else 20, Shaping) + .runWith(Sink.fromSubscriber(downstream)) // Exhaust bucket first downstream.request(5) - (1 to 5) foreach upstream.sendNext + (1 to 5).foreach(upstream.sendNext) downstream.receiveWithin(300.millis, 5) should be(1 to 5) downstream.request(1) @@ -283,19 +278,17 @@ class FlowThrottleSpec extends StreamSpec { } "throw exception when exceeding throughput in enforced mode" in assertAllStagesStopped { - Await.result( - Source(1 to 4).throttle(2, 200.millis, 10, identity, Enforcing).runWith(Sink.seq), - 2.seconds) should ===(1 to 4) // Burst is 10 so this will not fail + Await.result(Source(1 to 4).throttle(2, 200.millis, 10, identity, Enforcing).runWith(Sink.seq), 2.seconds) should ===( + 1 to 4) // Burst is 10 so this will not fail an[RateExceededException] shouldBe thrownBy { - Await.result( - Source(1 to 6).throttle(2, 200.millis, 0, identity, Enforcing).runWith(Sink.ignore), - 2.seconds) + Await.result(Source(1 to 6).throttle(2, 200.millis, 0, identity, Enforcing).runWith(Sink.ignore), 2.seconds) } } "properly combine shape and enforce modes" in assertAllStagesStopped { - Source(1 to 5).throttle(2, 200.millis, 0, identity, Shaping) + Source(1 to 5) + .throttle(2, 200.millis, 0, identity, Shaping) .throttle(1, 100.millis, 5, 
Enforcing) .runWith(TestSink.probe[Int]) .request(5) @@ -305,7 +298,8 @@ class FlowThrottleSpec extends StreamSpec { "handle rate calculation function exception" in assertAllStagesStopped { val ex = new RuntimeException with NoStackTrace - Source(1 to 5).throttle(2, 200.millis, 0, (_) => { throw ex }, Shaping) + Source(1 to 5) + .throttle(2, 200.millis, 0, (_) => { throw ex }, Shaping) .throttle(1, 100.millis, 5, Enforcing) .runWith(TestSink.probe[Int]) .request(5) @@ -318,7 +312,8 @@ class FlowThrottleSpec extends StreamSpec { val timestamp1 = new AtomicLong(System.nanoTime()) val expectedMinRate = new AtomicInteger val expectedMaxRate = new AtomicInteger - val (ref, done) = Source.actorRef[Int](bufferSize = 100000, OverflowStrategy.fail) + val (ref, done) = Source + .actorRef[Int](bufferSize = 100000, OverflowStrategy.fail) .throttle(300, 1000.millis) .toMat(Sink.foreach { elem => val now = System.nanoTime() @@ -326,15 +321,16 @@ class FlowThrottleSpec extends StreamSpec { val duration1Millis = (now - timestamp1.get) / 1000 / 1000 if (duration1Millis >= 500) { val rate = n1 * 1000.0 / duration1Millis - info(f"burst rate after ${(now - startTime).nanos.toMillis} ms at element $elem: $rate%2.2f elements/s ($n1)") + info( + f"burst rate after ${(now - startTime).nanos.toMillis} ms at element $elem: $rate%2.2f elements/s ($n1)") timestamp1.set(now) counter1.set(0) if (rate < expectedMinRate.get) throw new RuntimeException(s"Too low rate, got $rate, expected min ${expectedMinRate.get}, " + - s"after ${(now - startTime).nanos.toMillis} ms at element $elem") + s"after ${(now - startTime).nanos.toMillis} ms at element $elem") if (rate > expectedMaxRate.get) throw new RuntimeException(s"Too high rate, got $rate, expected max ${expectedMaxRate.get}, " + - s"after ${(now - startTime).nanos.toMillis} ms at element $elem") + s"after ${(now - startTime).nanos.toMillis} ms at element $elem") } })(Keep.both) .run() diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchSpec.scala index 2a9c590261..47a2823558 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchSpec.scala @@ -33,7 +33,8 @@ class FlowWatchSpec extends StreamSpec { implicit val timeout = akka.util.Timeout(10.seconds) - val replyOnInts = system.actorOf(Props(classOf[Replier]).withDispatcher("akka.test.stream-dispatcher"), "replyOnInts") + val replyOnInts = + system.actorOf(Props(classOf[Replier]).withDispatcher("akka.test.stream-dispatcher"), "replyOnInts") val dontReply = system.actorOf(TestActors.blackholeProps.withDispatcher("akka.test.stream-dispatcher"), "dontReply") @@ -58,7 +59,8 @@ class FlowWatchSpec extends StreamSpec { intercept[RuntimeException] { r ! PoisonPill Await.result(done, remainingOrDefault) - }.getMessage should startWith("Actor watched by [Watch] has terminated! Was: Actor[akka://FlowWatchSpec/user/wanna-fail#") + }.getMessage should startWith( + "Actor watched by [Watch] has terminated! 
Was: Actor[akka://FlowWatchSpec/user/wanna-fail#") } "should handle cancel properly" in assertAllStagesStopped { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala index fc9dfb29e1..d1b87285e8 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWatchTerminationSpec.scala @@ -52,7 +52,12 @@ class FlowWatchTerminationSpec extends StreamSpec { "complete future for graph" in assertAllStagesStopped { implicit val ec = system.dispatcher - val ((sourceProbe, future), sinkProbe) = TestSource.probe[Int].watchTermination()(Keep.both).concat(Source(2 to 5)).toMat(TestSink.probe[Int])(Keep.both).run() + val ((sourceProbe, future), sinkProbe) = TestSource + .probe[Int] + .watchTermination()(Keep.both) + .concat(Source(2 to 5)) + .toMat(TestSink.probe[Int])(Keep.both) + .run() future.pipeTo(testActor) sinkProbe.request(5) sourceProbe.sendNext(1) @@ -62,8 +67,7 @@ class FlowWatchTerminationSpec extends StreamSpec { sourceProbe.sendComplete() expectMsg(Done) - sinkProbe.expectNextN(2 to 5) - .expectComplete() + sinkProbe.expectNextN(2 to 5).expectComplete() } "fail future when stream abruptly terminated" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWireTapSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWireTapSpec.scala index b9af41c36c..30f9b327a8 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWireTapSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWireTapSpec.scala @@ -20,20 +20,22 @@ class FlowWireTapSpec extends StreamSpec("akka.stream.materializer.debug.fuzzing "call the procedure for each element" in assertAllStagesStopped { Source(1 to 100).wireTap(testActor ! 
_).runWith(Sink.ignore).futureValue - 1 to 100 foreach { i => expectMsg(i) } + (1 to 100).foreach { i => + expectMsg(i) + } } "complete the future for an empty stream" in assertAllStagesStopped { - Source.empty[String].wireTap(testActor ! _).runWith(Sink.ignore) foreach { - _ => testActor ! "done" + Source.empty[String].wireTap(testActor ! _).runWith(Sink.ignore).foreach { _ => + testActor ! "done" } expectMsg("done") } "yield the first error" in assertAllStagesStopped { val p = TestPublisher.manualProbe[Int]() - Source.fromPublisher(p).wireTap(testActor ! _).runWith(Sink.ignore).failed foreach { - ex => testActor ! ex + Source.fromPublisher(p).wireTap(testActor ! _).runWith(Sink.ignore).failed.foreach { ex => + testActor ! ex } val proc = p.expectSubscription() proc.expectRequest() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala index 854706c1ef..6161775b6c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextLogSpec.scala @@ -6,7 +6,7 @@ package akka.stream.scaladsl import akka.event.Logging import akka.stream.Attributes.LogLevels -import akka.stream.testkit.{ StreamSpec, ScriptedTest } +import akka.stream.testkit.{ ScriptedTest, StreamSpec } import akka.stream._ import akka.testkit.TestProbe @@ -46,21 +46,15 @@ class FlowWithContextLogSpec extends StreamSpec(""" "allow extracting value to be logged" in { val logging = FlowWithContext[Message, Long].log("my-log2", m => m.data) - Source(List(Message("a", 1L))) - .asSourceWithContext(m => m.offset) - .via(logging) - .asSource - .runWith(Sink.ignore) + Source(List(Message("a", 1L))).asSourceWithContext(m => m.offset).via(logging).asSource.runWith(Sink.ignore) logProbe.expectMsg(Logging.Debug(LogSrc, LogClazz, "[my-log2] Element: a")) 
logProbe.expectMsg(Logging.Debug(LogSrc, LogClazz, "[my-log2] Upstream finished.")) } "allow disabling element logging" in { - val disableElementLogging = Attributes.logLevels( - onElement = LogLevels.Off, - onFinish = Logging.DebugLevel, - onFailure = Logging.DebugLevel) + val disableElementLogging = + Attributes.logLevels(onElement = LogLevels.Off, onFinish = Logging.DebugLevel, onFailure = Logging.DebugLevel) val logging = FlowWithContext[Message, Long].log("my-log3").withAttributes(disableElementLogging) Source(List(Message("a", 1L), Message("b", 2L))) @@ -100,10 +94,8 @@ class FlowWithContextLogSpec extends StreamSpec(""" } "allow disabling element logging" in { - val disableElementLogging = Attributes.logLevels( - onElement = LogLevels.Off, - onFinish = Logging.DebugLevel, - onFailure = Logging.DebugLevel) + val disableElementLogging = + Attributes.logLevels(onElement = LogLevels.Off, onFinish = Logging.DebugLevel, onFailure = Logging.DebugLevel) Source(List(Message("a", 1L), Message("b", 2L))) .asSourceWithContext(m => m.offset) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala index 368a44d398..1a88530c98 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowWithContextSpec.scala @@ -17,7 +17,9 @@ class FlowWithContextSpec extends StreamSpec { "get created from Flow.asFlowWithContext" in { val flow = Flow[Message].map { case m => m.copy(data = m.data + "z") } - val flowWithContext = flow.asFlowWithContext((m: Message, o: Long) => Message(m.data, o)) { m => m.offset } + val flowWithContext = flow.asFlowWithContext((m: Message, o: Long) => Message(m.data, o)) { m => + m.offset + } val msg = Message("a", 1L) Source(Vector(msg)) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala index bccdb7625b..6dc412ca9c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithIndexSpec.scala @@ -15,8 +15,7 @@ import akka.stream.testkit.{ StreamSpec, TestSubscriber } class FlowZipWithIndexSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -41,9 +40,7 @@ class FlowZipWithIndexSpec extends StreamSpec { "work in fruit example" in { //#zip-with-index - Source(List("apple", "orange", "banana")) - .zipWithIndex - .runWith(Sink.foreach(println)) + Source(List("apple", "orange", "banana")).zipWithIndex.runWith(Sink.foreach(println)) // this will print ('apple', 0), ('orange', 1), ('banana', 2) //#zip-with-index } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala index da0ce10359..6266f1d347 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FlowZipWithSpec.scala @@ -102,10 +102,11 @@ class FlowZipWithSpec extends BaseTwoStreamsSetup { val sourceCount = Source(List("one", "two", "three")) val sourceFruits = Source(List("apple", "orange", "banana")) - sourceCount.zipWith(sourceFruits) { - (countStr, fruitName) => + sourceCount + .zipWith(sourceFruits) { (countStr, fruitName) => s"$countStr $fruitName" - }.runWith(Sink.foreach(println)) + } + .runWith(Sink.foreach(println)) // this will print 'one apple', 'two orange', 'three banana' //#zip-with } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala index e885ffe9b4..771cfb73ec 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FramingSpec.scala @@ -81,8 +81,7 @@ class FramingSpec extends StreamSpec { // Helper to simplify testing def simpleLines(delimiter: String, maximumBytes: Int, allowTruncation: Boolean = true) = - Framing.delimiter(ByteString(delimiter), maximumBytes, allowTruncation).map(_.utf8String) - .named("lineFraming") + Framing.delimiter(ByteString(delimiter), maximumBytes, allowTruncation).map(_.utf8String).named("lineFraming") def completeTestSequences(delimiter: ByteString): immutable.Iterable[ByteString] = for (prefix <- delimiter.indices; s <- baseTestSequences) @@ -91,11 +90,8 @@ class FramingSpec extends StreamSpec { "work with various delimiters and test sequences" in { for (delimiter <- delimiterBytes; _ <- 1 to 5) { val testSequence = completeTestSequences(delimiter) - val f = Source(testSequence) - .map(_ ++ delimiter) - .via(rechunk) - .via(Framing.delimiter(delimiter, 256)) - .runWith(Sink.seq) + val f = + Source(testSequence).map(_ ++ delimiter).via(rechunk).via(Framing.delimiter(delimiter, 256)).runWith(Sink.seq) f.futureValue should ===(testSequence) } @@ -103,39 +99,49 @@ class FramingSpec extends StreamSpec { "Respect maximum line settings" in { // The buffer will contain more than 1 bytes, but the individual frames are less - Source.single(ByteString("a\nb\nc\nd\n")) + Source + .single(ByteString("a\nb\nc\nd\n")) .via(simpleLines("\n", 1)) .limit(100) - .runWith(Sink.seq).futureValue should ===(List("a", "b", "c", "d")) + .runWith(Sink.seq) + .futureValue should ===(List("a", "b", "c", "d")) - Source.single(ByteString("ab\n")) + Source + .single(ByteString("ab\n")) .via(simpleLines("\n", 1)) .limit(100) - .runWith(Sink.seq).failed.futureValue shouldBe a[FramingException] + .runWith(Sink.seq) + .failed + 
.futureValue shouldBe a[FramingException] - Source.single(ByteString("aaa")) + Source + .single(ByteString("aaa")) .via(simpleLines("\n", 2)) .limit(100) - .runWith(Sink.seq).failed.futureValue shouldBe a[FramingException] + .runWith(Sink.seq) + .failed + .futureValue shouldBe a[FramingException] } "work with empty streams" in { - Source.empty.via(simpleLines("\n", 256)) - .runFold(Vector.empty[String])(_ :+ _) - .futureValue should ===(Vector.empty) + Source.empty.via(simpleLines("\n", 256)).runFold(Vector.empty[String])(_ :+ _).futureValue should ===( + Vector.empty) } "report truncated frames" in { - Source.single(ByteString("I have no end")) + Source + .single(ByteString("I have no end")) .via(simpleLines("\n", 256, allowTruncation = false)) .grouped(1000) .runWith(Sink.head) - .failed.futureValue shouldBe a[FramingException] + .failed + .futureValue shouldBe a[FramingException] } "allow truncated frames if configured so" in { - Source.single(ByteString("I have no end")) + Source + .single(ByteString("I have no end")) .via(simpleLines("\n", 256, allowTruncation = true)) .grouped(1000) .runWith(Sink.head) @@ -154,16 +160,20 @@ class FramingSpec extends StreamSpec { val fieldOffsets = List(0, 1, 2, 3, 15, 16, 31, 32, 44, 107) def encode(payload: ByteString, fieldOffset: Int, fieldLength: Int, byteOrder: ByteOrder): ByteString = { - encodeComplexFrame(payload, fieldOffset, fieldLength, byteOrder, ByteString(new Array[Byte](fieldOffset)), ByteString.empty) + encodeComplexFrame(payload, + fieldOffset, + fieldLength, + byteOrder, + ByteString(new Array[Byte](fieldOffset)), + ByteString.empty) } - def encodeComplexFrame( - payload: ByteString, - fieldOffset: Int, - fieldLength: Int, - byteOrder: ByteOrder, - offset: ByteString, - tail: ByteString): ByteString = { + def encodeComplexFrame(payload: ByteString, + fieldOffset: Int, + fieldLength: Int, + byteOrder: ByteOrder, + offset: ByteString, + tail: ByteString): ByteString = { val header = { val h = (new 
ByteStringBuilder).putInt(payload.size)(byteOrder).result() byteOrder match { @@ -218,7 +228,12 @@ class FramingSpec extends StreamSpec { val payload = referenceChunk.take(length) val offsetBytes = offset() val tailBytes = if (offsetBytes.length > 0) new Array[Byte](offsetBytes(0)) else Array.empty[Byte] - encodeComplexFrame(payload, fieldOffset, fieldLength, byteOrder, ByteString(offsetBytes), ByteString(tailBytes)) + encodeComplexFrame(payload, + fieldOffset, + fieldLength, + byteOrder, + ByteString(offsetBytes), + ByteString(tailBytes)) } Source(encodedFrames) @@ -232,7 +247,8 @@ class FramingSpec extends StreamSpec { } "work with empty streams" in { - Source.empty.via(Framing.lengthField(4, 0, Int.MaxValue, ByteOrder.BIG_ENDIAN)) + Source.empty + .via(Framing.lengthField(4, 0, Int.MaxValue, ByteOrder.BIG_ENDIAN)) .runFold(Vector.empty[ByteString])(_ :+ _) .futureValue should ===(Vector.empty) } @@ -240,14 +256,13 @@ class FramingSpec extends StreamSpec { "work with grouped frames" in { val groupSize = 5 val single = encode(referenceChunk.take(100), 0, 1, ByteOrder.BIG_ENDIAN) - val groupedFrames = (1 to groupSize) - .map(_ => single) - .fold(ByteString.empty)((result, bs) => result ++ bs) + val groupedFrames = (1 to groupSize).map(_ => single).fold(ByteString.empty)((result, bs) => result ++ bs) val publisher = TestPublisher.probe[ByteString]() val substriber = TestSubscriber.manualProbe[ByteString]() - Source.fromPublisher(publisher) + Source + .fromPublisher(publisher) .via(Framing.lengthField(1, 0, Int.MaxValue, ByteOrder.BIG_ENDIAN)) .to(Sink.fromSubscriber(substriber)) .run() @@ -265,15 +280,19 @@ class FramingSpec extends StreamSpec { } "report oversized frames" in { - Source.single(encode(referenceChunk.take(100), 0, 1, ByteOrder.BIG_ENDIAN)) + Source + .single(encode(referenceChunk.take(100), 0, 1, ByteOrder.BIG_ENDIAN)) .via(Framing.lengthField(1, 0, 99, ByteOrder.BIG_ENDIAN)) .runFold(Vector.empty[ByteString])(_ :+ _) - .failed.futureValue shouldBe 
a[FramingException] + .failed + .futureValue shouldBe a[FramingException] - Source.single(encode(referenceChunk.take(100), 49, 1, ByteOrder.BIG_ENDIAN)) + Source + .single(encode(referenceChunk.take(100), 49, 1, ByteOrder.BIG_ENDIAN)) .via(Framing.lengthField(1, 0, 100, ByteOrder.BIG_ENDIAN)) .runFold(Vector.empty[ByteString])(_ :+ _) - .failed.futureValue shouldBe a[FramingException] + .failed + .futureValue shouldBe a[FramingException] } "report truncated frames" taggedAs LongRunningTest in { @@ -293,21 +312,22 @@ class FramingSpec extends StreamSpec { .via(Framing.lengthField(fieldLength, fieldOffset, Int.MaxValue, byteOrder)) .grouped(10000) .runWith(Sink.head) - .failed.futureValue shouldBe a[FramingException] + .failed + .futureValue shouldBe a[FramingException] } } "support simple framing adapter" in { val rechunkBidi = BidiFlow.fromFlowsMat(rechunk, rechunk)(Keep.left) val codecFlow = - Framing.simpleFramingProtocol(1024) + Framing + .simpleFramingProtocol(1024) .atop(rechunkBidi) .atop(Framing.simpleFramingProtocol(1024).reversed) .join(Flow[ByteString]) // Loopback val testMessages = List.fill(100)(referenceChunk.take(Random.nextInt(1024))) - Source(testMessages).via(codecFlow).limit(1000).runWith(Sink.seq) - .futureValue should ===(testMessages) + Source(testMessages).via(codecFlow).limit(1000).runWith(Sink.seq).futureValue should ===(testMessages) } "fail the stage on negative length field values (#22367)" in { @@ -319,10 +339,7 @@ class FramingSpec extends StreamSpec { val bs = ByteString.newBuilder.putInt(-4).result() val res = - Source - .single(bs) - .via(Flow[ByteString].via(Framing.lengthField(4, 0, 1000))) - .runWith(Sink.seq) + Source.single(bs).via(Flow[ByteString].via(Framing.lengthField(4, 0, 1000))).runWith(Sink.seq) val ex = res.failed.futureValue ex shouldBe a[FramingException] @@ -357,9 +374,7 @@ class FramingSpec extends StreamSpec { val bs = Vector(emptyFrame, encodedPayload, emptyFrame) val res = - Source(bs) - 
.via(Flow[ByteString].via(Framing.lengthField(4, 0, 1000))) - .runWith(Sink.seq) + Source(bs).via(Flow[ByteString].via(Framing.lengthField(4, 0, 1000))).runWith(Sink.seq) res.futureValue should equal(Seq(emptyFrame, encodedPayload, emptyFrame)) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FutureFlattenSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FutureFlattenSourceSpec.scala index 4f93671882..42de445607 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FutureFlattenSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/FutureFlattenSourceSpec.scala @@ -27,9 +27,7 @@ class FutureFlattenSourceSpec extends StreamSpec { "emit the elements of the already successful future source" in assertAllStagesStopped { val (sourceMatVal, sinkMatVal) = - Source.fromFutureSource(Future.successful(underlying)) - .toMat(Sink.seq)(Keep.both) - .run() + Source.fromFutureSource(Future.successful(underlying)).toMat(Sink.seq)(Keep.both).run() // should complete as soon as inner source has been materialized sourceMatVal.futureValue should ===("foo") @@ -39,9 +37,7 @@ class FutureFlattenSourceSpec extends StreamSpec { "emit no elements before the future of source successful" in assertAllStagesStopped { val c = TestSubscriber.manualProbe[Int]() val sourcePromise = Promise[Source[Int, String]]() - val p = Source.fromFutureSource(sourcePromise.future) - .runWith(Sink.asPublisher(true)) - .subscribe(c) + val p = Source.fromFutureSource(sourcePromise.future).runWith(Sink.asPublisher(true)).subscribe(c) val sub = c.expectSubscription() import scala.concurrent.duration._ c.expectNoMsg(100.millis) @@ -58,9 +54,7 @@ class FutureFlattenSourceSpec extends StreamSpec { val sourcePromise = Promise[Source[Int, String]]() val (sourceMatVal, sinkMatVal) = - Source.fromFutureSource(sourcePromise.future) - .toMat(Sink.seq)(Keep.both) - .run() + Source.fromFutureSource(sourcePromise.future).toMat(Sink.seq)(Keep.both).run() 
sourcePromise.success(underlying) // should complete as soon as inner source has been materialized sourceMatVal.futureValue should ===("foo") @@ -69,10 +63,11 @@ class FutureFlattenSourceSpec extends StreamSpec { "emit the elements from a source in a completion stage" in assertAllStagesStopped { val (sourceMatVal, sinkMatVal) = - Source.fromSourceCompletionStage( - // can't be inferred - CompletableFuture.completedFuture[Graph[SourceShape[Int], String]](underlying) - ).toMat(Sink.seq)(Keep.both) + Source + .fromSourceCompletionStage( + // can't be inferred + CompletableFuture.completedFuture[Graph[SourceShape[Int], String]](underlying)) + .toMat(Sink.seq)(Keep.both) .run() sourceMatVal.toCompletableFuture.get(remainingOrDefault.toMillis, TimeUnit.MILLISECONDS) should ===("foo") @@ -84,9 +79,7 @@ class FutureFlattenSourceSpec extends StreamSpec { val probe = TestSubscriber.probe[Int]() val sourceMatVal = - Source.fromFutureSource(sourcePromise.future) - .toMat(Sink.fromSubscriber(probe))(Keep.left) - .run() + Source.fromFutureSource(sourcePromise.future).toMat(Sink.fromSubscriber(probe))(Keep.left).run() // wait for cancellation to occur probe.ensureSubscription() @@ -116,7 +109,8 @@ class FutureFlattenSourceSpec extends StreamSpec { val sourcePromise = Promise[Source[Int, String]]() val materializationLatch = TestLatch(1) val (sourceMatVal, sinkMatVal) = - Source.fromFutureSource(sourcePromise.future) + Source + .fromFutureSource(sourcePromise.future) .mapMaterializedValue { value => materializationLatch.countDown() value @@ -138,9 +132,7 @@ class FutureFlattenSourceSpec extends StreamSpec { val sourcePromise = Promise[Source[Int, String]]() val testProbe = TestSubscriber.probe[Int]() val sourceMatVal = - Source.fromFutureSource(sourcePromise.future) - .to(Sink.fromSubscriber(testProbe)) - .run() + Source.fromFutureSource(sourcePromise.future).to(Sink.fromSubscriber(testProbe)).run() testProbe.expectSubscription() sourcePromise.failure(failure) @@ -154,9 +146,7 @@ 
class FutureFlattenSourceSpec extends StreamSpec { val sourcePromise = Promise[Source[Int, String]]() - val matVal = Source.fromFutureSource(sourcePromise.future) - .to(Sink.fromSubscriber(subscriber)) - .run() + val matVal = Source.fromFutureSource(sourcePromise.future).to(Sink.fromSubscriber(subscriber)).run() subscriber.ensureSubscription() @@ -181,9 +171,7 @@ class FutureFlattenSourceSpec extends StreamSpec { val sourcePromise = Promise[Source[Int, String]]() - val matVal = Source.fromFutureSource(sourcePromise.future) - .to(Sink.fromSubscriber(subscriber)) - .run() + val matVal = Source.fromFutureSource(sourcePromise.future).to(Sink.fromSubscriber(subscriber)).run() subscriber.ensureSubscription() @@ -210,9 +198,7 @@ class FutureFlattenSourceSpec extends StreamSpec { "fail when the future source materialization fails" in assertAllStagesStopped { val inner = Future.successful(Source.fromGraph(new FailingMatGraphStage)) val (innerSourceMat: Future[String], outerSinkMat: Future[Seq[Int]]) = - Source.fromFutureSource(inner) - .toMat(Sink.seq)(Keep.both) - .run() + Source.fromFutureSource(inner).toMat(Sink.seq)(Keep.both).run() outerSinkMat.failed.futureValue should ===(TE("INNER_FAILED")) innerSourceMat.failed.futureValue should ===(TE("INNER_FAILED")) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala index 991804616c..7bc003ecd2 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBackedFlowSpec.scala @@ -40,8 +40,7 @@ class GraphFlowSpec extends StreamSpec { import GraphFlowSpec._ - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ 
-76,7 +75,8 @@ class GraphFlowSpec extends StreamSpec { "be transformable with a Pipe" in { val probe = TestSubscriber.manualProbe[Int]() - val flow = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b => partial => FlowShape(partial.in, partial.out) + val flow = Flow.fromGraph(GraphDSL.create(partialGraph) { implicit b => partial => + FlowShape(partial.in, partial.out) }) source1.via(flow).map(_.toInt).to(Sink.fromSubscriber(probe)).run() @@ -103,14 +103,17 @@ class GraphFlowSpec extends StreamSpec { "be reusable multiple times" in { val probe = TestSubscriber.manualProbe[Int]() - val flow = Flow.fromGraph(GraphDSL.create(Flow[Int].map(_ * 2)) { implicit b => importFlow => FlowShape(importFlow.in, importFlow.out) + val flow = Flow.fromGraph(GraphDSL.create(Flow[Int].map(_ * 2)) { implicit b => importFlow => + FlowShape(importFlow.in, importFlow.out) }) - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ - Source(1 to 5) ~> flow ~> flow ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + import GraphDSL.Implicits._ + Source(1 to 5) ~> flow ~> flow ~> Sink.fromSubscriber(probe) + ClosedShape + }) + .run() validateProbe(probe, 5, Set(4, 8, 12, 16, 20)) } @@ -181,14 +184,16 @@ class GraphFlowSpec extends StreamSpec { SourceShape(s.out.map(_ * 2).outlet) }) - RunnableGraph.fromGraph(GraphDSL.create(source, source)(Keep.both) { implicit b => (s1, s2) => - import GraphDSL.Implicits._ - val merge = b.add(Merge[Int](2)) - s1.out ~> merge.in(0) - merge.out ~> Sink.fromSubscriber(probe) - s2.out.map(_ * 10) ~> merge.in(1) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create(source, source)(Keep.both) { implicit b => (s1, s2) => + import GraphDSL.Implicits._ + val merge = b.add(Merge[Int](2)) + s1.out ~> merge.in(0) + merge.out ~> Sink.fromSubscriber(probe) + s2.out.map(_ * 10) ~> merge.in(1) + ClosedShape + }) + .run() validateProbe(probe, 10, 
Set(2, 4, 6, 8, 10, 20, 40, 60, 80, 100)) } @@ -213,7 +218,8 @@ class GraphFlowSpec extends StreamSpec { val probe = TestSubscriber.manualProbe[Int]() val pubSink = Sink.asPublisher[Int](false) - val sink = Sink.fromGraph(GraphDSL.create(pubSink) { implicit b => p => SinkShape(p.in) + val sink = Sink.fromGraph(GraphDSL.create(pubSink) { implicit b => p => + SinkShape(p.in) }) val mm = source1.runWith(sink) @@ -225,11 +231,12 @@ class GraphFlowSpec extends StreamSpec { "be transformable with a Pipe" in { val probe = TestSubscriber.manualProbe[Int]() - val sink = Sink.fromGraph(GraphDSL.create(partialGraph, Flow[String].map(_.toInt))(Keep.both) { implicit b => (partial, flow) => - import GraphDSL.Implicits._ - flow.out ~> partial.in - partial.out.map(_.toInt) ~> Sink.fromSubscriber(probe) - SinkShape(flow.in) + val sink = Sink.fromGraph(GraphDSL.create(partialGraph, Flow[String].map(_.toInt))(Keep.both) { + implicit b => (partial, flow) => + import GraphDSL.Implicits._ + flow.out ~> partial.in + partial.out.map(_.toInt) ~> Sink.fromSubscriber(probe) + SinkShape(flow.in) }) val iSink = Flow[Int].map(_.toString).to(sink) @@ -269,24 +276,28 @@ class GraphFlowSpec extends StreamSpec { FlowShape(partial.in, partial.out.map(_.toInt).outlet) }) - val source = Source.fromGraph(GraphDSL.create(Flow[Int].map(_.toString), inSource)(Keep.right) { implicit b => (flow, src) => - import GraphDSL.Implicits._ - src.out ~> flow.in - SourceShape(flow.out) + val source = Source.fromGraph(GraphDSL.create(Flow[Int].map(_.toString), inSource)(Keep.right) { + implicit b => (flow, src) => + import GraphDSL.Implicits._ + src.out ~> flow.in + SourceShape(flow.out) }) - val sink = Sink.fromGraph(GraphDSL.create(Flow[String].map(_.toInt), outSink)(Keep.right) { implicit b => (flow, snk) => - import GraphDSL.Implicits._ - flow.out ~> snk.in - SinkShape(flow.in) + val sink = Sink.fromGraph(GraphDSL.create(Flow[String].map(_.toInt), outSink)(Keep.right) { + implicit b => (flow, snk) => + import 
GraphDSL.Implicits._ + flow.out ~> snk.in + SinkShape(flow.in) }) - val (m1, m2, m3) = RunnableGraph.fromGraph(GraphDSL.create(source, flow, sink)(Tuple3.apply) { implicit b => (src, f, snk) => - import GraphDSL.Implicits._ - src.out.map(_.toInt) ~> f.in - f.out.map(_.toString) ~> snk.in - ClosedShape - }).run() + val (m1, m2, m3) = RunnableGraph + .fromGraph(GraphDSL.create(source, flow, sink)(Tuple3.apply) { implicit b => (src, f, snk) => + import GraphDSL.Implicits._ + src.out.map(_.toInt) ~> f.in + f.out.map(_.toString) ~> snk.in + ClosedShape + }) + .run() val subscriber = m1 val publisher = m3 @@ -309,11 +320,13 @@ class GraphFlowSpec extends StreamSpec { SinkShape(snk.in) }) - val (m1, m2) = RunnableGraph.fromGraph(GraphDSL.create(source, sink)(Keep.both) { implicit b => (src, snk) => - import GraphDSL.Implicits._ - src.out ~> snk.in - ClosedShape - }).run() + val (m1, m2) = RunnableGraph + .fromGraph(GraphDSL.create(source, sink)(Keep.both) { implicit b => (src, snk) => + import GraphDSL.Implicits._ + src.out ~> snk.in + ClosedShape + }) + .run() val subscriber = m1 val publisher = m2 diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala index 3a7e5759e9..be3c510f69 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBalanceSpec.scala @@ -14,8 +14,7 @@ import akka.stream.testkit.scaladsl.StreamTestKit._ class GraphBalanceSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -26,13 +25,15 @@ class GraphBalanceSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[Int]() 
- RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val balance = b.add(Balance[Int](2)) - Source(List(1, 2, 3)) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val balance = b.add(Balance[Int](2)) + Source(List(1, 2, 3)) ~> balance.in + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() val sub2 = c2.expectSubscription() @@ -50,13 +51,15 @@ class GraphBalanceSpec extends StreamSpec { "support waiting for demand from all downstream subscriptions" in { val s1 = TestSubscriber.manualProbe[Int]() - val p2 = RunnableGraph.fromGraph(GraphDSL.create(Sink.asPublisher[Int](false)) { implicit b => p2Sink => - val balance = b.add(Balance[Int](2, waitForAllDownstreams = true)) - Source(List(1, 2, 3)) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(s1) - balance.out(1) ~> p2Sink - ClosedShape - }).run() + val p2 = RunnableGraph + .fromGraph(GraphDSL.create(Sink.asPublisher[Int](false)) { implicit b => p2Sink => + val balance = b.add(Balance[Int](2, waitForAllDownstreams = true)) + Source(List(1, 2, 3)) ~> balance.in + balance.out(0) ~> Sink.fromSubscriber(s1) + balance.out(1) ~> p2Sink + ClosedShape + }) + .run() val sub1 = s1.expectSubscription() sub1.request(1) @@ -80,14 +83,17 @@ class GraphBalanceSpec extends StreamSpec { "support waiting for demand from all non-cancelled downstream subscriptions" in assertAllStagesStopped { val s1 = TestSubscriber.manualProbe[Int]() - val (p2, p3) = RunnableGraph.fromGraph(GraphDSL.create(Sink.asPublisher[Int](false), Sink.asPublisher[Int](false))(Keep.both) { implicit b => (p2Sink, p3Sink) => - val balance = b.add(Balance[Int](3, waitForAllDownstreams = true)) - Source(List(1, 2, 3)) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(s1) - balance.out(1) ~> p2Sink - balance.out(2) 
~> p3Sink - ClosedShape - }).run() + val (p2, p3) = RunnableGraph + .fromGraph(GraphDSL.create(Sink.asPublisher[Int](false), Sink.asPublisher[Int](false))(Keep.both) { + implicit b => (p2Sink, p3Sink) => + val balance = b.add(Balance[Int](3, waitForAllDownstreams = true)) + Source(List(1, 2, 3)) ~> balance.in + balance.out(0) ~> Sink.fromSubscriber(s1) + balance.out(1) ~> p2Sink + balance.out(2) ~> p3Sink + ClosedShape + }) + .run() val sub1 = s1.expectSubscription() sub1.request(1) @@ -112,13 +118,15 @@ class GraphBalanceSpec extends StreamSpec { } "work with one-way merge" in { - val result = Source.fromGraph(GraphDSL.create() { implicit b => - val balance = b.add(Balance[Int](1)) - val source = b.add(Source(1 to 3)) + val result = Source + .fromGraph(GraphDSL.create() { implicit b => + val balance = b.add(Balance[Int](1)) + val source = b.add(Source(1 to 3)) - source ~> balance.in - SourceShape(balance.out(0)) - }).runFold(Seq[Int]())(_ :+ _) + source ~> balance.in + SourceShape(balance.out(0)) + }) + .runFold(Seq[Int]())(_ :+ _) Await.result(result, 3.seconds) should ===(Seq(1, 2, 3)) } @@ -126,32 +134,36 @@ class GraphBalanceSpec extends StreamSpec { "work with 5-way balance" in { val sink = Sink.head[Seq[Int]] - val (s1, s2, s3, s4, s5) = RunnableGraph.fromGraph(GraphDSL.create(sink, sink, sink, sink, sink)(Tuple5.apply) { implicit b => (f1, f2, f3, f4, f5) => - val balance = b.add(Balance[Int](5, waitForAllDownstreams = true)) - Source(0 to 14) ~> balance.in - balance.out(0).grouped(15) ~> f1 - balance.out(1).grouped(15) ~> f2 - balance.out(2).grouped(15) ~> f3 - balance.out(3).grouped(15) ~> f4 - balance.out(4).grouped(15) ~> f5 - ClosedShape - }).run() + val (s1, s2, s3, s4, s5) = RunnableGraph + .fromGraph(GraphDSL.create(sink, sink, sink, sink, sink)(Tuple5.apply) { implicit b => (f1, f2, f3, f4, f5) => + val balance = b.add(Balance[Int](5, waitForAllDownstreams = true)) + Source(0 to 14) ~> balance.in + balance.out(0).grouped(15) ~> f1 + 
balance.out(1).grouped(15) ~> f2 + balance.out(2).grouped(15) ~> f3 + balance.out(3).grouped(15) ~> f4 + balance.out(4).grouped(15) ~> f5 + ClosedShape + }) + .run() - Set(s1, s2, s3, s4, s5) flatMap (Await.result(_, 3.seconds)) should be((0 to 14).toSet) + Set(s1, s2, s3, s4, s5).flatMap(Await.result(_, 3.seconds)) should be((0 to 14).toSet) } "balance between all three outputs" in { val numElementsForSink = 10000 val outputs = Sink.fold[Int, Int](0)(_ + _) - val results = RunnableGraph.fromGraph(GraphDSL.create(outputs, outputs, outputs)(List(_, _, _)) { implicit b => (o1, o2, o3) => - val balance = b.add(Balance[Int](3, waitForAllDownstreams = true)) - Source.repeat(1).take(numElementsForSink * 3) ~> balance.in - balance.out(0) ~> o1 - balance.out(1) ~> o2 - balance.out(2) ~> o3 - ClosedShape - }).run() + val results = RunnableGraph + .fromGraph(GraphDSL.create(outputs, outputs, outputs)(List(_, _, _)) { implicit b => (o1, o2, o3) => + val balance = b.add(Balance[Int](3, waitForAllDownstreams = true)) + Source.repeat(1).take(numElementsForSink * 3) ~> balance.in + balance.out(0) ~> o1 + balance.out(1) ~> o2 + balance.out(2) ~> o3 + ClosedShape + }) + .run() import system.dispatcher val sum = Future.sequence(results).map { res => @@ -163,14 +175,16 @@ class GraphBalanceSpec extends StreamSpec { "fairly balance between three outputs" in { val probe = TestSink.probe[Int] - val (p1, p2, p3) = RunnableGraph.fromGraph(GraphDSL.create(probe, probe, probe)(Tuple3.apply) { implicit b => (o1, o2, o3) => - val balance = b.add(Balance[Int](3)) - Source(1 to 7) ~> balance.in - balance.out(0) ~> o1 - balance.out(1) ~> o2 - balance.out(2) ~> o3 - ClosedShape - }).run() + val (p1, p2, p3) = RunnableGraph + .fromGraph(GraphDSL.create(probe, probe, probe)(Tuple3.apply) { implicit b => (o1, o2, o3) => + val balance = b.add(Balance[Int](3)) + Source(1 to 7) ~> balance.in + balance.out(0) ~> o1 + balance.out(1) ~> o2 + balance.out(2) ~> o3 + ClosedShape + }) + .run() 
p1.requestNext(1) p2.requestNext(2) @@ -189,13 +203,15 @@ class GraphBalanceSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val balance = b.add(Balance[Int](2)) - Source(List(1, 2, 3)) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val balance = b.add(Balance[Int](2)) + Source(List(1, 2, 3)) ~> balance.in + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() sub1.cancel() @@ -211,13 +227,15 @@ class GraphBalanceSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val balance = b.add(Balance[Int](2)) - Source(List(1, 2, 3)) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val balance = b.add(Balance[Int](2)) + Source(List(1, 2, 3)) ~> balance.in + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() val sub2 = c2.expectSubscription() @@ -234,13 +252,15 @@ class GraphBalanceSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val balance = b.add(Balance[Int](2)) - Source.fromPublisher(p1.getPublisher) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val balance = b.add(Balance[Int](2)) + 
Source.fromPublisher(p1.getPublisher) ~> balance.in + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val bsub = p1.expectSubscription() val sub1 = c1.expectSubscription() @@ -265,13 +285,15 @@ class GraphBalanceSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val balance = b.add(new Balance[Int](2, waitForAllDownstreams = false, eagerCancel = true)) - Source.fromPublisher(p1.getPublisher) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val balance = b.add(new Balance[Int](2, waitForAllDownstreams = false, eagerCancel = true)) + Source.fromPublisher(p1.getPublisher) ~> balance.in + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val bsub = p1.expectSubscription() val sub1 = c1.expectSubscription() @@ -296,13 +318,15 @@ class GraphBalanceSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val balance = b.add(Balance[Int](2)) - Source.fromPublisher(p1.getPublisher) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val balance = b.add(Balance[Int](2)) + Source.fromPublisher(p1.getPublisher) ~> balance.in + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val bsub = p1.expectSubscription() val sub1 = c1.expectSubscription() @@ -328,15 +352,17 @@ class GraphBalanceSpec extends StreamSpec { val c2 = TestSubscriber.manualProbe[Int]() val c3 = 
TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val balance = b.add(Balance[Int](3)) - Source.fromPublisher(p1.getPublisher) ~> balance.in - balance.out(0) ~> Sink.fromSubscriber(c1) - balance.out(1) ~> Sink.fromSubscriber(c2) - balance.out(2) ~> Sink.fromSubscriber(c3) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val balance = b.add(Balance[Int](3)) + Source.fromPublisher(p1.getPublisher) ~> balance.in + balance.out(0) ~> Sink.fromSubscriber(c1) + balance.out(1) ~> Sink.fromSubscriber(c2) + balance.out(2) ~> Sink.fromSubscriber(c3) - ClosedShape - }).run() + ClosedShape + }) + .run() val bsub = p1.expectSubscription() val sub1 = c1.expectSubscription() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala index 9ebca31740..859a51b633 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphBroadcastSpec.scala @@ -14,8 +14,7 @@ import akka.stream.testkit.scaladsl.StreamTestKit._ class GraphBroadcastSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -26,13 +25,15 @@ class GraphBroadcastSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val bcast = b.add(Broadcast[Int](2)) - Source(List(1, 2, 3)) ~> bcast.in - bcast.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) - bcast.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + 
RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val bcast = b.add(Broadcast[Int](2)) + Source(List(1, 2, 3)) ~> bcast.in + bcast.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) + bcast.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() val sub2 = c2.expectSubscription() @@ -53,14 +54,16 @@ class GraphBroadcastSpec extends StreamSpec { } "work with one-way broadcast" in assertAllStagesStopped { - val result = Source.fromGraph(GraphDSL.create() { implicit b => - val broadcast = b.add(Broadcast[Int](1)) - val source = b.add(Source(1 to 3)) + val result = Source + .fromGraph(GraphDSL.create() { implicit b => + val broadcast = b.add(Broadcast[Int](1)) + val source = b.add(Source(1 to 3)) - source ~> broadcast.in + source ~> broadcast.in - SourceShape(broadcast.out(0)) - }).runFold(Seq[Int]())(_ :+ _) + SourceShape(broadcast.out(0)) + }) + .runFold(Seq[Int]())(_ :+ _) Await.result(result, 3.seconds) should ===(Seq(1, 2, 3)) } @@ -69,13 +72,9 @@ class GraphBroadcastSpec extends StreamSpec { val headSink = Sink.head[Seq[Int]] import system.dispatcher - val result = RunnableGraph.fromGraph(GraphDSL.create( - headSink, - headSink, - headSink, - headSink, - headSink)( - (fut1, fut2, fut3, fut4, fut5) => Future.sequence(List(fut1, fut2, fut3, fut4, fut5))) { implicit b => (p1, p2, p3, p4, p5) => + val result = RunnableGraph + .fromGraph(GraphDSL.create(headSink, headSink, headSink, headSink, headSink)((fut1, fut2, fut3, fut4, fut5) => + Future.sequence(List(fut1, fut2, fut3, fut4, fut5))) { implicit b => (p1, p2, p3, p4, p5) => val bcast = b.add(Broadcast[Int](5)) Source(List(1, 2, 3)) ~> bcast.in bcast.out(0).grouped(5) ~> p1.in @@ -84,7 +83,8 @@ class GraphBroadcastSpec extends StreamSpec { bcast.out(3).grouped(5) ~> p4.in bcast.out(4).grouped(5) ~> p5.in ClosedShape - }).run() + }) + .run() Await.result(result, 
3.seconds) should be(List.fill(5)(List(1, 2, 3))) } @@ -95,42 +95,64 @@ class GraphBroadcastSpec extends StreamSpec { val headSink: Sink[T, FT] = Sink.head[T] import system.dispatcher - val combine: (FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT) => Future[Seq[Seq[Int]]] = + val combine: (FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT, FT) => Future[ + Seq[Seq[Int]]] = (f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22) => - Future.sequence(List(f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22)) + Future.sequence( + List(f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22)) - val result = RunnableGraph.fromGraph(GraphDSL.create( - headSink, headSink, headSink, headSink, headSink, - headSink, headSink, headSink, headSink, headSink, - headSink, headSink, headSink, headSink, headSink, - headSink, headSink, headSink, headSink, headSink, - headSink, headSink)(combine) { implicit b => (p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, p17, p18, p19, p20, p21, p22) => - val bcast = b.add(Broadcast[Int](22)) - Source(List(1, 2, 3)) ~> bcast.in - bcast.out(0).grouped(5) ~> p1.in - bcast.out(1).grouped(5) ~> p2.in - bcast.out(2).grouped(5) ~> p3.in - bcast.out(3).grouped(5) ~> p4.in - bcast.out(4).grouped(5) ~> p5.in - bcast.out(5).grouped(5) ~> p6.in - bcast.out(6).grouped(5) ~> p7.in - bcast.out(7).grouped(5) ~> p8.in - bcast.out(8).grouped(5) ~> p9.in - bcast.out(9).grouped(5) ~> p10.in - bcast.out(10).grouped(5) ~> p11.in - bcast.out(11).grouped(5) ~> p12.in - bcast.out(12).grouped(5) ~> p13.in - bcast.out(13).grouped(5) ~> p14.in - bcast.out(14).grouped(5) ~> p15.in - bcast.out(15).grouped(5) ~> p16.in - bcast.out(16).grouped(5) ~> p17.in - bcast.out(17).grouped(5) ~> p18.in - bcast.out(18).grouped(5) ~> p19.in - 
bcast.out(19).grouped(5) ~> p20.in - bcast.out(20).grouped(5) ~> p21.in - bcast.out(21).grouped(5) ~> p22.in - ClosedShape - }).run() + val result = RunnableGraph + .fromGraph(GraphDSL.create(headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink, + headSink)(combine) { + implicit b => + (p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, p17, p18, p19, p20, p21, p22) => + val bcast = b.add(Broadcast[Int](22)) + Source(List(1, 2, 3)) ~> bcast.in + bcast.out(0).grouped(5) ~> p1.in + bcast.out(1).grouped(5) ~> p2.in + bcast.out(2).grouped(5) ~> p3.in + bcast.out(3).grouped(5) ~> p4.in + bcast.out(4).grouped(5) ~> p5.in + bcast.out(5).grouped(5) ~> p6.in + bcast.out(6).grouped(5) ~> p7.in + bcast.out(7).grouped(5) ~> p8.in + bcast.out(8).grouped(5) ~> p9.in + bcast.out(9).grouped(5) ~> p10.in + bcast.out(10).grouped(5) ~> p11.in + bcast.out(11).grouped(5) ~> p12.in + bcast.out(12).grouped(5) ~> p13.in + bcast.out(13).grouped(5) ~> p14.in + bcast.out(14).grouped(5) ~> p15.in + bcast.out(15).grouped(5) ~> p16.in + bcast.out(16).grouped(5) ~> p17.in + bcast.out(17).grouped(5) ~> p18.in + bcast.out(18).grouped(5) ~> p19.in + bcast.out(19).grouped(5) ~> p20.in + bcast.out(20).grouped(5) ~> p21.in + bcast.out(21).grouped(5) ~> p22.in + ClosedShape + }) + .run() Await.result(result, 3.seconds) should be(List.fill(22)(List(1, 2, 3))) } @@ -139,13 +161,15 @@ class GraphBroadcastSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val bcast = b.add(Broadcast[Int](2)) - Source(List(1, 2, 3)) ~> bcast.in - bcast.out(0) ~> Flow[Int] ~> Sink.fromSubscriber(c1) - bcast.out(1) ~> Flow[Int] ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + 
RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val bcast = b.add(Broadcast[Int](2)) + Source(List(1, 2, 3)) ~> bcast.in + bcast.out(0) ~> Flow[Int] ~> Sink.fromSubscriber(c1) + bcast.out(1) ~> Flow[Int] ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() sub1.cancel() @@ -161,13 +185,15 @@ class GraphBroadcastSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val bcast = b.add(Broadcast[Int](2)) - Source(List(1, 2, 3)) ~> bcast.in - bcast.out(0) ~> Flow[Int].named("identity-a") ~> Sink.fromSubscriber(c1) - bcast.out(1) ~> Flow[Int].named("identity-b") ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val bcast = b.add(Broadcast[Int](2)) + Source(List(1, 2, 3)) ~> bcast.in + bcast.out(0) ~> Flow[Int].named("identity-a") ~> Sink.fromSubscriber(c1) + bcast.out(1) ~> Flow[Int].named("identity-b") ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() val sub2 = c2.expectSubscription() @@ -184,13 +210,15 @@ class GraphBroadcastSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val bcast = b.add(Broadcast[Int](2)) - Source.fromPublisher(p1.getPublisher) ~> bcast.in - bcast.out(0) ~> Flow[Int] ~> Sink.fromSubscriber(c1) - bcast.out(1) ~> Flow[Int] ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val bcast = b.add(Broadcast[Int](2)) + Source.fromPublisher(p1.getPublisher) ~> bcast.in + bcast.out(0) ~> Flow[Int] ~> Sink.fromSubscriber(c1) + bcast.out(1) ~> Flow[Int] ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val bsub = p1.expectSubscription() val sub1 = c1.expectSubscription() diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphConcatSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphConcatSpec.scala index 925d92a92e..86a2451f7f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphConcatSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphConcatSpec.scala @@ -15,7 +15,7 @@ class GraphConcatSpec extends TwoStreamsSetup { override type Outputs = Int override def fixture(b: GraphDSL.Builder[_]): Fixture = new Fixture(b) { - val concat = b add Concat[Outputs]() + val concat = b.add(Concat[Outputs]()) override def left: Inlet[Outputs] = concat.in(0) override def right: Inlet[Outputs] = concat.in(1) @@ -29,20 +29,21 @@ class GraphConcatSpec extends TwoStreamsSetup { "work in the happy case" in assertAllStagesStopped { val probe = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val concat1 = b.add(Concat[Int]()) + val concat2 = b.add(Concat[Int]()) - val concat1 = b add Concat[Int]() - val concat2 = b add Concat[Int]() + Source(List.empty[Int]) ~> concat1.in(0) + Source(1 to 4) ~> concat1.in(1) - Source(List.empty[Int]) ~> concat1.in(0) - Source(1 to 4) ~> concat1.in(1) + concat1.out ~> concat2.in(0) + Source(5 to 10) ~> concat2.in(1) - concat1.out ~> concat2.in(0) - Source(5 to 10) ~> concat2.in(1) - - concat2.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + concat2.out ~> Sink.fromSubscriber(probe) + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -132,13 +133,15 @@ class GraphConcatSpec extends TwoStreamsSetup { val promise = Promise[Int]() val subscriber = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val concat = b add Concat[Int]() - Source(List(1, 2, 3)) ~> concat.in(0) - Source.fromFuture(promise.future) ~> concat.in(1) - concat.out ~> Sink.fromSubscriber(subscriber) - 
ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val concat = b.add(Concat[Int]()) + Source(List(1, 2, 3)) ~> concat.in(0) + Source.fromFuture(promise.future) ~> concat.in(1) + concat.out ~> Sink.fromSubscriber(subscriber) + ClosedShape + }) + .run() val subscription = subscriber.expectSubscription() subscription.request(4) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala index 0ff148a4bf..66dc157c95 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphDSLCompileSpec.scala @@ -23,11 +23,12 @@ class GraphDSLCompileSpec extends StreamSpec { val in = Inlet[In]("op.in") val out = Outlet[Out]("op.out") override val shape = FlowShape[In, Out](in, out) - override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { - override def onPush() = push(out, grab(in).asInstanceOf[Out]) - override def onPull(): Unit = pull(in) - setHandlers(in, out, this) - } + override def createLogic(inheritedAttributes: Attributes) = + new GraphStageLogic(shape) with InHandler with OutHandler { + override def onPush() = push(out, grab(in).asInstanceOf[Out]) + override def onPull(): Unit = pull(in) + setHandlers(in, out, this) + } } @@ -48,23 +49,27 @@ class GraphDSLCompileSpec extends StreamSpec { "A Graph" should { import GraphDSL.Implicits._ "build simple merge" in { - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val merge = b.add(Merge[String](2)) - in1 ~> f1 ~> merge.in(0) - in2 ~> f2 ~> merge.in(1) - merge.out ~> f3 ~> out1 - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val merge = b.add(Merge[String](2)) + in1 ~> f1 ~> merge.in(0) + in2 ~> f2 ~> merge.in(1) + merge.out ~> f3 ~> out1 + ClosedShape + }) + .run() 
} "build simple broadcast" in { - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val bcast = b.add(Broadcast[String](2)) - in1 ~> f1 ~> bcast.in - bcast.out(0) ~> f2 ~> out1 - bcast.out(1) ~> f3 ~> out2 - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val bcast = b.add(Broadcast[String](2)) + in1 ~> f1 ~> bcast.in + bcast.out(0) ~> f2 ~> out1 + bcast.out(1) ~> f3 ~> out2 + ClosedShape + }) + .run() } "build simple balance" in { @@ -78,30 +83,34 @@ class GraphDSLCompileSpec extends StreamSpec { } "build simple merge - broadcast" in { - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val merge = b.add(Merge[String](2)) - val bcast = b.add(Broadcast[String](2)) - in1 ~> f1 ~> merge.in(0) - in2 ~> f2 ~> merge.in(1) - merge ~> f3 ~> bcast - bcast.out(0) ~> f4 ~> out1 - bcast.out(1) ~> f5 ~> out2 - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val merge = b.add(Merge[String](2)) + val bcast = b.add(Broadcast[String](2)) + in1 ~> f1 ~> merge.in(0) + in2 ~> f2 ~> merge.in(1) + merge ~> f3 ~> bcast + bcast.out(0) ~> f4 ~> out1 + bcast.out(1) ~> f5 ~> out2 + ClosedShape + }) + .run() } "build simple merge - broadcast with implicits" in { - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ - val merge = b.add(Merge[String](2)) - val bcast = b.add(Broadcast[String](2)) - b.add(in1) ~> f1 ~> merge.in(0) - merge.out ~> f2 ~> bcast.in - bcast.out(0) ~> f3 ~> b.add(out1) - b.add(in2) ~> f4 ~> merge.in(1) - bcast.out(1) ~> f5 ~> b.add(out2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + import GraphDSL.Implicits._ + val merge = b.add(Merge[String](2)) + val bcast = b.add(Broadcast[String](2)) + b.add(in1) ~> f1 ~> merge.in(0) + merge.out ~> f2 ~> bcast.in + bcast.out(0) ~> f3 ~> b.add(out1) + b.add(in2) ~> f4 ~> merge.in(1) + bcast.out(1) ~> f5 ~> b.add(out2) + ClosedShape + }) + .run() } /* @@ 
-134,84 +143,94 @@ class GraphDSLCompileSpec extends StreamSpec { } "express complex topologies in a readable way" in { - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val merge = b.add(Merge[String](2)) - val bcast1 = b.add(Broadcast[String](2)) - val bcast2 = b.add(Broadcast[String](2)) - val feedbackLoopBuffer = Flow[String].buffer(10, OverflowStrategy.dropBuffer) - import GraphDSL.Implicits._ - b.add(in1) ~> f1 ~> merge ~> f2 ~> bcast1 ~> f3 ~> b.add(out1) - bcast1 ~> feedbackLoopBuffer ~> bcast2 ~> f5 ~> merge - bcast2 ~> f6 ~> b.add(out2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val merge = b.add(Merge[String](2)) + val bcast1 = b.add(Broadcast[String](2)) + val bcast2 = b.add(Broadcast[String](2)) + val feedbackLoopBuffer = Flow[String].buffer(10, OverflowStrategy.dropBuffer) + import GraphDSL.Implicits._ + b.add(in1) ~> f1 ~> merge ~> f2 ~> bcast1 ~> f3 ~> b.add(out1) + bcast1 ~> feedbackLoopBuffer ~> bcast2 ~> f5 ~> merge + bcast2 ~> f6 ~> b.add(out2) + ClosedShape + }) + .run() } "build broadcast - merge" in { - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val bcast = b.add(Broadcast[String](2)) - val merge = b.add(Merge[String](2)) - import GraphDSL.Implicits._ - in1 ~> f1 ~> bcast ~> f2 ~> merge ~> f3 ~> out1 - bcast ~> f4 ~> merge - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val bcast = b.add(Broadcast[String](2)) + val merge = b.add(Merge[String](2)) + import GraphDSL.Implicits._ + in1 ~> f1 ~> bcast ~> f2 ~> merge ~> f3 ~> out1 + bcast ~> f4 ~> merge + ClosedShape + }) + .run() } "build wikipedia Topological_sorting" in { // see https://en.wikipedia.org/wiki/Topological_sorting#mediaviewer/File:Directed_acyclic_graph.png - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val b3 = b.add(Broadcast[String](2)) - val b7 = b.add(Broadcast[String](2)) - val b11 = b.add(Broadcast[String](3)) - val m8 = b.add(Merge[String](2)) 
- val m9 = b.add(Merge[String](2)) - val m10 = b.add(Merge[String](2)) - val m11 = b.add(Merge[String](2)) - val in3 = Source(List("b")) - val in5 = Source(List("b")) - val in7 = Source(List("a")) - val out2 = Sink.asPublisher[String](false) - val out9 = Sink.asPublisher[String](false) - val out10 = Sink.asPublisher[String](false) - def f(s: String) = Flow[String].via(op[String, String]).named(s) - import GraphDSL.Implicits._ + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val b3 = b.add(Broadcast[String](2)) + val b7 = b.add(Broadcast[String](2)) + val b11 = b.add(Broadcast[String](3)) + val m8 = b.add(Merge[String](2)) + val m9 = b.add(Merge[String](2)) + val m10 = b.add(Merge[String](2)) + val m11 = b.add(Merge[String](2)) + val in3 = Source(List("b")) + val in5 = Source(List("b")) + val in7 = Source(List("a")) + val out2 = Sink.asPublisher[String](false) + val out9 = Sink.asPublisher[String](false) + val out10 = Sink.asPublisher[String](false) + def f(s: String) = Flow[String].via(op[String, String]).named(s) + import GraphDSL.Implicits._ - in7 ~> f("a") ~> b7 ~> f("b") ~> m11 ~> f("c") ~> b11 ~> f("d") ~> out2 - b11 ~> f("e") ~> m9 ~> f("f") ~> out9 - b7 ~> f("g") ~> m8 ~> f("h") ~> m9 - b11 ~> f("i") ~> m10 ~> f("j") ~> out10 - in5 ~> f("k") ~> m11 - in3 ~> f("l") ~> b3 ~> f("m") ~> m8 - b3 ~> f("n") ~> m10 - ClosedShape - }).run() + in7 ~> f("a") ~> b7 ~> f("b") ~> m11 ~> f("c") ~> b11 ~> f("d") ~> out2 + b11 ~> f("e") ~> m9 ~> f("f") ~> out9 + b7 ~> f("g") ~> m8 ~> f("h") ~> m9 + b11 ~> f("i") ~> m10 ~> f("j") ~> out10 + in5 ~> f("k") ~> m11 + in3 ~> f("l") ~> b3 ~> f("m") ~> m8 + b3 ~> f("n") ~> m10 + ClosedShape + }) + .run() } "make it optional to specify flows" in { - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val merge = b.add(Merge[String](2)) - val bcast = b.add(Broadcast[String](2)) - import GraphDSL.Implicits._ - in1 ~> merge ~> bcast ~> out1 - in2 ~> merge - bcast ~> out2 - ClosedShape - }).run() + RunnableGraph + 
.fromGraph(GraphDSL.create() { implicit b => + val merge = b.add(Merge[String](2)) + val bcast = b.add(Broadcast[String](2)) + import GraphDSL.Implicits._ + in1 ~> merge ~> bcast ~> out1 + in2 ~> merge + bcast ~> out2 + ClosedShape + }) + .run() } "build unzip - zip" in { - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val zip = b.add(Zip[Int, String]()) - val unzip = b.add(Unzip[Int, String]()) - val out = Sink.asPublisher[(Int, String)](false) - import GraphDSL.Implicits._ - Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out0 ~> Flow[Int].map(_ * 2) ~> zip.in0 - unzip.out1 ~> zip.in1 - zip.out ~> out - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val zip = b.add(Zip[Int, String]()) + val unzip = b.add(Unzip[Int, String]()) + val out = Sink.asPublisher[(Int, String)](false) + import GraphDSL.Implicits._ + Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in + unzip.out0 ~> Flow[Int].map(_ * 2) ~> zip.in0 + unzip.out1 ~> zip.in1 + zip.out ~> out + ClosedShape + }) + .run() } "throw an error if some ports are not connected" in { @@ -225,7 +244,8 @@ class GraphDSLCompileSpec extends StreamSpec { ClosedShape } - }.getMessage should be("Illegal GraphDSL usage. " + + }.getMessage should be( + "Illegal GraphDSL usage. " + "Inlets [Map.in] were not returned in the resulting shape and not connected. " + "Outlets [EmptySource.out] were not returned in the resulting shape and not connected.") } @@ -241,7 +261,8 @@ class GraphDSLCompileSpec extends StreamSpec { op } - }.getMessage should be("Illegal GraphDSL usage. " + + }.getMessage should be( + "Illegal GraphDSL usage. " + "Inlets [Map.in] were returned in the resulting shape but were already connected. 
" + "Outlets [Map.out] were returned in the resulting shape but were already connected.") } @@ -296,11 +317,11 @@ class GraphDSLCompileSpec extends StreamSpec { RunnableGraph.fromGraph(GraphDSL.create() { implicit b => def appleSource = b.add(Source.fromPublisher(TestPublisher.manualProbe[Apple]())) def fruitSource = b.add(Source.fromPublisher(TestPublisher.manualProbe[Fruit]())) - val outA = b add Sink.fromSubscriber(TestSubscriber.manualProbe[Fruit]()) - val outB = b add Sink.fromSubscriber(TestSubscriber.manualProbe[Fruit]()) - val merge = b add Merge[Fruit](11) - val unzip = b add Unzip[Int, String]() - val whatever = b add Sink.asPublisher[Any](false) + val outA = b.add(Sink.fromSubscriber(TestSubscriber.manualProbe[Fruit]())) + val outB = b.add(Sink.fromSubscriber(TestSubscriber.manualProbe[Fruit]())) + val merge = b.add(Merge[Fruit](11)) + val unzip = b.add(Unzip[Int, String]()) + val whatever = b.add(Sink.asPublisher[Any](false)) import GraphDSL.Implicits._ b.add(Source.fromIterator[Fruit](apples)) ~> merge.in(0) appleSource ~> merge.in(1) @@ -331,43 +352,61 @@ class GraphDSLCompileSpec extends StreamSpec { "build with plain flow without junctions" in { import GraphDSL.Implicits._ - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - in1 ~> f1 ~> out1 - ClosedShape - }).run() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - in1 ~> f1 ~> f2.to(out1) - ClosedShape - }).run() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - (in1 via f1) ~> f2 ~> out1 - ClosedShape - }).run() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - in1 ~> out1 - ClosedShape - }).run() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - in1 ~> (f1 to out1) - ClosedShape - }).run() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - (in1 via f1) ~> out1 - ClosedShape - }).run() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - (in1 via f1) ~> (f2 to out1) - ClosedShape - }).run() + RunnableGraph + 
.fromGraph(GraphDSL.create() { implicit b => + in1 ~> f1 ~> out1 + ClosedShape + }) + .run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + in1 ~> f1 ~> f2.to(out1) + ClosedShape + }) + .run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + (in1.via(f1)) ~> f2 ~> out1 + ClosedShape + }) + .run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + in1 ~> out1 + ClosedShape + }) + .run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + in1 ~> (f1 to out1) + ClosedShape + }) + .run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + (in1.via(f1)) ~> out1 + ClosedShape + }) + .run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + (in1.via(f1)) ~> (f2 to out1) + ClosedShape + }) + .run() } "suitably override attribute handling methods" in { import akka.stream.Attributes._ - val ga = GraphDSL.create() { implicit b => - val id = b.add(GraphStages.identity[Any]) + val ga = GraphDSL + .create() { implicit b => + val id = b.add(GraphStages.identity[Any]) - FlowShape(id.in, id.out) - }.async.addAttributes(none).named("useless") + FlowShape(id.in, id.out) + } + .async + .addAttributes(none) + .named("useless") ga.traversalBuilder.attributes.getFirst[Name] shouldEqual Some(Name("useless")) ga.traversalBuilder.attributes.getFirst[AsyncBoundary.type] shouldEqual (Some(AsyncBoundary)) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala index b919e95e1c..577562ee9b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMatValueSpec.scala @@ -21,17 +21,18 @@ class GraphMatValueSpec extends StreamSpec { "A Graph with materialized value" must { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = 
ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) "expose the materialized value as source" in { val sub = TestSubscriber.manualProbe[Int]() - val f = RunnableGraph.fromGraph(GraphDSL.create(foldSink) { implicit b => fold => - Source(1 to 10) ~> fold - b.materializedValue.mapAsync(4)(identity) ~> Sink.fromSubscriber(sub) - ClosedShape - }).run() + val f = RunnableGraph + .fromGraph(GraphDSL.create(foldSink) { implicit b => fold => + Source(1 to 10) ~> fold + b.materializedValue.mapAsync(4)(identity) ~> Sink.fromSubscriber(sub) + ClosedShape + }) + .run() val r1 = Await.result(f, 3.seconds) sub.expectSubscription().request(1) @@ -43,15 +44,17 @@ class GraphMatValueSpec extends StreamSpec { "expose the materialized value as source multiple times" in { val sub = TestSubscriber.manualProbe[Int]() - val f = RunnableGraph.fromGraph(GraphDSL.create(foldSink) { implicit b => fold => - val zip = b.add(ZipWith[Int, Int, Int](_ + _)) - Source(1 to 10) ~> fold - b.materializedValue.mapAsync(4)(identity) ~> zip.in0 - b.materializedValue.mapAsync(4)(identity) ~> zip.in1 + val f = RunnableGraph + .fromGraph(GraphDSL.create(foldSink) { implicit b => fold => + val zip = b.add(ZipWith[Int, Int, Int](_ + _)) + Source(1 to 10) ~> fold + b.materializedValue.mapAsync(4)(identity) ~> zip.in0 + b.materializedValue.mapAsync(4)(identity) ~> zip.in1 - zip.out ~> Sink.fromSubscriber(sub) - ClosedShape - }).run() + zip.out ~> Sink.fromSubscriber(sub) + ClosedShape + }) + .run() val r1 = Await.result(f, 3.seconds) sub.expectSubscription().request(1) @@ -61,9 +64,10 @@ class GraphMatValueSpec extends StreamSpec { } // Exposes the materialized value as a stream value - val foldFeedbackSource: Source[Future[Int], Future[Int]] = Source.fromGraph(GraphDSL.create(foldSink) { implicit b => fold => - Source(1 to 10) ~> fold - SourceShape(b.materializedValue) + val foldFeedbackSource: Source[Future[Int], Future[Int]] 
= Source.fromGraph(GraphDSL.create(foldSink) { + implicit b => fold => + Source(1 to 10) ~> fold + SourceShape(b.materializedValue) }) "allow exposing the materialized value as port" in { @@ -73,24 +77,27 @@ class GraphMatValueSpec extends StreamSpec { } "allow exposing the materialized value as port even if wrapped and the final materialized value is Unit" in { - val noMatSource: Source[Int, Unit] = foldFeedbackSource.mapAsync(4)(identity).map(_ + 100).mapMaterializedValue((_) => ()) + val noMatSource: Source[Int, Unit] = + foldFeedbackSource.mapAsync(4)(identity).map(_ + 100).mapMaterializedValue((_) => ()) Await.result(noMatSource.runWith(Sink.head), 3.seconds) should ===(155) } "work properly with nesting and reusing" in { - val compositeSource1 = Source.fromGraph(GraphDSL.create(foldFeedbackSource, foldFeedbackSource)(Keep.both) { implicit b => (s1, s2) => - val zip = b.add(ZipWith[Int, Int, Int](_ + _)) + val compositeSource1 = Source.fromGraph(GraphDSL.create(foldFeedbackSource, foldFeedbackSource)(Keep.both) { + implicit b => (s1, s2) => + val zip = b.add(ZipWith[Int, Int, Int](_ + _)) - s1.out.mapAsync(4)(identity) ~> zip.in0 - s2.out.mapAsync(4)(identity).map(_ * 100) ~> zip.in1 - SourceShape(zip.out) + s1.out.mapAsync(4)(identity) ~> zip.in0 + s2.out.mapAsync(4)(identity).map(_ * 100) ~> zip.in1 + SourceShape(zip.out) }) - val compositeSource2 = Source.fromGraph(GraphDSL.create(compositeSource1, compositeSource1)(Keep.both) { implicit b => (s1, s2) => - val zip = b.add(ZipWith[Int, Int, Int](_ + _)) - s1.out ~> zip.in0 - s2.out.map(_ * 10000) ~> zip.in1 - SourceShape(zip.out) + val compositeSource2 = Source.fromGraph(GraphDSL.create(compositeSource1, compositeSource1)(Keep.both) { + implicit b => (s1, s2) => + val zip = b.add(ZipWith[Int, Int, Int](_ + _)) + s1.out ~> zip.in0 + s2.out.map(_ * 10000) ~> zip.in1 + SourceShape(zip.out) }) val (((f1, f2), (f3, f4)), result) = compositeSource2.toMat(Sink.head)(Keep.both).run() @@ -150,9 +157,10 @@ class 
GraphMatValueSpec extends StreamSpec { "produce NotUsed when starting from Flow.via with transformation" in { var done = false - Source.empty.viaMat( - Flow[Int].via(Flow[Int].mapMaterializedValue(_ => done = true)))(Keep.right) - .to(Sink.ignore).run() should ===(akka.NotUsed) + Source.empty + .viaMat(Flow[Int].via(Flow[Int].mapMaterializedValue(_ => done = true)))(Keep.right) + .to(Sink.ignore) + .run() should ===(akka.NotUsed) done should ===(true) } @@ -200,16 +208,18 @@ class GraphMatValueSpec extends StreamSpec { "with Identity Flow optimization even if ports are wired in an arbitrary higher nesting level" in { val mat2 = ActorMaterializer(ActorMaterializerSettings(system)) - val subflow = GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ - val zip = b.add(Zip[String, String]()) - val bc = b.add(Broadcast[String](2)) + val subflow = GraphDSL + .create() { implicit b => + import GraphDSL.Implicits._ + val zip = b.add(Zip[String, String]()) + val bc = b.add(Broadcast[String](2)) - bc.out(0) ~> zip.in0 - bc.out(1) ~> zip.in1 + bc.out(0) ~> zip.in0 + bc.out(1) ~> zip.in1 - FlowShape(bc.in, zip.out) - }.named("nestedFlow") + FlowShape(bc.in, zip.out) + } + .named("nestedFlow") val nest1 = Flow[String].via(subflow) val nest2 = Flow[String].via(nest1) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeLatestSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeLatestSpec.scala index d7374518bf..93e92ceaa9 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeLatestSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeLatestSpec.scala @@ -18,7 +18,7 @@ class GraphMergeLatestSpec extends TwoStreamsSetup { override type Outputs = List[Int] override def fixture(b: GraphDSL.Builder[_]): Fixture = new Fixture(b) { - val merge = b add MergeLatest[Int](2) + val merge = b.add(MergeLatest[Int](2)) override def left: Inlet[Int] = merge.in(0) override def right: 
Inlet[Int] = merge.in(1) @@ -34,15 +34,17 @@ class GraphMergeLatestSpec extends TwoStreamsSetup { val up3 = TestSource.probe[Int] val probe = TestSubscriber.manualProbe[List[Int]]() - val (in1, in2, in3) = RunnableGraph.fromGraph(GraphDSL.create(up1, up2, up3)((_, _, _)) { implicit b => (s1, s2, s3) => - val m = b.add(MergeLatest[Int](3)) + val (in1, in2, in3) = RunnableGraph + .fromGraph(GraphDSL.create(up1, up2, up3)((_, _, _)) { implicit b => (s1, s2, s3) => + val m = b.add(MergeLatest[Int](3)) - s1 ~> m - s2 ~> m - s3 ~> m - m.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + s1 ~> m + s2 ~> m + s3 ~> m + m.out ~> Sink.fromSubscriber(probe) + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -66,15 +68,17 @@ class GraphMergeLatestSpec extends TwoStreamsSetup { val up3 = TestSource.probe[Int] val probe = TestSubscriber.manualProbe[List[Int]]() - val (in1, in2, in3) = RunnableGraph.fromGraph(GraphDSL.create(up1, up2, up3)((_, _, _)) { implicit b => (s1, s2, s3) => - val m = b.add(MergeLatest[Int](3)) + val (in1, in2, in3) = RunnableGraph + .fromGraph(GraphDSL.create(up1, up2, up3)((_, _, _)) { implicit b => (s1, s2, s3) => + val m = b.add(MergeLatest[Int](3)) - s1 ~> m - s2 ~> m - s3 ~> m - m.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + s1 ~> m + s2 ~> m + s3 ~> m + m.out ~> Sink.fromSubscriber(probe) + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -111,13 +115,15 @@ class GraphMergeLatestSpec extends TwoStreamsSetup { } "work with one-way merge" in { - val result = Source.fromGraph(GraphDSL.create() { implicit b => - val merge = b.add(MergeLatest[Int](1)) - val source = b.add(Source(1 to 3)) + val result = Source + .fromGraph(GraphDSL.create() { implicit b => + val merge = b.add(MergeLatest[Int](1)) + val source = b.add(Source(1 to 3)) - source ~> merge - SourceShape(merge.out) - }).runFold(Seq[List[Int]]())(_ :+ _) + source ~> merge + SourceShape(merge.out) + }) + 
.runFold(Seq[List[Int]]())(_ :+ _) Await.result(result, 3.seconds) should ===(Seq(List(1), List(2), List(3))) } @@ -127,14 +133,16 @@ class GraphMergeLatestSpec extends TwoStreamsSetup { val up2 = TestSource.probe[Int] val probe = TestSubscriber.manualProbe[List[Int]]() - val (in1, in2) = RunnableGraph.fromGraph(GraphDSL.create(up1, up2)((_, _)) { implicit b => (s1, s2) => - val m = b.add(MergeLatest[Int](2, true)) + val (in1, in2) = RunnableGraph + .fromGraph(GraphDSL.create(up1, up2)((_, _)) { implicit b => (s1, s2) => + val m = b.add(MergeLatest[Int](2, true)) - s1 ~> m - s2 ~> m - m.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + s1 ~> m + s2 ~> m + m.out ~> Sink.fromSubscriber(probe) + ClosedShape + }) + .run() val subscription = probe.expectSubscription() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala index 924f779044..1df8630db2 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePreferredSpec.scala @@ -33,31 +33,35 @@ class GraphMergePreferredSpec extends TwoStreamsSetup { val preferred = Source(Stream.fill(numElements)(1)) val aux = Source(Stream.fill(numElements)(2)) - val result = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b => sink => - val merge = b.add(MergePreferred[Int](3)) - preferred ~> merge.preferred + val result = RunnableGraph + .fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b => sink => + val merge = b.add(MergePreferred[Int](3)) + preferred ~> merge.preferred - merge.out.grouped(numElements * 2) ~> sink.in - aux ~> merge.in(0) - aux ~> merge.in(1) - aux ~> merge.in(2) - ClosedShape - }).run() + merge.out.grouped(numElements * 2) ~> sink.in + aux ~> merge.in(0) + aux ~> merge.in(1) + aux ~> merge.in(2) + ClosedShape + }) + .run() 
Await.result(result, 3.seconds).filter(_ == 1).size should be(numElements) } "eventually pass through all elements without corrupting the ordering" in { - val result = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b => sink => - val merge = b.add(MergePreferred[Int](3)) - Source(1 to 100) ~> merge.preferred + val result = RunnableGraph + .fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b => sink => + val merge = b.add(MergePreferred[Int](3)) + Source(1 to 100) ~> merge.preferred - merge.out.grouped(500) ~> sink.in - Source(101 to 200) ~> merge.in(0) - Source(201 to 300) ~> merge.in(1) - Source(301 to 400) ~> merge.in(2) - ClosedShape - }).run() + merge.out.grouped(500) ~> sink.in + Source(101 to 200) ~> merge.in(0) + Source(201 to 300) ~> merge.in(1) + Source(301 to 400) ~> merge.in(2) + ClosedShape + }) + .run() val resultSeq = Await.result(result, 3.seconds) resultSeq.toSet should ===((1 to 400).toSet) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala index 593572f85a..702bf10a16 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergePrioritizedSpec.scala @@ -16,7 +16,7 @@ class GraphMergePrioritizedSpec extends TwoStreamsSetup { override type Outputs = Int override def fixture(b: GraphDSL.Builder[_]): Fixture = new Fixture(b) { - val mergePrioritized = b add MergePrioritized[Outputs](Seq(2, 8)) + val mergePrioritized = b.add(MergePrioritized[Outputs](Seq(2, 8))) override def left: Inlet[Outputs] = mergePrioritized.in(0) override def right: Inlet[Outputs] = mergePrioritized.in(1) @@ -136,7 +136,11 @@ class GraphMergePrioritizedSpec extends TwoStreamsSetup { } } - private def threeSourceMerge[T](source1: Source[T, NotUsed], source2: Source[T, NotUsed], source3: Source[T, NotUsed], 
priorities: Seq[Int], probe: ManualProbe[T]) = { + private def threeSourceMerge[T](source1: Source[T, NotUsed], + source2: Source[T, NotUsed], + source3: Source[T, NotUsed], + priorities: Seq[Int], + probe: ManualProbe[T]) = { RunnableGraph.fromGraph(GraphDSL.create(source1, source2, source3)((_, _, _)) { implicit b => (s1, s2, s3) => val merge = b.add(MergePrioritized[T](priorities)) // introduce a delay on the consuming side making it more likely that diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSortedSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSortedSpec.scala index ad320fae38..ccb54c7cfe 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSortedSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSortedSpec.scala @@ -32,7 +32,8 @@ class GraphMergeSortedSpec extends TwoStreamsSetup with GeneratorDrivenPropertyC forAll(gen) { picks => val N = picks.size val (left, right) = picks.zipWithIndex.partition(_._1) - Source(left.map(_._2)).mergeSorted(Source(right.map(_._2))) + Source(left.map(_._2)) + .mergeSorted(Source(right.map(_._2))) .grouped(N max 1) .concat(Source.single(Nil)) .runWith(Sink.head) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala index 2a65958fe4..17cf290618 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphMergeSpec.scala @@ -18,7 +18,7 @@ class GraphMergeSpec extends TwoStreamsSetup { override type Outputs = Int override def fixture(b: GraphDSL.Builder[_]): Fixture = new Fixture(b) { - val merge = b add Merge[Outputs](2) + val merge = b.add(Merge[Outputs](2)) override def left: Inlet[Outputs] = merge.in(0) override def right: Inlet[Outputs] = merge.in(1) @@ -35,18 +35,20 @@ class GraphMergeSpec extends 
TwoStreamsSetup { val source3 = Source(List[Int]()) val probe = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val m1 = b.add(Merge[Int](2)) - val m2 = b.add(Merge[Int](2)) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val m1 = b.add(Merge[Int](2)) + val m2 = b.add(Merge[Int](2)) - source1 ~> m1.in(0) - m1.out ~> Flow[Int].map(_ * 2) ~> m2.in(0) - m2.out ~> Flow[Int].map(_ / 2).map(_ + 1) ~> Sink.fromSubscriber(probe) - source2 ~> m1.in(1) - source3 ~> m2.in(1) + source1 ~> m1.in(0) + m1.out ~> Flow[Int].map(_ * 2) ~> m2.in(0) + m2.out ~> Flow[Int].map(_ / 2).map(_ + 1) ~> Sink.fromSubscriber(probe) + source2 ~> m1.in(1) + source3 ~> m2.in(1) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -64,13 +66,15 @@ class GraphMergeSpec extends TwoStreamsSetup { } "work with one-way merge" in { - val result = Source.fromGraph(GraphDSL.create() { implicit b => - val merge = b.add(Merge[Int](1)) - val source = b.add(Source(1 to 3)) + val result = Source + .fromGraph(GraphDSL.create() { implicit b => + val merge = b.add(Merge[Int](1)) + val source = b.add(Source(1 to 3)) - source ~> merge.in(0) - SourceShape(merge.out) - }).runFold(Seq[Int]())(_ :+ _) + source ~> merge.in(0) + SourceShape(merge.out) + }) + .runFold(Seq[Int]())(_ :+ _) Await.result(result, 3.seconds) should ===(Seq(1, 2, 3)) } @@ -85,19 +89,21 @@ class GraphMergeSpec extends TwoStreamsSetup { val probe = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val merge = b.add(Merge[Int](6)) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val merge = b.add(Merge[Int](6)) - source1 ~> merge.in(0) - source2 ~> merge.in(1) - source3 ~> merge.in(2) - source4 ~> merge.in(3) - source5 ~> merge.in(4) - source6 ~> merge.in(5) - merge.out ~> Sink.fromSubscriber(probe) + source1 ~> merge.in(0) + source2 ~> merge.in(1) + source3 ~> merge.in(2) + 
source4 ~> merge.in(3) + source5 ~> merge.in(4) + source6 ~> merge.in(5) + merge.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -171,13 +177,15 @@ class GraphMergeSpec extends TwoStreamsSetup { val src1 = Source.asSubscriber[Int] val src2 = Source.asSubscriber[Int] - val (graphSubscriber1, graphSubscriber2) = RunnableGraph.fromGraph(GraphDSL.create(src1, src2)((_, _)) { implicit b => (s1, s2) => - val merge = b.add(Merge[Int](2)) - s1.out ~> merge.in(0) - s2.out ~> merge.in(1) - merge.out ~> Sink.fromSubscriber(down) - ClosedShape - }).run() + val (graphSubscriber1, graphSubscriber2) = RunnableGraph + .fromGraph(GraphDSL.create(src1, src2)((_, _)) { implicit b => (s1, s2) => + val merge = b.add(Merge[Int](2)) + s1.out ~> merge.in(0) + s2.out ~> merge.in(1) + merge.out ~> Sink.fromSubscriber(down) + ClosedShape + }) + .run() val downstream = down.expectSubscription() downstream.cancel() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala index 2d7e4571ac..d60e9114a7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphOpsIntegrationSpec.scala @@ -6,7 +6,7 @@ package akka.stream.scaladsl import akka.NotUsed import scala.collection.immutable -import scala.concurrent.{ Future, Await } +import scala.concurrent.{ Await, Future } import scala.concurrent.duration._ import akka.stream._ import akka.stream.testkit._ @@ -16,13 +16,12 @@ object GraphOpsIntegrationSpec { object Shuffle { - case class ShufflePorts[In, Out](in1: Inlet[In], in2: Inlet[In], out1: Outlet[Out], out2: Outlet[Out]) extends Shape { + case class ShufflePorts[In, Out](in1: Inlet[In], in2: Inlet[In], out1: Outlet[Out], out2: Outlet[Out]) + extends Shape { override def inlets: 
immutable.Seq[Inlet[_]] = List(in1, in2) override def outlets: immutable.Seq[Outlet[_]] = List(out1, out2) - override def deepCopy() = ShufflePorts( - in1.carbonCopy(), in2.carbonCopy(), - out1.carbonCopy(), out2.carbonCopy()) + override def deepCopy() = ShufflePorts(in1.carbonCopy(), in2.carbonCopy(), out1.carbonCopy(), out2.carbonCopy()) } def apply[In, Out](pipeline: Flow[In, Out, _]): Graph[ShufflePorts[In, Out], NotUsed] = { @@ -42,41 +41,44 @@ class GraphOpsIntegrationSpec extends StreamSpec { import akka.stream.scaladsl.GraphOpsIntegrationSpec._ import GraphDSL.Implicits._ - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) "GraphDSLs" must { "support broadcast - merge layouts" in { - val resultFuture = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b => (sink) => - val bcast = b.add(Broadcast[Int](2)) - val merge = b.add(Merge[Int](2)) + val resultFuture = RunnableGraph + .fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b => (sink) => + val bcast = b.add(Broadcast[Int](2)) + val merge = b.add(Merge[Int](2)) - Source(List(1, 2, 3)) ~> bcast.in - bcast.out(0) ~> merge.in(0) - bcast.out(1).map(_ + 3) ~> merge.in(1) - merge.out.grouped(10) ~> sink.in - ClosedShape - }).run() + Source(List(1, 2, 3)) ~> bcast.in + bcast.out(0) ~> merge.in(0) + bcast.out(1).map(_ + 3) ~> merge.in(1) + merge.out.grouped(10) ~> sink.in + ClosedShape + }) + .run() Await.result(resultFuture, 3.seconds).sorted should be(List(1, 2, 3, 4, 5, 6)) } "support balance - merge (parallelization) layouts" in { val elements = 0 to 10 - val out = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b => (sink) => - val balance = b.add(Balance[Int](5)) - val merge = b.add(Merge[Int](5)) + val out = RunnableGraph + 
.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b => (sink) => + val balance = b.add(Balance[Int](5)) + val merge = b.add(Merge[Int](5)) - Source(elements) ~> balance.in + Source(elements) ~> balance.in - for (i <- 0 until 5) balance.out(i) ~> merge.in(i) + for (i <- 0 until 5) balance.out(i) ~> merge.in(i) - merge.out.grouped(elements.size * 2) ~> sink.in - ClosedShape - }).run() + merge.out.grouped(elements.size * 2) ~> sink.in + ClosedShape + }) + .run() Await.result(out, 3.seconds).sorted should be(elements) } @@ -85,43 +87,45 @@ class GraphOpsIntegrationSpec extends StreamSpec { // see https://en.wikipedia.org/wiki/Topological_sorting#mediaviewer/File:Directed_acyclic_graph.png val seqSink = Sink.head[Seq[Int]] - val (resultFuture2, resultFuture9, resultFuture10) = RunnableGraph.fromGraph(GraphDSL.create(seqSink, seqSink, seqSink)(Tuple3.apply) { implicit b => (sink2, sink9, sink10) => - val b3 = b.add(Broadcast[Int](2)) - val b7 = b.add(Broadcast[Int](2)) - val b11 = b.add(Broadcast[Int](3)) - val m8 = b.add(Merge[Int](2)) - val m9 = b.add(Merge[Int](2)) - val m10 = b.add(Merge[Int](2)) - val m11 = b.add(Merge[Int](2)) - val in3 = Source(List(3)) - val in5 = Source(List(5)) - val in7 = Source(List(7)) + val (resultFuture2, resultFuture9, resultFuture10) = RunnableGraph + .fromGraph(GraphDSL.create(seqSink, seqSink, seqSink)(Tuple3.apply) { implicit b => (sink2, sink9, sink10) => + val b3 = b.add(Broadcast[Int](2)) + val b7 = b.add(Broadcast[Int](2)) + val b11 = b.add(Broadcast[Int](3)) + val m8 = b.add(Merge[Int](2)) + val m9 = b.add(Merge[Int](2)) + val m10 = b.add(Merge[Int](2)) + val m11 = b.add(Merge[Int](2)) + val in3 = Source(List(3)) + val in5 = Source(List(5)) + val in7 = Source(List(7)) - // First layer - in7 ~> b7.in - b7.out(0) ~> m11.in(0) - b7.out(1) ~> m8.in(0) + // First layer + in7 ~> b7.in + b7.out(0) ~> m11.in(0) + b7.out(1) ~> m8.in(0) - in5 ~> m11.in(1) + in5 ~> m11.in(1) - in3 ~> b3.in - b3.out(0) ~> m8.in(1) - b3.out(1) ~> 
m10.in(0) + in3 ~> b3.in + b3.out(0) ~> m8.in(1) + b3.out(1) ~> m10.in(0) - // Second layer - m11.out ~> b11.in - b11.out(0).grouped(1000) ~> sink2.in // Vertex 2 is omitted since it has only one in and out - b11.out(1) ~> m9.in(0) - b11.out(2) ~> m10.in(1) + // Second layer + m11.out ~> b11.in + b11.out(0).grouped(1000) ~> sink2.in // Vertex 2 is omitted since it has only one in and out + b11.out(1) ~> m9.in(0) + b11.out(2) ~> m10.in(1) - m8.out ~> m9.in(1) + m8.out ~> m9.in(1) - // Third layer - m9.out.grouped(1000) ~> sink9.in - m10.out.grouped(1000) ~> sink10.in + // Third layer + m9.out.grouped(1000) ~> sink9.in + m10.out.grouped(1000) ~> sink10.in - ClosedShape - }).run() + ClosedShape + }) + .run() Await.result(resultFuture2, 3.seconds).sorted should be(List(5, 7)) Await.result(resultFuture9, 3.seconds).sorted should be(List(3, 5, 7, 7)) @@ -131,16 +135,18 @@ class GraphOpsIntegrationSpec extends StreamSpec { "allow adding of flows to sources and sinks to flows" in { - val resultFuture = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b => (sink) => - val bcast = b.add(Broadcast[Int](2)) - val merge = b.add(Merge[Int](2)) + val resultFuture = RunnableGraph + .fromGraph(GraphDSL.create(Sink.head[Seq[Int]]) { implicit b => (sink) => + val bcast = b.add(Broadcast[Int](2)) + val merge = b.add(Merge[Int](2)) - Source(List(1, 2, 3)).map(_ * 2) ~> bcast.in - bcast.out(0) ~> merge.in(0) - bcast.out(1).map(_ + 3) ~> merge.in(1) - merge.out.grouped(10) ~> sink.in - ClosedShape - }).run() + Source(List(1, 2, 3)).map(_ * 2) ~> bcast.in + bcast.out(0) ~> merge.in(0) + bcast.out(1).map(_ + 3) ~> merge.in(1) + merge.out.grouped(10) ~> sink.in + ClosedShape + }) + .run() Await.result(resultFuture, 3.seconds) should contain theSameElementsAs (Seq(2, 4, 6, 5, 7, 9)) } @@ -149,10 +155,12 @@ class GraphOpsIntegrationSpec extends StreamSpec { val p = Source(List(1, 2, 3)).runWith(Sink.asPublisher(false)) val s = TestSubscriber.manualProbe[Int] val flow = 
Flow[Int].map(_ * 2) - RunnableGraph.fromGraph(GraphDSL.create() { implicit builder => - Source.fromPublisher(p) ~> flow ~> Sink.fromSubscriber(s) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit builder => + Source.fromPublisher(p) ~> flow ~> Sink.fromSubscriber(s) + ClosedShape + }) + .run() val sub = s.expectSubscription() sub.request(10) s.expectNext(1 * 2) @@ -164,24 +172,27 @@ class GraphOpsIntegrationSpec extends StreamSpec { "be possible to use as lego bricks" in { val shuffler = Shuffle(Flow[Int].map(_ + 1)) - val f: Future[Seq[Int]] = RunnableGraph.fromGraph(GraphDSL.create(shuffler, shuffler, shuffler, Sink.head[Seq[Int]])((_, _, _, fut) => fut) { implicit b => (s1, s2, s3, sink) => - val merge = b.add(Merge[Int](2)) + val f: Future[Seq[Int]] = RunnableGraph + .fromGraph(GraphDSL.create(shuffler, shuffler, shuffler, Sink.head[Seq[Int]])((_, _, _, fut) => fut) { + implicit b => (s1, s2, s3, sink) => + val merge = b.add(Merge[Int](2)) - Source(List(1, 2, 3)) ~> s1.in1 - Source(List(10, 11, 12)) ~> s1.in2 + Source(List(1, 2, 3)) ~> s1.in1 + Source(List(10, 11, 12)) ~> s1.in2 - s1.out1 ~> s2.in1 - s1.out2 ~> s2.in2 + s1.out1 ~> s2.in1 + s1.out2 ~> s2.in2 - s2.out1 ~> s3.in1 - s2.out2 ~> s3.in2 + s2.out1 ~> s3.in1 + s2.out2 ~> s3.in2 - s3.out1 ~> merge.in(0) - s3.out2 ~> merge.in(1) + s3.out1 ~> merge.in(0) + s3.out2 ~> merge.in(1) - merge.out.grouped(1000) ~> sink - ClosedShape - }).run() + merge.out.grouped(1000) ~> sink + ClosedShape + }) + .run() val result = Await.result(f, 3.seconds) @@ -192,17 +203,18 @@ class GraphOpsIntegrationSpec extends StreamSpec { implicit val ex = materializer.system.dispatcher //#graph-from-list - val sinks = immutable.Seq("a", "b", "c").map(prefix => - Flow[String].filter(str => str.startsWith(prefix)).toMat(Sink.head[String])(Keep.right) - ) + val sinks = immutable + .Seq("a", "b", "c") + .map(prefix => Flow[String].filter(str => str.startsWith(prefix)).toMat(Sink.head[String])(Keep.right)) 
- val g: RunnableGraph[Seq[Future[String]]] = RunnableGraph.fromGraph(GraphDSL.create(sinks) { implicit b => sinkList => - val broadcast = b.add(Broadcast[String](sinkList.size)) + val g: RunnableGraph[Seq[Future[String]]] = RunnableGraph.fromGraph(GraphDSL.create(sinks) { + implicit b => sinkList => + val broadcast = b.add(Broadcast[String](sinkList.size)) - Source(List("ax", "bx", "cx")) ~> broadcast - sinkList.foreach(sink => broadcast ~> sink) + Source(List("ax", "bx", "cx")) ~> broadcast + sinkList.foreach(sink => broadcast ~> sink) - ClosedShape + ClosedShape }) val matList: Seq[Future[String]] = g.run() @@ -221,13 +233,14 @@ class GraphOpsIntegrationSpec extends StreamSpec { val sinks = immutable.Seq(Sink.seq[Int]) - val g: RunnableGraph[Seq[Future[immutable.Seq[Int]]]] = RunnableGraph.fromGraph(GraphDSL.create(sinks) { implicit b => sinkList => - val broadcast = b.add(Broadcast[Int](sinkList.size)) + val g: RunnableGraph[Seq[Future[immutable.Seq[Int]]]] = RunnableGraph.fromGraph(GraphDSL.create(sinks) { + implicit b => sinkList => + val broadcast = b.add(Broadcast[Int](sinkList.size)) - Source(List(1, 2, 3)) ~> broadcast - sinkList.foreach(sink => broadcast ~> sink) + Source(List(1, 2, 3)) ~> broadcast + sinkList.foreach(sink => broadcast ~> sink) - ClosedShape + ClosedShape }) val matList: Seq[Future[immutable.Seq[Int]]] = g.run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala index 82060c5f26..b8bbc9240b 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartialSpec.scala @@ -5,15 +5,14 @@ package akka.stream.scaladsl import akka.stream.testkit.StreamSpec -import akka.stream.{ ClosedShape, ActorMaterializer, ActorMaterializerSettings, FlowShape } +import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, ClosedShape, 
FlowShape } import scala.concurrent.Await import scala.concurrent.duration._ class GraphPartialSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -30,12 +29,15 @@ class GraphPartialSpec extends StreamSpec { FlowShape(bcast.in, zip.out) } - val (_, _, result) = RunnableGraph.fromGraph(GraphDSL.create(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { implicit b => (d1, d2, sink) => - Source(List(1, 2, 3)) ~> d1.in - d1.out ~> d2.in - d2.out.grouped(100) ~> sink.in - ClosedShape - }).run() + val (_, _, result) = RunnableGraph + .fromGraph(GraphDSL.create(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { + implicit b => (d1, d2, sink) => + Source(List(1, 2, 3)) ~> d1.in + d1.out ~> d2.in + d2.out.grouped(100) ~> sink.in + ClosedShape + }) + .run() Await.result(result, 3.seconds) should be(List(4, 8, 12)) } @@ -51,12 +53,15 @@ class GraphPartialSpec extends StreamSpec { FlowShape(bcast.in, zip.out) } - val (sub1, sub2, result) = RunnableGraph.fromGraph(GraphDSL.create(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { implicit b => (d1, d2, sink) => - Source(List(1, 2, 3)) ~> d1.in - d1.out ~> d2.in - d2.out.grouped(100) ~> sink.in - ClosedShape - }).run() + val (sub1, sub2, result) = RunnableGraph + .fromGraph(GraphDSL.create(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { + implicit b => (d1, d2, sink) => + Source(List(1, 2, 3)) ~> d1.in + d1.out ~> d2.in + d2.out.grouped(100) ~> sink.in + ClosedShape + }) + .run() Await.result(result, 3.seconds) should be(List(4, 8, 12)) Await.result(sub1, 3.seconds) should be(List(1, 2, 3)) @@ -81,12 +86,15 @@ class GraphPartialSpec extends StreamSpec { FlowShape(bcast.in, bcast2.out(1)) } - val (sub1, sub2, result) = RunnableGraph.fromGraph(GraphDSL.create(doubler, doubler, 
Sink.head[Seq[Int]])(Tuple3.apply) { implicit b => (d1, d2, sink) => - Source(List(1, 2, 3)) ~> d1.in - d1.out ~> d2.in - d2.out.grouped(100) ~> sink.in - ClosedShape - }).run() + val (sub1, sub2, result) = RunnableGraph + .fromGraph(GraphDSL.create(doubler, doubler, Sink.head[Seq[Int]])(Tuple3.apply) { + implicit b => (d1, d2, sink) => + Source(List(1, 2, 3)) ~> d1.in + d1.out ~> d2.in + d2.out.grouped(100) ~> sink.in + ClosedShape + }) + .run() Await.result(result, 3.seconds) should be(List(4, 8, 12)) Await.result(sub1._1, 3.seconds) should be(6) @@ -100,12 +108,14 @@ class GraphPartialSpec extends StreamSpec { FlowShape(flow.in, flow.out) } - val fut = RunnableGraph.fromGraph(GraphDSL.create(Sink.head[Int], p)(Keep.left) { implicit b => (sink, flow) => - import GraphDSL.Implicits._ - Source.single(0) ~> flow.in - flow.out ~> sink.in - ClosedShape - }).run() + val fut = RunnableGraph + .fromGraph(GraphDSL.create(Sink.head[Int], p)(Keep.left) { implicit b => (sink, flow) => + import GraphDSL.Implicits._ + Source.single(0) ~> flow.in + flow.out ~> sink.in + ClosedShape + }) + .run() Await.result(fut, 3.seconds) should be(1) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala index 0087d5ad77..54b28ceb19 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphPartitionSpec.scala @@ -5,15 +5,14 @@ package akka.stream.scaladsl import akka.stream.testkit._ -import akka.stream.{ OverflowStrategy, ActorMaterializer, ActorMaterializerSettings, ClosedShape } +import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, ClosedShape, OverflowStrategy } import akka.stream.testkit.scaladsl.StreamTestKit._ import scala.concurrent.Await import scala.concurrent.duration._ class GraphPartitionSpec extends StreamSpec { - val settings = 
ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -22,18 +21,21 @@ class GraphPartitionSpec extends StreamSpec { "partition to three subscribers" in assertAllStagesStopped { - val (s1, s2, s3) = RunnableGraph.fromGraph(GraphDSL.create(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { implicit b => (sink1, sink2, sink3) => - val partition = b.add(Partition[Int](3, { - case g if (g > 3) => 0 - case l if (l < 3) => 1 - case e if (e == 3) => 2 - })) - Source(List(1, 2, 3, 4, 5)) ~> partition.in - partition.out(0) ~> sink1.in - partition.out(1) ~> sink2.in - partition.out(2) ~> sink3.in - ClosedShape - }).run() + val (s1, s2, s3) = RunnableGraph + .fromGraph(GraphDSL.create(Sink.seq[Int], Sink.seq[Int], Sink.seq[Int])(Tuple3.apply) { + implicit b => (sink1, sink2, sink3) => + val partition = b.add(Partition[Int](3, { + case g if (g > 3) => 0 + case l if (l < 3) => 1 + case e if (e == 3) => 2 + })) + Source(List(1, 2, 3, 4, 5)) ~> partition.in + partition.out(0) ~> sink1.in + partition.out(1) ~> sink2.in + partition.out(2) ~> sink3.in + ClosedShape + }) + .run() s1.futureValue.toSet should ===(Set(4, 5)) s2.futureValue.toSet should ===(Set(1, 2)) @@ -45,16 +47,18 @@ class GraphPartitionSpec extends StreamSpec { val c1 = TestSubscriber.probe[String]() val c2 = TestSubscriber.probe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(Partition[String](2, { - case s if (s.length > 4) => 0 - case _ => 1 - })) - Source(List("this", "is", "just", "another", "test")) ~> partition.in - partition.out(0) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val partition = b.add(Partition[String](2, { + case s if (s.length > 4) => 0 + 
case _ => 1 + })) + Source(List("this", "is", "just", "another", "test")) ~> partition.in + partition.out(0) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() c1.request(1) c2.request(4) @@ -72,13 +76,15 @@ class GraphPartitionSpec extends StreamSpec { val c1 = TestSubscriber.probe[Int]() val c2 = TestSubscriber.probe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(Partition[Int](2, { case l if l < 6 => 0; case _ => 1 })) - Source(List(6, 3)) ~> partition.in - partition.out(0) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val partition = b.add(Partition[Int](2, { case l if l < 6 => 0; case _ => 1 })) + Source(List(6, 3)) ~> partition.in + partition.out(0) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() c1.request(1) c1.expectNoMsg(1.seconds) @@ -94,13 +100,15 @@ class GraphPartitionSpec extends StreamSpec { val c1 = TestSubscriber.probe[Int]() val c2 = TestSubscriber.probe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(new Partition[Int](2, { case l if l < 6 => 0; case _ => 1 }, false)) - Source.fromPublisher(p1.getPublisher) ~> partition.in - partition.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val partition = b.add(new Partition[Int](2, { case l if l < 6 => 0; case _ => 1 }, false)) + Source.fromPublisher(p1.getPublisher) ~> partition.in + partition.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> 
Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val p1Sub = p1.expectSubscription() val sub1 = c1.expectSubscription() @@ -125,13 +133,15 @@ class GraphPartitionSpec extends StreamSpec { val c1 = TestSubscriber.probe[Int]() val c2 = TestSubscriber.probe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(new Partition[Int](2, { case l if l < 6 => 0; case _ => 1 }, true)) - Source.fromPublisher(p1.getPublisher) ~> partition.in - partition.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val partition = b.add(new Partition[Int](2, { case l if l < 6 => 0; case _ => 1 }, true)) + Source.fromPublisher(p1.getPublisher) ~> partition.in + partition.out(0) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Flow[Int].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val p1Sub = p1.expectSubscription() val sub1 = c1.expectSubscription() @@ -172,13 +182,15 @@ class GraphPartitionSpec extends StreamSpec { val c1 = TestSubscriber.probe[Int]() val c2 = TestSubscriber.probe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(Partition[Int](2, { case l if l < 6 => 0; case _ => 1 })) - Source(List(6)) ~> partition.in - partition.out(0) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val partition = b.add(Partition[Int](2, { case l if l < 6 => 0; case _ => 1 })) + Source(List(6)) ~> partition.in + partition.out(0) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() c1.request(1) c1.expectNoMsg(1.second) @@ -192,16 
+204,20 @@ class GraphPartitionSpec extends StreamSpec { val c1 = TestSubscriber.probe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val partition = b.add(Partition[Int](2, { case l if l < 0 => -1; case _ => 0 })) - Source(List(-3)) ~> partition.in - partition.out(0) ~> Sink.fromSubscriber(c1) - partition.out(1) ~> Sink.ignore - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val partition = b.add(Partition[Int](2, { case l if l < 0 => -1; case _ => 0 })) + Source(List(-3)) ~> partition.in + partition.out(0) ~> Sink.fromSubscriber(c1) + partition.out(1) ~> Sink.ignore + ClosedShape + }) + .run() c1.request(1) - c1.expectError(Partition.PartitionOutOfBoundsException("partitioner must return an index in the range [0,1]. returned: [-1] for input [java.lang.Integer].")) + c1.expectError( + Partition.PartitionOutOfBoundsException( + "partitioner must return an index in the range [0,1]. returned: [-1] for input [java.lang.Integer].")) } } @@ -223,7 +239,11 @@ class GraphPartitionSpec extends StreamSpec { val pub = TestPublisher.probe[Int]() val odd = TestSubscriber.probe[Int]() val even = TestSubscriber.probe[Int]() - Source.fromPublisher(pub.getPublisher).divertTo(Sink.fromSubscriber(odd), _ % 2 != 0).to(Sink.fromSubscriber(even)).run() + Source + .fromPublisher(pub.getPublisher) + .divertTo(Sink.fromSubscriber(odd), _ % 2 != 0) + .to(Sink.fromSubscriber(even)) + .run() even.request(1) pub.sendNext(2) even.expectNext(2) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala index 241a4565af..fc873d13e7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphStageTimersSpec.scala @@ -5,9 +5,9 @@ package akka.stream.scaladsl import akka.actor.ActorRef -import akka.stream.{ Attributes, 
ActorMaterializer } +import akka.stream.{ ActorMaterializer, Attributes } import akka.stream.impl.fusing.GraphStages.SimpleLinearGraphStage -import akka.stream.stage.{ TimerGraphStageLogic, OutHandler, AsyncCallback, InHandler } +import akka.stream.stage.{ AsyncCallback, InHandler, OutHandler, TimerGraphStageLogic } import akka.testkit.TestDuration import scala.concurrent.Promise @@ -44,7 +44,7 @@ class GraphStageTimersSpec extends StreamSpec { class TestStage(probe: ActorRef, sideChannel: SideChannel) extends SimpleLinearGraphStage[Int] { override def createLogic(inheritedAttributes: Attributes) = new TimerGraphStageLogic(shape) { - val tickCount = Iterator from 1 + val tickCount = Iterator.from(1) setHandler(in, new InHandler { override def onPush() = push(out, grab(in)) @@ -146,7 +146,7 @@ class GraphStageTimersSpec extends StreamSpec { val seq = receiveWhile(2.seconds) { case t: Tick => t } - seq should have length 5 + (seq should have).length(5) expectNoMsg(1.second) driver.stopStage() @@ -199,21 +199,24 @@ class GraphStageTimersSpec extends StreamSpec { val upstream = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[Int]() - Source.fromPublisher(upstream).via(new SimpleLinearGraphStage[Int] { - override def createLogic(inheritedAttributes: Attributes) = new TimerGraphStageLogic(shape) { - override def preStart(): Unit = scheduleOnce("tick", 100.millis) + Source + .fromPublisher(upstream) + .via(new SimpleLinearGraphStage[Int] { + override def createLogic(inheritedAttributes: Attributes) = new TimerGraphStageLogic(shape) { + override def preStart(): Unit = scheduleOnce("tick", 100.millis) - setHandler(in, new InHandler { - override def onPush() = () // Ingore - }) + setHandler(in, new InHandler { + override def onPush() = () // Ingore + }) - setHandler(out, new OutHandler { - override def onPull(): Unit = pull(in) - }) + setHandler(out, new OutHandler { + override def onPull(): Unit = pull(in) + }) - override def onTimer(timerKey: Any) = throw 
exception - } - }).runWith(Sink.fromSubscriber(downstream)) + override def onTimer(timerKey: Any) = throw exception + } + }) + .runWith(Sink.fromSubscriber(downstream)) downstream.request(1) downstream.expectError(exception) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala index 25f3da1e74..d16a3760bd 100755 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipSpec.scala @@ -5,14 +5,13 @@ package akka.stream.scaladsl import scala.concurrent.duration._ -import akka.stream.{ ClosedShape, OverflowStrategy, ActorMaterializerSettings, ActorMaterializer } +import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, ClosedShape, OverflowStrategy } import akka.stream.testkit._ import akka.stream.testkit.scaladsl.StreamTestKit._ class GraphUnzipSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -23,13 +22,15 @@ class GraphUnzipSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val unzip = b.add(Unzip[Int, String]()) - Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out1 ~> Flow[String].buffer(16, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) - unzip.out0 ~> Flow[Int].buffer(16, OverflowStrategy.backpressure).map(_ * 2) ~> Sink.fromSubscriber(c1) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val unzip = b.add(Unzip[Int, String]()) + Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in + unzip.out1 ~> Flow[String].buffer(16, 
OverflowStrategy.backpressure) ~> Sink.fromSubscriber(c2) + unzip.out0 ~> Flow[Int].buffer(16, OverflowStrategy.backpressure).map(_ * 2) ~> Sink.fromSubscriber(c1) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() val sub2 = c2.expectSubscription() @@ -53,13 +54,15 @@ class GraphUnzipSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val unzip = b.add(Unzip[Int, String]()) - Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(c1) - unzip.out1 ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val unzip = b.add(Unzip[Int, String]()) + Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in + unzip.out0 ~> Sink.fromSubscriber(c1) + unzip.out1 ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() val sub2 = c2.expectSubscription() @@ -75,13 +78,15 @@ class GraphUnzipSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val unzip = b.add(Unzip[Int, String]()) - Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(c1) - unzip.out1 ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val unzip = b.add(Unzip[Int, String]()) + Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in + unzip.out0 ~> Sink.fromSubscriber(c1) + unzip.out1 ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() val sub2 = c2.expectSubscription() @@ -97,13 +102,15 @@ class GraphUnzipSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val unzip = 
b.add(Unzip[Int, String]()) - Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(c1) - unzip.out1 ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val unzip = b.add(Unzip[Int, String]()) + Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in + unzip.out0 ~> Sink.fromSubscriber(c1) + unzip.out1 ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() val sub2 = c2.expectSubscription() @@ -120,13 +127,15 @@ class GraphUnzipSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val unzip = b.add(Unzip[Int, String]()) - Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(c1) - unzip.out1 ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val unzip = b.add(Unzip[Int, String]()) + Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in + unzip.out0 ~> Sink.fromSubscriber(c1) + unzip.out1 ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() val sub2 = c2.expectSubscription() @@ -144,13 +153,15 @@ class GraphUnzipSpec extends StreamSpec { val c1 = TestSubscriber.manualProbe[Int]() val c2 = TestSubscriber.manualProbe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val unzip = b.add(Unzip[Int, String]()) - Source.fromPublisher(p1.getPublisher) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(c1) - unzip.out1 ~> Sink.fromSubscriber(c2) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val unzip = b.add(Unzip[Int, String]()) + Source.fromPublisher(p1.getPublisher) ~> unzip.in + unzip.out0 ~> Sink.fromSubscriber(c1) + unzip.out1 ~> Sink.fromSubscriber(c2) + ClosedShape + }) + .run() val p1Sub = p1.expectSubscription() 
val sub1 = c1.expectSubscription() @@ -171,15 +182,17 @@ class GraphUnzipSpec extends StreamSpec { "work with zip" in assertAllStagesStopped { val c1 = TestSubscriber.manualProbe[(Int, String)]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val zip = b.add(Zip[Int, String]()) - val unzip = b.add(Unzip[Int, String]()) - Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in - unzip.out0 ~> zip.in0 - unzip.out1 ~> zip.in1 - zip.out ~> Sink.fromSubscriber(c1) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val zip = b.add(Zip[Int, String]()) + val unzip = b.add(Unzip[Int, String]()) + Source(List(1 -> "a", 2 -> "b", 3 -> "c")) ~> unzip.in + unzip.out0 ~> zip.in0 + unzip.out1 ~> zip.in1 + zip.out ~> Sink.fromSubscriber(c1) + ClosedShape + }) + .run() val sub1 = c1.expectSubscription() sub1.request(5) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala index 2d216ab5db..ccbe2a2895 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphUnzipWithSpec.scala @@ -17,8 +17,7 @@ class GraphUnzipWithSpec extends StreamSpec { import GraphDSL.Implicits._ - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -49,15 +48,17 @@ class GraphUnzipWithSpec extends StreamSpec { val leftSubscriber = TestSubscriber.probe[LeftOutput]() val rightSubscriber = TestSubscriber.probe[RightOutput]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val f = fixture(b) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val f = fixture(b) - Source.fromPublisher(p) ~> f.in - f.left ~> 
Sink.fromSubscriber(leftSubscriber) - f.right ~> Sink.fromSubscriber(rightSubscriber) + Source.fromPublisher(p) ~> f.in + f.left ~> Sink.fromSubscriber(leftSubscriber) + f.right ~> Sink.fromSubscriber(rightSubscriber) - ClosedShape - }).run() + ClosedShape + }) + .run() (leftSubscriber, rightSubscriber) } @@ -98,15 +99,17 @@ class GraphUnzipWithSpec extends StreamSpec { val leftProbe = TestSubscriber.manualProbe[LeftOutput]() val rightProbe = TestSubscriber.manualProbe[RightOutput]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val unzip = b.add(UnzipWith(f)) - Source(1 to 4) ~> unzip.in + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val unzip = b.add(UnzipWith(f)) + Source(1 to 4) ~> unzip.in - unzip.out0 ~> Flow[LeftOutput].buffer(4, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(leftProbe) - unzip.out1 ~> Flow[RightOutput].buffer(4, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(rightProbe) + unzip.out0 ~> Flow[LeftOutput].buffer(4, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(leftProbe) + unzip.out1 ~> Flow[RightOutput].buffer(4, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(rightProbe) - ClosedShape - }).run() + ClosedShape + }) + .run() val leftSubscription = leftProbe.expectSubscription() val rightSubscription = rightProbe.expectSubscription() @@ -142,22 +145,23 @@ class GraphUnzipWithSpec extends StreamSpec { } "work in the sad case" in { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 1, maxSize = 1) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 1, maxSize = 1) val leftProbe = TestSubscriber.manualProbe[LeftOutput]() val rightProbe = TestSubscriber.manualProbe[RightOutput]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val unzip = b.add(UnzipWith[Int, Int, String]((b: Int) => (1 / b, 1 + "/" + b))) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val unzip = b.add(UnzipWith[Int, Int, 
String]((b: Int) => (1 / b, 1 + "/" + b))) - Source(-2 to 2) ~> unzip.in + Source(-2 to 2) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(leftProbe) - unzip.out1 ~> Sink.fromSubscriber(rightProbe) + unzip.out0 ~> Sink.fromSubscriber(leftProbe) + unzip.out1 ~> Sink.fromSubscriber(rightProbe) - ClosedShape - }).run() + ClosedShape + }) + .run() val leftSubscription = leftProbe.expectSubscription() val rightSubscription = rightProbe.expectSubscription() @@ -195,17 +199,19 @@ class GraphUnzipWithSpec extends StreamSpec { case class Person(name: String, surname: String, int: Int) - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val unzip = b.add(UnzipWith((a: Person) => Person.unapply(a).get)) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val unzip = b.add(UnzipWith((a: Person) => Person.unapply(a).get)) - Source.single(Person("Caplin", "Capybara", 3)) ~> unzip.in + Source.single(Person("Caplin", "Capybara", 3)) ~> unzip.in - unzip.out0 ~> Sink.fromSubscriber(probe0) - unzip.out1 ~> Sink.fromSubscriber(probe1) - unzip.out2 ~> Sink.fromSubscriber(probe2) + unzip.out0 ~> Sink.fromSubscriber(probe0) + unzip.out1 ~> Sink.fromSubscriber(probe1) + unzip.out2 ~> Sink.fromSubscriber(probe2) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription0 = probe0.expectSubscription() val subscription1 = probe1.expectSubscription() @@ -231,58 +237,71 @@ class GraphUnzipWithSpec extends StreamSpec { val probe15 = TestSubscriber.manualProbe[String]() val probe21 = TestSubscriber.manualProbe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val split22 = (a: (List[Int])) => + (a(0), + a(0).toString, + a(1), + a(1).toString, + a(2), + a(2).toString, + a(3), + a(3).toString, + a(4), + a(4).toString, + a(5), + a(5).toString, + a(6), + a(6).toString, + a(7), + a(7).toString, + a(8), + a(8).toString, + a(9), + a(9).toString, + a(10), + a(10).toString) - val split22 
= (a: (List[Int])) => - (a(0), a(0).toString, - a(1), a(1).toString, - a(2), a(2).toString, - a(3), a(3).toString, - a(4), a(4).toString, - a(5), a(5).toString, - a(6), a(6).toString, - a(7), a(7).toString, - a(8), a(8).toString, - a(9), a(9).toString, - a(10), a(10).toString) + // odd input ports will be Int, even input ports will be String + val unzip = b.add(UnzipWith(split22)) - // odd input ports will be Int, even input ports will be String - val unzip = b.add(UnzipWith(split22)) + Source.single((0 to 21).toList) ~> unzip.in - Source.single((0 to 21).toList) ~> unzip.in + def createSink[T](o: Outlet[T]) = + o ~> Flow[T].buffer(1, OverflowStrategy.backpressure) ~> Sink.fromSubscriber( + TestSubscriber.manualProbe[T]()) - def createSink[T](o: Outlet[T]) = - o ~> Flow[T].buffer(1, OverflowStrategy.backpressure) ~> Sink.fromSubscriber(TestSubscriber.manualProbe[T]()) + unzip.out0 ~> Sink.fromSubscriber(probe0) + createSink(unzip.out1) + createSink(unzip.out2) + createSink(unzip.out3) + createSink(unzip.out4) - unzip.out0 ~> Sink.fromSubscriber(probe0) - createSink(unzip.out1) - createSink(unzip.out2) - createSink(unzip.out3) - createSink(unzip.out4) + unzip.out5 ~> Sink.fromSubscriber(probe5) + createSink(unzip.out6) + createSink(unzip.out7) + createSink(unzip.out8) + createSink(unzip.out9) - unzip.out5 ~> Sink.fromSubscriber(probe5) - createSink(unzip.out6) - createSink(unzip.out7) - createSink(unzip.out8) - createSink(unzip.out9) + unzip.out10 ~> Sink.fromSubscriber(probe10) + createSink(unzip.out11) + createSink(unzip.out12) + createSink(unzip.out13) + createSink(unzip.out14) - unzip.out10 ~> Sink.fromSubscriber(probe10) - createSink(unzip.out11) - createSink(unzip.out12) - createSink(unzip.out13) - createSink(unzip.out14) + unzip.out15 ~> Sink.fromSubscriber(probe15) + createSink(unzip.out16) + createSink(unzip.out17) + createSink(unzip.out18) + createSink(unzip.out19) + createSink(unzip.out20) - unzip.out15 ~> Sink.fromSubscriber(probe15) - 
createSink(unzip.out16) - createSink(unzip.out17) - createSink(unzip.out18) - createSink(unzip.out19) - createSink(unzip.out20) + unzip.out21 ~> Sink.fromSubscriber(probe21) - unzip.out21 ~> Sink.fromSubscriber(probe21) - - ClosedShape - }).run() + ClosedShape + }) + .run() probe0.expectSubscription().request(1) probe5.expectSubscription().request(1) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphWireTapSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphWireTapSpec.scala index 340b04a0d5..12ac5473ef 100755 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphWireTapSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphWireTapSpec.scala @@ -11,8 +11,7 @@ import akka.stream.testkit.scaladsl.TestSink class GraphWireTapSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestSpec.scala index e5379b4595..d138006e8e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestSpec.scala @@ -18,13 +18,13 @@ import scala.concurrent.duration._ import scala.language.postfixOps class GraphZipLatestSpec - extends TestKit(ActorSystem("ZipLatestSpec")) - with WordSpecLike - with Matchers - with BeforeAndAfterAll - with PropertyChecks - with GivenWhenThen - with ScalaFutures { + extends TestKit(ActorSystem("ZipLatestSpec")) + with WordSpecLike + with Matchers + with BeforeAndAfterAll + with PropertyChecks + with GivenWhenThen + with ScalaFutures { implicit val materializer = ActorMaterializer() override def afterAll = 
TestKit.shutdownActorSystem(system) implicit val patience = PatienceConfig(5 seconds) @@ -312,20 +312,15 @@ class GraphZipLatestSpec private def testGraph[A, B] = RunnableGraph - .fromGraph( - GraphDSL - .create( - TestSink.probe[(A, B)], - TestSource.probe[A], - TestSource.probe[B])(Tuple3.apply) { implicit b => (ts, as, bs) => - import GraphDSL.Implicits._ - val zipLatest = b.add(new ZipLatest[A, B]()) - as ~> zipLatest.in0 - bs ~> zipLatest.in1 - zipLatest.out ~> ts - ClosedShape - } - ) + .fromGraph(GraphDSL.create(TestSink.probe[(A, B)], TestSource.probe[A], TestSource.probe[B])(Tuple3.apply) { + implicit b => (ts, as, bs) => + import GraphDSL.Implicits._ + val zipLatest = b.add(new ZipLatest[A, B]()) + as ~> zipLatest.in0 + bs ~> zipLatest.in1 + zipLatest.out ~> ts + ClosedShape + }) .run() } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala index aa786ac9a9..67d26552a5 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipLatestWithSpec.scala @@ -26,10 +26,7 @@ class GraphZipLatestWithSpec extends TwoStreamsSetup { override def setup(p1: Publisher[Int], p2: Publisher[Int]) = { val subscriber = TestSubscriber.probe[Outputs]() - Source - .fromPublisher(p1) - .zipLatestWith(Source.fromPublisher(p2))(_ + _) - .runWith(Sink.fromSubscriber(subscriber)) + Source.fromPublisher(p1).zipLatestWith(Source.fromPublisher(p2))(_ + _).runWith(Sink.fromSubscriber(subscriber)) subscriber } @@ -175,29 +172,29 @@ class GraphZipLatestWithSpec extends TwoStreamsSetup { RunnableGraph .fromGraph(GraphDSL.create() { implicit b => val sum22 = (v1: Int, - v2: String, - v3: Int, - v4: String, - v5: Int, - v6: String, - v7: Int, - v8: String, - v9: Int, - v10: String, - v11: Int, - v12: String, - v13: Int, - v14: String, - v15: Int, - v16: String, - 
v17: Int, - v18: String, - v19: Int, - v20: String, - v21: Int, - v22: String) => + v2: String, + v3: Int, + v4: String, + v5: Int, + v6: String, + v7: Int, + v8: String, + v9: Int, + v10: String, + v11: Int, + v12: String, + v13: Int, + v14: String, + v15: Int, + v16: String, + v17: Int, + v18: String, + v19: Int, + v20: String, + v21: Int, + v22: String) => v1 + v2 + v3 + v4 + v5 + v6 + v7 + v8 + v9 + v10 + - v11 + v12 + v13 + v14 + v15 + v16 + v17 + v18 + v19 + v20 + v21 + v22 + v11 + v12 + v13 + v14 + v15 + v16 + v17 + v18 + v19 + v20 + v21 + v22 // odd input ports will be Int, even input ports will be String val zip = b.add(ZipLatestWith(sum22)) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala index 69479598c3..dd552dfea5 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipNSpec.scala @@ -29,16 +29,18 @@ class GraphZipNSpec extends TwoStreamsSetup { "work in the happy case" in assertAllStagesStopped { val probe = TestSubscriber.manualProbe[immutable.Seq[Int]]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val zipN = b.add(ZipN[Int](2)) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val zipN = b.add(ZipN[Int](2)) - Source(1 to 4) ~> zipN.in(0) - Source(2 to 5) ~> zipN.in(1) + Source(1 to 4) ~> zipN.in(0) + Source(2 to 5) ~> zipN.in(1) - zipN.out ~> Sink.fromSubscriber(probe) + zipN.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -59,15 +61,17 @@ class GraphZipNSpec extends TwoStreamsSetup { val upstream2 = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[immutable.Seq[Int]]() - RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => - val zipN = b.add(ZipN[Int](2)) + 
RunnableGraph + .fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => + val zipN = b.add(ZipN[Int](2)) - Source.fromPublisher(upstream1) ~> zipN.in(0) - Source.fromPublisher(upstream2) ~> zipN.in(1) - zipN.out ~> out + Source.fromPublisher(upstream1) ~> zipN.in(0) + Source.fromPublisher(upstream2) ~> zipN.in(1) + zipN.out ~> out - ClosedShape - }).run() + ClosedShape + }) + .run() upstream1.sendNext(1) upstream1.sendNext(2) @@ -84,15 +88,17 @@ class GraphZipNSpec extends TwoStreamsSetup { val upstream2 = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[immutable.Seq[Int]]() - RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => - val zipN = b.add(ZipN[Int](2)) + RunnableGraph + .fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => + val zipN = b.add(ZipN[Int](2)) - Source.fromPublisher(upstream1) ~> zipN.in(0) - Source.fromPublisher(upstream2) ~> zipN.in(1) - zipN.out ~> out + Source.fromPublisher(upstream1) ~> zipN.in(0) + Source.fromPublisher(upstream2) ~> zipN.in(1) + zipN.out ~> out - ClosedShape - }).run() + ClosedShape + }) + .run() downstream.request(1) @@ -110,15 +116,17 @@ class GraphZipNSpec extends TwoStreamsSetup { val upstream2 = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[immutable.Seq[Int]]() - RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => - val zipN = b.add(ZipN[Int](2)) + RunnableGraph + .fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => + val zipN = b.add(ZipN[Int](2)) - Source.fromPublisher(upstream1) ~> zipN.in(0) - Source.fromPublisher(upstream2) ~> zipN.in(1) - zipN.out ~> out + Source.fromPublisher(upstream1) ~> zipN.in(0) + Source.fromPublisher(upstream2) ~> zipN.in(1) + zipN.out ~> out - ClosedShape - }).run() + ClosedShape + }) + .run() upstream1.sendNext(1) upstream2.sendNext(2) @@ -135,15 +143,17 @@ class GraphZipNSpec 
extends TwoStreamsSetup { val upstream2 = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[immutable.Seq[Int]]() - RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => - val zipN = b.add(ZipN[Int](2)) + RunnableGraph + .fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => + val zipN = b.add(ZipN[Int](2)) - Source.fromPublisher(upstream1) ~> zipN.in(0) - Source.fromPublisher(upstream2) ~> zipN.in(1) - zipN.out ~> out + Source.fromPublisher(upstream1) ~> zipN.in(0) + Source.fromPublisher(upstream2) ~> zipN.in(1) + zipN.out ~> out - ClosedShape - }).run() + ClosedShape + }) + .run() upstream1.sendNext(1) upstream1.sendNext(2) @@ -161,15 +171,17 @@ class GraphZipNSpec extends TwoStreamsSetup { val upstream2 = TestPublisher.probe[Int]() val downstream = TestSubscriber.probe[immutable.Seq[Int]]() - RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => - val zipN = b.add(ZipN[Int](2)) + RunnableGraph + .fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => + val zipN = b.add(ZipN[Int](2)) - Source.fromPublisher(upstream1) ~> zipN.in(0) - Source.fromPublisher(upstream2) ~> zipN.in(1) - zipN.out ~> out + Source.fromPublisher(upstream1) ~> zipN.in(0) + Source.fromPublisher(upstream2) ~> zipN.in(1) + zipN.out ~> out - ClosedShape - }).run() + ClosedShape + }) + .run() downstream.ensureSubscription() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala index 13bb3eae0b..fb31e828f1 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipSpec.scala @@ -29,16 +29,18 @@ class GraphZipSpec extends TwoStreamsSetup { "work in the happy case" in assertAllStagesStopped { val probe = TestSubscriber.manualProbe[(Int, String)]() - 
RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val zip = b.add(Zip[Int, String]()) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val zip = b.add(Zip[Int, String]()) - Source(1 to 4) ~> zip.in0 - Source(List("A", "B", "C", "D", "E", "F")) ~> zip.in1 + Source(1 to 4) ~> zip.in0 + Source(List("A", "B", "C", "D", "E", "F")) ~> zip.in1 - zip.out ~> Sink.fromSubscriber(probe) + zip.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -58,15 +60,17 @@ class GraphZipSpec extends TwoStreamsSetup { val upstream1 = TestPublisher.probe[Int]() val upstream2 = TestPublisher.probe[String]() - val completed = RunnableGraph.fromGraph(GraphDSL.create(Sink.ignore) { implicit b => out => - val zip = b.add(Zip[Int, String]()) + val completed = RunnableGraph + .fromGraph(GraphDSL.create(Sink.ignore) { implicit b => out => + val zip = b.add(Zip[Int, String]()) - Source.fromPublisher(upstream1) ~> zip.in0 - Source.fromPublisher(upstream2) ~> zip.in1 - zip.out ~> out + Source.fromPublisher(upstream1) ~> zip.in0 + Source.fromPublisher(upstream2) ~> zip.in1 + zip.out ~> out - ClosedShape - }).run() + ClosedShape + }) + .run() upstream1.sendNext(1) upstream1.sendNext(2) @@ -83,15 +87,17 @@ class GraphZipSpec extends TwoStreamsSetup { val upstream2 = TestPublisher.probe[String]() val downstream = TestSubscriber.probe[(Int, String)]() - RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => - val zip = b.add(Zip[Int, String]()) + RunnableGraph + .fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => + val zip = b.add(Zip[Int, String]()) - Source.fromPublisher(upstream1) ~> zip.in0 - Source.fromPublisher(upstream2) ~> zip.in1 - zip.out ~> out + Source.fromPublisher(upstream1) ~> zip.in0 + Source.fromPublisher(upstream2) ~> zip.in1 + zip.out ~> out - ClosedShape - }).run() + ClosedShape + }) + .run() 
downstream.request(1) @@ -109,15 +115,17 @@ class GraphZipSpec extends TwoStreamsSetup { val upstream2 = TestPublisher.probe[String]() val downstream = TestSubscriber.probe[(Int, String)]() - RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => - val zip = b.add(Zip[Int, String]()) + RunnableGraph + .fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => + val zip = b.add(Zip[Int, String]()) - Source.fromPublisher(upstream1) ~> zip.in0 - Source.fromPublisher(upstream2) ~> zip.in1 - zip.out ~> out + Source.fromPublisher(upstream1) ~> zip.in0 + Source.fromPublisher(upstream2) ~> zip.in1 + zip.out ~> out - ClosedShape - }).run() + ClosedShape + }) + .run() upstream1.sendNext(1) upstream2.sendNext("A") @@ -134,15 +142,17 @@ class GraphZipSpec extends TwoStreamsSetup { val upstream2 = TestPublisher.probe[String]() val downstream = TestSubscriber.probe[(Int, String)]() - RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => - val zip = b.add(Zip[Int, String]()) + RunnableGraph + .fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => + val zip = b.add(Zip[Int, String]()) - Source.fromPublisher(upstream1) ~> zip.in0 - Source.fromPublisher(upstream2) ~> zip.in1 - zip.out ~> out + Source.fromPublisher(upstream1) ~> zip.in0 + Source.fromPublisher(upstream2) ~> zip.in1 + zip.out ~> out - ClosedShape - }).run() + ClosedShape + }) + .run() upstream1.sendNext(1) upstream1.sendNext(2) @@ -160,15 +170,17 @@ class GraphZipSpec extends TwoStreamsSetup { val upstream2 = TestPublisher.probe[String]() val downstream = TestSubscriber.probe[(Int, String)]() - RunnableGraph.fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => - val zip = b.add(Zip[Int, String]()) + RunnableGraph + .fromGraph(GraphDSL.create(Sink.fromSubscriber(downstream)) { implicit b => out => + val zip = b.add(Zip[Int, String]()) - 
Source.fromPublisher(upstream1) ~> zip.in0 - Source.fromPublisher(upstream2) ~> zip.in1 - zip.out ~> out + Source.fromPublisher(upstream1) ~> zip.in0 + Source.fromPublisher(upstream2) ~> zip.in1 + zip.out ~> out - ClosedShape - }).run() + ClosedShape + }) + .run() downstream.ensureSubscription() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithNSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithNSpec.scala index 44d5ec1247..bcac29b538 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithNSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithNSpec.scala @@ -27,15 +27,17 @@ class GraphZipWithNSpec extends TwoStreamsSetup { "work in the happy case" in { val probe = TestSubscriber.manualProbe[Outputs]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val zip = b.add(ZipWithN((_: immutable.Seq[Int]).sum)(2)) - Source(1 to 4) ~> zip.in(0) - Source(10 to 40 by 10) ~> zip.in(1) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val zip = b.add(ZipWithN((_: immutable.Seq[Int]).sum)(2)) + Source(1 to 4) ~> zip.in(0) + Source(10 to 40 by 10) ~> zip.in(1) - zip.out ~> Sink.fromSubscriber(probe) + zip.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -54,16 +56,18 @@ class GraphZipWithNSpec extends TwoStreamsSetup { "work in the sad case" in { val probe = TestSubscriber.manualProbe[Outputs]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val zip = b.add(ZipWithN((_: immutable.Seq[Int]).foldLeft(1)(_ / _))(2)) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val zip = b.add(ZipWithN((_: immutable.Seq[Int]).foldLeft(1)(_ / _))(2)) - Source(1 to 4) ~> zip.in(0) - Source(-2 to 2) ~> zip.in(1) + Source(1 to 4) ~> zip.in(0) + Source(-2 to 2) ~> zip.in(1) - zip.out ~> Sink.fromSubscriber(probe) + zip.out ~> 
Sink.fromSubscriber(probe) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -117,17 +121,19 @@ class GraphZipWithNSpec extends TwoStreamsSetup { "work with 3 inputs" in { val probe = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val zip = b.add(ZipWithN((_: immutable.Seq[Int]).sum)(3)) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val zip = b.add(ZipWithN((_: immutable.Seq[Int]).sum)(3)) - Source.single(1) ~> zip.in(0) - Source.single(2) ~> zip.in(1) - Source.single(3) ~> zip.in(2) + Source.single(1) ~> zip.in(0) + Source.single(2) ~> zip.in(1) + Source.single(3) ~> zip.in(2) - zip.out ~> Sink.fromSubscriber(probe) + zip.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -140,17 +146,19 @@ class GraphZipWithNSpec extends TwoStreamsSetup { "work with 30 inputs" in { val probe = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val zip = b.add(ZipWithN((_: immutable.Seq[Int]).sum)(30)) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val zip = b.add(ZipWithN((_: immutable.Seq[Int]).sum)(30)) - (0 to 29).foreach { - n => Source.single(n) ~> zip.in(n) - } + (0 to 29).foreach { n => + Source.single(n) ~> zip.in(n) + } - zip.out ~> Sink.fromSubscriber(probe) + zip.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription = probe.expectSubscription() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala index 3129ecba6d..ceb88991d3 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/GraphZipWithSpec.scala @@ -26,15 +26,17 @@ class GraphZipWithSpec 
extends TwoStreamsSetup { "work in the happy case" in { val probe = TestSubscriber.manualProbe[Outputs]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val zip = b.add(ZipWith((_: Int) + (_: Int))) - Source(1 to 4) ~> zip.in0 - Source(10 to 40 by 10) ~> zip.in1 + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val zip = b.add(ZipWith((_: Int) + (_: Int))) + Source(1 to 4) ~> zip.in0 + Source(10 to 40 by 10) ~> zip.in1 - zip.out ~> Sink.fromSubscriber(probe) + zip.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -53,16 +55,18 @@ class GraphZipWithSpec extends TwoStreamsSetup { "work in the sad case" in { val probe = TestSubscriber.manualProbe[Outputs]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val zip = b.add(ZipWith[Int, Int, Int]((_: Int) / (_: Int))) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val zip = b.add(ZipWith[Int, Int, Int]((_: Int) / (_: Int))) - Source(1 to 4) ~> zip.in0 - Source(-2 to 2) ~> zip.in1 + Source(1 to 4) ~> zip.in0 + Source(-2 to 2) ~> zip.in1 - zip.out ~> Sink.fromSubscriber(probe) + zip.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription = probe.expectSubscription() @@ -118,17 +122,19 @@ class GraphZipWithSpec extends TwoStreamsSetup { case class Person(name: String, surname: String, int: Int) - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - val zip = b.add(ZipWith(Person.apply _)) + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val zip = b.add(ZipWith(Person.apply _)) - Source.single("Caplin") ~> zip.in0 - Source.single("Capybara") ~> zip.in1 - Source.single(3) ~> zip.in2 + Source.single("Caplin") ~> zip.in0 + Source.single("Capybara") ~> zip.in1 + Source.single(3) ~> zip.in2 - zip.out ~> Sink.fromSubscriber(probe) + zip.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + ClosedShape + }) + 
.run() val subscription = probe.expectSubscription() @@ -141,44 +147,65 @@ class GraphZipWithSpec extends TwoStreamsSetup { "work with up to 22 inputs" in { val probe = TestSubscriber.manualProbe[String]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - - val sum22 = (v1: Int, v2: String, v3: Int, v4: String, v5: Int, v6: String, v7: Int, v8: String, v9: Int, v10: String, - v11: Int, v12: String, v13: Int, v14: String, v15: Int, v16: String, v17: Int, v18: String, v19: Int, v20: String, v21: Int, v22: String) => - v1 + v2 + v3 + v4 + v5 + v6 + v7 + v8 + v9 + v10 + + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + val sum22 = (v1: Int, + v2: String, + v3: Int, + v4: String, + v5: Int, + v6: String, + v7: Int, + v8: String, + v9: Int, + v10: String, + v11: Int, + v12: String, + v13: Int, + v14: String, + v15: Int, + v16: String, + v17: Int, + v18: String, + v19: Int, + v20: String, + v21: Int, + v22: String) => + v1 + v2 + v3 + v4 + v5 + v6 + v7 + v8 + v9 + v10 + v11 + v12 + v13 + v14 + v15 + v16 + v17 + v18 + v19 + v20 + v21 + v22 - // odd input ports will be Int, even input ports will be String - val zip = b.add(ZipWith(sum22)) + // odd input ports will be Int, even input ports will be String + val zip = b.add(ZipWith(sum22)) - Source.single(1) ~> zip.in0 - Source.single(2).map(_.toString) ~> zip.in1 - Source.single(3) ~> zip.in2 - Source.single(4).map(_.toString) ~> zip.in3 - Source.single(5) ~> zip.in4 - Source.single(6).map(_.toString) ~> zip.in5 - Source.single(7) ~> zip.in6 - Source.single(8).map(_.toString) ~> zip.in7 - Source.single(9) ~> zip.in8 - Source.single(10).map(_.toString) ~> zip.in9 - Source.single(11) ~> zip.in10 - Source.single(12).map(_.toString) ~> zip.in11 - Source.single(13) ~> zip.in12 - Source.single(14).map(_.toString) ~> zip.in13 - Source.single(15) ~> zip.in14 - Source.single(16).map(_.toString) ~> zip.in15 - Source.single(17) ~> zip.in16 - Source.single(18).map(_.toString) ~> zip.in17 - Source.single(19) ~> 
zip.in18 - Source.single(20).map(_.toString) ~> zip.in19 - Source.single(21) ~> zip.in20 - Source.single(22).map(_.toString) ~> zip.in21 + Source.single(1) ~> zip.in0 + Source.single(2).map(_.toString) ~> zip.in1 + Source.single(3) ~> zip.in2 + Source.single(4).map(_.toString) ~> zip.in3 + Source.single(5) ~> zip.in4 + Source.single(6).map(_.toString) ~> zip.in5 + Source.single(7) ~> zip.in6 + Source.single(8).map(_.toString) ~> zip.in7 + Source.single(9) ~> zip.in8 + Source.single(10).map(_.toString) ~> zip.in9 + Source.single(11) ~> zip.in10 + Source.single(12).map(_.toString) ~> zip.in11 + Source.single(13) ~> zip.in12 + Source.single(14).map(_.toString) ~> zip.in13 + Source.single(15) ~> zip.in14 + Source.single(16).map(_.toString) ~> zip.in15 + Source.single(17) ~> zip.in16 + Source.single(18).map(_.toString) ~> zip.in17 + Source.single(19) ~> zip.in18 + Source.single(20).map(_.toString) ~> zip.in19 + Source.single(21) ~> zip.in20 + Source.single(22).map(_.toString) ~> zip.in21 - zip.out ~> Sink.fromSubscriber(probe) + zip.out ~> Sink.fromSubscriber(probe) - ClosedShape - }).run() + ClosedShape + }) + .run() val subscription = probe.expectSubscription() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala index debe8f2ed6..37ca22b7cf 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HeadSinkSpec.scala @@ -13,8 +13,7 @@ import akka.stream.testkit.scaladsl.StreamTestKit._ class HeadSinkSpec extends StreamSpec with ScriptedTest { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) @@ -46,9 +45,9 @@ class HeadSinkSpec extends StreamSpec with ScriptedTest { "yield 
the first error" in assertAllStagesStopped { val ex = new RuntimeException("ex") - intercept[RuntimeException] { + (intercept[RuntimeException] { Await.result(Source.failed[Int](ex).runWith(Sink.head), 1.second) - } should be theSameInstanceAs (ex) + } should be).theSameInstanceAs(ex) } "yield NoSuchElementException for empty stream" in assertAllStagesStopped { @@ -72,9 +71,9 @@ class HeadSinkSpec extends StreamSpec with ScriptedTest { "yield the first error" in assertAllStagesStopped { val ex = new RuntimeException("ex") - intercept[RuntimeException] { + (intercept[RuntimeException] { Await.result(Source.failed[Int](ex).runWith(Sink.head), 1.second) - } should be theSameInstanceAs (ex) + } should be).theSameInstanceAs(ex) } "yield None for empty stream" in assertAllStagesStopped { @@ -84,8 +83,7 @@ class HeadSinkSpec extends StreamSpec with ScriptedTest { "fail on abrupt termination" in { val mat = ActorMaterializer() val source = TestPublisher.probe() - val f = Source.fromPublisher(source) - .runWith(Sink.headOption)(mat) + val f = Source.fromPublisher(source).runWith(Sink.headOption)(mat) mat.shutdown() // this one always fails with the AbruptTerminationException rather than the diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala index a5d2a38abf..cec00a4287 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/HubSpec.scala @@ -69,7 +69,11 @@ class HubSpec extends StreamSpec { val downstream = TestSubscriber.manualProbe[Int]() val sink = Sink.fromSubscriber(downstream).runWith(MergeHub.source[Int](3)) - Source(1 to 10).map { i => testActor ! i; i }.runWith(sink) + Source(1 to 10) + .map { i => + testActor ! 
i; i + } + .runWith(sink) val sub = downstream.expectSubscription() sub.request(1) @@ -132,7 +136,8 @@ class HubSpec extends StreamSpec { "work with long streams when consumer is slower" in assertAllStagesStopped { val (sink, result) = - MergeHub.source[Int](16) + MergeHub + .source[Int](16) .take(2000) .throttle(10, 1.millisecond, 200, ThrottleMode.shaping) .toMat(Sink.seq)(Keep.both) @@ -146,10 +151,7 @@ class HubSpec extends StreamSpec { "work with long streams if one of the producers is slower" in assertAllStagesStopped { val (sink, result) = - MergeHub.source[Int](16) - .take(2000) - .toMat(Sink.seq)(Keep.both) - .run() + MergeHub.source[Int](16).take(2000).toMat(Sink.seq)(Keep.both).run() Source(1 to 1000).throttle(10, 1.millisecond, 100, ThrottleMode.shaping).runWith(sink) Source(1001 to 2000).runWith(sink) @@ -236,9 +238,12 @@ class HubSpec extends StreamSpec { } "send the same elements to consumers of attaching around the same time if the producer is slow" in assertAllStagesStopped { - val (firstElem, source) = Source.maybe[Int].concat(Source(2 to 10)) + val (firstElem, source) = Source + .maybe[Int] + .concat(Source(2 to 10)) .throttle(1, 10.millis, 3, ThrottleMode.shaping) - .toMat(BroadcastHub.sink(8))(Keep.both).run() + .toMat(BroadcastHub.sink(8))(Keep.both) + .run() val f1 = source.runWith(Sink.seq) val f2 = source.runWith(Sink.seq) @@ -319,7 +324,7 @@ class HubSpec extends StreamSpec { // give a bit of time for the downstream to complete subscriptions Thread.sleep(100) - (1 to 8) foreach (upstream.sendNext(_)) + (1 to 8).foreach(upstream.sendNext(_)) downstream1.expectNext(1, 2, 3, 4) downstream2.expectNext(1, 2, 3, 4, 5, 6, 7, 8) @@ -404,12 +409,14 @@ class HubSpec extends StreamSpec { "PartitionHub" must { "work in the happy case with one stream" in assertAllStagesStopped { - val source = Source(1 to 10).runWith(PartitionHub.sink((size, elem) => 0, startAfterNrOfConsumers = 0, bufferSize = 8)) + val source = + Source(1 to 
10).runWith(PartitionHub.sink((size, elem) => 0, startAfterNrOfConsumers = 0, bufferSize = 8)) source.runWith(Sink.seq).futureValue should ===(1 to 10) } "work in the happy case with two streams" in assertAllStagesStopped { - val source = Source(0 until 10).runWith(PartitionHub.sink((size, elem) => elem % size, startAfterNrOfConsumers = 2, bufferSize = 8)) + val source = Source(0 until 10) + .runWith(PartitionHub.sink((size, elem) => elem % size, startAfterNrOfConsumers = 2, bufferSize = 8)) val result1 = source.runWith(Sink.seq) // it should not start publishing until startAfterNrOfConsumers = 2 Thread.sleep(20) @@ -456,9 +463,10 @@ class HubSpec extends StreamSpec { } "be able to use as fastest consumer router" in assertAllStagesStopped { - val source = Source(0 until 1000).runWith(PartitionHub.statefulSink( - () => (info, elem) => info.consumerIds.toVector.minBy(id => info.queueSize(id)), - startAfterNrOfConsumers = 2, bufferSize = 4)) + val source = Source(0 until 1000).runWith( + PartitionHub.statefulSink(() => (info, elem) => info.consumerIds.toVector.minBy(id => info.queueSize(id)), + startAfterNrOfConsumers = 2, + bufferSize = 4)) val result1 = source.runWith(Sink.seq) val result2 = source.throttle(10, 100.millis, 10, ThrottleMode.Shaping).runWith(Sink.seq) @@ -466,8 +474,10 @@ class HubSpec extends StreamSpec { } "route evenly" in assertAllStagesStopped { - val (testSource, hub) = TestSource.probe[Int].toMat( - PartitionHub.sink((size, elem) => elem % size, startAfterNrOfConsumers = 2, bufferSize = 8))(Keep.both).run() + val (testSource, hub) = TestSource + .probe[Int] + .toMat(PartitionHub.sink((size, elem) => elem % size, startAfterNrOfConsumers = 2, bufferSize = 8))(Keep.both) + .run() val probe0 = hub.runWith(TestSink.probe[Int]) val probe1 = hub.runWith(TestSink.probe[Int]) probe0.request(3) @@ -500,8 +510,11 @@ class HubSpec extends StreamSpec { } "route unevenly" in assertAllStagesStopped { - val (testSource, hub) = TestSource.probe[Int].toMat( - 
PartitionHub.sink((size, elem) => (elem % 3) % 2, startAfterNrOfConsumers = 2, bufferSize = 8))(Keep.both).run() + val (testSource, hub) = TestSource + .probe[Int] + .toMat(PartitionHub.sink((size, elem) => (elem % 3) % 2, startAfterNrOfConsumers = 2, bufferSize = 8))( + Keep.both) + .run() val probe0 = hub.runWith(TestSink.probe[Int]) val probe1 = hub.runWith(TestSink.probe[Int]) @@ -531,8 +544,10 @@ class HubSpec extends StreamSpec { } "backpressure" in assertAllStagesStopped { - val (testSource, hub) = TestSource.probe[Int].toMat( - PartitionHub.sink((size, elem) => 0, startAfterNrOfConsumers = 2, bufferSize = 4))(Keep.both).run() + val (testSource, hub) = TestSource + .probe[Int] + .toMat(PartitionHub.sink((size, elem) => 0, startAfterNrOfConsumers = 2, bufferSize = 4))(Keep.both) + .run() val probe0 = hub.runWith(TestSink.probe[Int]) val probe1 = hub.runWith(TestSink.probe[Int]) probe0.request(10) @@ -554,8 +569,11 @@ class HubSpec extends StreamSpec { } "ensure that from two different speed consumers the slower controls the rate" in assertAllStagesStopped { - val (firstElem, source) = Source.maybe[Int].concat(Source(1 until 20)).toMat( - PartitionHub.sink((size, elem) => elem % size, startAfterNrOfConsumers = 2, bufferSize = 1))(Keep.both).run() + val (firstElem, source) = Source + .maybe[Int] + .concat(Source(1 until 20)) + .toMat(PartitionHub.sink((size, elem) => elem % size, startAfterNrOfConsumers = 2, bufferSize = 1))(Keep.both) + .run() val f1 = source.throttle(1, 10.millis, 1, ThrottleMode.shaping).runWith(Sink.seq) // Second cannot be overwhelmed since the first one throttles the overall rate, and second allows a higher rate @@ -571,8 +589,9 @@ class HubSpec extends StreamSpec { "properly signal error to consumers" in assertAllStagesStopped { val upstream = TestPublisher.probe[Int]() - val source = Source.fromPublisher(upstream).runWith( - PartitionHub.sink((size, elem) => elem % size, startAfterNrOfConsumers = 2, bufferSize = 8)) + val source = 
Source + .fromPublisher(upstream) + .runWith(PartitionHub.sink((size, elem) => elem % size, startAfterNrOfConsumers = 2, bufferSize = 8)) val downstream1 = TestSubscriber.probe[Int]() source.runWith(Sink.fromSubscriber(downstream1)) @@ -586,7 +605,7 @@ class HubSpec extends StreamSpec { // starting to send elements Thread.sleep(100) - (0 until 16) foreach (upstream.sendNext(_)) + (0 until 16).foreach(upstream.sendNext(_)) downstream1.expectNext(0, 2, 4, 6) downstream2.expectNext(1, 3, 5, 7, 9, 11, 13, 15) @@ -601,7 +620,8 @@ class HubSpec extends StreamSpec { } "properly signal completion to consumers arriving after producer finished" in assertAllStagesStopped { - val source = Source.empty[Int].runWith(PartitionHub.sink((size, elem) => elem % size, startAfterNrOfConsumers = 0)) + val source = + Source.empty[Int].runWith(PartitionHub.sink((size, elem) => elem % size, startAfterNrOfConsumers = 0)) // Wait enough so the Hub gets the completion. This is racy, but this is fine because both // cases should work in the end Thread.sleep(10) @@ -611,8 +631,8 @@ class HubSpec extends StreamSpec { "remember completion for materialisations after completion" in { - val (sourceProbe, source) = TestSource.probe[Unit].toMat( - PartitionHub.sink((size, elem) => 0, startAfterNrOfConsumers = 0))(Keep.both).run() + val (sourceProbe, source) = + TestSource.probe[Unit].toMat(PartitionHub.sink((size, elem) => 0, startAfterNrOfConsumers = 0))(Keep.both).run() val sinkProbe = source.runWith(TestSink.probe[Unit]) sourceProbe.sendComplete() @@ -629,8 +649,8 @@ class HubSpec extends StreamSpec { } "properly signal error to consumers arriving after producer finished" in assertAllStagesStopped { - val source = Source.failed[Int](TE("Fail!")).runWith( - PartitionHub.sink((size, elem) => 0, startAfterNrOfConsumers = 0)) + val source = + Source.failed[Int](TE("Fail!")).runWith(PartitionHub.sink((size, elem) => 0, startAfterNrOfConsumers = 0)) // Wait enough so the Hub gets the failure. 
This is racy, but this is fine because both // cases should work in the end Thread.sleep(10) @@ -641,8 +661,10 @@ class HubSpec extends StreamSpec { } "drop elements with negative index" in assertAllStagesStopped { - val source = Source(0 until 10).runWith(PartitionHub.sink( - (size, elem) => if (elem == 3 || elem == 4) -1 else elem % size, startAfterNrOfConsumers = 2, bufferSize = 8)) + val source = Source(0 until 10).runWith( + PartitionHub.sink((size, elem) => if (elem == 3 || elem == 4) -1 else elem % size, + startAfterNrOfConsumers = 2, + bufferSize = 8)) val result1 = source.runWith(Sink.seq) val result2 = source.runWith(Sink.seq) result1.futureValue should ===((0 to 8 by 2).filterNot(_ == 4)) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala index e764616fbd..6637f38d30 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/JsonFramingSpec.scala @@ -32,18 +32,15 @@ class JsonFramingSpec extends AkkaSpec { |] |""".stripMargin // also should complete once notices end of array - val result = Source.single(ByteString(input)) - .via(JsonFraming.objectScanner(Int.MaxValue)) - .runFold(Seq.empty[String]) { + val result = + Source.single(ByteString(input)).via(JsonFraming.objectScanner(Int.MaxValue)).runFold(Seq.empty[String]) { case (acc, entry) => acc ++ Seq(entry.utf8String) } // #using-json-framing - result.futureValue shouldBe Seq( - """{ "name" : "john" }""", - """{ "name" : "Ég get etið gler án þess að meiða mig" }""", - """{ "name" : "jack" }""" - ) + result.futureValue shouldBe Seq("""{ "name" : "john" }""", + """{ "name" : "Ég get etið gler án þess að meiða mig" }""", + """{ "name" : "jack" }""") } "emit single json element from string" in { @@ -52,7 +49,8 @@ class JsonFramingSpec extends AkkaSpec { | { "name": "jack" } """.stripMargin - val result 
= Source.single(ByteString(input)) + val result = Source + .single(ByteString(input)) .via(JsonFraming.objectScanner(Int.MaxValue)) .take(1) .runFold(Seq.empty[String]) { @@ -69,56 +67,46 @@ class JsonFramingSpec extends AkkaSpec { | { "name": "katie" } """.stripMargin - val result = Source.single(ByteString(input)) - .via(JsonFraming.objectScanner(Int.MaxValue)) - .runFold(Seq.empty[String]) { + val result = + Source.single(ByteString(input)).via(JsonFraming.objectScanner(Int.MaxValue)).runFold(Seq.empty[String]) { case (acc, entry) => acc ++ Seq(entry.utf8String) } - Await.result(result, 3.seconds) shouldBe Seq( - """{ "name": "john" }""", - """{ "name": "jack" }""", - """{ "name": "katie" }""") + Await.result(result, 3.seconds) shouldBe Seq("""{ "name": "john" }""", + """{ "name": "jack" }""", + """{ "name": "katie" }""") } "parse comma delimited" in { val input = """ { "name": "john" }, { "name": "jack" }, { "name": "katie" } """ - val result = Source.single(ByteString(input)) - .via(JsonFraming.objectScanner(Int.MaxValue)) - .runFold(Seq.empty[String]) { + val result = + Source.single(ByteString(input)).via(JsonFraming.objectScanner(Int.MaxValue)).runFold(Seq.empty[String]) { case (acc, entry) => acc ++ Seq(entry.utf8String) } - result.futureValue shouldBe Seq( - """{ "name": "john" }""", - """{ "name": "jack" }""", - """{ "name": "katie" }""") + result.futureValue shouldBe Seq("""{ "name": "john" }""", """{ "name": "jack" }""", """{ "name": "katie" }""") } "parse chunks successfully" in { - val input: Seq[ByteString] = Seq( - """ + val input: Seq[ByteString] = Seq(""" |[ | { "name": "john"""".stripMargin, - """ + """ |}, """.stripMargin, - """{ "na""", - """me": "jack""", - """"}]"""").map(ByteString(_)) + """{ "na""", + """me": "jack""", + """"}]"""").map(ByteString(_)) - val result = Source.apply(input) - .via(JsonFraming.objectScanner(Int.MaxValue)) - .runFold(Seq.empty[String]) { - case (acc, entry) => acc ++ Seq(entry.utf8String) - } + val result = 
Source.apply(input).via(JsonFraming.objectScanner(Int.MaxValue)).runFold(Seq.empty[String]) { + case (acc, entry) => acc ++ Seq(entry.utf8String) + } - result.futureValue shouldBe Seq( - """{ "name": "john" + result.futureValue shouldBe Seq("""{ "name": "john" |}""".stripMargin, - """{ "name": "jack"}""") + """{ "name": "jack"}""") } "emit all elements after input completes" in { @@ -126,7 +114,8 @@ class JsonFramingSpec extends AkkaSpec { val input = TestPublisher.probe[ByteString]() val output = TestSubscriber.probe[String]() - val result = Source.fromPublisher(input) + val result = Source + .fromPublisher(input) .via(JsonFraming.objectScanner(Int.MaxValue)) .map(_.utf8String) .runWith(Sink.fromSubscriber(output)) @@ -158,7 +147,7 @@ class JsonFramingSpec extends AkkaSpec { } } - "valid json is supplied" which { + "valid json is supplied".which { "has one object" should { "successfully parse empty object" in { val buffer = new JsonObjectParser() @@ -225,8 +214,7 @@ class JsonFramingSpec extends AkkaSpec { "successfully parse single field having nested object" in { val buffer = new JsonObjectParser() - buffer.offer(ByteString( - """ + buffer.offer(ByteString(""" |{ "name": "john", | "age": 101, | "address": { @@ -246,8 +234,7 @@ class JsonFramingSpec extends AkkaSpec { "successfully parse single field having multiple level of nested object" in { val buffer = new JsonObjectParser() - buffer.offer(ByteString( - """ + buffer.offer(ByteString(""" |{ "name": "john", | "age": 101, | "address": { @@ -273,13 +260,11 @@ class JsonFramingSpec extends AkkaSpec { "successfully parse an escaped backslash followed by a double quote" in { val buffer = new JsonObjectParser() - buffer.offer(ByteString( - """ + buffer.offer(ByteString(""" |{ | "key": "\\" | } - | """.stripMargin - )) + | """.stripMargin)) buffer.poll().get.utf8String shouldBe """{ | "key": "\\" @@ -288,13 +273,11 @@ class JsonFramingSpec extends AkkaSpec { "successfully parse a string that contains an escaped 
quote" in { val buffer = new JsonObjectParser() - buffer.offer(ByteString( - """ + buffer.offer(ByteString(""" |{ | "key": "\"" | } - | """.stripMargin - )) + | """.stripMargin)) buffer.poll().get.utf8String shouldBe """{ | "key": "\"" @@ -303,13 +286,11 @@ class JsonFramingSpec extends AkkaSpec { "successfully parse a string that contains escape sequence" in { val buffer = new JsonObjectParser() - buffer.offer(ByteString( - """ + buffer.offer(ByteString(""" |{ | "key": "\\\"" | } - | """.stripMargin - )) + | """.stripMargin)) buffer.poll().get.utf8String shouldBe """{ | "key": "\\\"" @@ -320,8 +301,7 @@ class JsonFramingSpec extends AkkaSpec { "has nested array" should { "successfully parse" in { val buffer = new JsonObjectParser() - buffer.offer(ByteString( - """ + buffer.offer(ByteString(""" |{ "name": "john", | "things": [ | 1, @@ -345,8 +325,7 @@ class JsonFramingSpec extends AkkaSpec { "has complex object graph" should { "successfully parse" in { val buffer = new JsonObjectParser() - buffer.offer(ByteString( - """ + buffer.offer(ByteString(""" |{ | "name": "john", | "addresses": [ @@ -405,14 +384,13 @@ class JsonFramingSpec extends AkkaSpec { "parse successfully despite valid whitespaces around json" in { val buffer = new JsonObjectParser() - buffer.offer(ByteString( - """ + buffer.offer(ByteString(""" | | |{"name": "john" |, "age": 101}""".stripMargin)) buffer.poll().get.utf8String shouldBe - """{"name": "john" + """{"name": "john" |, "age": 101}""".stripMargin } } @@ -435,12 +413,12 @@ class JsonFramingSpec extends AkkaSpec { buffer.offer(ByteString(input)) buffer.poll().get.utf8String shouldBe - """{ + """{ | "name": "john", | "age": 32 | }""".stripMargin buffer.poll().get.utf8String shouldBe - """{ + """{ | "name": "katie", | "age": 25 | }""".stripMargin @@ -457,10 +435,9 @@ class JsonFramingSpec extends AkkaSpec { "returns none until valid json is encountered" in { val buffer = new JsonObjectParser() - """{ "name": "john"""".foreach { - c => - 
buffer.offer(ByteString(c)) - buffer.poll() should ===(None) + """{ "name": "john"""".foreach { c => + buffer.offer(ByteString(c)) + buffer.poll() should ===(None) } buffer.offer(ByteString("}")) @@ -489,8 +466,10 @@ class JsonFramingSpec extends AkkaSpec { | { "name": "john" }, { "name": "jack" } """.stripMargin - val result = Source.single(ByteString(input)) - .via(JsonFraming.objectScanner(5)).map(_.utf8String) + val result = Source + .single(ByteString(input)) + .via(JsonFraming.objectScanner(5)) + .map(_.utf8String) .runFold(Seq.empty[String]) { case (acc, entry) => acc ++ Seq(entry) } @@ -501,14 +480,11 @@ class JsonFramingSpec extends AkkaSpec { } "fail when 2nd object is too large" in { - val input = List( - """{ "name": "john" }""", - """{ "name": "jack" }""", - """{ "name": "very very long name somehow. how did this happen?" }""").map(s => ByteString(s)) + val input = List("""{ "name": "john" }""", + """{ "name": "jack" }""", + """{ "name": "very very long name somehow. how did this happen?" 
}""").map(s => ByteString(s)) - val probe = Source(input) - .via(JsonFraming.objectScanner(48)) - .runWith(TestSink.probe) + val probe = Source(input).via(JsonFraming.objectScanner(48)).runWith(TestSink.probe) probe.ensureSubscription() probe @@ -517,7 +493,8 @@ class JsonFramingSpec extends AkkaSpec { .request(1) .expectNext(ByteString("""{ "name": "jack" }""")) .request(1) - .expectError().getMessage should include("exceeded") + .expectError() + .getMessage should include("exceeded") } } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala index d9735dd626..ae2d09895e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LastSinkSpec.scala @@ -32,9 +32,9 @@ class LastSinkSpec extends StreamSpec with ScriptedTest { "yield the first error" in assertAllStagesStopped { val ex = new RuntimeException("ex") - intercept[RuntimeException] { + (intercept[RuntimeException] { Await.result(Source.failed[Int](ex).runWith(Sink.last), 1.second) - } should be theSameInstanceAs (ex) + } should be).theSameInstanceAs(ex) } "yield NoSuchElementException for empty stream" in assertAllStagesStopped { @@ -52,9 +52,9 @@ class LastSinkSpec extends StreamSpec with ScriptedTest { "yield the first error" in assertAllStagesStopped { val ex = new RuntimeException("ex") - intercept[RuntimeException] { + (intercept[RuntimeException] { Await.result(Source.failed[Int](ex).runWith(Sink.lastOption), 1.second) - } should be theSameInstanceAs (ex) + } should be).theSameInstanceAs(ex) } "yield None for empty stream" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazilyAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazilyAsyncSpec.scala index 87227e783b..2fc4dc91c4 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazilyAsyncSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazilyAsyncSpec.scala @@ -24,7 +24,11 @@ class LazilyAsyncSpec extends StreamSpec with DefaultTimeout with ScalaFutures { "A lazy async source" should { "work in happy path scenario" in assertAllStagesStopped { - val stream = Source.lazilyAsync { () => Future(42) }.runWith(Sink.head) + val stream = Source + .lazilyAsync { () => + Future(42) + } + .runWith(Sink.head) stream.futureValue should ===(42) } @@ -33,7 +37,10 @@ class LazilyAsyncSpec extends StreamSpec with DefaultTimeout with ScalaFutures { val probe = TestSubscriber.probe[Int]() val constructed = new AtomicBoolean(false) - val result = Source.lazilyAsync { () => constructed.set(true); Future(42) } + val result = Source + .lazilyAsync { () => + constructed.set(true); Future(42) + } .runWith(Sink.fromSubscriber(probe)) probe.cancel() @@ -41,7 +48,10 @@ class LazilyAsyncSpec extends StreamSpec with DefaultTimeout with ScalaFutures { } "fail materialized value when downstream cancels without ever consuming any element" in assertAllStagesStopped { - val materialization = Source.lazilyAsync { () => Future(42) } + val materialization = Source + .lazilyAsync { () => + Future(42) + } .toMat(Sink.cancelled)(Keep.left) .run() @@ -54,7 +64,10 @@ class LazilyAsyncSpec extends StreamSpec with DefaultTimeout with ScalaFutures { val probe = TestSubscriber.probe[Int]() val materialization: Future[Done] = - Source.lazilyAsync { () => Future(42) } + Source + .lazilyAsync { () => + Future(42) + } .mapMaterializedValue(_.map(_ => Done)) .to(Sink.fromSubscriber(probe)) .run() @@ -70,7 +83,10 @@ class LazilyAsyncSpec extends StreamSpec with DefaultTimeout with ScalaFutures { "propagate failed future from factory" in assertAllStagesStopped { val probe = TestSubscriber.probe[Int]() val failure = new RuntimeException("too bad") - val materialization = Source.lazilyAsync { () => Future.failed(failure) } + val materialization = Source + .lazilyAsync { () => + 
Future.failed(failure) + } .to(Sink.fromSubscriber(probe)) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala index 2b780a297d..a2bc0c9bdc 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazyFlowSpec.scala @@ -18,20 +18,17 @@ import scala.concurrent.{ Future, Promise } class LazyFlowSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 1, maxSize = 1) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 1, maxSize = 1) implicit val materializer = ActorMaterializer(settings) val ex = TE("") "A LazyFlow" must { - def mapF(e: Int): () => Future[Flow[Int, String, NotUsed]] = () => - Future.successful(Flow.fromFunction[Int, String](i => (i * e).toString)) + def mapF(e: Int): () => Future[Flow[Int, String, NotUsed]] = + () => Future.successful(Flow.fromFunction[Int, String](i => (i * e).toString)) val flowF = Future.successful(Flow[Int]) "work in happy case" in assertAllStagesStopped { - val probe = Source(2 to 10) - .via(Flow.lazyInitAsync[Int, String, NotUsed](mapF(2))) - .runWith(TestSink.probe[String]) + val probe = Source(2 to 10).via(Flow.lazyInitAsync[Int, String, NotUsed](mapF(2))).runWith(TestSink.probe[String]) probe.request(100) (2 to 10).map(i => (i * 2).toString).foreach(probe.expectNext) } @@ -39,7 +36,8 @@ class LazyFlowSpec extends StreamSpec { "work with slow flow init" in assertAllStagesStopped { val p = Promise[Flow[Int, Int, NotUsed]]() val sourceProbe = TestPublisher.manualProbe[Int]() - val flowProbe = Source.fromPublisher(sourceProbe) + val flowProbe = Source + .fromPublisher(sourceProbe) .via(Flow.lazyInitAsync[Int, Int, NotUsed](() => p.future)) .runWith(TestSink.probe[Int]) @@ -62,15 +60,14 @@ class LazyFlowSpec extends StreamSpec { "complete when 
there was no elements in the stream" in assertAllStagesStopped { def flowMaker() = flowF - val probe = Source.empty - .via(Flow.lazyInitAsync(() => flowMaker)) - .runWith(TestSink.probe[Int]) + val probe = Source.empty.via(Flow.lazyInitAsync(() => flowMaker)).runWith(TestSink.probe[Int]) probe.request(1).expectComplete() } "complete normally when upstream completes BEFORE the stage has switched to the inner flow" in assertAllStagesStopped { val promise = Promise[Flow[Int, Int, NotUsed]] - val (pub, sub) = TestSource.probe[Int] + val (pub, sub) = TestSource + .probe[Int] .viaMat(Flow.lazyInitAsync(() => promise.future))(Keep.left) .toMat(TestSink.probe)(Keep.both) .run() @@ -81,7 +78,8 @@ class LazyFlowSpec extends StreamSpec { } "complete normally when upstream completes AFTER the stage has switched to the inner flow" in assertAllStagesStopped { - val (pub, sub) = TestSource.probe[Int] + val (pub, sub) = TestSource + .probe[Int] .viaMat(Flow.lazyInitAsync(() => Future.successful(Flow[Int])))(Keep.left) .toMat(TestSink.probe)(Keep.both) .run() @@ -94,7 +92,8 @@ class LazyFlowSpec extends StreamSpec { "fail gracefully when flow factory method failed" in assertAllStagesStopped { val sourceProbe = TestPublisher.manualProbe[Int]() - val probe = Source.fromPublisher(sourceProbe) + val probe = Source + .fromPublisher(sourceProbe) .via(Flow.lazyInitAsync[Int, Int, NotUsed](() => throw ex)) .runWith(TestSink.probe[Int]) @@ -108,22 +107,20 @@ class LazyFlowSpec extends StreamSpec { "fail gracefully when upstream failed" in assertAllStagesStopped { val sourceProbe = TestPublisher.manualProbe[Int]() - val probe = Source.fromPublisher(sourceProbe) - .via(Flow.lazyInitAsync(() => flowF)) - .runWith(TestSink.probe) + val probe = Source.fromPublisher(sourceProbe).via(Flow.lazyInitAsync(() => flowF)).runWith(TestSink.probe) val sourceSub = sourceProbe.expectSubscription() sourceSub.expectRequest(1) sourceSub.sendNext(0) - probe.request(1) - .expectNext(0) + 
probe.request(1).expectNext(0) sourceSub.sendError(ex) probe.expectError(ex) } "fail gracefully when factory future failed" in assertAllStagesStopped { val sourceProbe = TestPublisher.manualProbe[Int]() - val flowProbe = Source.fromPublisher(sourceProbe) + val flowProbe = Source + .fromPublisher(sourceProbe) .via(Flow.lazyInitAsync[Int, Int, NotUsed](() => Future.failed(ex))) .runWith(TestSink.probe) @@ -135,7 +132,8 @@ class LazyFlowSpec extends StreamSpec { "cancel upstream when the downstream is cancelled" in assertAllStagesStopped { val sourceProbe = TestPublisher.manualProbe[Int]() - val probe = Source.fromPublisher(sourceProbe) + val probe = Source + .fromPublisher(sourceProbe) .via(Flow.lazyInitAsync[Int, Int, NotUsed](() => flowF)) .runWith(TestSink.probe[Int]) @@ -152,7 +150,8 @@ class LazyFlowSpec extends StreamSpec { "fail correctly when factory throw error" in assertAllStagesStopped { val msg = "fail!" val matFail = TE(msg) - val result = Source.single("whatever") + val result = Source + .single("whatever") .viaMat(Flow.lazyInitAsync(() => throw matFail))(Keep.right) .toMat(Sink.ignore)(Keep.left) .run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySinkSpec.scala index 58666d529e..5c968cdddc 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySinkSpec.scala @@ -20,8 +20,7 @@ import scala.concurrent.duration._ class LazySinkSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 1, maxSize = 1) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 1, maxSize = 1) implicit val materializer = ActorMaterializer(settings) val ex = TE("") @@ -66,9 +65,7 @@ class LazySinkSpec extends StreamSpec { "complete normally when upstream is completed" in assertAllStagesStopped { val futureProbe 
= Source.single(1).runWith(Sink.lazyInitAsync(() => Future.successful(TestSink.probe[Int]))) val futureResult = Await.result(futureProbe, remainingOrDefault).get - futureResult.request(1) - .expectNext(1) - .expectComplete() + futureResult.request(1).expectNext(1).expectComplete() } "failed gracefully when sink factory method failed" in assertAllStagesStopped { @@ -84,14 +81,14 @@ class LazySinkSpec extends StreamSpec { "failed gracefully when upstream failed" in assertAllStagesStopped { val sourceProbe = TestPublisher.manualProbe[Int]() - val futureProbe = Source.fromPublisher(sourceProbe).runWith(Sink.lazyInitAsync(() => Future.successful(TestSink.probe[Int]))) + val futureProbe = + Source.fromPublisher(sourceProbe).runWith(Sink.lazyInitAsync(() => Future.successful(TestSink.probe[Int]))) val sourceSub = sourceProbe.expectSubscription() sourceSub.expectRequest(1) sourceSub.sendNext(0) val probe = Await.result(futureProbe, remainingOrDefault).get - probe.request(1) - .expectNext(0) + probe.request(1).expectNext(0) sourceSub.sendError(ex) probe.expectError(ex) } @@ -108,14 +105,14 @@ class LazySinkSpec extends StreamSpec { "cancel upstream when internal sink is cancelled" in assertAllStagesStopped { val sourceProbe = TestPublisher.manualProbe[Int]() - val futureProbe = Source.fromPublisher(sourceProbe).runWith(Sink.lazyInitAsync(() => Future.successful(TestSink.probe[Int]))) + val futureProbe = + Source.fromPublisher(sourceProbe).runWith(Sink.lazyInitAsync(() => Future.successful(TestSink.probe[Int]))) val sourceSub = sourceProbe.expectSubscription() sourceSub.expectRequest(1) sourceSub.sendNext(0) sourceSub.expectRequest(1) val probe = Await.result(futureProbe, remainingOrDefault).get - probe.request(1) - .expectNext(0) + probe.request(1).expectNext(0) probe.cancel() sourceSub.expectCancellation() } @@ -130,10 +127,9 @@ class LazySinkSpec extends StreamSpec { } } - val result = Source(List("whatever")) - .runWith( - Sink.lazyInitAsync[String, NotUsed]( - () => { 
println("create sink"); Future.successful(Sink.fromGraph(FailingInnerMat)) })) + val result = Source(List("whatever")).runWith(Sink.lazyInitAsync[String, NotUsed](() => { + println("create sink"); Future.successful(Sink.fromGraph(FailingInnerMat)) + })) result.failed.futureValue should ===(matFail) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySourceSpec.scala index 369b84e0d6..dff9bfe24a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/LazySourceSpec.scala @@ -33,16 +33,18 @@ class LazySourceSpec extends StreamSpec with DefaultTimeout with ScalaFutures { "never construct the source when there was no demand" in assertAllStagesStopped { val probe = TestSubscriber.probe[Int]() val constructed = new AtomicBoolean(false) - val result = Source.fromGraph(LazySource { () => constructed.set(true); Source(List(1, 2, 3)) }).runWith(Sink.fromSubscriber(probe)) + val result = Source + .fromGraph(LazySource { () => + constructed.set(true); Source(List(1, 2, 3)) + }) + .runWith(Sink.fromSubscriber(probe)) probe.cancel() constructed.get() should ===(false) } "fail the materialized value when downstream cancels without ever consuming any element" in assertAllStagesStopped { - val matF = Source.fromGraph(LazySource(() => Source(List(1, 2, 3)))) - .toMat(Sink.cancelled)(Keep.left) - .run() + val matF = Source.fromGraph(LazySource(() => Source(List(1, 2, 3)))).toMat(Sink.cancelled)(Keep.left).run() intercept[RuntimeException] { matF.futureValue @@ -66,9 +68,11 @@ class LazySourceSpec extends StreamSpec with DefaultTimeout with ScalaFutures { "materialize when the source has been created" in assertAllStagesStopped { val probe = TestSubscriber.probe[Int]() - val matF: Future[Done] = Source.fromGraph(LazySource { () => - Source(List(1, 2, 3)).mapMaterializedValue(_ => Done) - 
}).to(Sink.fromSubscriber(probe)) + val matF: Future[Done] = Source + .fromGraph(LazySource { () => + Source(List(1, 2, 3)).mapMaterializedValue(_ => Done) + }) + .to(Sink.fromSubscriber(probe)) .run() matF.value shouldEqual None diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/MaybeSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/MaybeSourceSpec.scala index bcd104e5c6..d021e45c69 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/MaybeSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/MaybeSourceSpec.scala @@ -37,7 +37,9 @@ class MaybeSourceSpec extends StreamSpec with DefaultTimeout { "allow external triggering of empty completion" in assertAllStagesStopped { val neverSource = Source.maybe[Int].filter(_ => false) - val counterSink = Sink.fold[Int, Int](0) { (acc, _) => acc + 1 } + val counterSink = Sink.fold[Int, Int](0) { (acc, _) => + acc + 1 + } val (neverPromise, counterFuture) = neverSource.toMat(counterSink)(Keep.both).run() @@ -71,7 +73,9 @@ class MaybeSourceSpec extends StreamSpec with DefaultTimeout { "allow external triggering of onError" in assertAllStagesStopped { val neverSource = Source.maybe[Int] - val counterSink = Sink.fold[Int, Int](0) { (acc, _) => acc + 1 } + val counterSink = Sink.fold[Int, Int](0) { (acc, _) => + acc + 1 + } val (neverPromise, counterFuture) = neverSource.toMat(counterSink)(Keep.both).run() diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala index 77518a1c4b..02796d9abb 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/PublisherSinkSpec.scala @@ -5,7 +5,7 @@ package akka.stream.scaladsl import akka.stream.testkit.StreamSpec -import akka.stream.{ ClosedShape, ActorMaterializer } +import akka.stream.{ ActorMaterializer, 
ClosedShape } import akka.stream.testkit.scaladsl.StreamTestKit._ import scala.concurrent.duration._ @@ -20,16 +20,19 @@ class PublisherSinkSpec extends StreamSpec { "be unique when created twice" in assertAllStagesStopped { - val (pub1, pub2) = RunnableGraph.fromGraph(GraphDSL.create(Sink.asPublisher[Int](false), Sink.asPublisher[Int](false))(Keep.both) { implicit b => (p1, p2) => - import GraphDSL.Implicits._ + val (pub1, pub2) = RunnableGraph + .fromGraph(GraphDSL.create(Sink.asPublisher[Int](false), Sink.asPublisher[Int](false))(Keep.both) { + implicit b => (p1, p2) => + import GraphDSL.Implicits._ - val bcast = b.add(Broadcast[Int](2)) + val bcast = b.add(Broadcast[Int](2)) - Source(0 to 5) ~> bcast.in - bcast.out(0).map(_ * 2) ~> p1.in - bcast.out(1) ~> p2.in - ClosedShape - }).run() + Source(0 to 5) ~> bcast.in + bcast.out(0).map(_ * 2) ~> p1.in + bcast.out(1) ~> p2.in + ClosedShape + }) + .run() val f1 = Source.fromPublisher(pub1).map(identity).runFold(0)(_ + _) val f2 = Source.fromPublisher(pub2).map(identity).runFold(0)(_ + _) @@ -46,8 +49,8 @@ class PublisherSinkSpec extends StreamSpec { } "be able to use Publisher in materialized value transformation" in { - val f = Source(1 to 3).runWith( - Sink.asPublisher[Int](false).mapMaterializedValue(p => Source.fromPublisher(p).runFold(0)(_ + _))) + val f = Source(1 to 3) + .runWith(Sink.asPublisher[Int](false).mapMaterializedValue(p => Source.fromPublisher(p).runFold(0)(_ + _))) Await.result(f, 3.seconds) should be(6) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala index c6ae88f006..69404da9d7 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSinkSpec.scala @@ -28,8 +28,8 @@ class QueueSinkSpec extends StreamSpec { "send the elements as result of future" in assertAllStagesStopped { val expected = 
List(Some(1), Some(2), Some(3), None) val queue = Source(expected.flatten).runWith(Sink.queue()) - expected foreach { v => - queue.pull() pipeTo testActor + expected.foreach { v => + queue.pull().pipeTo(testActor) expectMsg(v) } } @@ -125,8 +125,7 @@ class QueueSinkSpec extends StreamSpec { "keep on sending even after the buffer has been full" in assertAllStagesStopped { val bufferSize = 16 val streamElementCount = bufferSize + 4 - val sink = Sink.queue[Int]() - .withAttributes(inputBuffer(bufferSize, bufferSize)) + val sink = Sink.queue[Int]().withAttributes(inputBuffer(bufferSize, bufferSize)) val bufferFullProbe = Promise[akka.Done.type] val queue = Source(1 to streamElementCount) .alsoTo(Flow[Int].drop(bufferSize - 1).to(Sink.foreach(_ => bufferFullProbe.trySuccess(akka.Done)))) @@ -134,10 +133,10 @@ class QueueSinkSpec extends StreamSpec { .run() bufferFullProbe.future.futureValue should ===(akka.Done) for (i <- 1 to streamElementCount) { - queue.pull() pipeTo testActor + queue.pull().pipeTo(testActor) expectMsg(Some(i)) } - queue.pull() pipeTo testActor + queue.pull().pipeTo(testActor) expectMsg(None) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala index d383ffee1f..1da9a4719c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/QueueSourceSpec.scala @@ -24,9 +24,8 @@ class QueueSourceSpec extends StreamSpec { val pause = 300.millis // more frequent checks than defaults from AkkaSpec - implicit val testPatience = PatienceConfig( - testKitSettings.DefaultTimeout.duration, - Span(5, org.scalatest.time.Millis)) + implicit val testPatience = + PatienceConfig(testKitSettings.DefaultTimeout.duration, Span(5, org.scalatest.time.Millis)) def assertSuccess(f: Future[QueueOfferResult]): Unit = { f.futureValue should ===(QueueOfferResult.Enqueued) @@ -126,7 
+125,7 @@ class QueueSourceSpec extends StreamSpec { val sub = s.expectSubscription queue.watchCompletion.pipeTo(testActor) - queue.offer(1) pipeTo testActor + queue.offer(1).pipeTo(testActor) expectNoMessage(pause) sub.cancel() @@ -153,8 +152,8 @@ class QueueSourceSpec extends StreamSpec { "remember pull from downstream to send offered element immediately" in assertAllStagesStopped { val s = TestSubscriber.manualProbe[Int]() val probe = TestProbe() - val queue = TestSourceStage(new QueueSource[Int](1, OverflowStrategy.dropHead), probe) - .to(Sink.fromSubscriber(s)).run() + val queue = + TestSourceStage(new QueueSource[Int](1, OverflowStrategy.dropHead), probe).to(Sink.fromSubscriber(s)).run() val sub = s.expectSubscription sub.request(1) @@ -178,10 +177,7 @@ class QueueSourceSpec extends StreamSpec { expectMsg(QueueOfferResult.Enqueued) queue.complete() - probe - .request(6) - .expectNext(2, 3, 4, 5, 6) - .expectComplete() + probe.request(6).expectNext(2, 3, 4, 5, 6).expectComplete() } "complete watching future with failure if stream failed" in assertAllStagesStopped { @@ -208,7 +204,7 @@ class QueueSourceSpec extends StreamSpec { val sub = s.expectSubscription queue.offer(1) - queue.offer(2) pipeTo testActor + queue.offer(2).pipeTo(testActor) expectMsg(QueueOfferResult.Dropped) sub.request(1) @@ -222,7 +218,7 @@ class QueueSourceSpec extends StreamSpec { val sub = s.expectSubscription assertSuccess(queue.offer(1)) - queue.offer(2) pipeTo testActor + queue.offer(2).pipeTo(testActor) expectNoMessage(pause) sub.request(1) @@ -261,7 +257,7 @@ class QueueSourceSpec extends StreamSpec { sourceQueue1.offer("hello") mat1subscriber.expectNext("hello") mat1subscriber.cancel() - sourceQueue1.watchCompletion pipeTo testActor + sourceQueue1.watchCompletion.pipeTo(testActor) expectMsg(Done) sourceQueue2.watchCompletion().isCompleted should ===(false) @@ -273,18 +269,14 @@ class QueueSourceSpec extends StreamSpec { val (source, probe) = Source.queue[Int](1, 
OverflowStrategy.fail).toMat(TestSink.probe)(Keep.both).run() source.complete() source.watchCompletion().futureValue should ===(Done) - probe - .ensureSubscription() - .expectComplete() + probe.ensureSubscription().expectComplete() } "buffer is full" in { val (source, probe) = Source.queue[Int](1, OverflowStrategy.fail).toMat(TestSink.probe)(Keep.both).run() source.offer(1) source.complete() - probe - .requestNext(1) - .expectComplete() + probe.requestNext(1).expectComplete() source.watchCompletion().futureValue should ===(Done) } @@ -293,10 +285,7 @@ class QueueSourceSpec extends StreamSpec { source.offer(1) source.offer(2) source.complete() - probe - .requestNext(1) - .requestNext(2) - .expectComplete() + probe.requestNext(1).requestNext(2).expectComplete() source.watchCompletion().futureValue should ===(Done) } @@ -304,18 +293,14 @@ class QueueSourceSpec extends StreamSpec { val (source, probe) = Source.queue[Int](0, OverflowStrategy.fail).toMat(TestSink.probe)(Keep.both).run() source.complete() source.watchCompletion().futureValue should ===(Done) - probe - .ensureSubscription() - .expectComplete() + probe.ensureSubscription().expectComplete() } "no buffer is used and element is pending" in { val (source, probe) = Source.queue[Int](0, OverflowStrategy.fail).toMat(TestSink.probe)(Keep.both).run() source.offer(1) source.complete() - probe - .requestNext(1) - .expectComplete() + probe.requestNext(1).expectComplete() source.watchCompletion().futureValue should ===(Done) } @@ -323,8 +308,7 @@ class QueueSourceSpec extends StreamSpec { val (queue, probe) = Source.queue[Unit](10, OverflowStrategy.fail).toMat(TestSink.probe)(Keep.both).run() intercept[StreamDetachedException] { - Await.result( - (1 to 15).map(_ => queue.offer(())).last, 3.seconds) + Await.result((1 to 15).map(_ => queue.offer(())).last, 3.seconds) } } } @@ -336,9 +320,7 @@ class QueueSourceSpec extends StreamSpec { val (source, probe) = Source.queue[Int](1, 
OverflowStrategy.fail).toMat(TestSink.probe)(Keep.both).run() source.fail(ex) source.watchCompletion().failed.futureValue should ===(ex) - probe - .ensureSubscription() - .expectError(ex) + probe.ensureSubscription().expectError(ex) } "buffer is full" in { @@ -346,9 +328,7 @@ class QueueSourceSpec extends StreamSpec { source.offer(1) source.fail(ex) source.watchCompletion().failed.futureValue should ===(ex) - probe - .ensureSubscription() - .expectError(ex) + probe.ensureSubscription().expectError(ex) } "buffer is full and element is pending" in { @@ -357,18 +337,14 @@ class QueueSourceSpec extends StreamSpec { source.offer(2) source.fail(ex) source.watchCompletion().failed.futureValue should ===(ex) - probe - .ensureSubscription() - .expectError(ex) + probe.ensureSubscription().expectError(ex) } "no buffer is used" in { val (source, probe) = Source.queue[Int](0, OverflowStrategy.fail).toMat(TestSink.probe)(Keep.both).run() source.fail(ex) source.watchCompletion().failed.futureValue should ===(ex) - probe - .ensureSubscription() - .expectError(ex) + probe.ensureSubscription().expectError(ex) } "no buffer is used and element is pending" in { @@ -376,9 +352,7 @@ class QueueSourceSpec extends StreamSpec { source.offer(1) source.fail(ex) source.watchCompletion().failed.futureValue should ===(ex) - probe - .ensureSubscription() - .expectError(ex) + probe.ensureSubscription().expectError(ex) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala index 2dc716eb2a..af016e512e 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RestartSpec.scala @@ -33,10 +33,12 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "A restart with backoff source" should { "run normally" in assertAllStagesStopped { val created = new AtomicInteger() - val probe = 
RestartSource.withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => - created.incrementAndGet() - Source.repeat("a") - }.runWith(TestSink.probe) + val probe = RestartSource + .withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => + created.incrementAndGet() + Source.repeat("a") + } + .runWith(TestSink.probe) probe.requestNext("a") probe.requestNext("a") @@ -51,10 +53,12 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "restart on completion" in assertAllStagesStopped { val created = new AtomicInteger() - val probe = RestartSource.withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => - created.incrementAndGet() - Source(List("a", "b")) - }.runWith(TestSink.probe) + val probe = RestartSource + .withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => + created.incrementAndGet() + Source(List("a", "b")) + } + .runWith(TestSink.probe) probe.requestNext("a") probe.requestNext("b") @@ -69,14 +73,15 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "restart on failure" in assertAllStagesStopped { val created = new AtomicInteger() - val probe = RestartSource.withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => - created.incrementAndGet() - Source(List("a", "b", "c")) - .map { + val probe = RestartSource + .withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => + created.incrementAndGet() + Source(List("a", "b", "c")).map { case "c" => throw TE("failed") case other => other } - }.runWith(TestSink.probe) + } + .runWith(TestSink.probe) probe.requestNext("a") probe.requestNext("b") @@ -91,10 +96,12 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "backoff before restart" in assertAllStagesStopped { val created = new AtomicInteger() - val probe = RestartSource.withBackoff(minBackoff, maxBackoff, 0) { () => - created.incrementAndGet() - Source(List("a", "b")) - }.runWith(TestSink.probe) + val probe = RestartSource + .withBackoff(minBackoff, maxBackoff, 0) { () 
=> + created.incrementAndGet() + Source(List("a", "b")) + } + .runWith(TestSink.probe) probe.requestNext("a") probe.requestNext("b") @@ -113,10 +120,12 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "reset exponential backoff back to minimum when source runs for at least minimum backoff without completing" in assertAllStagesStopped { val created = new AtomicInteger() - val probe = RestartSource.withBackoff(minBackoff, maxBackoff, 0) { () => - created.incrementAndGet() - Source(List("a", "b")) - }.runWith(TestSink.probe) + val probe = RestartSource + .withBackoff(minBackoff, maxBackoff, 0) { () => + created.incrementAndGet() + Source(List("a", "b")) + } + .runWith(TestSink.probe) probe.requestNext("a") probe.requestNext("b") @@ -145,12 +154,14 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "cancel the currently running source when cancelled" in assertAllStagesStopped { val created = new AtomicInteger() val promise = Promise[Done]() - val probe = RestartSource.withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => - created.incrementAndGet() - Source.repeat("a").watchTermination() { (_, term) => - promise.completeWith(term) + val probe = RestartSource + .withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => + created.incrementAndGet() + Source.repeat("a").watchTermination() { (_, term) => + promise.completeWith(term) + } } - }.runWith(TestSink.probe) + .runWith(TestSink.probe) probe.requestNext("a") probe.cancel() @@ -164,10 +175,12 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "not restart the source when cancelled while backing off" in assertAllStagesStopped { val created = new AtomicInteger() - val probe = RestartSource.withBackoff(minBackoff, maxBackoff, 0) { () => - created.incrementAndGet() - Source.single("a") - }.runWith(TestSink.probe) + val probe = RestartSource + .withBackoff(minBackoff, maxBackoff, 0) { () => + created.incrementAndGet() + 
Source.single("a") + } + .runWith(TestSink.probe) probe.requestNext("a") probe.request(1) @@ -181,14 +194,15 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "stop on completion if it should only be restarted in failures" in assertAllStagesStopped { val created = new AtomicInteger() - val probe = RestartSource.onFailuresWithBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => - created.incrementAndGet() - Source(List("a", "b", "c")) - .map { + val probe = RestartSource + .onFailuresWithBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => + created.incrementAndGet() + Source(List("a", "b", "c")).map { case "c" => if (created.get() == 1) throw TE("failed") else "c" case other => other } - }.runWith(TestSink.probe) + } + .runWith(TestSink.probe) probe.requestNext("a") probe.requestNext("b") @@ -205,14 +219,15 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "restart on failure when only due to failures should be restarted" in assertAllStagesStopped { val created = new AtomicInteger() - val probe = RestartSource.onFailuresWithBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => - created.incrementAndGet() - Source(List("a", "b", "c")) - .map { + val probe = RestartSource + .onFailuresWithBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => + created.incrementAndGet() + Source(List("a", "b", "c")).map { case "c" => throw TE("failed") case other => other } - }.runWith(TestSink.probe) + } + .runWith(TestSink.probe) probe.requestNext("a") probe.requestNext("b") @@ -228,10 +243,12 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "not restart the source when maxRestarts is reached" in assertAllStagesStopped { val created = new AtomicInteger() - val probe = RestartSource.withBackoff(shortMinBackoff, shortMaxBackoff, 0, maxRestarts = 1) { () => - created.incrementAndGet() - Source.single("a") - }.runWith(TestSink.probe) + val probe = RestartSource + 
.withBackoff(shortMinBackoff, shortMaxBackoff, 0, maxRestarts = 1) { () => + created.incrementAndGet() + Source.single("a") + } + .runWith(TestSink.probe) probe.requestNext("a") probe.requestNext("a") @@ -244,10 +261,12 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "reset maxRestarts when source runs for at least minimum backoff without completing" in assertAllStagesStopped { val created = new AtomicInteger() - val probe = RestartSource.withBackoff(minBackoff, maxBackoff, 0, maxRestarts = 2) { () => - created.incrementAndGet() - Source(List("a")) - }.runWith(TestSink.probe) + val probe = RestartSource + .withBackoff(minBackoff, maxBackoff, 0, maxRestarts = 2) { () => + created.incrementAndGet() + Source(List("a")) + } + .runWith(TestSink.probe) probe.requestNext("a") // There should be minBackoff delay @@ -272,28 +291,33 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "run normally" in assertAllStagesStopped { val created = new AtomicInteger() val result = Promise[Seq[String]]() - val probe = TestSource.probe[String].toMat(RestartSink.withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => - created.incrementAndGet() - Sink.seq.mapMaterializedValue(result.completeWith) - })(Keep.left).run() + val probe = TestSource + .probe[String] + .toMat(RestartSink.withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => + created.incrementAndGet() + Sink.seq.mapMaterializedValue(result.completeWith) + })(Keep.left) + .run() probe.sendNext("a") probe.sendNext("b") probe.sendNext("c") probe.sendComplete() - result.future.futureValue should contain inOrderOnly ("a", "b", "c") + (result.future.futureValue should contain).inOrderOnly("a", "b", "c") created.get() should ===(1) } "restart on cancellation" in assertAllStagesStopped { val created = new AtomicInteger() val (queue, sinkProbe) = TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run() - val probe = 
TestSource.probe[String].toMat(RestartSink.withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => - created.incrementAndGet() - Flow[String].takeWhile(_ != "cancel", inclusive = true) - .to(Sink.foreach(queue.sendNext)) - })(Keep.left).run() + val probe = TestSource + .probe[String] + .toMat(RestartSink.withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => + created.incrementAndGet() + Flow[String].takeWhile(_ != "cancel", inclusive = true).to(Sink.foreach(queue.sendNext)) + })(Keep.left) + .run() probe.sendNext("a") sinkProbe.requestNext("a") @@ -313,11 +337,13 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "backoff before restart" in assertAllStagesStopped { val created = new AtomicInteger() val (queue, sinkProbe) = TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run() - val probe = TestSource.probe[String].toMat(RestartSink.withBackoff(minBackoff, maxBackoff, 0) { () => - created.incrementAndGet() - Flow[String].takeWhile(_ != "cancel", inclusive = true) - .to(Sink.foreach(queue.sendNext)) - })(Keep.left).run() + val probe = TestSource + .probe[String] + .toMat(RestartSink.withBackoff(minBackoff, maxBackoff, 0) { () => + created.incrementAndGet() + Flow[String].takeWhile(_ != "cancel", inclusive = true).to(Sink.foreach(queue.sendNext)) + })(Keep.left) + .run() probe.sendNext("a") sinkProbe.requestNext("a") @@ -338,11 +364,13 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "reset exponential backoff back to minimum when sink runs for at least minimum backoff without completing" in assertAllStagesStopped { val created = new AtomicInteger() val (queue, sinkProbe) = TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run() - val probe = TestSource.probe[String].toMat(RestartSink.withBackoff(minBackoff, maxBackoff, 0) { () => - created.incrementAndGet() - Flow[String].takeWhile(_ != "cancel", inclusive = true) - .to(Sink.foreach(queue.sendNext)) - })(Keep.left).run() + val 
probe = TestSource + .probe[String] + .toMat(RestartSink.withBackoff(minBackoff, maxBackoff, 0) { () => + created.incrementAndGet() + Flow[String].takeWhile(_ != "cancel", inclusive = true).to(Sink.foreach(queue.sendNext)) + })(Keep.left) + .run() probe.sendNext("a") sinkProbe.requestNext("a") @@ -378,11 +406,13 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "not restart the sink when completed while backing off" in assertAllStagesStopped { val created = new AtomicInteger() val (queue, sinkProbe) = TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run() - val probe = TestSource.probe[String].toMat(RestartSink.withBackoff(minBackoff, maxBackoff, 0) { () => - created.incrementAndGet() - Flow[String].takeWhile(_ != "cancel", inclusive = true) - .to(Sink.foreach(queue.sendNext)) - })(Keep.left).run() + val probe = TestSource + .probe[String] + .toMat(RestartSink.withBackoff(minBackoff, maxBackoff, 0) { () => + created.incrementAndGet() + Flow[String].takeWhile(_ != "cancel", inclusive = true).to(Sink.foreach(queue.sendNext)) + })(Keep.left) + .run() probe.sendNext("a") sinkProbe.requestNext("a") @@ -401,11 +431,13 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "not restart the sink when maxRestarts is reached" in assertAllStagesStopped { val created = new AtomicInteger() val (queue, sinkProbe) = TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run() - val probe = TestSource.probe[String].toMat(RestartSink.withBackoff(shortMinBackoff, shortMaxBackoff, 0, maxRestarts = 1) { () => - created.incrementAndGet() - Flow[String].takeWhile(_ != "cancel", inclusive = true) - .to(Sink.foreach(queue.sendNext)) - })(Keep.left).run() + val probe = TestSource + .probe[String] + .toMat(RestartSink.withBackoff(shortMinBackoff, shortMaxBackoff, 0, maxRestarts = 1) { () => + created.incrementAndGet() + Flow[String].takeWhile(_ != "cancel", inclusive = true).to(Sink.foreach(queue.sendNext)) + 
})(Keep.left) + .run() probe.sendNext("cancel") sinkProbe.requestNext("cancel") @@ -423,11 +455,13 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "reset maxRestarts when sink runs for at least minimum backoff without completing" in assertAllStagesStopped { val created = new AtomicInteger() val (queue, sinkProbe) = TestSource.probe[String].toMat(TestSink.probe)(Keep.both).run() - val probe = TestSource.probe[String].toMat(RestartSink.withBackoff(minBackoff, maxBackoff, 0, maxRestarts = 2) { () => - created.incrementAndGet() - Flow[String].takeWhile(_ != "cancel", inclusive = true) - .to(Sink.foreach(queue.sendNext)) - })(Keep.left).run() + val probe = TestSource + .probe[String] + .toMat(RestartSink.withBackoff(minBackoff, maxBackoff, 0, maxRestarts = 2) { () => + created.incrementAndGet() + Flow[String].takeWhile(_ != "cancel", inclusive = true).to(Sink.foreach(queue.sendNext)) + })(Keep.left) + .run() probe.sendNext("cancel") sinkProbe.requestNext("cancel") @@ -457,58 +491,74 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "A restart with backoff flow" should { // helps reuse all the setupFlow code for both methods: withBackoff, and onlyOnFailuresWithBackoff - def RestartFlowFactory[In, Out](onlyOnFailures: Boolean): (FiniteDuration, FiniteDuration, Double, Int) => (() => Flow[In, Out, _]) => Flow[In, Out, NotUsed] = if (onlyOnFailures) { - RestartFlow.onFailuresWithBackoff - } else { - // choose the correct backoff method - (minBackoff, maxBackoff, randomFactor, maxRestarts) => RestartFlow.withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) - } + def RestartFlowFactory[In, Out](onlyOnFailures: Boolean) + : (FiniteDuration, FiniteDuration, Double, Int) => (() => Flow[In, Out, _]) => Flow[In, Out, NotUsed] = + if (onlyOnFailures) { + RestartFlow.onFailuresWithBackoff + } else { + // choose the correct backoff method + (minBackoff, maxBackoff, randomFactor, maxRestarts) => + 
RestartFlow.withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) + } - def setupFlow(minBackoff: FiniteDuration, maxBackoff: FiniteDuration, maxRestarts: Int = -1, onlyOnFailures: Boolean = false) = { + def setupFlow(minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + maxRestarts: Int = -1, + onlyOnFailures: Boolean = false) = { val created = new AtomicInteger() - val (flowInSource: TestPublisher.Probe[String], flowInProbe: TestSubscriber.Probe[String]) = TestSource.probe[String] - .buffer(4, OverflowStrategy.backpressure) - .toMat(TestSink.probe)(Keep.both).run() + val (flowInSource: TestPublisher.Probe[String], flowInProbe: TestSubscriber.Probe[String]) = + TestSource.probe[String].buffer(4, OverflowStrategy.backpressure).toMat(TestSink.probe)(Keep.both).run() - val (flowOutProbe: TestPublisher.Probe[String], flowOutSource: Source[String, NotUsed]) = TestSource.probe[String].toMat(BroadcastHub.sink)(Keep.both).run() + val (flowOutProbe: TestPublisher.Probe[String], flowOutSource: Source[String, NotUsed]) = + TestSource.probe[String].toMat(BroadcastHub.sink)(Keep.both).run() // We can't just use ordinary probes here because we're expecting them to get started/restarted. Instead, we // simply use the probes as a message bus for feeding and capturing events. 
- val (source, sink) = TestSource.probe[String].viaMat(RestartFlowFactory(onlyOnFailures)(minBackoff, maxBackoff, 0, maxRestarts) { () => - created.incrementAndGet() - Flow.fromSinkAndSource( - Flow[String] - .takeWhile(_ != "cancel") - .map { - case "in error" => throw TE("in error") - case other => other - } - .to(Sink.foreach(flowInSource.sendNext) - .mapMaterializedValue(_.onComplete { - case Success(_) => flowInSource.sendNext("in complete") - case Failure(_) => flowInSource.sendNext("in error") - })), - flowOutSource - .takeWhile(_ != "complete") - .map { - case "error" => throw TE("error") - case other => other - }.watchTermination()((_, term) => - term.foreach(_ => { - flowInSource.sendNext("out complete") - }))) - })(Keep.left).toMat(TestSink.probe[String])(Keep.both).run() + val (source, sink) = TestSource + .probe[String] + .viaMat(RestartFlowFactory(onlyOnFailures)(minBackoff, maxBackoff, 0, maxRestarts) { () => + created.incrementAndGet() + Flow.fromSinkAndSource( + Flow[String] + .takeWhile(_ != "cancel") + .map { + case "in error" => throw TE("in error") + case other => other + } + .to(Sink + .foreach(flowInSource.sendNext) + .mapMaterializedValue(_.onComplete { + case Success(_) => flowInSource.sendNext("in complete") + case Failure(_) => flowInSource.sendNext("in error") + })), + flowOutSource + .takeWhile(_ != "complete") + .map { + case "error" => throw TE("error") + case other => other + } + .watchTermination()((_, term) => + term.foreach(_ => { + flowInSource.sendNext("out complete") + }))) + })(Keep.left) + .toMat(TestSink.probe[String])(Keep.both) + .run() (created, source, flowInProbe, flowOutProbe, sink) } "run normally" in assertAllStagesStopped { val created = new AtomicInteger() - val (source, sink) = TestSource.probe[String].viaMat(RestartFlow.withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => - created.incrementAndGet() - Flow[String] - })(Keep.left).toMat(TestSink.probe[String])(Keep.both).run() + val (source, sink) = TestSource 
+ .probe[String] + .viaMat(RestartFlow.withBackoff(shortMinBackoff, shortMaxBackoff, 0) { () => + created.incrementAndGet() + Flow[String] + })(Keep.left) + .toMat(TestSink.probe[String])(Keep.both) + .run() source.sendNext("a") sink.requestNext("a") @@ -688,7 +738,8 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 // onlyOnFailures --> "stop on cancellation when using onlyOnFailuresWithBackoff" in { val onlyOnFailures = true - val (created, source, flowInProbe, flowOutProbe, sink) = setupFlow(shortMinBackoff, shortMaxBackoff, -1, onlyOnFailures) + val (created, source, flowInProbe, flowOutProbe, sink) = + setupFlow(shortMinBackoff, shortMaxBackoff, -1, onlyOnFailures) source.sendNext("a") flowInProbe.requestNext("a") @@ -707,7 +758,8 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 "stop on completion when using onlyOnFailuresWithBackoff" in { val onlyOnFailures = true - val (created, source, flowInProbe, flowOutProbe, sink) = setupFlow(shortMinBackoff, shortMaxBackoff, -1, onlyOnFailures) + val (created, source, flowInProbe, flowOutProbe, sink) = + setupFlow(shortMinBackoff, shortMaxBackoff, -1, onlyOnFailures) source.sendNext("a") flowInProbe.requestNext("a") @@ -751,14 +803,14 @@ class RestartSpec extends StreamSpec(Map("akka.test.single-expect-default" -> "1 } val restartOnFailures = - RestartFlow.onFailuresWithBackoff(1.second, 2.seconds, 0.2, 2)(() => { - flowCreations.incrementAndGet() - failsSomeTimes - }).addAttributes(Attributes(Delay(100.millis))) + RestartFlow + .onFailuresWithBackoff(1.second, 2.seconds, 0.2, 2)(() => { + flowCreations.incrementAndGet() + failsSomeTimes + }) + .addAttributes(Attributes(Delay(100.millis))) - val elements = Source(1 to 7) - .via(restartOnFailures) - .runWith(Sink.seq).futureValue + val elements = Source(1 to 7).via(restartOnFailures).runWith(Sink.seq).futureValue elements shouldEqual List(1, 2, 4, 5, 7) flowCreations.get() shouldEqual 3 } diff --git 
a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala index 0fef44af0b..d5869cdb8d 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ReverseArrowSpec.scala @@ -19,25 +19,33 @@ class ReverseArrowSpec extends StreamSpec { "Reverse Arrows in the Graph DSL" must { "work from Inlets" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - s.in <~ source - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result(RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + s.in <~ source + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work from SinkShape" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - s <~ source - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result(RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + s <~ source + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work from Sink" in { val sub = TestSubscriber.manualProbe[Int] - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - Sink.fromSubscriber(sub) <~ source - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + Sink.fromSubscriber(sub) <~ source + ClosedShape + }) + .run() sub.expectSubscription().request(10) sub.expectNext(1, 2, 3) sub.expectComplete() @@ -66,121 +74,167 @@ class ReverseArrowSpec extends StreamSpec { } "work from FlowShape" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - val f: FlowShape[Int, Int] = b.add(Flow[Int]) - f <~ source - f ~> s - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result(RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + val f: FlowShape[Int, Int] = 
b.add(Flow[Int]) + f <~ source + f ~> s + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work from UniformFanInShape" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2)) - f <~ source - f <~ Source.empty - f ~> s - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result( + RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2)) + f <~ source + f <~ Source.empty + f ~> s + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work from UniformFanOutShape" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2)) - f <~ source - f ~> Sink.ignore - f ~> s - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result( + RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2)) + f <~ source + f ~> Sink.ignore + f ~> s + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work towards Outlets" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - val o: Outlet[Int] = b.add(source).out - s <~ o - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result(RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + val o: Outlet[Int] = b.add(source).out + s <~ o + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work towards SourceShape" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - val o: SourceShape[Int] = b.add(source) - s <~ o - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result(RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + val o: SourceShape[Int] = b.add(source) + s <~ o + ClosedShape + 
}) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work towards Source" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - s <~ source - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result(RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + s <~ source + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work towards FlowShape" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - val f: FlowShape[Int, Int] = b.add(Flow[Int]) - s <~ f - source ~> f - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result(RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + val f: FlowShape[Int, Int] = b.add(Flow[Int]) + s <~ f + source ~> f + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work towards UniformFanInShape" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2)) - s <~ f - Source.empty ~> f - source ~> f - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result( + RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2)) + s <~ f + Source.empty ~> f + source ~> f + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "fail towards already full UniformFanInShape" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2)) - val src = b.add(source) - Source.empty ~> f - src ~> f - (the[IllegalArgumentException] thrownBy (s <~ f <~ src)).getMessage should include("no more inlets free") - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result( + RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + val f: UniformFanInShape[Int, Int] = b.add(Merge[Int](2)) + val src = 
b.add(source) + Source.empty ~> f + src ~> f + (the[IllegalArgumentException] thrownBy (s <~ f <~ src)).getMessage should include("no more inlets free") + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work towards UniformFanOutShape" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2)) - s <~ f - Sink.ignore <~ f - source ~> f - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result( + RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2)) + s <~ f + Sink.ignore <~ f + source ~> f + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "fail towards already full UniformFanOutShape" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2)) - val sink2: SinkShape[Int] = b.add(Sink.ignore) - val src = b.add(source) - src ~> f - sink2 <~ f - (the[IllegalArgumentException] thrownBy (s <~ f <~ src)).getMessage should include("[StatefulMapConcat.out] is already connected") - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result( + RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + val f: UniformFanOutShape[Int, Int] = b.add(Broadcast[Int](2)) + val sink2: SinkShape[Int] = b.add(Sink.ignore) + val src = b.add(source) + src ~> f + sink2 <~ f + (the[IllegalArgumentException] thrownBy (s <~ f <~ src)).getMessage should include( + "[StatefulMapConcat.out] is already connected") + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work across a Flow" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - s <~ Flow[Int] <~ source - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result(RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s 
=> + s <~ Flow[Int] <~ source + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } "work across a FlowShape" in { - Await.result(RunnableGraph.fromGraph(GraphDSL.create(sink) { implicit b => s => - s <~ b.add(Flow[Int]) <~ source - ClosedShape - }).run(), 1.second) should ===(Seq(1, 2, 3)) + Await.result(RunnableGraph + .fromGraph(GraphDSL.create(sink) { implicit b => s => + s <~ b.add(Flow[Int]) <~ source + ClosedShape + }) + .run(), + 1.second) should ===(Seq(1, 2, 3)) } } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RunnableGraphSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RunnableGraphSpec.scala index 4b11a17d35..2285462e44 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RunnableGraphSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/RunnableGraphSpec.scala @@ -17,7 +17,8 @@ class RunnableGraphSpec extends StreamSpec { "suitably override attribute handling methods" in { import Attributes._ - val r: RunnableGraph[NotUsed] = RunnableGraph.fromGraph(Source.empty.to(Sink.ignore)).async.addAttributes(none).named("useless") + val r: RunnableGraph[NotUsed] = + RunnableGraph.fromGraph(Source.empty.to(Sink.ignore)).async.addAttributes(none).named("useless") r.traversalBuilder.attributes.get[Name] shouldEqual Some(Name("useless")) r.traversalBuilder.attributes.get[AsyncBoundary.type] shouldEqual (Some(AsyncBoundary)) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala index 5503a0e4b0..04997abbd0 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SeqSinkSpec.scala @@ -12,8 +12,7 @@ import scala.concurrent.{ Await, Future } class SeqSinkSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = 
ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val mat = ActorMaterializer(settings) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkAsJavaStreamSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkAsJavaStreamSpec.scala index b217dfb04a..9368ed8ea9 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkAsJavaStreamSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkAsJavaStreamSpec.scala @@ -55,9 +55,15 @@ class SinkAsJavaStreamSpec extends StreamSpec(UnboundedMailboxConfig) { val materializer = ActorMaterializer()(sys) try { - TestSource.probe[ByteString].runWith(StreamConverters.asJavaStream() - .addAttributes(ActorAttributes.dispatcher("akka.actor.default-dispatcher")))(materializer) - materializer.asInstanceOf[PhasedFusingActorMaterializer].supervisor.tell(StreamSupervisor.GetChildren, testActor) + TestSource + .probe[ByteString] + .runWith( + StreamConverters.asJavaStream().addAttributes(ActorAttributes.dispatcher("akka.actor.default-dispatcher")))( + materializer) + materializer + .asInstanceOf[PhasedFusingActorMaterializer] + .supervisor + .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "asJavaStream").get assertDispatcher(ref, "akka.actor.default-dispatcher") } finally shutdown(sys) @@ -69,7 +75,10 @@ class SinkAsJavaStreamSpec extends StreamSpec(UnboundedMailboxConfig) { try { TestSource.probe[ByteString].runWith(StreamConverters.asJavaStream())(materializer) - materializer.asInstanceOf[PhasedFusingActorMaterializer].supervisor.tell(StreamSupervisor.GetChildren, testActor) + materializer + .asInstanceOf[PhasedFusingActorMaterializer] + .supervisor + .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "asJavaStream").get assertDispatcher(ref, "akka.stream.default-blocking-io-dispatcher") } finally 
shutdown(sys) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachAsyncSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachAsyncSpec.scala index 0a4f72e094..a1c9d0e266 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachAsyncSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachAsyncSpec.scala @@ -99,8 +99,7 @@ class SinkForeachAsyncSpec extends StreamSpec { } val p = - Source(List(one _, two _, three _, four _)) - .runWith(sink) + Source(List(one _, two _, three _, four _)).runWith(sink) latch(1).countDown() probe.expectMsg(1) @@ -199,15 +198,18 @@ class SinkForeachAsyncSpec extends StreamSpec { val probe = TestProbe() val latch = TestLatch(1) - val p = Source(1 to 5).runWith(Sink.foreachAsync(4)((n: Int) => { - Future { - if (n == 3) throw new RuntimeException("err1") with NoStackTrace - else { - probe.ref ! n - Await.ready(latch, 10.seconds) - } - } - }).withAttributes(supervisionStrategy(resumingDecider))) + val p = Source(1 to 5).runWith( + Sink + .foreachAsync(4)((n: Int) => { + Future { + if (n == 3) throw new RuntimeException("err1") with NoStackTrace + else { + probe.ref ! n + Await.ready(latch, 10.seconds) + } + } + }) + .withAttributes(supervisionStrategy(resumingDecider))) latch.countDown() probe.expectMsgAllOf(1, 2, 4, 5) @@ -222,19 +224,24 @@ class SinkForeachAsyncSpec extends StreamSpec { val element4Latch = new CountDownLatch(1) val errorLatch = new CountDownLatch(2) - val p = Source.fromIterator(() => Iterator.from(1)).runWith(Sink.foreachAsync(3)((n: Int) => { - Future { - if (n == 3) { - // Error will happen only after elements 1, 2 has been processed - errorLatch.await(5, TimeUnit.SECONDS) - throw new RuntimeException("err2") with NoStackTrace - } else { - probe.ref ! n - errorLatch.countDown() - element4Latch.await(5, TimeUnit.SECONDS) // Block element 4, 5, 6, ... 
from entering - } - } - }).withAttributes(supervisionStrategy(stoppingDecider))) + val p = Source + .fromIterator(() => Iterator.from(1)) + .runWith( + Sink + .foreachAsync(3)((n: Int) => { + Future { + if (n == 3) { + // Error will happen only after elements 1, 2 has been processed + errorLatch.await(5, TimeUnit.SECONDS) + throw new RuntimeException("err2") with NoStackTrace + } else { + probe.ref ! n + errorLatch.countDown() + element4Latch.await(5, TimeUnit.SECONDS) // Block element 4, 5, 6, ... from entering + } + } + }) + .withAttributes(supervisionStrategy(stoppingDecider))) // Only the first two messages are guaranteed to arrive due to their enforced ordering related to the time // of failure. diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachParallelSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachParallelSpec.scala index ef959b6c2f..831bfa7ff0 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachParallelSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkForeachParallelSpec.scala @@ -78,13 +78,16 @@ class SinkForeachParallelSpec extends StreamSpec { val probe = TestProbe() val latch = TestLatch(1) - val p = Source(1 to 5).runWith(Sink.foreachParallel(4)((n: Int) => { - if (n == 3) throw new RuntimeException("err1") with NoStackTrace - else { - probe.ref ! n - Await.ready(latch, 10.seconds) - } - }).withAttributes(supervisionStrategy(resumingDecider))) + val p = Source(1 to 5).runWith( + Sink + .foreachParallel(4)((n: Int) => { + if (n == 3) throw new RuntimeException("err1") with NoStackTrace + else { + probe.ref ! 
n + Await.ready(latch, 10.seconds) + } + }) + .withAttributes(supervisionStrategy(resumingDecider))) latch.countDown() probe.expectMsgAllOf(1, 2, 4, 5) @@ -99,17 +102,22 @@ class SinkForeachParallelSpec extends StreamSpec { val element4Latch = new CountDownLatch(1) val errorLatch = new CountDownLatch(2) - val p = Source.fromIterator(() => Iterator.from(1)).runWith(Sink.foreachParallel(3)((n: Int) => { - if (n == 3) { - // Error will happen only after elements 1, 2 has been processed - errorLatch.await(5, TimeUnit.SECONDS) - throw new RuntimeException("err2") with NoStackTrace - } else { - probe.ref ! n - errorLatch.countDown() - element4Latch.await(5, TimeUnit.SECONDS) // Block element 4, 5, 6, ... from entering - } - }).withAttributes(supervisionStrategy(stoppingDecider))) + val p = Source + .fromIterator(() => Iterator.from(1)) + .runWith( + Sink + .foreachParallel(3)((n: Int) => { + if (n == 3) { + // Error will happen only after elements 1, 2 has been processed + errorLatch.await(5, TimeUnit.SECONDS) + throw new RuntimeException("err2") with NoStackTrace + } else { + probe.ref ! n + errorLatch.countDown() + element4Latch.await(5, TimeUnit.SECONDS) // Block element 4, 5, 6, ... from entering + } + }) + .withAttributes(supervisionStrategy(stoppingDecider))) // Only the first two messages are guaranteed to arrive due to their enforced ordering related to the time // of failure. 
diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala index be4820890e..a05c6c5c06 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SinkSpec.scala @@ -38,9 +38,11 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { Source(List(0, 1, 2)).runWith(sink) val subscriptions = probes.map(_.expectSubscription()) - subscriptions.foreach { s => s.request(3) } + subscriptions.foreach { s => + s.request(3) + } probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "be composable with importing 1 module" in { @@ -54,57 +56,75 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { Source(List(0, 1, 2)).runWith(sink) val subscriptions = probes.map(_.expectSubscription()) - subscriptions.foreach { s => s.request(3) } + subscriptions.foreach { s => + s.request(3) + } probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "be composable with importing 2 modules" in { val probes = Array.fill(3)(TestSubscriber.manualProbe[Int]) - val sink = Sink.fromGraph(GraphDSL.create(Sink.fromSubscriber(probes(0)), Sink.fromSubscriber(probes(1)))(List(_, _)) { implicit b => (s0, s1) => - val bcast = b.add(Broadcast[Int](3)) - bcast.out(0).filter(_ == 0) ~> s0.in - bcast.out(1).filter(_ == 1) ~> s1.in - bcast.out(2).filter(_ == 2) ~> Sink.fromSubscriber(probes(2)) - SinkShape(bcast.in) - }) + val sink = + Sink.fromGraph(GraphDSL.create(Sink.fromSubscriber(probes(0)), Sink.fromSubscriber(probes(1)))(List(_, _)) { + implicit b => (s0, s1) => + val bcast = b.add(Broadcast[Int](3)) + bcast.out(0).filter(_ == 0) ~> s0.in + bcast.out(1).filter(_ == 1) ~> 
s1.in + bcast.out(2).filter(_ == 2) ~> Sink.fromSubscriber(probes(2)) + SinkShape(bcast.in) + }) Source(List(0, 1, 2)).runWith(sink) val subscriptions = probes.map(_.expectSubscription()) - subscriptions.foreach { s => s.request(3) } + subscriptions.foreach { s => + s.request(3) + } probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "be composable with importing 3 modules" in { val probes = Array.fill(3)(TestSubscriber.manualProbe[Int]) - val sink = Sink.fromGraph(GraphDSL.create(Sink.fromSubscriber(probes(0)), Sink.fromSubscriber(probes(1)), Sink.fromSubscriber(probes(2)))(List(_, _, _)) { implicit b => (s0, s1, s2) => - val bcast = b.add(Broadcast[Int](3)) - bcast.out(0).filter(_ == 0) ~> s0.in - bcast.out(1).filter(_ == 1) ~> s1.in - bcast.out(2).filter(_ == 2) ~> s2.in - SinkShape(bcast.in) - }) + val sink = Sink.fromGraph( + GraphDSL.create(Sink.fromSubscriber(probes(0)), Sink.fromSubscriber(probes(1)), Sink.fromSubscriber(probes(2)))( + List(_, _, _)) { implicit b => (s0, s1, s2) => + val bcast = b.add(Broadcast[Int](3)) + bcast.out(0).filter(_ == 0) ~> s0.in + bcast.out(1).filter(_ == 1) ~> s1.in + bcast.out(2).filter(_ == 2) ~> s2.in + SinkShape(bcast.in) + }) Source(List(0, 1, 2)).runWith(sink) val subscriptions = probes.map(_.expectSubscription()) - subscriptions.foreach { s => s.request(3) } + subscriptions.foreach { s => + s.request(3) + } probes.zipWithIndex.foreach { case (p, i) => p.expectNext(i) } - probes.foreach { case p => p.expectComplete() } + probes.foreach { case p => p.expectComplete() } } "combine to many outputs with simplified API" in { val probes = Seq.fill(3)(TestSubscriber.manualProbe[Int]()) - val sink = Sink.combine(Sink.fromSubscriber(probes(0)), Sink.fromSubscriber(probes(1)), Sink.fromSubscriber(probes(2)))(Broadcast[Int](_)) + val sink = Sink.combine(Sink.fromSubscriber(probes(0)), + Sink.fromSubscriber(probes(1)), + 
Sink.fromSubscriber(probes(2)))(Broadcast[Int](_)) Source(List(0, 1, 2)).runWith(sink) val subscriptions = probes.map(_.expectSubscription()) - subscriptions.foreach { s => s.request(1) } - probes.foreach { p => p.expectNext(0) } + subscriptions.foreach { s => + s.request(1) + } + probes.foreach { p => + p.expectNext(0) + } - subscriptions.foreach { s => s.request(2) } + subscriptions.foreach { s => + s.request(2) + } probes.foreach { p => p.expectNextN(List(1, 2)) p.expectComplete @@ -119,10 +139,16 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { val subscriptions = probes.map(_.expectSubscription()) - subscriptions.foreach { s => s.request(1) } - probes.foreach { p => p.expectNext(0) } + subscriptions.foreach { s => + s.request(1) + } + probes.foreach { p => + p.expectNext(0) + } - subscriptions.foreach { s => s.request(2) } + subscriptions.foreach { s => + s.request(2) + } probes.foreach { p => p.expectNextN(List(1, 2)) p.expectComplete @@ -186,11 +212,11 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { "Java collector Sink" must { - class TestCollector( - _supplier: () => Supplier[Array[Int]], - _accumulator: () => BiConsumer[Array[Int], Int], - _combiner: () => BinaryOperator[Array[Int]], - _finisher: () => function.Function[Array[Int], Int]) extends Collector[Int, Array[Int], Int] { + class TestCollector(_supplier: () => Supplier[Array[Int]], + _accumulator: () => BiConsumer[Array[Int], Int], + _combiner: () => BinaryOperator[Array[Int]], + _finisher: () => function.Function[Array[Int], Int]) + extends Collector[Int, Array[Int], Int] { override def supplier(): Supplier[Array[Int]] = _supplier() override def combiner(): BinaryOperator[Array[Int]] = _combiner() override def finisher(): function.Function[Array[Int], Int] = _finisher() @@ -219,15 +245,17 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { } "work in the happy case" in { - Source(1 to 
100).map(_.toString).runWith(StreamConverters.javaCollector(() => Collectors.joining(", "))) + Source(1 to 100) + .map(_.toString) + .runWith(StreamConverters.javaCollector(() => Collectors.joining(", "))) .futureValue should ===((1 to 100).mkString(", ")) } "work parallelly in the happy case" in { - Source(1 to 100).runWith(StreamConverters - .javaCollectorParallelUnordered(4)( - () => Collectors.summingInt[Int](intIdentity))) - .futureValue.toInt should ===(5050) + Source(1 to 100) + .runWith(StreamConverters.javaCollectorParallelUnordered(4)(() => Collectors.summingInt[Int](intIdentity))) + .futureValue + .toInt should ===(5050) } "be reusable" in { @@ -247,8 +275,8 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { "fail if getting the supplier fails" in { def failedSupplier(): Supplier[Array[Int]] = throw TE("") - val future = Source(1 to 100).runWith(StreamConverters.javaCollector( - () => new TestCollector(failedSupplier _, accumulator _, combiner _, finisher _))) + val future = Source(1 to 100).runWith(StreamConverters.javaCollector(() => + new TestCollector(failedSupplier _, accumulator _, combiner _, finisher _))) a[TE] shouldBe thrownBy { Await.result(future, 300.millis) } @@ -258,8 +286,8 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { def failedSupplier(): Supplier[Array[Int]] = new Supplier[Array[Int]] { override def get(): Array[Int] = throw TE("") } - val future = Source(1 to 100).runWith(StreamConverters.javaCollector( - () => new TestCollector(failedSupplier _, accumulator _, combiner _, finisher _))) + val future = Source(1 to 100).runWith(StreamConverters.javaCollector(() => + new TestCollector(failedSupplier _, accumulator _, combiner _, finisher _))) a[TE] shouldBe thrownBy { Await.result(future, 300.millis) } @@ -268,8 +296,8 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { "fail if getting the accumulator fails" in { def failedAccumulator(): 
BiConsumer[Array[Int], Int] = throw TE("") - val future = Source(1 to 100).runWith(StreamConverters.javaCollector( - () => new TestCollector(supplier _, failedAccumulator _, combiner _, finisher _))) + val future = Source(1 to 100).runWith(StreamConverters.javaCollector(() => + new TestCollector(supplier _, failedAccumulator _, combiner _, finisher _))) a[TE] shouldBe thrownBy { Await.result(future, 300.millis) } @@ -280,8 +308,8 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { override def accept(a: Array[Int], b: Int): Unit = throw TE("") } - val future = Source(1 to 100).runWith(StreamConverters.javaCollector( - () => new TestCollector(supplier _, failedAccumulator _, combiner _, finisher _))) + val future = Source(1 to 100).runWith(StreamConverters.javaCollector(() => + new TestCollector(supplier _, failedAccumulator _, combiner _, finisher _))) a[TE] shouldBe thrownBy { Await.result(future, 300.millis) } @@ -290,8 +318,8 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { "fail if getting the finisher fails" in { def failedFinisher(): function.Function[Array[Int], Int] = throw TE("") - val future = Source(1 to 100).runWith(StreamConverters.javaCollector( - () => new TestCollector(supplier _, accumulator _, combiner _, failedFinisher _))) + val future = Source(1 to 100).runWith(StreamConverters.javaCollector(() => + new TestCollector(supplier _, accumulator _, combiner _, failedFinisher _))) a[TE] shouldBe thrownBy { Await.result(future, 300.millis) } @@ -301,8 +329,8 @@ class SinkSpec extends StreamSpec with DefaultTimeout with ScalaFutures { def failedFinisher(): function.Function[Array[Int], Int] = new function.Function[Array[Int], Int] { override def apply(a: Array[Int]): Int = throw TE("") } - val future = Source(1 to 100).runWith(StreamConverters.javaCollector( - () => new TestCollector(supplier _, accumulator _, combiner _, failedFinisher _))) + val future = Source(1 to 
100).runWith(StreamConverters.javaCollector(() => + new TestCollector(supplier _, accumulator _, combiner _, failedFinisher _))) a[TE] shouldBe thrownBy { Await.result(future, 300.millis) } diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala index dd907551a0..94b8fe04f1 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceSpec.scala @@ -88,16 +88,20 @@ class SourceSpec extends StreamSpec with DefaultTimeout { val source = Source.asSubscriber[Int] val out = TestSubscriber.manualProbe[Int] - val s = Source.fromGraph(GraphDSL.create(source, source, source, source, source)(immutable.Seq(_, _, _, _, _)) { implicit b => (i0, i1, i2, i3, i4) => - import GraphDSL.Implicits._ - val m = b.add(Merge[Int](5)) - i0.out ~> m.in(0) - i1.out ~> m.in(1) - i2.out ~> m.in(2) - i3.out ~> m.in(3) - i4.out ~> m.in(4) - SourceShape(m.out) - }).to(Sink.fromSubscriber(out)).run() + val s = Source + .fromGraph(GraphDSL.create(source, source, source, source, source)(immutable.Seq(_, _, _, _, _)) { + implicit b => (i0, i1, i2, i3, i4) => + import GraphDSL.Implicits._ + val m = b.add(Merge[Int](5)) + i0.out ~> m.in(0) + i1.out ~> m.in(1) + i2.out ~> m.in(2) + i3.out ~> m.in(3) + i4.out ~> m.in(4) + SourceShape(m.out) + }) + .to(Sink.fromSubscriber(out)) + .run() for (i <- 0 to 4) probes(i).subscribe(s(i)) val sub = out.expectSubscription() @@ -160,11 +164,10 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "combine using Concat strategy two inputs with simplified API" in { //#combine - val sources = immutable.Seq( - Source(List(1, 2, 3)), - Source(List(10, 20, 30))) + val sources = immutable.Seq(Source(List(1, 2, 3)), Source(List(10, 20, 30))) - Source.combine(sources(0), sources(1))(Concat(_)) + Source + .combine(sources(0), sources(1))(Concat(_)) .runWith(Sink.seq) // This will 
produce the Seq(1, 2, 3, 10, 20, 30) //#combine @@ -222,38 +225,47 @@ class SourceSpec extends StreamSpec with DefaultTimeout { } "Unfold Source" must { - val expected = List(9227465, 5702887, 3524578, 2178309, 1346269, 832040, 514229, 317811, 196418, 121393, 75025, 46368, 28657, 17711, 10946, 6765, 4181, 2584, 1597, 987, 610, 377, 233, 144, 89, 55, 34, 21, 13, 8, 5, 3, 2, 1, 1, 0) + val expected = List(9227465, 5702887, 3524578, 2178309, 1346269, 832040, 514229, 317811, 196418, 121393, 75025, + 46368, 28657, 17711, 10946, 6765, 4181, 2584, 1597, 987, 610, 377, 233, 144, 89, 55, 34, 21, 13, 8, 5, 3, 2, 1, 1, + 0) "generate a finite fibonacci sequence" in { - Source.unfold((0, 1)) { - case (a, _) if a > 10000000 => None - case (a, b) => Some((b, a + b) -> a) - }.runFold(List.empty[Int]) { case (xs, x) => x :: xs } + Source + .unfold((0, 1)) { + case (a, _) if a > 10000000 => None + case (a, b) => Some((b, a + b) -> a) + } + .runFold(List.empty[Int]) { case (xs, x) => x :: xs } .futureValue should ===(expected) } "terminate with a failure if there is an exception thrown" in { val t = new RuntimeException("expected") - EventFilter[RuntimeException](message = "expected", occurrences = 1) intercept - whenReady( - Source.unfold((0, 1)) { + EventFilter[RuntimeException](message = "expected", occurrences = 1).intercept( + whenReady(Source + .unfold((0, 1)) { case (a, _) if a > 10000000 => throw t case (a, b) => Some((b, a + b) -> a) - }.runFold(List.empty[Int]) { case (xs, x) => x :: xs }.failed) { - x => (x should be).theSameInstanceAs(t) } + .runFold(List.empty[Int]) { case (xs, x) => x :: xs } + .failed) { x => + (x should be).theSameInstanceAs(t) + }) } "generate a finite fibonacci sequence asynchronously" in { - Source.unfoldAsync((0, 1)) { - case (a, _) if a > 10000000 => Future.successful(None) - case (a, b) => Future(Some((b, a + b) -> a))(system.dispatcher) - }.runFold(List.empty[Int]) { case (xs, x) => x :: xs } + Source + .unfoldAsync((0, 1)) { + case (a, _) if 
a > 10000000 => Future.successful(None) + case (a, b) => Future(Some((b, a + b) -> a))(system.dispatcher) + } + .runFold(List.empty[Int]) { case (xs, x) => x :: xs } .futureValue should ===(expected) } "generate an unbounded fibonacci sequence" in { - Source.unfold((0, 1))({ case (a, b) => Some((b, a + b) -> a) }) + Source + .unfold((0, 1))({ case (a, b) => Some((b, a + b) -> a) }) .take(36) .runFold(List.empty[Int]) { case (xs, x) => x :: xs } .futureValue should ===(expected) @@ -262,22 +274,24 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "Iterator Source" must { "properly iterate" in { - Source.fromIterator(() => Iterator.iterate(false)(!_)) - .grouped(10) - .runWith(Sink.head) - .futureValue should ===(immutable.Seq(false, true, false, true, false, true, false, true, false, true)) + Source.fromIterator(() => Iterator.iterate(false)(!_)).grouped(10).runWith(Sink.head).futureValue should ===( + immutable.Seq(false, true, false, true, false, true, false, true, false, true)) } "fail stream when iterator throws" in { Source .fromIterator(() => (1 to 1000).toIterator.map(k => if (k < 10) k else throw TE("a"))) .runWith(Sink.ignore) - .failed.futureValue.getClass should ===(classOf[TE]) + .failed + .futureValue + .getClass should ===(classOf[TE]) Source .fromIterator(() => (1 to 1000).toIterator.map(_ => throw TE("b"))) .runWith(Sink.ignore) - .failed.futureValue.getClass should ===(classOf[TE]) + .failed + .futureValue + .getClass should ===(classOf[TE]) } "use decider when iterator throws" in { @@ -299,30 +313,18 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "ZipN Source" must { "properly zipN" in { - val sources = immutable.Seq( - Source(List(1, 2, 3)), - Source(List(10, 20, 30)), - Source(List(100, 200, 300))) + val sources = immutable.Seq(Source(List(1, 2, 3)), Source(List(10, 20, 30)), Source(List(100, 200, 300))) - Source.zipN(sources) - .runWith(Sink.seq) - .futureValue should ===(immutable.Seq( - immutable.Seq(1, 10, 100), - 
immutable.Seq(2, 20, 200), - immutable.Seq(3, 30, 300))) + Source.zipN(sources).runWith(Sink.seq).futureValue should ===( + immutable.Seq(immutable.Seq(1, 10, 100), immutable.Seq(2, 20, 200), immutable.Seq(3, 30, 300))) } } "ZipWithN Source" must { "properly zipWithN" in { - val sources = immutable.Seq( - Source(List(1, 2, 3)), - Source(List(10, 20, 30)), - Source(List(100, 200, 300))) + val sources = immutable.Seq(Source(List(1, 2, 3)), Source(List(10, 20, 30)), Source(List(100, 200, 300))) - Source.zipWithN[Int, Int](_.sum)(sources) - .runWith(Sink.seq) - .futureValue should ===(immutable.Seq(111, 222, 333)) + Source.zipWithN[Int, Int](_.sum)(sources).runWith(Sink.seq).futureValue should ===(immutable.Seq(111, 222, 333)) } } @@ -331,7 +333,8 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "continuously generate the same sequence" in { val expected = Seq(1, 2, 3, 1, 2, 3, 1, 2, 3) //#cycle - Source.cycle(() => List(1, 2, 3).iterator) + Source + .cycle(() => List(1, 2, 3).iterator) .grouped(9) .runWith(Sink.head) // This will produce the Seq(1, 2, 3, 1, 2, 3, 1, 2, 3) @@ -342,11 +345,13 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "throw an exception in case of empty iterator" in { //#cycle-error val empty = Iterator.empty - Source.cycle(() => empty) + Source + .cycle(() => empty) .runWith(Sink.head) // This will return a failed future with an `IllegalArgumentException` //#cycle-error - .failed.futureValue shouldBe an[IllegalArgumentException] + .failed + .futureValue shouldBe an[IllegalArgumentException] } } @@ -359,9 +364,12 @@ class SourceSpec extends StreamSpec with DefaultTimeout { "Java Stream source" must { import scala.compat.java8.FunctionConverters._ - import java.util.stream.{ Stream, IntStream } + import java.util.stream.{ IntStream, Stream } - def javaStreamInts = IntStream.iterate(1, { i: Int => i + 1 }.asJava) + def javaStreamInts = + IntStream.iterate(1, { i: Int => + i + 1 + }.asJava) "work with Java collections" in { 
val list = new java.util.LinkedList[Integer]() @@ -369,11 +377,16 @@ class SourceSpec extends StreamSpec with DefaultTimeout { list.add(1) list.add(2) - StreamConverters.fromJavaStream(() => list.stream()).map(_.intValue).runWith(Sink.seq).futureValue should ===(List(0, 1, 2)) + StreamConverters.fromJavaStream(() => list.stream()).map(_.intValue).runWith(Sink.seq).futureValue should ===( + List(0, 1, 2)) } "work with primitive streams" in { - StreamConverters.fromJavaStream(() => IntStream.rangeClosed(1, 10)).map(_.intValue).runWith(Sink.seq).futureValue should ===(1 to 10) + StreamConverters + .fromJavaStream(() => IntStream.rangeClosed(1, 10)) + .map(_.intValue) + .runWith(Sink.seq) + .futureValue should ===(1 to 10) } "work with an empty stream" in { @@ -385,8 +398,14 @@ class SourceSpec extends StreamSpec with DefaultTimeout { } "work with a filtered stream" in { - StreamConverters.fromJavaStream(() => javaStreamInts.filter({ i: Int => i % 2 == 0 }.asJava)) - .take(1000).runFold(0)(_ + _).futureValue should ===(1001000) + StreamConverters + .fromJavaStream(() => + javaStreamInts.filter({ i: Int => + i % 2 == 0 + }.asJava)) + .take(1000) + .runFold(0)(_ + _) + .futureValue should ===(1001000) } "properly report errors during iteration" in { diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceWithContextSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceWithContextSpec.scala index 90769a57a5..f83a735746 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceWithContextSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SourceWithContextSpec.scala @@ -30,7 +30,8 @@ class SourceWithContextSpec extends StreamSpec { "get created from a source of tuple2" in { val msg = Message("a", 1L) - SourceWithContext.fromTuples(Source(Vector((msg, msg.offset)))) + SourceWithContext + .fromTuples(Source(Vector((msg, msg.offset)))) .asSource .runWith(TestSink.probe[(Message, Long)]) .request(1) @@ -43,7 
+44,8 @@ class SourceWithContextSpec extends StreamSpec { Source(Vector(msg)) .asSourceWithContext(_.offset) .map(_.data) - .asSource.map { case (e, _) => e } + .asSource + .map { case (e, _) => e } .runWith(TestSink.probe[String]) .request(1) .expectNext("a") @@ -51,9 +53,7 @@ class SourceWithContextSpec extends StreamSpec { } "pass through contexts using map and filter" in { - Source( - Vector(Message("A", 1L), Message("B", 2L), Message("D", 3L), Message("C", 4L)) - ) + Source(Vector(Message("A", 1L), Message("B", 2L), Message("D", 3L), Message("C", 4L))) .asSourceWithContext(_.offset) .map(_.data.toLowerCase) .filter(_ != "b") diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala index e4c7743baf..751ee3d403 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StageActorRefSpec.scala @@ -4,12 +4,12 @@ package akka.stream.scaladsl -import akka.actor.{ Kill, PoisonPill, NoSerializationVerificationNeeded, ActorRef } +import akka.actor.{ ActorRef, Kill, NoSerializationVerificationNeeded, PoisonPill } import akka.event.Logging import akka.stream._ -import akka.stream.stage.{ GraphStageWithMaterializedValue, GraphStageLogic, InHandler } +import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue, InHandler } import akka.stream.testkit.StreamSpec -import akka.testkit.{ TestProbe, TestEvent, EventFilter, ImplicitSender } +import akka.testkit.{ EventFilter, ImplicitSender, TestEvent, TestProbe } import scala.concurrent.{ Future, Promise } import scala.concurrent.duration._ @@ -213,23 +213,24 @@ object StageActorRefSpec { } } - setHandler(in, new InHandler { - override def onPush(): Unit = { - sum += grab(in) - p.trySuccess(sum) - completeStage() - } + setHandler(in, + new InHandler { + override def onPush(): Unit = { + sum += grab(in) + 
p.trySuccess(sum) + completeStage() + } - override def onUpstreamFinish(): Unit = { - p.trySuccess(sum) - completeStage() - } + override def onUpstreamFinish(): Unit = { + p.trySuccess(sum) + completeStage() + } - override def onUpstreamFailure(ex: Throwable): Unit = { - p.tryFailure(ex) - failStage(ex) - } - }) + override def onUpstreamFailure(ex: Throwable): Unit = { + p.tryFailure(ex) + failStage(ex) + } + }) } logic -> p.future diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamRefsSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamRefsSpec.scala index 61dfdc4c7e..2697499b9f 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamRefsSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/StreamRefsSpec.scala @@ -24,8 +24,7 @@ object StreamRefsSpec { object DataSourceActor { def props(probe: ActorRef): Props = - Props(new DataSourceActor(probe)) - .withDispatcher("akka.test.stream-dispatcher") + Props(new DataSourceActor(probe)).withDispatcher("akka.test.stream-dispatcher") } class DataSourceActor(probe: ActorRef) extends Actor with ActorLogging { @@ -42,33 +41,32 @@ object StreamRefsSpec { val source: Source[String, NotUsed] = Source(List("hello", "world")) val ref: Future[SourceRef[String]] = source.runWith(StreamRefs.sourceRef()) - ref pipeTo sender() + ref.pipeTo(sender()) case "give-infinite" => val source: Source[String, NotUsed] = Source.fromIterator(() => Iterator.from(1)).map("ping-" + _) val (r: NotUsed, ref: Future[SourceRef[String]]) = source.toMat(StreamRefs.sourceRef())(Keep.both).run() - ref pipeTo sender() + ref.pipeTo(sender()) case "give-fail" => - val ref = Source.failed[String](new Exception("Booooom!") with NoStackTrace) - .runWith(StreamRefs.sourceRef()) + val ref = Source.failed[String](new Exception("Booooom!") with NoStackTrace).runWith(StreamRefs.sourceRef()) - ref pipeTo sender() + ref.pipeTo(sender()) case "give-complete-asap" => - val ref = Source.empty - 
.runWith(StreamRefs.sourceRef()) + val ref = Source.empty.runWith(StreamRefs.sourceRef()) - ref pipeTo sender() + ref.pipeTo(sender()) case "give-subscribe-timeout" => - val ref = Source.repeat("is anyone there?") + val ref = Source + .repeat("is anyone there?") .toMat(StreamRefs.sourceRef())(Keep.right) // attributes like this so they apply to the Sink.sourceRef .withAttributes(StreamRefAttributes.subscriptionTimeout(500.millis)) .run() - ref pipeTo sender() + ref.pipeTo(sender()) // case "send-bulk" => // /* // * Here we're able to send a source to a remote recipient @@ -87,32 +85,27 @@ object StreamRefsSpec { * For them it's a Sink; for us it's a Source. */ val sink = - StreamRefs.sinkRef[String]() - .to(Sink.actorRef(probe, "")) - .run() + StreamRefs.sinkRef[String]().to(Sink.actorRef(probe, "")).run() - sink pipeTo sender() + sink.pipeTo(sender()) case "receive-ignore" => val sink = - StreamRefs.sinkRef[String]() - .to(Sink.ignore) - .run() + StreamRefs.sinkRef[String]().to(Sink.ignore).run() - sink pipeTo sender() + sink.pipeTo(sender()) case "receive-subscribe-timeout" => - val sink = StreamRefs.sinkRef[String]() + val sink = StreamRefs + .sinkRef[String]() .withAttributes(StreamRefAttributes.subscriptionTimeout(500.millis)) .to(Sink.actorRef(probe, "")) .run() - sink pipeTo sender() + sink.pipeTo(sender()) case "receive-32" => - val (sink, driver) = StreamRefs.sinkRef[String]() - .toMat(TestSink.probe(context.system))(Keep.both) - .run() + val (sink, driver) = StreamRefs.sinkRef[String]().toMat(TestSink.probe(context.system))(Keep.both).run() import context.dispatcher Future { @@ -125,9 +118,9 @@ object StreamRefsSpec { driver.expectNextN(30) "" - } pipeTo probe + }.pipeTo(probe) - sink pipeTo sender() + sink.pipeTo(sender()) // case "receive-bulk" => // /* @@ -155,8 +148,7 @@ object StreamRefsSpec { final case class BulkSinkMsg(dataSink: SinkRef[ByteString]) def config(): Config = { - ConfigFactory.parseString( - s""" + ConfigFactory.parseString(s""" akka 
{ loglevel = INFO @@ -204,8 +196,7 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende remoteActor ! "give" val sourceRef = expectMsgType[SourceRef[String]] - sourceRef - .runWith(Sink.actorRef(p.ref, "")) + sourceRef.runWith(Sink.actorRef(p.ref, "")) p.expectMsg("hello") p.expectMsg("world") @@ -216,8 +207,7 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende remoteActor ! "give-fail" val sourceRef = expectMsgType[SourceRef[String]] - sourceRef - .runWith(Sink.actorRef(p.ref, "")) + sourceRef.runWith(Sink.actorRef(p.ref, "")) val f = p.expectMsgType[Failure] f.cause.getMessage should include("Remote stream (") @@ -231,8 +221,7 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende remoteActor ! "give-complete-asap" val sourceRef = expectMsgType[SourceRef[String]] - sourceRef - .runWith(Sink.actorRef(p.ref, "")) + sourceRef.runWith(Sink.actorRef(p.ref, "")) p.expectMsg("") } @@ -241,8 +230,7 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende remoteActor ! "give-infinite" val sourceRef = expectMsgType[SourceRef[String]] - val probe = sourceRef - .runWith(TestSink.probe) + val probe = sourceRef.runWith(TestSink.probe) probe.ensureSubscription() probe.expectNoMessage(100.millis) @@ -270,8 +258,7 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende // not materializing it, awaiting the timeout... Thread.sleep(800) // the timeout is 500ms - val probe = remoteSource - .runWith(TestSink.probe[String](system)) + val probe = remoteSource.runWith(TestSink.probe[String](system)) // val failure = p.expectMsgType[Failure] // failure.cause.getMessage should include("[SourceRef-0] Remote side did not subscribe (materialize) handed out Sink reference") @@ -287,7 +274,8 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende remoteActor ! 
"give-subscribe-timeout" val remoteSource: SourceRef[String] = expectMsgType[SourceRef[String]] // materialize directly and start consuming, timeout is 500ms - val eventualStrings: Future[immutable.Seq[String]] = remoteSource.throttle(1, 100.millis, 1, ThrottleMode.Shaping) + val eventualStrings: Future[immutable.Seq[String]] = remoteSource + .throttle(1, 100.millis, 1, ThrottleMode.Shaping) .take(60) // 60 * 100 millis - data flowing for 6 seconds - both 500ms and 5s timeouts should have passed .runWith(Sink.seq) @@ -300,7 +288,8 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende val remoteSource: SourceRef[String] = expectMsgType[SourceRef[String]] val done = - remoteSource.throttle(1, 200.millis) + remoteSource + .throttle(1, 200.millis) .takeWithin(5.seconds) // which is > than the subscription timeout (so we make sure the timeout was cancelled) .runWith(Sink.ignore) @@ -315,9 +304,7 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende remoteActor ! "receive" val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]] - Source("hello" :: "world" :: Nil) - .to(remoteSink) - .run() + Source("hello" :: "world" :: Nil).to(remoteSink).run() p.expectMsg("hello") p.expectMsg("world") @@ -330,9 +317,7 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]] val remoteFailureMessage = "Booom!" 
- Source.failed(new Exception(remoteFailureMessage)) - .to(remoteSink) - .run() + Source.failed(new Exception(remoteFailureMessage)).to(remoteSink).run() val f = p.expectMsgType[akka.actor.Status.Failure] f.cause.getMessage should include(s"Remote stream (") @@ -346,8 +331,7 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende val msgs = (1 to 100).toList.map(i => s"payload-$i") - Source(msgs) - .runWith(remoteSink) + Source(msgs).runWith(remoteSink) msgs.foreach(t => p.expectMsg(t)) p.expectMsg("") @@ -360,9 +344,7 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende // not materializing it, awaiting the timeout... Thread.sleep(800) // the timeout is 500ms - val probe = TestSource.probe[String](system) - .to(remoteSink) - .run() + val probe = TestSource.probe[String](system).to(remoteSink).run() val failure = p.expectMsgType[Failure] failure.cause.getMessage should include("Remote side did not subscribe (materialize) handed out Sink reference") @@ -375,7 +357,8 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende "not receive timeout if subscribing is already done to the sink ref" in { remoteActor ! 
"receive-subscribe-timeout" val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]] - Source.repeat("whatever") + Source + .repeat("whatever") .throttle(1, 100.millis) .take(10) // the timeout is 500ms, so this makes sure we run more time than that .runWith(remoteSink) @@ -392,7 +375,8 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende val remoteSink: SinkRef[String] = expectMsgType[SinkRef[String]] val done = - Source.repeat("hello-24934") + Source + .repeat("hello-24934") .throttle(1, 300.millis) .takeWithin(5.seconds) // which is > than the subscription timeout (so we make sure the timeout was cancelled) .alsoToMat(Sink.last)(Keep.right) @@ -406,7 +390,7 @@ class StreamRefsSpec(config: Config) extends AkkaSpec(config) with ImplicitSende remoteActor ! "receive-32" val sinkRef = expectMsgType[SinkRef[String]] - Source.repeat("hello") runWith sinkRef + Source.repeat("hello").runWith(sinkRef) // if we get this message, it means no checks in the request/expect semantics were broken, good! 
p.expectMsg("") diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubscriberSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubscriberSinkSpec.scala index 48aebd9455..39485651aa 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubscriberSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubscriberSinkSpec.scala @@ -11,8 +11,7 @@ import akka.stream.testkit.scaladsl.StreamTestKit._ class SubscriberSinkSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val materializer = ActorMaterializer(settings) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubscriberSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubscriberSourceSpec.scala index 8e6e48f646..a9d2b5c8bc 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubscriberSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubscriberSourceSpec.scala @@ -19,7 +19,9 @@ class SubscriberSourceSpec extends StreamSpec { "be able to use Subscriber in materialized value transformation" in { val f = - Source.asSubscriber[Int].mapMaterializedValue(s => Source(1 to 3).runWith(Sink.fromSubscriber(s))) + Source + .asSubscriber[Int] + .mapMaterializedValue(s => Source(1 to 3).runWith(Sink.fromSubscriber(s))) .runWith(Sink.fold[Int, Int](0)(_ + _)) Await.result(f, 3.seconds) should be(6) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubstreamSubscriptionTimeoutSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubstreamSubscriptionTimeoutSpec.scala index 1cbcc4af94..b1a02da92a 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubstreamSubscriptionTimeoutSpec.scala +++ 
b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/SubstreamSubscriptionTimeoutSpec.scala @@ -16,8 +16,7 @@ class SubstreamSubscriptionTimeoutSpec(conf: String) extends StreamSpec(conf) { import FlowGroupBySpec._ def this(subscriptionTimeout: FiniteDuration) { - this( - s""" + this(s""" |akka.stream.materializer { | subscription-timeout { | mode = cancel @@ -31,8 +30,7 @@ class SubstreamSubscriptionTimeoutSpec(conf: String) extends StreamSpec(conf) { this(300.millis) } - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 2) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 2) implicit val dispatcher = system.dispatcher implicit val materializer = ActorMaterializer(settings) @@ -42,7 +40,8 @@ class SubstreamSubscriptionTimeoutSpec(conf: String) extends StreamSpec(conf) { "timeout and cancel substream publishers when no-one subscribes to them after some time (time them out)" in assertAllStagesStopped { val subscriber = TestSubscriber.manualProbe[(Int, Source[Int, _])]() val publisherProbe = TestPublisher.probe[Int]() - val publisher = Source.fromPublisher(publisherProbe).groupBy(3, _ % 3).lift(_ % 3).runWith(Sink.fromSubscriber(subscriber)) + val publisher = + Source.fromPublisher(publisherProbe).groupBy(3, _ % 3).lift(_ % 3).runWith(Sink.fromSubscriber(subscriber)) val downstreamSubscription = subscriber.expectSubscription() downstreamSubscription.request(100) @@ -82,7 +81,8 @@ class SubstreamSubscriptionTimeoutSpec(conf: String) extends StreamSpec(conf) { "timeout and stop groupBy parent actor if none of the substreams are actually consumed" in assertAllStagesStopped { val publisherProbe = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[(Int, Source[Int, _])]() - val publisher = Source.fromPublisher(publisherProbe).groupBy(2, _ % 2).lift(_ % 2).runWith(Sink.fromSubscriber(subscriber)) + val publisher = + Source.fromPublisher(publisherProbe).groupBy(2, 
_ % 2).lift(_ % 2).runWith(Sink.fromSubscriber(subscriber)) val downstreamSubscription = subscriber.expectSubscription() downstreamSubscription.request(100) @@ -99,7 +99,8 @@ class SubstreamSubscriptionTimeoutSpec(conf: String) extends StreamSpec(conf) { "not timeout and cancel substream publishers when they have been subscribed to" in { val publisherProbe = TestPublisher.probe[Int]() val subscriber = TestSubscriber.manualProbe[(Int, Source[Int, _])]() - val publisher = Source.fromPublisher(publisherProbe).groupBy(2, _ % 2).lift(_ % 2).runWith(Sink.fromSubscriber(subscriber)) + val publisher = + Source.fromPublisher(publisherProbe).groupBy(2, _ % 2).lift(_ % 2).runWith(Sink.fromSubscriber(subscriber)) val downstreamSubscription = subscriber.expectSubscription() downstreamSubscription.request(100) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala index 18c2a40661..f76465b2bb 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TakeLastSinkSpec.scala @@ -12,8 +12,7 @@ import scala.concurrent.{ Await, Future } class TakeLastSinkSpec extends StreamSpec { - val settings = ActorMaterializerSettings(system) - .withInputBuffer(initialSize = 2, maxSize = 16) + val settings = ActorMaterializerSettings(system).withInputBuffer(initialSize = 2, maxSize = 16) implicit val mat = ActorMaterializer(settings) @@ -30,8 +29,12 @@ class TakeLastSinkSpec extends StreamSpec { //#takeLast-operator-example case class Student(name: String, gpa: Double) - val students = List(Student("Alison", 4.7), Student("Adrian", 3.1), Student("Alexis", 4), - Student("Benita", 2.1), Student("Kendra", 4.2), Student("Jerrie", 4.3)).sortBy(_.gpa) + val students = List(Student("Alison", 4.7), + Student("Adrian", 3.1), + Student("Alexis", 4), + Student("Benita", 2.1), + Student("Kendra", 4.2), + 
Student("Jerrie", 4.3)).sortBy(_.gpa) val sourceOfStudents = Source(students) @@ -39,7 +42,7 @@ class TakeLastSinkSpec extends StreamSpec { result.foreach { topThree => println("#### Top students ####") - topThree.reverse foreach { s => + topThree.reverse.foreach { s => println(s"Name: ${s.name}, GPA: ${s.gpa}") } } @@ -48,7 +51,7 @@ class TakeLastSinkSpec extends StreamSpec { Name: Alison, GPA: 4.7 Name: Jerrie, GPA: 4.3 Name: Kendra, GPA: 4.2 - */ + */ //#takeLast-operator-example diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TickSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TickSourceSpec.scala index 5f8147a459..c7c306ba2c 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TickSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/TickSourceSpec.scala @@ -5,7 +5,7 @@ package akka.stream.scaladsl import scala.concurrent.duration._ -import akka.stream.{ ClosedShape, ActorMaterializer } +import akka.stream.{ ActorMaterializer, ClosedShape } import akka.stream.testkit._ import akka.stream.testkit.scaladsl.StreamTestKit._ import akka.testkit.TimingTest @@ -63,14 +63,16 @@ class TickSourceSpec extends StreamSpec { "be usable with zip for a simple form of rate limiting" taggedAs TimingTest in { val c = TestSubscriber.manualProbe[Int]() - RunnableGraph.fromGraph(GraphDSL.create() { implicit b => - import GraphDSL.Implicits._ - val zip = b.add(Zip[Int, String]()) - Source(1 to 100) ~> zip.in0 - Source.tick(1.second, 1.second, "tick") ~> zip.in1 - zip.out ~> Flow[(Int, String)].map { case (n, _) => n } ~> Sink.fromSubscriber(c) - ClosedShape - }).run() + RunnableGraph + .fromGraph(GraphDSL.create() { implicit b => + import GraphDSL.Implicits._ + val zip = b.add(Zip[Int, String]()) + Source(1 to 100) ~> zip.in0 + Source.tick(1.second, 1.second, "tick") ~> zip.in1 + zip.out ~> Flow[(Int, String)].map { case (n, _) => n } ~> Sink.fromSubscriber(c) + ClosedShape + }) + .run() val sub = 
c.expectSubscription() sub.request(1000) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala index 6d1dc8750d..e404411320 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceAsyncSourceSpec.scala @@ -21,12 +21,11 @@ import scala.concurrent.{ Await, ExecutionContext, Future, Promise } object UnfoldResourceAsyncSourceSpec { - class ResourceDummy[T]( - values: Seq[T], - // these can be used to control when the resource creates, reads first element and completes closing - createFuture: Future[Done] = Future.successful(Done), - firstReadFuture: Future[Done] = Future.successful(Done), - closeFuture: Future[Done] = Future.successful(Done))(implicit ec: ExecutionContext) { + class ResourceDummy[T](values: Seq[T], + // these can be used to control when the resource creates, reads first element and completes closing + createFuture: Future[Done] = Future.successful(Done), + firstReadFuture: Future[Done] = Future.successful(Done), + closeFuture: Future[Done] = Future.successful(Done))(implicit ec: ExecutionContext) { private val iterator = values.iterator private val createdP = Promise[Done]() private val closedP = Promise[Done]() @@ -71,17 +70,13 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val closePromise = Promise[Done]() val values = 0 to 1000 - val resource = new ResourceDummy[Int]( - values, - createFuture = createPromise.future, - closeFuture = closePromise.future) + val resource = + new ResourceDummy[Int](values, createFuture = createPromise.future, closeFuture = closePromise.future) val probe = TestSubscriber.probe[Int]() - Source.unfoldResourceAsync[Int, ResourceDummy[Int]]( - resource.create _, - _.read, - _.close - ).runWith(Sink.fromSubscriber(probe)) + Source + 
.unfoldResourceAsync[Int, ResourceDummy[Int]](resource.create _, _.read, _.close) + .runWith(Sink.fromSubscriber(probe)) probe.request(1) resource.created.futureValue @@ -105,11 +100,9 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val firstRead = Promise[Done]() val resource = new ResourceDummy[Int](1 :: Nil, firstReadFuture = firstRead.future) - Source.unfoldResourceAsync[Int, ResourceDummy[Int]]( - resource.create _, - _.read, - _.close - ).runWith(Sink.fromSubscriber(probe)) + Source + .unfoldResourceAsync[Int, ResourceDummy[Int]](resource.create _, _.read, _.close) + .runWith(Sink.fromSubscriber(probe)) probe.request(1L) resource.firstElementRead.futureValue @@ -123,10 +116,9 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "fail when create throws exception" in assertAllStagesStopped { val probe = TestSubscriber.probe[Unit]() - Source.unfoldResourceAsync[Unit, Unit]( - () => throw TE("create failed"), - _ => ???, - _ => ???).runWith(Sink.fromSubscriber(probe)) + Source + .unfoldResourceAsync[Unit, Unit](() => throw TE("create failed"), _ => ???, _ => ???) + .runWith(Sink.fromSubscriber(probe)) probe.ensureSubscription() probe.expectError(TE("create failed")) @@ -134,10 +126,9 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "fail when create returns failed future" in assertAllStagesStopped { val probe = TestSubscriber.probe[Unit]() - Source.unfoldResourceAsync[Unit, Unit]( - () => Future.failed(TE("create failed")), - _ => ???, - _ => ???).runWith(Sink.fromSubscriber(probe)) + Source + .unfoldResourceAsync[Unit, Unit](() => Future.failed(TE("create failed")), _ => ???, _ => ???) 
+ .runWith(Sink.fromSubscriber(probe)) probe.ensureSubscription() probe.expectError(TE("create failed")) @@ -145,10 +136,10 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "fail when close throws exception" in assertAllStagesStopped { val probe = TestSubscriber.probe[Unit]() - Source.unfoldResourceAsync[Unit, Unit]( - () => Future.successful(()), - _ => Future.successful[Option[Unit]](None), - _ => throw TE("")) + Source + .unfoldResourceAsync[Unit, Unit](() => Future.successful(()), + _ => Future.successful[Option[Unit]](None), + _ => throw TE("")) .runWith(Sink.fromSubscriber(probe)) probe.ensureSubscription() probe.request(1L) @@ -157,10 +148,10 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "fail when close returns failed future" in assertAllStagesStopped { val probe = TestSubscriber.probe[Unit]() - Source.unfoldResourceAsync[Unit, Unit]( - () => Future.successful(()), - _ => Future.successful[Option[Unit]](None), - _ => Future.failed(throw TE(""))) + Source + .unfoldResourceAsync[Unit, Unit](() => Future.successful(()), + _ => Future.successful[Option[Unit]](None), + _ => Future.failed(throw TE(""))) .runWith(Sink.fromSubscriber(probe)) probe.ensureSubscription() probe.request(1L) @@ -168,34 +159,34 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { } "continue when Strategy is Resume and read throws" in assertAllStagesStopped { - val result = Source.unfoldResourceAsync[Int, Iterator[Any]]( - () => Future.successful(List(1, 2, TE("read-error"), 3).iterator), - iterator => - if (iterator.hasNext) { - iterator.next() match { - case n: Int => Future.successful(Some(n)) - case e: TE => throw e - } - } else Future.successful(None), - _ => Future.successful(Done) - ).withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) + val result = Source + .unfoldResourceAsync[Int, Iterator[Any]](() => Future.successful(List(1, 2, TE("read-error"), 
3).iterator), + iterator => + if (iterator.hasNext) { + iterator.next() match { + case n: Int => Future.successful(Some(n)) + case e: TE => throw e + } + } else Future.successful(None), + _ => Future.successful(Done)) + .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) .runWith(Sink.seq) result.futureValue should ===(Seq(1, 2, 3)) } "continue when Strategy is Resume and read returns failed future" in assertAllStagesStopped { - val result = Source.unfoldResourceAsync[Int, Iterator[Any]]( - () => Future.successful(List(1, 2, TE("read-error"), 3).iterator), - iterator => - if (iterator.hasNext) { - iterator.next() match { - case n: Int => Future.successful(Some(n)) - case e: TE => Future.failed(e) - } - } else Future.successful(None), - _ => Future.successful(Done) - ).withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) + val result = Source + .unfoldResourceAsync[Int, Iterator[Any]](() => Future.successful(List(1, 2, TE("read-error"), 3).iterator), + iterator => + if (iterator.hasNext) { + iterator.next() match { + case n: Int => Future.successful(Some(n)) + case e: TE => Future.failed(e) + } + } else Future.successful(None), + _ => Future.successful(Done)) + .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider)) .runWith(Sink.seq) result.futureValue should ===(Seq(1, 2, 3)) @@ -205,19 +196,21 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { @volatile var failed = false val startCount = new AtomicInteger(0) - val result = Source.unfoldResourceAsync[Int, Iterator[Int]]( - () => Future.successful { - startCount.incrementAndGet() - List(1, 2, 3).iterator - }, - reader => - if (!failed) { - failed = true - throw TE("read-error") - } else if (reader.hasNext) Future.successful(Some(reader.next)) - else Future.successful(None), - _ => Future.successful(Done) - ).withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) + val 
result = Source + .unfoldResourceAsync[Int, Iterator[Int]]( + () => + Future.successful { + startCount.incrementAndGet() + List(1, 2, 3).iterator + }, + reader => + if (!failed) { + failed = true + throw TE("read-error") + } else if (reader.hasNext) Future.successful(Some(reader.next)) + else Future.successful(None), + _ => Future.successful(Done)) + .withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) .runWith(Sink.seq) result.futureValue should ===(Seq(1, 2, 3)) @@ -228,19 +221,21 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { @volatile var failed = false val startCount = new AtomicInteger(0) - val result = Source.unfoldResourceAsync[Int, Iterator[Int]]( - () => Future.successful { - startCount.incrementAndGet() - List(1, 2, 3).iterator - }, - reader => - if (!failed) { - failed = true - Future.failed(TE("read-error")) - } else if (reader.hasNext) Future.successful(Some(reader.next)) - else Future.successful(None), - _ => Future.successful(Done) - ).withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) + val result = Source + .unfoldResourceAsync[Int, Iterator[Int]]( + () => + Future.successful { + startCount.incrementAndGet() + List(1, 2, 3).iterator + }, + reader => + if (!failed) { + failed = true + Future.failed(TE("read-error")) + } else if (reader.hasNext) Future.successful(Some(reader.next)) + else Future.successful(None), + _ => Future.successful(Done)) + .withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) .runWith(Sink.seq) result.futureValue should ===(Seq(1, 2, 3)) @@ -249,11 +244,11 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "fail stream when restarting and close throws" in assertAllStagesStopped { val out = TestSubscriber.probe[Int]() - Source.unfoldResourceAsync[Int, Iterator[Int]]( - () => Future.successful(List(1, 2, 3).iterator), - reader => throw TE("read-error"), - _ => throw 
new TE("close-error") - ).withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) + Source + .unfoldResourceAsync[Int, Iterator[Int]](() => Future.successful(List(1, 2, 3).iterator), + reader => throw TE("read-error"), + _ => throw new TE("close-error")) + .withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) .runWith(Sink.fromSubscriber(out)) out.request(1) @@ -262,11 +257,11 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "fail stream when restarting and close returns failed future" in assertAllStagesStopped { val out = TestSubscriber.probe[Int]() - Source.unfoldResourceAsync[Int, Iterator[Int]]( - () => Future.successful(List(1, 2, 3).iterator), - reader => throw TE("read-error"), - _ => Future.failed(new TE("close-error")) - ).withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) + Source + .unfoldResourceAsync[Int, Iterator[Int]](() => Future.successful(List(1, 2, 3).iterator), + reader => throw TE("read-error"), + _ => Future.failed(new TE("close-error"))) + .withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) .runWith(Sink.fromSubscriber(out)) out.request(1) @@ -276,13 +271,14 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "fail stream when restarting and start throws" in assertAllStagesStopped { val startCounter = new AtomicInteger(0) val out = TestSubscriber.probe[Int]() - Source.unfoldResourceAsync[Int, Iterator[Int]]( - () => - if (startCounter.incrementAndGet() < 2) Future.successful(List(1, 2, 3).iterator) - else throw TE("start-error"), - reader => throw TE("read-error"), - _ => Future.successful(Done) - ).withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) + Source + .unfoldResourceAsync[Int, Iterator[Int]]( + () => + if (startCounter.incrementAndGet() < 2) Future.successful(List(1, 2, 3).iterator) + else throw TE("start-error"), + 
reader => throw TE("read-error"), + _ => Future.successful(Done)) + .withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) .runWith(Sink.fromSubscriber(out)) out.request(1) @@ -292,13 +288,14 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "fail stream when restarting and start returns failed future" in assertAllStagesStopped { val startCounter = new AtomicInteger(0) val out = TestSubscriber.probe[Int]() - Source.unfoldResourceAsync[Int, Iterator[Int]]( - () => - if (startCounter.incrementAndGet() < 2) Future.successful(List(1, 2, 3).iterator) - else Future.failed(TE("start-error")), - reader => throw TE("read-error"), - _ => Future.successful(Done) - ).withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) + Source + .unfoldResourceAsync[Int, Iterator[Int]]( + () => + if (startCounter.incrementAndGet() < 2) Future.successful(List(1, 2, 3).iterator) + else Future.failed(TE("start-error")), + reader => throw TE("read-error"), + _ => Future.successful(Done)) + .withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider)) .runWith(Sink.fromSubscriber(out)) out.request(1) @@ -309,12 +306,16 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val sys = ActorSystem("dispatcher-testing", UnboundedMailboxConfig) val materializer = ActorMaterializer()(sys) try { - val p = Source.unfoldResourceAsync[String, Unit]( - () => Promise[Unit].future, // never complete - _ => ???, - _ => ???).runWith(Sink.ignore)(materializer) + val p = Source + .unfoldResourceAsync[String, Unit](() => Promise[Unit].future, // never complete + _ => ???, + _ => ???) 
+ .runWith(Sink.ignore)(materializer) - materializer.asInstanceOf[PhasedFusingActorMaterializer].supervisor.tell(StreamSupervisor.GetChildren, testActor) + materializer + .asInstanceOf[PhasedFusingActorMaterializer] + .supervisor + .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "unfoldResourceSourceAsync").get assertDispatcher(ref, "akka.stream.default-blocking-io-dispatcher") } finally shutdown(sys) @@ -324,14 +325,17 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { import system.dispatcher val closeLatch = TestLatch(1) val mat = ActorMaterializer() - val p = Source.unfoldResourceAsync[String, Unit]( - () => Future.successful(()), - // a slow trickle of elements that never ends - _ => akka.pattern.after(100.millis, system.scheduler)(Future.successful(Some("element"))), - _ => Future.successful { - closeLatch.countDown() - Done - }) + val p = Source + .unfoldResourceAsync[String, Unit](() => Future.successful(()), + // a slow trickle of elements that never ends + _ => + akka.pattern.after(100.millis, system.scheduler)( + Future.successful(Some("element"))), + _ => + Future.successful { + closeLatch.countDown() + Done + }) .runWith(Sink.asPublisher(false))(mat) val c = TestSubscriber.manualProbe[String]() p.subscribe(c) @@ -344,12 +348,14 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { // these two reproduces different aspects of #24839 "close resource when stream is quickly cancelled" in assertAllStagesStopped { val closePromise = Promise[Done]() - Source.unfoldResourceAsync[String, Unit]( - // delay it a bit to give cancellation time to come upstream - () => akka.pattern.after(100.millis, system.scheduler)(Future.successful(())), - _ => Future.successful(Some("whatever")), - _ => closePromise.success(Done).future - ).runWith(Sink.cancelled) + Source + .unfoldResourceAsync[String, Unit]( + // delay it a bit to give 
cancellation time to come upstream + () => + akka.pattern.after(100.millis, system.scheduler)(Future.successful(())), + _ => Future.successful(Some("whatever")), + _ => closePromise.success(Done).future) + .runWith(Sink.cancelled) closePromise.future.futureValue should ===(Done) } @@ -357,11 +363,13 @@ class UnfoldResourceAsyncSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "close resource when stream is quickly cancelled reproducer 2" in { val closed = Promise[Done]() Source - .unfoldResourceAsync[String, Iterator[String]]( - { () => Future(Iterator("a", "b", "c")) }, - { m => Future(if (m.hasNext) Some(m.next()) else None) }, - { _ => closed.success(Done).future } - ) + .unfoldResourceAsync[String, Iterator[String]]({ () => + Future(Iterator("a", "b", "c")) + }, { m => + Future(if (m.hasNext) Some(m.next()) else None) + }, { _ => + closed.success(Done).future + }) .map(m => println(s"Elem=> $m")) .runWith(Sink.cancelled) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala index 8fa5e7445e..cdc27be869 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/UnfoldResourceSourceSpec.scala @@ -34,11 +34,11 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { private val manyLines = { ("a" * 100 + "\n") * 10 + - ("b" * 100 + "\n") * 10 + - ("c" * 100 + "\n") * 10 + - ("d" * 100 + "\n") * 10 + - ("e" * 100 + "\n") * 10 + - ("f" * 100 + "\n") * 10 + ("b" * 100 + "\n") * 10 + + ("c" * 100 + "\n") * 10 + + ("d" * 100 + "\n") * 10 + + ("e" * 100 + "\n") * 10 + + ("f" * 100 + "\n") * 10 } private val manyLinesArray = manyLines.split("\n") @@ -50,10 +50,10 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "Unfold Resource Source" must { "read contents from a file" in 
assertAllStagesStopped { - val p = Source.unfoldResource[String, BufferedReader]( - () => newBufferedReader(), - reader => Option(reader.readLine()), - reader => reader.close()) + val p = Source + .unfoldResource[String, BufferedReader](() => newBufferedReader(), + reader => Option(reader.readLine()), + reader => reader.close()) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[String]() p.subscribe(c) @@ -77,13 +77,12 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { } "continue when Strategy is Resume and exception happened" in assertAllStagesStopped { - val p = Source.unfoldResource[String, BufferedReader]( - () => newBufferedReader(), - reader => { + val p = Source + .unfoldResource[String, BufferedReader](() => newBufferedReader(), reader => { val s = reader.readLine() if (s != null && s.contains("b")) throw TE("") else Option(s) - }, - reader => reader.close()).withAttributes(supervisionStrategy(resumingDecider)) + }, reader => reader.close()) + .withAttributes(supervisionStrategy(resumingDecider)) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[String]() @@ -99,13 +98,12 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { } "close and open stream again when Strategy is Restart" in assertAllStagesStopped { - val p = Source.unfoldResource[String, BufferedReader]( - () => newBufferedReader(), - reader => { + val p = Source + .unfoldResource[String, BufferedReader](() => newBufferedReader(), reader => { val s = reader.readLine() if (s != null && s.contains("b")) throw TE("") else Option(s) - }, - reader => reader.close()).withAttributes(supervisionStrategy(restartingDecider)) + }, reader => reader.close()) + .withAttributes(supervisionStrategy(restartingDecider)) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[String]() @@ -122,13 +120,11 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { "work with ByteString as well" 
in assertAllStagesStopped { val chunkSize = 50 val buffer = new Array[Char](chunkSize) - val p = Source.unfoldResource[ByteString, Reader]( - () => newBufferedReader(), - reader => { + val p = Source + .unfoldResource[ByteString, Reader](() => newBufferedReader(), reader => { val s = reader.read(buffer) if (s > 0) Some(ByteString(buffer.mkString("")).take(s)) else None - }, - reader => reader.close()) + }, reader => reader.close()) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[ByteString]() @@ -154,23 +150,28 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val sys = ActorSystem("dispatcher-testing", UnboundedMailboxConfig) val materializer = ActorMaterializer()(sys) try { - val p = Source.unfoldResource[String, BufferedReader]( - () => newBufferedReader(), - reader => Option(reader.readLine()), - reader => reader.close()).runWith(TestSink.probe)(materializer) + val p = Source + .unfoldResource[String, BufferedReader](() => newBufferedReader(), + reader => Option(reader.readLine()), + reader => reader.close()) + .runWith(TestSink.probe)(materializer) - materializer.asInstanceOf[PhasedFusingActorMaterializer].supervisor.tell(StreamSupervisor.GetChildren, testActor) + materializer + .asInstanceOf[PhasedFusingActorMaterializer] + .supervisor + .tell(StreamSupervisor.GetChildren, testActor) val ref = expectMsgType[Children].children.find(_.path.toString contains "unfoldResourceSource").get - try assertDispatcher(ref, "akka.stream.default-blocking-io-dispatcher") finally p.cancel() + try assertDispatcher(ref, "akka.stream.default-blocking-io-dispatcher") + finally p.cancel() } finally shutdown(sys) } "fail when create throws exception" in assertAllStagesStopped { EventFilter[TE](occurrences = 1).intercept { - val p = Source.unfoldResource[String, BufferedReader]( - () => throw TE(""), - reader => Option(reader.readLine()), - reader => reader.close()) + val p = Source + .unfoldResource[String, BufferedReader](() => 
throw TE(""), + reader => Option(reader.readLine()), + reader => reader.close()) .runWith(Sink.asPublisher(false)) val c = TestSubscriber.manualProbe[String]() p.subscribe(c) @@ -184,10 +185,10 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { val out = TestSubscriber.probe[String]() EventFilter[TE](occurrences = 1).intercept { - Source.unfoldResource[String, Iterator[String]]( - () => Iterator("a"), - it => if (it.hasNext) Some(it.next()) else None, - _ => throw TE("")) + Source + .unfoldResource[String, Iterator[String]](() => Iterator("a"), + it => if (it.hasNext) Some(it.next()) else None, + _ => throw TE("")) .runWith(Sink.fromSubscriber(out)) out.request(61) @@ -199,11 +200,11 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { // issue #24924 "not close the resource twice when read fails" in { val closedCounter = new AtomicInteger(0) - val probe = Source.unfoldResource[Int, Int]( - () => 23, // the best resource there is - _ => throw TE("failing read"), - _ => closedCounter.incrementAndGet() - ).runWith(TestSink.probe[Int]) + val probe = Source + .unfoldResource[Int, Int](() => 23, // the best resource there is + _ => throw TE("failing read"), + _ => closedCounter.incrementAndGet()) + .runWith(TestSink.probe[Int]) probe.request(1) probe.expectError(TE("failing read")) @@ -213,14 +214,13 @@ class UnfoldResourceSourceSpec extends StreamSpec(UnboundedMailboxConfig) { // issue #24924 "not close the resource twice when read fails and then close fails" in { val closedCounter = new AtomicInteger(0) - val probe = Source.unfoldResource[Int, Int]( - () => 23, // the best resource there is - _ => throw TE("failing read"), - { _ => - closedCounter.incrementAndGet() - if (closedCounter.get == 1) throw TE("boom") - } - ).runWith(TestSink.probe[Int]) + val probe = Source + .unfoldResource[Int, Int](() => 23, // the best resource there is + _ => throw TE("failing read"), { _ => + closedCounter.incrementAndGet() + if 
(closedCounter.get == 1) throw TE("boom") + }) + .runWith(TestSink.probe[Int]) EventFilter[TE](occurrences = 1).intercept { probe.request(1) diff --git a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/WithContextUsageSpec.scala b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/WithContextUsageSpec.scala index 2d98623974..0b21640d50 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/scaladsl/WithContextUsageSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/scaladsl/WithContextUsageSpec.scala @@ -26,17 +26,17 @@ class WithContextUsageSpec extends StreamSpec { val f: (Record => Record) = record => record.copy(value = record.value + 1) val expectedRecords = toRecords(input).map(f) - val src = createSourceWithContext(input) - .map(f) - .asSource + val src = createSourceWithContext(input).map(f).asSource - src.map { case (e, _) => e } + src + .map { case (e, _) => e } .runWith(TestSink.probe[Record]) .request(input.size) .expectNextN(expectedRecords) .expectComplete() - src.map { case (_, ctx) => ctx } + src + .map { case (_, ctx) => ctx } .toMat(commitOffsets)(Keep.right) .run() .request(input.size) @@ -52,17 +52,17 @@ class WithContextUsageSpec extends StreamSpec { val expectedOffsets = input.filter(cm => f(cm.record)).map(cm => Offset(cm)).init val expectedRecords = toRecords(input).filter(f) - val src = createSourceWithContext(input) - .filter(f) - .asSource + val src = createSourceWithContext(input).filter(f).asSource - src.map { case (e, _) => e } + src + .map { case (e, _) => e } .runWith(TestSink.probe[Record]) .request(input.size) .expectNextN(expectedRecords) .expectComplete() - src.map { case (_, ctx) => ctx } + src + .map { case (_, ctx) => ctx } .toMat(commitOffsets)(Keep.right) .run() .request(input.size) @@ -78,17 +78,17 @@ class WithContextUsageSpec extends StreamSpec { val expectedOffsets = testRange.map(ix => Offset(ix)).init val expectedRecords = toRecords(input).flatMap(f) - val src = createSourceWithContext(input) - 
.mapConcat(f) - .asSource + val src = createSourceWithContext(input).mapConcat(f).asSource - src.map { case (e, _) => e } + src + .map { case (e, _) => e } .runWith(TestSink.probe[Record]) .request(expectedRecords.size) .expectNextN(expectedRecords) .expectComplete() - src.map { case (_, ctx) => ctx } + src + .map { case (_, ctx) => ctx } .toMat(commitOffsets)(Keep.right) .run() .request(input.size) @@ -104,19 +104,17 @@ class WithContextUsageSpec extends StreamSpec { val expectedOffsets = testRange.grouped(2).map(ixs => Offset(ixs.last)).toVector.init val expectedMultiRecords = toRecords(input).grouped(groupSize).map(l => MultiRecord(l)).toVector - val src = createSourceWithContext(input) - .grouped(groupSize) - .map(l => MultiRecord(l)) - .mapContext(_.last) - .asSource + val src = createSourceWithContext(input).grouped(groupSize).map(l => MultiRecord(l)).mapContext(_.last).asSource - src.map { case (e, _) => e } + src + .map { case (e, _) => e } .runWith(TestSink.probe[MultiRecord]) .request(expectedMultiRecords.size) .expectNextN(expectedMultiRecords) .expectComplete() - src.map { case (_, ctx) => ctx } + src + .map { case (_, ctx) => ctx } .toMat(commitOffsets)(Keep.right) .run() .request(input.size) @@ -143,13 +141,15 @@ class WithContextUsageSpec extends StreamSpec { .mapContext(_.last) .asSource - src.map { case (e, _) => e } + src + .map { case (e, _) => e } .runWith(TestSink.probe[MultiRecord]) .request(expectedMultiRecords.size) .expectNextN(expectedMultiRecords) .expectComplete() - src.map { case (_, ctx) => ctx } + src + .map { case (_, ctx) => ctx } .toMat(commitOffsets)(Keep.right) .run() .request(input.size) @@ -157,13 +157,17 @@ class WithContextUsageSpec extends StreamSpec { .expectComplete() } - def genInput(range: Range) = range.map(ix => Consumer.CommittableMessage(Record(genKey(ix), genValue(ix)), Consumer.CommittableOffsetImpl(ix))).toVector + def genInput(range: Range) = + range + .map(ix => Consumer.CommittableMessage(Record(genKey(ix), 
genValue(ix)), Consumer.CommittableOffsetImpl(ix))) + .toVector def toRecords(committableMessages: Vector[Consumer.CommittableMessage[Record]]) = committableMessages.map(_.record) def genKey(ix: Int) = s"k$ix" def genValue(ix: Int) = s"v$ix" } - def createSourceWithContext(committableMessages: Vector[Consumer.CommittableMessage[Record]]): SourceWithContext[Record, Offset, NotUsed] = + def createSourceWithContext( + committableMessages: Vector[Consumer.CommittableMessage[Record]]): SourceWithContext[Record, Offset, NotUsed] = Consumer .committableSource(committableMessages) .asSourceWithContext(m => Offset(m.committableOffset.offset)) @@ -172,19 +176,21 @@ class WithContextUsageSpec extends StreamSpec { def commitOffsets = commit[Offset](Offset.Uninitialized) def commit[Ctx](uninitialized: Ctx): Sink[Ctx, Probe[Ctx]] = { val testSink = TestSink.probe[Ctx] - Flow[Ctx].statefulMapConcat { () => - { - var prevCtx: Ctx = uninitialized - ctx => { - val res = - if (prevCtx != uninitialized && ctx != prevCtx) Vector(prevCtx) - else Vector.empty[Ctx] + Flow[Ctx] + .statefulMapConcat { () => + { + var prevCtx: Ctx = uninitialized + ctx => { + val res = + if (prevCtx != uninitialized && ctx != prevCtx) Vector(prevCtx) + else Vector.empty[Ctx] - prevCtx = ctx - res + prevCtx = ctx + res + } } } - }.toMat(testSink)(Keep.right) + .toMat(testSink)(Keep.right) } } @@ -200,7 +206,8 @@ case class Committed[R](record: R, offset: Int) case class MultiRecord(records: immutable.Seq[Record]) object Consumer { - def committableSource(committableMessages: Vector[CommittableMessage[Record]]): Source[CommittableMessage[Record], NotUsed] = { + def committableSource( + committableMessages: Vector[CommittableMessage[Record]]): Source[CommittableMessage[Record], NotUsed] = { Source(committableMessages) } case class CommittableMessage[V](record: V, committableOffset: CommittableOffset) diff --git a/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala 
b/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala index 5d9042acc6..ee3b241bc5 100644 --- a/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala +++ b/akka-stream-tests/src/test/scala/akka/stream/snapshot/MaterializerStateSpec.scala @@ -16,10 +16,7 @@ class MaterializerStateSpec extends StreamSpec { "snapshot a running stream" in { implicit val mat = ActorMaterializer() - Source.maybe[Int] - .map(_.toString) - .zipWithIndex - .runWith(Sink.seq) + Source.maybe[Int].map(_.toString).zipWithIndex.runWith(Sink.seq) awaitAssert({ val snapshot = MaterializerState.streamSnapshots(mat).futureValue diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala index 541ccd1564..e8b9eba3fd 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorFlow.scala @@ -55,8 +55,12 @@ object ActorFlow { * @tparam Q Question message type that is spoken by the target actor * @tparam A Answer type that the Actor is expected to reply with, it will become the Output type of this Flow */ - def ask[I, Q, A](ref: ActorRef[Q], timeout: java.time.Duration, makeMessage: BiFunction[I, ActorRef[A], Q]): Flow[I, A, NotUsed] = - akka.stream.typed.scaladsl.ActorFlow.ask[I, Q, A](parallelism = 2)(ref)((i, ref) => makeMessage(i, ref))(JavaDurationConverters.asFiniteDuration(timeout)) + def ask[I, Q, A](ref: ActorRef[Q], + timeout: java.time.Duration, + makeMessage: BiFunction[I, ActorRef[A], Q]): Flow[I, A, NotUsed] = + akka.stream.typed.scaladsl.ActorFlow + .ask[I, Q, A](parallelism = 2)(ref)((i, ref) => makeMessage(i, ref))( + JavaDurationConverters.asFiniteDuration(timeout)) .asJava /** @@ -92,8 +96,12 @@ object ActorFlow { * @tparam Q Question message type that is spoken by the target actor * @tparam A Answer type that the Actor is 
expected to reply with, it will become the Output type of this Flow */ - def ask[I, Q, A](parallelism: Int, ref: ActorRef[Q], timeout: java.time.Duration, makeMessage: (I, ActorRef[A]) => Q): Flow[I, A, NotUsed] = - akka.stream.typed.scaladsl.ActorFlow.ask[I, Q, A](parallelism)(ref)((i, ref) => makeMessage(i, ref))(timeout.toMillis.millis) + def ask[I, Q, A](parallelism: Int, + ref: ActorRef[Q], + timeout: java.time.Duration, + makeMessage: (I, ActorRef[A]) => Q): Flow[I, A, NotUsed] = + akka.stream.typed.scaladsl.ActorFlow + .ask[I, Q, A](parallelism)(ref)((i, ref) => makeMessage(i, ref))(timeout.toMillis.millis) .asJava } diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorMaterializerFactory.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorMaterializerFactory.scala index c1baf67aca..23efacdcf8 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorMaterializerFactory.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorMaterializerFactory.scala @@ -41,7 +41,9 @@ object ActorMaterializerFactory { * the processing steps. The default `namePrefix` is `"flow"`. The actor names are built up of * `namePrefix-flowNumber-flowStepNumber-stepName`. */ - def create[T](settings: ActorMaterializerSettings, namePrefix: String, actorSystem: ActorSystem[T]): akka.stream.ActorMaterializer = + def create[T](settings: ActorMaterializerSettings, + namePrefix: String, + actorSystem: ActorSystem[T]): akka.stream.ActorMaterializer = akka.stream.ActorMaterializer.create(settings, actorSystem.toUntyped, namePrefix) /** @@ -72,6 +74,8 @@ object ActorMaterializerFactory { * the processing steps. The default `namePrefix` is `"flow"`. The actor names are built up of * `namePrefix-flowNumber-flowStepNumber-stepName`. 
*/ - def create[T](settings: ActorMaterializerSettings, namePrefix: String, ctx: ActorContext[T]): akka.stream.ActorMaterializer = + def create[T](settings: ActorMaterializerSettings, + namePrefix: String, + ctx: ActorContext[T]): akka.stream.ActorMaterializer = akka.stream.ActorMaterializer.create(settings, Adapter.toUntyped(ctx), namePrefix) } diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala index fc186ced89..cc45595c51 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSink.scala @@ -13,6 +13,7 @@ import akka.stream.typed * Collection of Sinks aimed at integrating with typed Actors. */ object ActorSink { + /** * Sends the elements of the stream to the given `ActorRef`. * If the target actor terminates the stream will be canceled. @@ -29,7 +30,9 @@ object ActorSink { * to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate * limiting operator in front of this `Sink`. */ - def actorRef[T](ref: ActorRef[T], onCompleteMessage: T, onFailureMessage: akka.japi.function.Function[Throwable, T]): Sink[T, NotUsed] = + def actorRef[T](ref: ActorRef[T], + onCompleteMessage: T, + onFailureMessage: akka.japi.function.Function[Throwable, T]): Sink[T, NotUsed] = typed.scaladsl.ActorSink.actorRef(ref, onCompleteMessage, onFailureMessage.apply).asJava /** @@ -45,14 +48,19 @@ object ActorSink { * When the stream is completed with failure - result of `onFailureMessage(throwable)` * function will be sent to the destination actor. 
*/ - def actorRefWithAck[T, M, A]( - ref: ActorRef[M], - messageAdapter: akka.japi.function.Function2[ActorRef[A], T, M], - onInitMessage: akka.japi.function.Function[ActorRef[A], M], - ackMessage: A, - onCompleteMessage: M, - onFailureMessage: akka.japi.function.Function[Throwable, M]): Sink[T, NotUsed] = - typed.scaladsl.ActorSink.actorRefWithAck( - ref, messageAdapter.apply, onInitMessage.apply, ackMessage, onCompleteMessage, onFailureMessage.apply).asJava + def actorRefWithAck[T, M, A](ref: ActorRef[M], + messageAdapter: akka.japi.function.Function2[ActorRef[A], T, M], + onInitMessage: akka.japi.function.Function[ActorRef[A], M], + ackMessage: A, + onCompleteMessage: M, + onFailureMessage: akka.japi.function.Function[Throwable, M]): Sink[T, NotUsed] = + typed.scaladsl.ActorSink + .actorRefWithAck(ref, + messageAdapter.apply, + onInitMessage.apply, + ackMessage, + onCompleteMessage, + onFailureMessage.apply) + .asJava } diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala index 2517e2b0ff..a03f58db92 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/javadsl/ActorSource.scala @@ -47,12 +47,15 @@ object ActorSource { * @param bufferSize The size of the buffer in element count * @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer */ - def actorRef[T]( - completionMatcher: Predicate[T], - failureMatcher: PartialFunction[T, Throwable], - bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, ActorRef[T]] = { - akka.stream.typed.scaladsl.ActorSource.actorRef( - { case m if completionMatcher.test(m) => }: PartialFunction[T, Unit], - failureMatcher, bufferSize, overflowStrategy).asJava + def actorRef[T](completionMatcher: Predicate[T], + failureMatcher: PartialFunction[T, Throwable], + bufferSize: Int, + 
overflowStrategy: OverflowStrategy): Source[T, ActorRef[T]] = { + akka.stream.typed.scaladsl.ActorSource + .actorRef({ case m if completionMatcher.test(m) => }: PartialFunction[T, Unit], + failureMatcher, + bufferSize, + overflowStrategy) + .asJava } } diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorFlow.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorFlow.scala index 80d2f44109..5140498624 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorFlow.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorFlow.scala @@ -60,7 +60,8 @@ object ActorFlow { * @tparam A Answer type that the Actor is expected to reply with, it will become the Output type of this Flow */ @implicitNotFound("Missing an implicit akka.util.Timeout for the ask() stage") - def ask[I, Q, A](ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)(implicit timeout: Timeout): Flow[I, A, NotUsed] = + def ask[I, Q, A](ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)( + implicit timeout: Timeout): Flow[I, A, NotUsed] = ask(parallelism = 2)(ref)(makeMessage)(timeout) /** @@ -98,7 +99,8 @@ object ActorFlow { * @tparam A answer type that the Actor is expected to reply with, it will become the Output type of this Flow */ @implicitNotFound("Missing an implicit akka.util.Timeout for the ask() stage") - def ask[I, Q, A](parallelism: Int)(ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)(implicit timeout: Timeout): Flow[I, A, NotUsed] = { + def ask[I, Q, A](parallelism: Int)(ref: ActorRef[Q])(makeMessage: (I, ActorRef[A]) => Q)( + implicit timeout: Timeout): Flow[I, A, NotUsed] = { import akka.actor.typed.scaladsl.adapter._ val untypedRef = ref.toUntyped diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorMaterializer.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorMaterializer.scala index 92d205407b..c05508f87d 100644 --- 
a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorMaterializer.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorMaterializer.scala @@ -23,7 +23,8 @@ object ActorMaterializer { * the processing steps. The default `namePrefix` is `"flow"`. The actor names are built up of * `namePrefix-flowNumber-flowStepNumber-stepName`. */ - def apply[T](materializerSettings: Option[ActorMaterializerSettings] = None, namePrefix: Option[String] = None)(implicit actorSystem: ActorSystem[T]): ActorMaterializer = + def apply[T](materializerSettings: Option[ActorMaterializerSettings] = None, namePrefix: Option[String] = None)( + implicit actorSystem: ActorSystem[T]): ActorMaterializer = akka.stream.ActorMaterializer(materializerSettings, namePrefix)(actorSystem.toUntyped) /** @@ -38,7 +39,9 @@ object ActorMaterializer { * the processing steps. The default `namePrefix` is `"flow"`. The actor names are built up of * `namePrefix-flowNumber-flowStepNumber-stepName`. */ - def boundToActor[T](ctx: ActorContext[T], materializerSettings: Option[ActorMaterializerSettings] = None, namePrefix: Option[String] = None): ActorMaterializer = + def boundToActor[T](ctx: ActorContext[T], + materializerSettings: Option[ActorMaterializerSettings] = None, + namePrefix: Option[String] = None): ActorMaterializer = akka.stream.ActorMaterializer(materializerSettings, namePrefix)(ctx.toUntyped) } diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala index 24502763eb..98c12b36d5 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSink.scala @@ -46,17 +46,17 @@ object ActorSink { * When the stream is completed with failure - result of `onFailureMessage(throwable)` * function will be sent to the destination actor. 
*/ - def actorRefWithAck[T, M, A]( - ref: ActorRef[M], - messageAdapter: (ActorRef[A], T) => M, - onInitMessage: ActorRef[A] => M, - ackMessage: A, - onCompleteMessage: M, - onFailureMessage: Throwable => M): Sink[T, NotUsed] = - Sink.actorRefWithAck( - ref.toUntyped, - messageAdapter.curried.compose(actorRefAdapter), - onInitMessage.compose(actorRefAdapter), - ackMessage, onCompleteMessage, onFailureMessage) + def actorRefWithAck[T, M, A](ref: ActorRef[M], + messageAdapter: (ActorRef[A], T) => M, + onInitMessage: ActorRef[A] => M, + ackMessage: A, + onCompleteMessage: M, + onFailureMessage: Throwable => M): Sink[T, NotUsed] = + Sink.actorRefWithAck(ref.toUntyped, + messageAdapter.curried.compose(actorRefAdapter), + onInitMessage.compose(actorRefAdapter), + ackMessage, + onCompleteMessage, + onFailureMessage) } diff --git a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala index 2ef2a1f0b1..a2a0948dc4 100644 --- a/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala +++ b/akka-stream-typed/src/main/scala/akka/stream/typed/scaladsl/ActorSource.scala @@ -47,12 +47,14 @@ object ActorSource { * @param bufferSize The size of the buffer in element count * @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer */ - def actorRef[T]( - completionMatcher: PartialFunction[T, Unit], - failureMatcher: PartialFunction[T, Throwable], - bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, ActorRef[T]] = - Source.actorRef[T]( - completionMatcher.asInstanceOf[PartialFunction[Any, Unit]], - failureMatcher.asInstanceOf[PartialFunction[Any, Throwable]], - bufferSize, overflowStrategy).mapMaterializedValue(actorRefAdapter) + def actorRef[T](completionMatcher: PartialFunction[T, Unit], + failureMatcher: PartialFunction[T, Throwable], + bufferSize: Int, + overflowStrategy: OverflowStrategy): Source[T, 
ActorRef[T]] = + Source + .actorRef[T](completionMatcher.asInstanceOf[PartialFunction[Any, Unit]], + failureMatcher.asInstanceOf[PartialFunction[Any, Throwable]], + bufferSize, + overflowStrategy) + .mapMaterializedValue(actorRefAdapter) } diff --git a/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorFlowSpec.scala b/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorFlowSpec.scala index 9b89138ab8..8bf85284f7 100644 --- a/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorFlowSpec.scala +++ b/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorFlowSpec.scala @@ -43,7 +43,8 @@ class ActorFlowSpec extends ScalaTestWithActorTestKit with WordSpecLike { "produce asked elements" in { val in: Future[immutable.Seq[Reply]] = - Source.repeat("hello") + Source + .repeat("hello") .via(ActorFlow.ask(replier)((el, replyTo: ActorRef[Reply]) => Asking(el, replyTo))) .take(3) .runWith(Sink.seq) @@ -62,7 +63,8 @@ class ActorFlowSpec extends ScalaTestWithActorTestKit with WordSpecLike { //#ask val in: Future[immutable.Seq[Reply]] = - Source(1 to 50).map(_.toString) + Source(1 to 50) + .map(_.toString) .via(ActorFlow.ask(ref)((el, replyTo: ActorRef[Reply]) => Asking(el, replyTo))) .runWith(Sink.seq) //#ask @@ -78,16 +80,19 @@ class ActorFlowSpec extends ScalaTestWithActorTestKit with WordSpecLike { implicit val ec = system.dispatchers.lookup(DispatcherSelector.default()) implicit val timeout = akka.util.Timeout(10.millis) - Source(1 to 5).map(_ + " nope") + Source(1 to 5) + .map(_ + " nope") .via(ActorFlow.ask[String, Asking, Reply](4)(dontReply)(Asking(_, _))) - .to(Sink.fromSubscriber(c)).run() + .to(Sink.fromSubscriber(c)) + .run() c.expectSubscription().request(10) c.expectError().getMessage should startWith("Ask timed out on [Actor") } "signal failure when target actor is terminated" in { - val done = Source.maybe[String] + val done = Source + .maybe[String] .via(ActorFlow.ask(replier)((el, replyTo: ActorRef[Reply]) => 
Asking(el, replyTo))) .runWith(Sink.ignore) diff --git a/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorSourceSinkSpec.scala b/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorSourceSinkSpec.scala index d364aaba57..b54c16bd4b 100644 --- a/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorSourceSinkSpec.scala +++ b/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/ActorSourceSinkSpec.scala @@ -33,7 +33,8 @@ class ActorSourceSinkSpec extends ScalaTestWithActorTestKit with WordSpecLike { val p = TestProbe[String]() val in = - Source.queue[String](10, OverflowStrategy.dropBuffer) + Source + .queue[String](10, OverflowStrategy.dropBuffer) .map(_ + "!") .to(ActorSink.actorRef(p.ref, "DONE", ex => "FAILED: " + ex.getMessage)) .run() @@ -47,27 +48,27 @@ class ActorSourceSinkSpec extends ScalaTestWithActorTestKit with WordSpecLike { "obey protocol" in { val p = TestProbe[AckProto]() - val autoPilot = Behaviors.receive[AckProto] { - (ctx, msg) => - msg match { - case m @ Init(sender) => - p.ref ! m - sender ! "ACK" - Behaviors.same - case m @ Msg(sender, _) => - p.ref ! m - sender ! "ACK" - Behaviors.same - case m => - p.ref ! m - Behaviors.same - } + val autoPilot = Behaviors.receive[AckProto] { (ctx, msg) => + msg match { + case m @ Init(sender) => + p.ref ! m + sender ! "ACK" + Behaviors.same + case m @ Msg(sender, _) => + p.ref ! m + sender ! "ACK" + Behaviors.same + case m => + p.ref ! 
m + Behaviors.same + } } val pilotRef: ActorRef[AckProto] = spawn(autoPilot) val in = - Source.queue[String](10, OverflowStrategy.dropBuffer) + Source + .queue[String](10, OverflowStrategy.dropBuffer) .to(ActorSink.actorRefWithAck(pilotRef, Msg.apply, Init.apply, "ACK", Complete, _ => Failed)) .run() @@ -86,7 +87,8 @@ class ActorSourceSinkSpec extends ScalaTestWithActorTestKit with WordSpecLike { "ActorSource" should { "send messages and complete" in { - val (in, out) = ActorSource.actorRef[String]({ case "complete" => }, PartialFunction.empty, 10, OverflowStrategy.dropBuffer) + val (in, out) = ActorSource + .actorRef[String]({ case "complete" => }, PartialFunction.empty, 10, OverflowStrategy.dropBuffer) .toMat(Sink.seq)(Keep.both) .run() @@ -98,7 +100,8 @@ class ActorSourceSinkSpec extends ScalaTestWithActorTestKit with WordSpecLike { } "fail the stream" in { - val (in, out) = ActorSource.actorRef[String](PartialFunction.empty, { case msg => new Error(msg) }, 10, OverflowStrategy.dropBuffer) + val (in, out) = ActorSource + .actorRef[String](PartialFunction.empty, { case msg => new Error(msg) }, 10, OverflowStrategy.dropBuffer) .toMat(Sink.seq)(Keep.both) .run() diff --git a/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/CustomGuardianAndMaterializerSpec.scala b/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/CustomGuardianAndMaterializerSpec.scala index 935094f77f..739dd9a13f 100644 --- a/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/CustomGuardianAndMaterializerSpec.scala +++ b/akka-stream-typed/src/test/scala/akka/stream/typed/scaladsl/CustomGuardianAndMaterializerSpec.scala @@ -26,8 +26,8 @@ object CustomGuardianAndMaterializerSpec { class CustomGuardianAndMaterializerSpec extends ScalaTestWithActorTestKit with WordSpecLike { import CustomGuardianAndMaterializerSpec._ - val guardian = Behaviors.receive[GuardianProtocol] { - (_, msg) => Behaviors.same + val guardian = Behaviors.receive[GuardianProtocol] { (_, msg) => + 
Behaviors.same } implicit val mat = ActorMaterializer() diff --git a/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala b/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala index 9d22ca2610..b957bb557b 100644 --- a/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala +++ b/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala @@ -26,20 +26,18 @@ object ActorSourceSinkExample { case object Complete extends Protocol case class Fail(ex: Exception) extends Protocol - val source: Source[Protocol, ActorRef[Protocol]] = ActorSource.actorRef[Protocol]( - completionMatcher = { - case Complete => - }, - failureMatcher = { - case Fail(ex) => ex - }, - bufferSize = 8, - overflowStrategy = OverflowStrategy.fail - ) + val source: Source[Protocol, ActorRef[Protocol]] = ActorSource.actorRef[Protocol](completionMatcher = { + case Complete => + }, failureMatcher = { + case Fail(ex) => ex + }, bufferSize = 8, overflowStrategy = OverflowStrategy.fail) - val ref = source.collect { - case Message(msg) => msg - }.to(Sink.foreach(println)).run() + val ref = source + .collect { + case Message(msg) => msg + } + .to(Sink.foreach(println)) + .run() ref ! Message("msg1") // ref ! "msg2" Does not compile @@ -59,11 +57,8 @@ object ActorSourceSinkExample { val actor: ActorRef[Protocol] = ??? - val sink: Sink[Protocol, NotUsed] = ActorSink.actorRef[Protocol]( - ref = actor, - onCompleteMessage = Complete, - onFailureMessage = Fail.apply - ) + val sink: Sink[Protocol, NotUsed] = + ActorSink.actorRef[Protocol](ref = actor, onCompleteMessage = Complete, onFailureMessage = Fail.apply) Source.single(Message("msg1")).runWith(sink) // #actor-sink-ref @@ -86,14 +81,12 @@ object ActorSourceSinkExample { val actor: ActorRef[Protocol] = ??? 
- val sink: Sink[String, NotUsed] = ActorSink.actorRefWithAck( - ref = actor, - onCompleteMessage = Complete, - onFailureMessage = Fail.apply, - messageAdapter = Message.apply, - onInitMessage = Init.apply, - ackMessage = Ack - ) + val sink: Sink[String, NotUsed] = ActorSink.actorRefWithAck(ref = actor, + onCompleteMessage = Complete, + onFailureMessage = Fail.apply, + messageAdapter = Message.apply, + onInitMessage = Init.apply, + ackMessage = Ack) Source.single("msg1").runWith(sink) // #actor-sink-ref-with-ack diff --git a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala index 5fabf5fd7d..00107477a5 100644 --- a/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala +++ b/akka-stream/src/main/scala/akka/stream/ActorMaterializer.scala @@ -35,10 +35,11 @@ object ActorMaterializer { * the processing steps. The default `namePrefix` is `"flow"`. The actor names are built up of * `namePrefix-flowNumber-flowStepNumber-stepName`. */ - def apply(materializerSettings: Option[ActorMaterializerSettings] = None, namePrefix: Option[String] = None)(implicit context: ActorRefFactory): ActorMaterializer = { + def apply(materializerSettings: Option[ActorMaterializerSettings] = None, namePrefix: Option[String] = None)( + implicit context: ActorRefFactory): ActorMaterializer = { val system = actorSystemOf(context) - val settings = materializerSettings getOrElse ActorMaterializerSettings(system) + val settings = materializerSettings.getOrElse(ActorMaterializerSettings(system)) apply(settings, namePrefix.getOrElse("flow"))(context) } @@ -54,20 +55,22 @@ object ActorMaterializer { * the processing steps. The default `namePrefix` is `"flow"`. The actor names are built up of * `namePrefix-flowNumber-flowStepNumber-stepName`. 
*/ - def apply(materializerSettings: ActorMaterializerSettings, namePrefix: String)(implicit context: ActorRefFactory): ActorMaterializer = { + def apply(materializerSettings: ActorMaterializerSettings, namePrefix: String)( + implicit context: ActorRefFactory): ActorMaterializer = { val haveShutDown = new AtomicBoolean(false) val system = actorSystemOf(context) - new PhasedFusingActorMaterializer( - system, - materializerSettings, - system.dispatchers, - actorOfStreamSupervisor(materializerSettings, context, haveShutDown), - haveShutDown, - FlowNames(system).name.copy(namePrefix)) + new PhasedFusingActorMaterializer(system, + materializerSettings, + system.dispatchers, + actorOfStreamSupervisor(materializerSettings, context, haveShutDown), + haveShutDown, + FlowNames(system).name.copy(namePrefix)) } - private def actorOfStreamSupervisor(materializerSettings: ActorMaterializerSettings, context: ActorRefFactory, haveShutDown: AtomicBoolean) = { + private def actorOfStreamSupervisor(materializerSettings: ActorMaterializerSettings, + context: ActorRefFactory, + haveShutDown: AtomicBoolean) = { val props = StreamSupervisor.props(materializerSettings, haveShutDown) context match { case s: ExtendedActorSystem => s.systemActorOf(props, StreamSupervisor.nextName()) @@ -93,16 +96,15 @@ object ActorMaterializer { /** * INTERNAL API: Creates the `StreamSupervisor` as a system actor. 
*/ - private[akka] def systemMaterializer(materializerSettings: ActorMaterializerSettings, namePrefix: String, + private[akka] def systemMaterializer(materializerSettings: ActorMaterializerSettings, + namePrefix: String, system: ExtendedActorSystem): ActorMaterializer = { val haveShutDown = new AtomicBoolean(false) new PhasedFusingActorMaterializer( system, materializerSettings, system.dispatchers, - system.systemActorOf( - StreamSupervisor.props(materializerSettings, haveShutDown), - StreamSupervisor.nextName()), + system.systemActorOf(StreamSupervisor.props(materializerSettings, haveShutDown), StreamSupervisor.nextName()), haveShutDown, FlowNames(system).name.copy(namePrefix)) } @@ -152,7 +154,8 @@ object ActorMaterializer { case c: ActorContext => c.system case null => throw new IllegalArgumentException("ActorRefFactory context must be defined") case _ => - throw new IllegalArgumentException(s"ActorRefFactory context must be an ActorSystem or ActorContext, got [${context.getClass.getName}]") + throw new IllegalArgumentException( + s"ActorRefFactory context must be an ActorSystem or ActorContext, got [${context.getClass.getName}]") } system } @@ -163,14 +166,17 @@ object ActorMaterializer { * INTERNAL API */ private[akka] object ActorMaterializerHelper { + /** * INTERNAL API */ private[akka] def downcast(materializer: Materializer): ActorMaterializer = materializer match { //FIXME this method is going to cause trouble for other Materializer implementations case m: ActorMaterializer => m - case _ => throw new IllegalArgumentException(s"required [${classOf[ActorMaterializer].getName}] " + - s"but got [${materializer.getClass.getName}]") + case _ => + throw new IllegalArgumentException( + s"required [${classOf[ActorMaterializer].getName}] " + + s"but got [${materializer.getClass.getName}]") } } @@ -227,7 +233,8 @@ class MaterializationException(msg: String, cause: Throwable = null) extends Run * when an ActorSystem is shut down while stream processing actors are 
still running. */ final case class AbruptTerminationException(actor: ActorRef) - extends RuntimeException(s"Processor actor [$actor] terminated abruptly") with NoStackTrace + extends RuntimeException(s"Processor actor [$actor] terminated abruptly") + with NoStackTrace /** * Signal that the operator was abruptly terminated, usually seen as a call to `postStop` of the `GraphStageLogic` without @@ -235,8 +242,9 @@ final case class AbruptTerminationException(actor: ActorRef) * the actor running the graph is killed, which happens when the materializer or actor system is terminated. */ final class AbruptStageTerminationException(logic: GraphStageLogic) - extends RuntimeException(s"GraphStage [$logic] terminated abruptly, caused by for example materializer or actor system termination.") - with NoStackTrace + extends RuntimeException( + s"GraphStage [$logic] terminated abruptly, caused by for example materializer or actor system termination.") + with NoStackTrace object ActorMaterializerSettings { @@ -244,26 +252,35 @@ object ActorMaterializerSettings { * Create [[ActorMaterializerSettings]] from individual settings (Scala). 
*/ @Deprecated - @deprecated("Create the settings using the apply(system) or apply(config) method, and then modify them using the .with methods.", since = "2.5.10") - def apply( - initialInputBufferSize: Int, - maxInputBufferSize: Int, - dispatcher: String, - supervisionDecider: Supervision.Decider, - subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, - debugLogging: Boolean, - outputBurstLimit: Int, - fuzzingMode: Boolean, - autoFusing: Boolean, - maxFixedBufferSize: Int) = { + @deprecated( + "Create the settings using the apply(system) or apply(config) method, and then modify them using the .with methods.", + since = "2.5.10") + def apply(initialInputBufferSize: Int, + maxInputBufferSize: Int, + dispatcher: String, + supervisionDecider: Supervision.Decider, + subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, + debugLogging: Boolean, + outputBurstLimit: Int, + fuzzingMode: Boolean, + autoFusing: Boolean, + maxFixedBufferSize: Int) = { // these sins were committed in the name of bin comp: val config = ConfigFactory.defaultReference - new ActorMaterializerSettings( - initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging, - outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, 1000, IOSettings(tcpWriteBufferSize = 16 * 1024), - StreamRefSettings(config.getConfig("akka.stream.materializer.stream-ref")), - config.getString(ActorAttributes.IODispatcher.dispatcher) - ) + new ActorMaterializerSettings(initialInputBufferSize, + maxInputBufferSize, + dispatcher, + supervisionDecider, + subscriptionTimeoutSettings, + debugLogging, + outputBurstLimit, + fuzzingMode, + autoFusing, + maxFixedBufferSize, + 1000, + IOSettings(tcpWriteBufferSize = 16 * 1024), + StreamRefSettings(config.getConfig("akka.stream.materializer.stream-ref")), + config.getString(ActorAttributes.IODispatcher.dispatcher)) } /** @@ -276,45 +293,54 @@ object ActorMaterializerSettings { * Create 
[[ActorMaterializerSettings]] from a Config subsection (Scala). */ def apply(config: Config): ActorMaterializerSettings = - new ActorMaterializerSettings( - initialInputBufferSize = config.getInt("initial-input-buffer-size"), - maxInputBufferSize = config.getInt("max-input-buffer-size"), - dispatcher = config.getString("dispatcher"), - supervisionDecider = Supervision.stoppingDecider, - subscriptionTimeoutSettings = StreamSubscriptionTimeoutSettings(config), - debugLogging = config.getBoolean("debug-logging"), - outputBurstLimit = config.getInt("output-burst-limit"), - fuzzingMode = config.getBoolean("debug.fuzzing-mode"), - autoFusing = config.getBoolean("auto-fusing"), - maxFixedBufferSize = config.getInt("max-fixed-buffer-size"), - syncProcessingLimit = config.getInt("sync-processing-limit"), - ioSettings = IOSettings(config.getConfig("io")), - streamRefSettings = StreamRefSettings(config.getConfig("stream-ref")), - blockingIoDispatcher = config.getString("blocking-io-dispatcher")) + new ActorMaterializerSettings(initialInputBufferSize = config.getInt("initial-input-buffer-size"), + maxInputBufferSize = config.getInt("max-input-buffer-size"), + dispatcher = config.getString("dispatcher"), + supervisionDecider = Supervision.stoppingDecider, + subscriptionTimeoutSettings = StreamSubscriptionTimeoutSettings(config), + debugLogging = config.getBoolean("debug-logging"), + outputBurstLimit = config.getInt("output-burst-limit"), + fuzzingMode = config.getBoolean("debug.fuzzing-mode"), + autoFusing = config.getBoolean("auto-fusing"), + maxFixedBufferSize = config.getInt("max-fixed-buffer-size"), + syncProcessingLimit = config.getInt("sync-processing-limit"), + ioSettings = IOSettings(config.getConfig("io")), + streamRefSettings = StreamRefSettings(config.getConfig("stream-ref")), + blockingIoDispatcher = config.getString("blocking-io-dispatcher")) /** * Create [[ActorMaterializerSettings]] from individual settings (Java). 
*/ @Deprecated - @deprecated("Create the settings using the create(system) or create(config) method, and then modify them using the .with methods.", since = "2.5.10") - def create( - initialInputBufferSize: Int, - maxInputBufferSize: Int, - dispatcher: String, - supervisionDecider: Supervision.Decider, - subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, - debugLogging: Boolean, - outputBurstLimit: Int, - fuzzingMode: Boolean, - autoFusing: Boolean, - maxFixedBufferSize: Int) = { + @deprecated( + "Create the settings using the create(system) or create(config) method, and then modify them using the .with methods.", + since = "2.5.10") + def create(initialInputBufferSize: Int, + maxInputBufferSize: Int, + dispatcher: String, + supervisionDecider: Supervision.Decider, + subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, + debugLogging: Boolean, + outputBurstLimit: Int, + fuzzingMode: Boolean, + autoFusing: Boolean, + maxFixedBufferSize: Int) = { // these sins were committed in the name of bin comp: val config = ConfigFactory.defaultReference - new ActorMaterializerSettings( - initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging, - outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, 1000, IOSettings(tcpWriteBufferSize = 16 * 1024), - StreamRefSettings(config.getConfig("akka.stream.materializer.stream-ref")), - config.getString(ActorAttributes.IODispatcher.dispatcher)) + new ActorMaterializerSettings(initialInputBufferSize, + maxInputBufferSize, + dispatcher, + supervisionDecider, + subscriptionTimeoutSettings, + debugLogging, + outputBurstLimit, + fuzzingMode, + autoFusing, + maxFixedBufferSize, + 1000, + IOSettings(tcpWriteBufferSize = 16 * 1024), + StreamRefSettings(config.getConfig("akka.stream.materializer.stream-ref")), + config.getString(ActorAttributes.IODispatcher.dispatcher)) } /** @@ -338,114 +364,149 @@ object ActorMaterializerSettings { * The constructor is 
not public API, use create or apply on the [[ActorMaterializerSettings]] companion instead. */ final class ActorMaterializerSettings @InternalApi private ( - /* - * Important note: `initialInputBufferSize`, `maxInputBufferSize`, `dispatcher` and - * `supervisionDecider` must not be used as values in the materializer, or anything the materializer phases use - * since these settings allow for overriding using [[Attributes]]. They must always be gotten from the effective - * attributes. - */ - val initialInputBufferSize: Int, - val maxInputBufferSize: Int, - val dispatcher: String, - val supervisionDecider: Supervision.Decider, - val subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, - val debugLogging: Boolean, - val outputBurstLimit: Int, - val fuzzingMode: Boolean, - val autoFusing: Boolean, - val maxFixedBufferSize: Int, - val syncProcessingLimit: Int, - val ioSettings: IOSettings, - val streamRefSettings: StreamRefSettings, - val blockingIoDispatcher: String) { + /* + * Important note: `initialInputBufferSize`, `maxInputBufferSize`, `dispatcher` and + * `supervisionDecider` must not be used as values in the materializer, or anything the materializer phases use + * since these settings allow for overriding using [[Attributes]]. They must always be gotten from the effective + * attributes. 
+ */ + val initialInputBufferSize: Int, + val maxInputBufferSize: Int, + val dispatcher: String, + val supervisionDecider: Supervision.Decider, + val subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, + val debugLogging: Boolean, + val outputBurstLimit: Int, + val fuzzingMode: Boolean, + val autoFusing: Boolean, + val maxFixedBufferSize: Int, + val syncProcessingLimit: Int, + val ioSettings: IOSettings, + val streamRefSettings: StreamRefSettings, + val blockingIoDispatcher: String) { require(initialInputBufferSize > 0, "initialInputBufferSize must be > 0") require(syncProcessingLimit > 0, "syncProcessingLimit must be > 0") requirePowerOfTwo(maxInputBufferSize, "maxInputBufferSize") - require(initialInputBufferSize <= maxInputBufferSize, s"initialInputBufferSize($initialInputBufferSize) must be <= maxInputBufferSize($maxInputBufferSize)") + require(initialInputBufferSize <= maxInputBufferSize, + s"initialInputBufferSize($initialInputBufferSize) must be <= maxInputBufferSize($maxInputBufferSize)") // backwards compatibility when added IOSettings, shouldn't be needed since private, but added to satisfy mima @deprecated("Use ActorMaterializerSettings.apply or ActorMaterializerSettings.create instead", "2.5.10") - def this( - initialInputBufferSize: Int, - maxInputBufferSize: Int, - dispatcher: String, - supervisionDecider: Supervision.Decider, - subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, - debugLogging: Boolean, - outputBurstLimit: Int, - fuzzingMode: Boolean, - autoFusing: Boolean, - maxFixedBufferSize: Int, - syncProcessingLimit: Int, - ioSettings: IOSettings) = + def this(initialInputBufferSize: Int, + maxInputBufferSize: Int, + dispatcher: String, + supervisionDecider: Supervision.Decider, + subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, + debugLogging: Boolean, + outputBurstLimit: Int, + fuzzingMode: Boolean, + autoFusing: Boolean, + maxFixedBufferSize: Int, + syncProcessingLimit: Int, + ioSettings: IOSettings) = 
// using config like this is not quite right but the only way to solve backwards comp without hard coding settings - this(initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging, - outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, syncProcessingLimit, ioSettings, - StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")), - ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher) - ) + this(initialInputBufferSize, + maxInputBufferSize, + dispatcher, + supervisionDecider, + subscriptionTimeoutSettings, + debugLogging, + outputBurstLimit, + fuzzingMode, + autoFusing, + maxFixedBufferSize, + syncProcessingLimit, + ioSettings, + StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")), + ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher)) // backwards compatibility when added IOSettings, shouldn't be needed since private, but added to satisfy mima @deprecated("Use ActorMaterializerSettings.apply or ActorMaterializerSettings.create instead", "2.5.10") - def this( - initialInputBufferSize: Int, - maxInputBufferSize: Int, - dispatcher: String, - supervisionDecider: Supervision.Decider, - subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, - debugLogging: Boolean, - outputBurstLimit: Int, - fuzzingMode: Boolean, - autoFusing: Boolean, - maxFixedBufferSize: Int, - syncProcessingLimit: Int) = + def this(initialInputBufferSize: Int, + maxInputBufferSize: Int, + dispatcher: String, + supervisionDecider: Supervision.Decider, + subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, + debugLogging: Boolean, + outputBurstLimit: Int, + fuzzingMode: Boolean, + autoFusing: Boolean, + maxFixedBufferSize: Int, + syncProcessingLimit: Int) = // using config like this is not quite right but the only way to solve backwards comp without hard coding 
settings - this(initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging, - outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, syncProcessingLimit, - IOSettings(tcpWriteBufferSize = 16 * 1024), StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")), - ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher) - ) + this(initialInputBufferSize, + maxInputBufferSize, + dispatcher, + supervisionDecider, + subscriptionTimeoutSettings, + debugLogging, + outputBurstLimit, + fuzzingMode, + autoFusing, + maxFixedBufferSize, + syncProcessingLimit, + IOSettings(tcpWriteBufferSize = 16 * 1024), + StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")), + ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher)) // backwards compatibility when added IOSettings, shouldn't be needed since private, but added to satisfy mima @deprecated("Use ActorMaterializerSettings.apply or ActorMaterializerSettings.create instead", "2.5.10") - def this( - initialInputBufferSize: Int, - maxInputBufferSize: Int, - dispatcher: String, - supervisionDecider: Supervision.Decider, - subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, - debugLogging: Boolean, - outputBurstLimit: Int, - fuzzingMode: Boolean, - autoFusing: Boolean, - maxFixedBufferSize: Int) = + def this(initialInputBufferSize: Int, + maxInputBufferSize: Int, + dispatcher: String, + supervisionDecider: Supervision.Decider, + subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings, + debugLogging: Boolean, + outputBurstLimit: Int, + fuzzingMode: Boolean, + autoFusing: Boolean, + maxFixedBufferSize: Int) = // using config like this is not quite right but the only way to solve backwards comp without hard coding settings - this(initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, 
subscriptionTimeoutSettings, debugLogging, - outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, 1000, IOSettings(tcpWriteBufferSize = 16 * 1024), - StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")), - ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher) - ) + this(initialInputBufferSize, + maxInputBufferSize, + dispatcher, + supervisionDecider, + subscriptionTimeoutSettings, + debugLogging, + outputBurstLimit, + fuzzingMode, + autoFusing, + maxFixedBufferSize, + 1000, + IOSettings(tcpWriteBufferSize = 16 * 1024), + StreamRefSettings(ConfigFactory.defaultReference().getConfig("akka.stream.materializer.stream-ref")), + ConfigFactory.defaultReference().getString(ActorAttributes.IODispatcher.dispatcher)) - private def copy( - initialInputBufferSize: Int = this.initialInputBufferSize, - maxInputBufferSize: Int = this.maxInputBufferSize, - dispatcher: String = this.dispatcher, - supervisionDecider: Supervision.Decider = this.supervisionDecider, - subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings = this.subscriptionTimeoutSettings, - debugLogging: Boolean = this.debugLogging, - outputBurstLimit: Int = this.outputBurstLimit, - fuzzingMode: Boolean = this.fuzzingMode, - autoFusing: Boolean = this.autoFusing, - maxFixedBufferSize: Int = this.maxFixedBufferSize, - syncProcessingLimit: Int = this.syncProcessingLimit, - ioSettings: IOSettings = this.ioSettings, - streamRefSettings: StreamRefSettings = this.streamRefSettings, - blockingIoDispatcher: String = this.blockingIoDispatcher) = { - new ActorMaterializerSettings( - initialInputBufferSize, maxInputBufferSize, dispatcher, supervisionDecider, subscriptionTimeoutSettings, debugLogging, - outputBurstLimit, fuzzingMode, autoFusing, maxFixedBufferSize, syncProcessingLimit, ioSettings, streamRefSettings, blockingIoDispatcher) + private def copy(initialInputBufferSize: Int = this.initialInputBufferSize, + maxInputBufferSize: 
Int = this.maxInputBufferSize, + dispatcher: String = this.dispatcher, + supervisionDecider: Supervision.Decider = this.supervisionDecider, + subscriptionTimeoutSettings: StreamSubscriptionTimeoutSettings = this.subscriptionTimeoutSettings, + debugLogging: Boolean = this.debugLogging, + outputBurstLimit: Int = this.outputBurstLimit, + fuzzingMode: Boolean = this.fuzzingMode, + autoFusing: Boolean = this.autoFusing, + maxFixedBufferSize: Int = this.maxFixedBufferSize, + syncProcessingLimit: Int = this.syncProcessingLimit, + ioSettings: IOSettings = this.ioSettings, + streamRefSettings: StreamRefSettings = this.streamRefSettings, + blockingIoDispatcher: String = this.blockingIoDispatcher) = { + new ActorMaterializerSettings(initialInputBufferSize, + maxInputBufferSize, + dispatcher, + supervisionDecider, + subscriptionTimeoutSettings, + debugLogging, + outputBurstLimit, + fuzzingMode, + autoFusing, + maxFixedBufferSize, + syncProcessingLimit, + ioSettings, + streamRefSettings, + blockingIoDispatcher) } /** @@ -494,7 +555,8 @@ final class ActorMaterializerSettings @InternalApi private ( * Note that supervision in streams are implemented on a per operator basis and is not supported * by every operator. 
*/ - def withSupervisionStrategy(decider: function.Function[Throwable, Supervision.Directive]): ActorMaterializerSettings = { + def withSupervisionStrategy( + decider: function.Function[Throwable, Supervision.Directive]): ActorMaterializerSettings = { import Supervision._ copy(supervisionDecider = decider match { case `resumingDecider` => resumingDecider @@ -581,21 +643,22 @@ final class ActorMaterializerSettings @InternalApi private ( override def equals(other: Any): Boolean = other match { case s: ActorMaterializerSettings => s.initialInputBufferSize == initialInputBufferSize && - s.maxInputBufferSize == maxInputBufferSize && - s.dispatcher == dispatcher && - s.supervisionDecider == supervisionDecider && - s.subscriptionTimeoutSettings == subscriptionTimeoutSettings && - s.debugLogging == debugLogging && - s.outputBurstLimit == outputBurstLimit && - s.syncProcessingLimit == syncProcessingLimit && - s.fuzzingMode == fuzzingMode && - s.autoFusing == autoFusing && - s.ioSettings == ioSettings && - s.blockingIoDispatcher == blockingIoDispatcher + s.maxInputBufferSize == maxInputBufferSize && + s.dispatcher == dispatcher && + s.supervisionDecider == supervisionDecider && + s.subscriptionTimeoutSettings == subscriptionTimeoutSettings && + s.debugLogging == debugLogging && + s.outputBurstLimit == outputBurstLimit && + s.syncProcessingLimit == syncProcessingLimit && + s.fuzzingMode == fuzzingMode && + s.autoFusing == autoFusing && + s.ioSettings == ioSettings && + s.blockingIoDispatcher == blockingIoDispatcher case _ => false } - override def toString: String = s"ActorMaterializerSettings($initialInputBufferSize,$maxInputBufferSize," + + override def toString: String = + s"ActorMaterializerSettings($initialInputBufferSize,$maxInputBufferSize," + s"$dispatcher,$supervisionDecider,$subscriptionTimeoutSettings,$debugLogging,$outputBurstLimit," + s"$syncProcessingLimit,$fuzzingMode,$autoFusing,$ioSettings)" } @@ -605,8 +668,7 @@ object IOSettings { 
apply(system.settings.config.getConfig("akka.stream.materializer.io")) def apply(config: Config): IOSettings = - new IOSettings( - tcpWriteBufferSize = math.min(Int.MaxValue, config.getBytes("tcp.write-buffer-size")).toInt) + new IOSettings(tcpWriteBufferSize = math.min(Int.MaxValue, config.getBytes("tcp.write-buffer-size")).toInt) def apply(tcpWriteBufferSize: Int): IOSettings = new IOSettings(tcpWriteBufferSize) @@ -626,8 +688,7 @@ final class IOSettings private (val tcpWriteBufferSize: Int) { def withTcpWriteBufferSize(value: Int): IOSettings = copy(tcpWriteBufferSize = value) - private def copy(tcpWriteBufferSize: Int): IOSettings = new IOSettings( - tcpWriteBufferSize = tcpWriteBufferSize) + private def copy(tcpWriteBufferSize: Int): IOSettings = new IOSettings(tcpWriteBufferSize = tcpWriteBufferSize) override def equals(other: Any): Boolean = other match { case s: IOSettings => s.tcpWriteBufferSize == tcpWriteBufferSize @@ -644,13 +705,15 @@ object StreamSubscriptionTimeoutSettings { /** * Create settings from individual values (Java). */ - def create(mode: StreamSubscriptionTimeoutTerminationMode, timeout: FiniteDuration): StreamSubscriptionTimeoutSettings = + def create(mode: StreamSubscriptionTimeoutTerminationMode, + timeout: FiniteDuration): StreamSubscriptionTimeoutSettings = new StreamSubscriptionTimeoutSettings(mode, timeout) /** * Create settings from individual values (Scala). 
*/ - def apply(mode: StreamSubscriptionTimeoutTerminationMode, timeout: FiniteDuration): StreamSubscriptionTimeoutSettings = + def apply(mode: StreamSubscriptionTimeoutTerminationMode, + timeout: FiniteDuration): StreamSubscriptionTimeoutSettings = new StreamSubscriptionTimeoutSettings(mode, timeout) /** @@ -664,13 +727,11 @@ object StreamSubscriptionTimeoutSettings { */ def apply(config: Config): StreamSubscriptionTimeoutSettings = { val c = config.getConfig("subscription-timeout") - StreamSubscriptionTimeoutSettings( - mode = toRootLowerCase(c.getString("mode")) match { - case "no" | "off" | "false" | "noop" => NoopTermination - case "warn" => WarnTermination - case "cancel" => CancelTermination - }, - timeout = c.getDuration("timeout", TimeUnit.MILLISECONDS).millis) + StreamSubscriptionTimeoutSettings(mode = toRootLowerCase(c.getString("mode")) match { + case "no" | "off" | "false" | "noop" => NoopTermination + case "warn" => WarnTermination + case "cancel" => CancelTermination + }, timeout = c.getDuration("timeout", TimeUnit.MILLISECONDS).millis) } } @@ -678,7 +739,8 @@ object StreamSubscriptionTimeoutSettings { * Leaked publishers and subscribers are cleaned up when they are not used within a given * deadline, configured by [[StreamSubscriptionTimeoutSettings]]. 
*/ -final class StreamSubscriptionTimeoutSettings(val mode: StreamSubscriptionTimeoutTerminationMode, val timeout: FiniteDuration) { +final class StreamSubscriptionTimeoutSettings(val mode: StreamSubscriptionTimeoutTerminationMode, + val timeout: FiniteDuration) { override def equals(other: Any): Boolean = other match { case s: StreamSubscriptionTimeoutSettings => s.mode == mode && s.timeout == timeout case _ => false diff --git a/akka-stream/src/main/scala/akka/stream/Attributes.scala b/akka-stream/src/main/scala/akka/stream/Attributes.scala index 01c1c17439..5a362f8e4c 100644 --- a/akka-stream/src/main/scala/akka/stream/Attributes.scala +++ b/akka-stream/src/main/scala/akka/stream/Attributes.scala @@ -9,7 +9,7 @@ import java.util.Optional import akka.event.Logging import scala.annotation.tailrec -import scala.reflect.{ ClassTag, classTag } +import scala.reflect.{ classTag, ClassTag } import akka.japi.function import java.net.URLEncoder @@ -165,8 +165,7 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) { concatNames(i, null, b.append(first).append('-').append(n)) } else concatNames(i, n, null) case _ => concatNames(i, first, buf) - } - else if (buf eq null) first + } else if (buf eq null) first else buf.toString Option(concatNames(attributeList.reverseIterator, null, null)) @@ -254,7 +253,7 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) { */ @deprecated("Attributes should always be most specific, use get[T]", "2.5.7") def getFirstAttribute[T <: Attribute](c: Class[T]): Optional[T] = - attributeList.reverseIterator.collectFirst { case attr if c.isInstance(attr) => c cast attr }.asJava + attributeList.reverseIterator.collectFirst { case attr if c.isInstance(attr) => c.cast(attr) }.asJava /** * Scala API: Get the least specific attribute (added first) of a given type parameter T `Class` or subclass thereof. 
@@ -290,30 +289,40 @@ object Attributes { final case class Name(n: String) extends Attribute final case class InputBuffer(initial: Int, max: Int) extends MandatoryAttribute - final case class LogLevels(onElement: Logging.LogLevel, onFinish: Logging.LogLevel, onFailure: Logging.LogLevel) extends Attribute + final case class LogLevels(onElement: Logging.LogLevel, onFinish: Logging.LogLevel, onFailure: Logging.LogLevel) + extends Attribute final case object AsyncBoundary extends Attribute object LogLevels { + /** Use to disable logging on certain operations when configuring [[Attributes#logLevels]] */ final val Off: Logging.LogLevel = Logging.levelFor("off").get + /** Use to enable logging at ERROR level for certain operations when configuring [[Attributes#logLevels]] */ final val Error: Logging.LogLevel = Logging.ErrorLevel + /** Use to enable logging at WARNING level for certain operations when configuring [[Attributes#logLevels]] */ final val Warning: Logging.LogLevel = Logging.WarningLevel + /** Use to enable logging at INFO level for certain operations when configuring [[Attributes#logLevels]] */ final val Info: Logging.LogLevel = Logging.InfoLevel + /** Use to enable logging at DEBUG level for certain operations when configuring [[Attributes#logLevels]] */ final val Debug: Logging.LogLevel = Logging.DebugLevel } /** Java API: Use to disable logging on certain operations when configuring [[Attributes#createLogLevels]] */ def logLevelOff: Logging.LogLevel = LogLevels.Off + /** Use to enable logging at ERROR level for certain operations when configuring [[Attributes#createLogLevels]] */ def logLevelError: Logging.LogLevel = LogLevels.Error + /** Use to enable logging at WARNING level for certain operations when configuring [[Attributes#createLogLevels]] */ def logLevelWarning: Logging.LogLevel = LogLevels.Warning + /** Use to enable logging at INFO level for certain operations when configuring [[Attributes#createLogLevels]] */ def logLevelInfo: Logging.LogLevel = 
LogLevels.Info + /** Use to enable logging at DEBUG level for certain operations when configuring [[Attributes#createLogLevels]] */ def logLevelDebug: Logging.LogLevel = LogLevels.Debug @@ -351,7 +360,9 @@ object Attributes { * Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]]. * */ - def createLogLevels(onElement: Logging.LogLevel, onFinish: Logging.LogLevel, onFailure: Logging.LogLevel): Attributes = + def createLogLevels(onElement: Logging.LogLevel, + onFinish: Logging.LogLevel, + onFailure: Logging.LogLevel): Attributes = logLevels(onElement, onFinish, onFailure) /** @@ -370,7 +381,9 @@ object Attributes { * * See [[Attributes.createLogLevels]] for Java API */ - def logLevels(onElement: Logging.LogLevel = Logging.DebugLevel, onFinish: Logging.LogLevel = Logging.DebugLevel, onFailure: Logging.LogLevel = Logging.ErrorLevel) = + def logLevels(onElement: Logging.LogLevel = Logging.DebugLevel, + onFinish: Logging.LogLevel = Logging.DebugLevel, + onFailure: Logging.LogLevel = Logging.ErrorLevel) = Attributes(LogLevels(onElement, onFinish, onFailure)) /** @@ -391,6 +404,7 @@ object ActorAttributes { final case class Dispatcher(dispatcher: String) extends MandatoryAttribute object Dispatcher { + /** * INTERNAL API * Resolves the dispatcher's name with a fallback to the default blocking IO dispatcher. @@ -448,7 +462,9 @@ object ActorAttributes { * Logging a certain operation can be completely disabled by using [[Attributes#logLevelOff]]. 
* */ - def createLogLevels(onElement: Logging.LogLevel, onFinish: Logging.LogLevel, onFailure: Logging.LogLevel): Attributes = + def createLogLevels(onElement: Logging.LogLevel, + onFinish: Logging.LogLevel, + onFailure: Logging.LogLevel): Attributes = logLevels(onElement, onFinish, onFailure) /** @@ -467,7 +483,9 @@ object ActorAttributes { * * See [[Attributes.createLogLevels]] for Java API */ - def logLevels(onElement: Logging.LogLevel = Logging.DebugLevel, onFinish: Logging.LogLevel = Logging.DebugLevel, onFailure: Logging.LogLevel = Logging.ErrorLevel) = + def logLevels(onElement: Logging.LogLevel = Logging.DebugLevel, + onFinish: Logging.LogLevel = Logging.DebugLevel, + onFailure: Logging.LogLevel = Logging.ErrorLevel) = Attributes(LogLevels(onElement, onFinish, onFailure)) } diff --git a/akka-stream/src/main/scala/akka/stream/FanInShape.scala b/akka-stream/src/main/scala/akka/stream/FanInShape.scala index d5c705c122..9d9a0c2fb0 100644 --- a/akka-stream/src/main/scala/akka/stream/FanInShape.scala +++ b/akka-stream/src/main/scala/akka/stream/FanInShape.scala @@ -17,18 +17,23 @@ object FanInShape { override def outlet: Outlet[O] = Outlet(s"$name.out") override def inlets: immutable.Seq[Inlet[_]] = Nil } - final case class Ports[O](override val outlet: Outlet[O], override val inlets: immutable.Seq[Inlet[_]]) extends Init[O] { + final case class Ports[O](override val outlet: Outlet[O], override val inlets: immutable.Seq[Inlet[_]]) + extends Init[O] { override def name: String = "FanIn" } } -abstract class FanInShape[+O] private (_out: Outlet[O @uncheckedVariance], _registered: Iterator[Inlet[_]], _name: String) extends Shape { +abstract class FanInShape[+O] private (_out: Outlet[O @uncheckedVariance], + _registered: Iterator[Inlet[_]], + _name: String) + extends Shape { import FanInShape._ def this(init: FanInShape.Init[O]) = this(init.outlet, init.inlets.iterator, init.name) final def out: Outlet[O @uncheckedVariance] = _out final override def outlets: 
immutable.Seq[Outlet[O @uncheckedVariance]] = _out :: Nil + /** * Not meant for overriding outside of Akka. */ diff --git a/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala b/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala index ff478894d2..910f8032c5 100644 --- a/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala +++ b/akka-stream/src/main/scala/akka/stream/FanInShape1N.scala @@ -8,7 +8,9 @@ import scala.annotation.unchecked.uncheckedVariance import scala.collection.immutable @Deprecated -@deprecated("FanInShape1N was removed because it was not used anywhere. Use a custom shape extending from FanInShape directly.", "2.5.5") +@deprecated( + "FanInShape1N was removed because it was not used anywhere. Use a custom shape extending from FanInShape directly.", + "2.5.5") class FanInShape1N[-T0, -T1, +O](val n: Int, _init: FanInShape.Init[O]) extends FanInShape[O](_init) { //ports get added to `FanInShape.inlets` as a side-effect of calling `newInlet` @@ -17,8 +19,12 @@ class FanInShape1N[-T0, -T1, +O](val n: Int, _init: FanInShape.Init[O]) extends def this(n: Int) = this(n, FanInShape.Name[O]("FanInShape1N")) def this(n: Int, name: String) = this(n, FanInShape.Name[O](name)) - def this(outlet: Outlet[O @uncheckedVariance], in0: Inlet[T0 @uncheckedVariance], inlets1: Array[Inlet[T1 @uncheckedVariance]]) = this(inlets1.length, FanInShape.Ports(outlet, in0 :: inlets1.toList)) - override protected def construct(init: FanInShape.Init[O @uncheckedVariance]): FanInShape[O] = new FanInShape1N(n, init) + def this(outlet: Outlet[O @uncheckedVariance], + in0: Inlet[T0 @uncheckedVariance], + inlets1: Array[Inlet[T1 @uncheckedVariance]]) = + this(inlets1.length, FanInShape.Ports(outlet, in0 :: inlets1.toList)) + override protected def construct(init: FanInShape.Init[O @uncheckedVariance]): FanInShape[O] = + new FanInShape1N(n, init) override def deepCopy(): FanInShape1N[T0, T1, O] = super.deepCopy().asInstanceOf[FanInShape1N[T0, T1, O]] @deprecated("Use 
'inlets' or 'in(id)' instead.", "2.5.5") @@ -26,9 +32,8 @@ class FanInShape1N[-T0, -T1, +O](val n: Int, _init: FanInShape.Init[O]) extends // cannot deprecate a lazy val because of genjavadoc problem https://github.com/typesafehub/genjavadoc/issues/85 private lazy val _in1Seq: immutable.IndexedSeq[Inlet[T1 @uncheckedVariance]] = - inlets - .tail //head is in0 - .toIndexedSeq.asInstanceOf[immutable.IndexedSeq[Inlet[T1]]] + inlets.tail //head is in0 + .toIndexedSeq.asInstanceOf[immutable.IndexedSeq[Inlet[T1]]] def in(n: Int): Inlet[T1 @uncheckedVariance] = { require(n > 0, "n must be > 0") diff --git a/akka-stream/src/main/scala/akka/stream/FanOutShape.scala b/akka-stream/src/main/scala/akka/stream/FanOutShape.scala index e08e6ed8a6..17546f1755 100644 --- a/akka-stream/src/main/scala/akka/stream/FanOutShape.scala +++ b/akka-stream/src/main/scala/akka/stream/FanOutShape.scala @@ -17,12 +17,16 @@ object FanOutShape { override def inlet: Inlet[I] = Inlet(s"$name.in") override def outlets: immutable.Seq[Outlet[_]] = Nil } - final case class Ports[I](override val inlet: Inlet[I], override val outlets: immutable.Seq[Outlet[_]]) extends Init[I] { + final case class Ports[I](override val inlet: Inlet[I], override val outlets: immutable.Seq[Outlet[_]]) + extends Init[I] { override def name: String = "FanOut" } } -abstract class FanOutShape[-I] private (_in: Inlet[I @uncheckedVariance], _registered: Iterator[Outlet[_]], _name: String) extends Shape { +abstract class FanOutShape[-I] private (_in: Inlet[I @uncheckedVariance], + _registered: Iterator[Outlet[_]], + _name: String) + extends Shape { import FanOutShape._ def this(init: FanOutShape.Init[I]) = this(init.inlet, init.outlets.iterator, init.name) diff --git a/akka-stream/src/main/scala/akka/stream/Graph.scala b/akka-stream/src/main/scala/akka/stream/Graph.scala index 11adbb4223..c9c4cb9ef1 100644 --- a/akka-stream/src/main/scala/akka/stream/Graph.scala +++ b/akka-stream/src/main/scala/akka/stream/Graph.scala @@ -15,6 
+15,7 @@ import scala.annotation.unchecked.uncheckedVariance * @see [[akka.stream.stage.GraphStage]] */ trait Graph[+S <: Shape, +M] { + /** * Type-level accessor for the shape parameter of this graph. */ @@ -23,6 +24,7 @@ trait Graph[+S <: Shape, +M] { * The shape of a graph is all that is externally visible: its inlets and outlets. */ def shape: S + /** * INTERNAL API. * @@ -45,9 +47,7 @@ trait Graph[+S <: Shape, +M] { * @param dispatcher Run the graph on this dispatcher */ def async(dispatcher: String) = - addAttributes( - Attributes.asyncBoundary and ActorAttributes.dispatcher(dispatcher) - ) + addAttributes(Attributes.asyncBoundary and ActorAttributes.dispatcher(dispatcher)) /** * Put an asynchronous boundary around this `Graph` @@ -58,8 +58,7 @@ trait Graph[+S <: Shape, +M] { def async(dispatcher: String, inputBufferSize: Int) = addAttributes( Attributes.asyncBoundary and ActorAttributes.dispatcher(dispatcher) - and Attributes.inputBuffer(inputBufferSize, inputBufferSize) - ) + and Attributes.inputBuffer(inputBufferSize, inputBufferSize)) /** * Add the given attributes to this [[Graph]]. If the specific attribute was already present diff --git a/akka-stream/src/main/scala/akka/stream/IOResult.scala b/akka-stream/src/main/scala/akka/stream/IOResult.scala index 45810e83ab..d57fd03c11 100644 --- a/akka-stream/src/main/scala/akka/stream/IOResult.scala +++ b/akka-stream/src/main/scala/akka/stream/IOResult.scala @@ -57,4 +57,5 @@ object IOResult { * while there was still IO operations in progress. 
*/ final case class AbruptIOTerminationException(ioResult: IOResult, cause: Throwable) - extends RuntimeException("Stream terminated without completing IO operation.", cause) with NoStackTrace + extends RuntimeException("Stream terminated without completing IO operation.", cause) + with NoStackTrace diff --git a/akka-stream/src/main/scala/akka/stream/KillSwitch.scala b/akka-stream/src/main/scala/akka/stream/KillSwitch.scala index 38ee9b4d85..781ca9a49e 100644 --- a/akka-stream/src/main/scala/akka/stream/KillSwitch.scala +++ b/akka-stream/src/main/scala/akka/stream/KillSwitch.scala @@ -53,13 +53,15 @@ object KillSwitches { def singleBidi[T1, T2]: Graph[BidiShape[T1, T1, T2, T2], UniqueKillSwitch] = UniqueBidiKillSwitchStage.asInstanceOf[Graph[BidiShape[T1, T1, T2, T2], UniqueKillSwitch]] - abstract class KillableGraphStageLogic(val terminationSignal: Future[Done], _shape: Shape) extends GraphStageLogic(_shape) { + abstract class KillableGraphStageLogic(val terminationSignal: Future[Done], _shape: Shape) + extends GraphStageLogic(_shape) { override def preStart(): Unit = { terminationSignal.value match { case Some(status) => onSwitch(status) - case _ => + case _ => // callback.invoke is a simple actor send, so it is fine to run on the invoking thread - terminationSignal.onComplete(getAsyncCallback[Try[Done]](onSwitch).invoke)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext) + terminationSignal.onComplete(getAsyncCallback[Try[Done]](onSwitch).invoke)( + akka.dispatch.ExecutionContexts.sameThreadExecutionContext) } } @@ -69,7 +71,8 @@ object KillSwitches { } } - private[stream] object UniqueKillSwitchStage extends GraphStageWithMaterializedValue[FlowShape[Any, Any], UniqueKillSwitch] { + private[stream] object UniqueKillSwitchStage + extends GraphStageWithMaterializedValue[FlowShape[Any, Any], UniqueKillSwitch] { override val initialAttributes = Attributes.name("breaker") override val shape = FlowShape(Inlet[Any]("KillSwitch.in"), 
Outlet[Any]("KillSwitch.out")) override def toString: String = "UniqueKillSwitchFlow" @@ -89,12 +92,14 @@ object KillSwitches { } } - private[stream] object UniqueBidiKillSwitchStage extends GraphStageWithMaterializedValue[BidiShape[Any, Any, Any, Any], UniqueKillSwitch] { + private[stream] object UniqueBidiKillSwitchStage + extends GraphStageWithMaterializedValue[BidiShape[Any, Any, Any, Any], UniqueKillSwitch] { override val initialAttributes = Attributes.name("breaker") - override val shape = BidiShape( - Inlet[Any]("KillSwitchBidi.in1"), Outlet[Any]("KillSwitchBidi.out1"), - Inlet[Any]("KillSwitchBidi.in2"), Outlet[Any]("KillSwitchBidi.out2")) + override val shape = BidiShape(Inlet[Any]("KillSwitchBidi.in1"), + Outlet[Any]("KillSwitchBidi.out1"), + Inlet[Any]("KillSwitchBidi.in2"), + Outlet[Any]("KillSwitchBidi.out2")) override def toString: String = "UniqueKillSwitchBidi" override def createLogicAndMaterializedValue(attr: Attributes) = { @@ -138,10 +143,12 @@ object KillSwitches { */ //#kill-switch trait KillSwitch { + /** * After calling [[KillSwitch#shutdown()]] the linked [[Graph]]s of [[FlowShape]] are completed normally. */ def shutdown(): Unit + /** * After calling [[KillSwitch#abort()]] the linked [[Graph]]s of [[FlowShape]] are failed. 
*/ @@ -281,9 +288,11 @@ final class SharedKillSwitch private[stream] (val name: String) extends KillSwit override def toString: String = s"SharedKillSwitchFlow(switch: $name)" - override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, SharedKillSwitch) = { + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (GraphStageLogic, SharedKillSwitch) = { val shutdownListener = terminationSignal.createListener() - val logic = new KillSwitches.KillableGraphStageLogic(shutdownListener.future, shape) with InHandler with OutHandler { + val logic = new KillSwitches.KillableGraphStageLogic(shutdownListener.future, shape) with InHandler + with OutHandler { setHandler(shape.in, this) setHandler(shape.out, this) diff --git a/akka-stream/src/main/scala/akka/stream/Materializer.scala b/akka-stream/src/main/scala/akka/stream/Materializer.scala index ba3499f13e..36bc628ba1 100644 --- a/akka-stream/src/main/scala/akka/stream/Materializer.scala +++ b/akka-stream/src/main/scala/akka/stream/Materializer.scala @@ -46,9 +46,8 @@ abstract class Materializer { * The result can be highly implementation specific, ranging from local actor chains to remote-deployed * processing networks. 
*/ - def materialize[Mat]( - runnable: Graph[ClosedShape, Mat], - @deprecatedName('initialAttributes) defaultAttributes: Attributes): Mat + def materialize[Mat](runnable: Graph[ClosedShape, Mat], + @deprecatedName('initialAttributes) defaultAttributes: Attributes): Mat /** * Running a flow graph will require execution resources, as will computations @@ -108,7 +107,6 @@ private[akka] object NoMaterializer extends Materializer { * INTERNAL API */ @InternalApi -private[akka] case class MaterializationContext( - materializer: Materializer, - effectiveAttributes: Attributes, - islandName: String) +private[akka] case class MaterializationContext(materializer: Materializer, + effectiveAttributes: Attributes, + islandName: String) diff --git a/akka-stream/src/main/scala/akka/stream/OverflowStrategy.scala b/akka-stream/src/main/scala/akka/stream/OverflowStrategy.scala index c84c64441d..210f72b1cf 100644 --- a/akka-stream/src/main/scala/akka/stream/OverflowStrategy.scala +++ b/akka-stream/src/main/scala/akka/stream/OverflowStrategy.scala @@ -15,6 +15,7 @@ import akka.event.Logging.LogLevel */ @DoNotInherit sealed abstract class DelayOverflowStrategy extends Serializable { + /** INTERNAL API */ @InternalApi private[akka] def isBackpressure: Boolean } @@ -27,12 +28,14 @@ final case class BufferOverflowException(msg: String) extends RuntimeException(m */ @DoNotInherit sealed abstract class OverflowStrategy extends DelayOverflowStrategy { + /** INTERNAL API */ @InternalApi private[akka] def logLevel: LogLevel def withLogLevel(logLevel: Logging.LogLevel): OverflowStrategy } private[akka] object OverflowStrategies { + /** * INTERNAL API */ @@ -40,6 +43,7 @@ private[akka] object OverflowStrategies { override def withLogLevel(logLevel: LogLevel): DropHead = DropHead(logLevel) private[akka] override def isBackpressure: Boolean = false } + /** * INTERNAL API */ @@ -47,6 +51,7 @@ private[akka] object OverflowStrategies { override def withLogLevel(logLevel: LogLevel): DropTail = 
DropTail(logLevel) private[akka] override def isBackpressure: Boolean = false } + /** * INTERNAL API */ @@ -54,6 +59,7 @@ private[akka] object OverflowStrategies { override def withLogLevel(logLevel: LogLevel): DropBuffer = DropBuffer(logLevel) private[akka] override def isBackpressure: Boolean = false } + /** * INTERNAL API */ @@ -61,6 +67,7 @@ private[akka] object OverflowStrategies { override def withLogLevel(logLevel: LogLevel): DropNew = DropNew(logLevel) private[akka] override def isBackpressure: Boolean = false } + /** * INTERNAL API */ @@ -68,6 +75,7 @@ private[akka] object OverflowStrategies { override def withLogLevel(logLevel: LogLevel): Backpressure = Backpressure(logLevel) private[akka] override def isBackpressure: Boolean = true } + /** * INTERNAL API */ @@ -75,6 +83,7 @@ private[akka] object OverflowStrategies { override def withLogLevel(logLevel: LogLevel): Fail = Fail(logLevel) private[akka] override def isBackpressure: Boolean = false } + /** * INTERNAL API */ @@ -84,6 +93,7 @@ private[akka] object OverflowStrategies { } object OverflowStrategy { + /** * If the buffer is full when a new element arrives, drops the oldest element from the buffer to make space for * the new element. @@ -119,6 +129,7 @@ object OverflowStrategy { } object DelayOverflowStrategy { + /** * If the buffer is full when a new element is available this strategy send next element downstream without waiting */ diff --git a/akka-stream/src/main/scala/akka/stream/Shape.scala b/akka-stream/src/main/scala/akka/stream/Shape.scala index 93f9997a8d..3f5692633c 100644 --- a/akka-stream/src/main/scala/akka/stream/Shape.scala +++ b/akka-stream/src/main/scala/akka/stream/Shape.scala @@ -35,6 +35,7 @@ sealed abstract class InPort { self: Inlet[_] => */ private[stream] def inlet: Inlet[_] = this } + /** * An output port of a StreamLayout.Module. This type logically belongs * into the impl package but must live here due to how `sealed` works. 
@@ -67,6 +68,7 @@ sealed abstract class OutPort { self: Outlet[_] => * express the internal structural hierarchy of stream topologies). */ object Inlet { + /** * Scala API * @@ -90,14 +92,16 @@ final class Inlet[T] private (val s: String) extends InPort { in.mappedTo = this in } + /** * INTERNAL API. */ def as[U]: Inlet[U] = this.asInstanceOf[Inlet[U]] - override def toString: String = s + "(" + this.hashCode + s")" + + override def toString: String = + s + "(" + this.hashCode + s")" + (if (mappedTo eq this) "" - else s" mapped to $mappedTo") + else s" mapped to $mappedTo") } /** @@ -130,20 +134,23 @@ final class Outlet[T] private (val s: String) extends OutPort { out.mappedTo = this out } + /** * INTERNAL API. */ def as[U]: Outlet[U] = this.asInstanceOf[Outlet[U]] - override def toString: String = s + "(" + this.hashCode + s")" + + override def toString: String = + s + "(" + this.hashCode + s")" + (if (mappedTo eq this) "" - else s" mapped to $mappedTo") + else s" mapped to $mappedTo") } /** * INTERNAL API */ @InternalApi private[akka] object Shape { + /** * `inlets` and `outlets` can be `Vector` or `List` so this method * checks the size of 1 in an optimized way. @@ -163,6 +170,7 @@ final class Outlet[T] private (val s: String) extends OutPort { * otherwise it is just a black box. 
*/ abstract class Shape { + /** * Scala API: get a list of all input ports */ @@ -213,17 +221,20 @@ abstract class Shape { def requireSamePortsAndShapeAs(s: Shape): Unit = require(hasSamePortsAndShapeAs(s), nonCorrespondingMessage(s)) private def nonCorrespondingMessage(s: Shape) = - s"The inlets [${s.inlets.mkString(", ")}] and outlets [${s.outlets.mkString(", ")}] must correspond to the inlets [${inlets.mkString(", ")}] and outlets [${outlets.mkString(", ")}]" + s"The inlets [${s.inlets.mkString(", ")}] and outlets [${s.outlets.mkString(", ")}] must correspond to the inlets [${inlets + .mkString(", ")}] and outlets [${outlets.mkString(", ")}]" } /** * Java API for creating custom [[Shape]] types. */ abstract class AbstractShape extends Shape { + /** * Provide the list of all input ports of this shape. */ def allInlets: java.util.List[Inlet[_]] + /** * Provide the list of all output ports of this shape. */ @@ -275,6 +286,7 @@ final case class SourceShape[+T](out: Outlet[T @uncheckedVariance]) extends Shap override def deepCopy(): SourceShape[T] = SourceShape(out.carbonCopy()) } object SourceShape { + /** Java API */ def of[T](outlet: Outlet[T @uncheckedVariance]): SourceShape[T] = SourceShape(outlet) @@ -292,6 +304,7 @@ final case class FlowShape[-I, +O](in: Inlet[I @uncheckedVariance], out: Outlet[ override def deepCopy(): FlowShape[I, O] = FlowShape(in.carbonCopy(), out.carbonCopy()) } object FlowShape { + /** Java API */ def of[I, O](inlet: Inlet[I @uncheckedVariance], outlet: Outlet[O @uncheckedVariance]): FlowShape[I, O] = FlowShape(inlet, outlet) @@ -307,6 +320,7 @@ final case class SinkShape[-T](in: Inlet[T @uncheckedVariance]) extends Shape { override def deepCopy(): SinkShape[T] = SinkShape(in.carbonCopy()) } object SinkShape { + /** Java API */ def of[T](inlet: Inlet[T @uncheckedVariance]): SinkShape[T] = SinkShape(inlet) @@ -325,11 +339,11 @@ object SinkShape { * +------+ * }}} */ -final case class BidiShape[-In1, +Out1, -In2, +Out2]( - in1: Inlet[In1 
@uncheckedVariance], - out1: Outlet[Out1 @uncheckedVariance], - in2: Inlet[In2 @uncheckedVariance], - out2: Outlet[Out2 @uncheckedVariance]) extends Shape { +final case class BidiShape[-In1, +Out1, -In2, +Out2](in1: Inlet[In1 @uncheckedVariance], + out1: Outlet[Out1 @uncheckedVariance], + in2: Inlet[In2 @uncheckedVariance], + out2: Outlet[Out2 @uncheckedVariance]) + extends Shape { //#implementation-details-elided override val inlets: immutable.Seq[Inlet[_]] = in1 :: in2 :: Nil override val outlets: immutable.Seq[Outlet[_]] = out1 :: out2 :: Nil @@ -350,11 +364,10 @@ object BidiShape { BidiShape(top.in, top.out, bottom.in, bottom.out) /** Java API */ - def of[In1, Out1, In2, Out2]( - in1: Inlet[In1 @uncheckedVariance], - out1: Outlet[Out1 @uncheckedVariance], - in2: Inlet[In2 @uncheckedVariance], - out2: Outlet[Out2 @uncheckedVariance]): BidiShape[In1, Out1, In2, Out2] = + def of[In1, Out1, In2, Out2](in1: Inlet[In1 @uncheckedVariance], + out1: Outlet[Out1 @uncheckedVariance], + in2: Inlet[In2 @uncheckedVariance], + out2: Outlet[Out2 @uncheckedVariance]): BidiShape[In1, Out1, In2, Out2] = BidiShape(in1, out1, in2, out2) } diff --git a/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala b/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala index 16b2469002..97e70d414a 100644 --- a/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala +++ b/akka-stream/src/main/scala/akka/stream/SslTlsOptions.scala @@ -17,10 +17,12 @@ import scala.collection.immutable * actively initiates the exchange. */ object TLSRole { + /** * Java API: obtain the [[Client]] singleton value. */ def client: TLSRole = Client + /** * Java API: obtain the [[Server]] singleton value. */ @@ -79,18 +81,22 @@ sealed abstract class TLSClosing { def ignoreComplete: Boolean } object TLSClosing { + /** * Java API: obtain the [[EagerClose]] singleton value. */ def eagerClose: TLSClosing = EagerClose + /** * Java API: obtain the [[IgnoreCancel]] singleton value. 
*/ def ignoreCancel: TLSClosing = IgnoreCancel + /** * Java API: obtain the [[IgnoreComplete]] singleton value. */ def ignoreComplete: TLSClosing = IgnoreComplete + /** * Java API: obtain the [[IgnoreBoth]] singleton value. */ @@ -163,7 +169,9 @@ object TLSProtocol { * The Java API for getting session information is given by the SSLSession object, * the Scala API adapters are offered below. */ - final case class SessionBytes(session: SSLSession, bytes: ByteString) extends SslTlsInbound with scaladsl.ScalaSessionAPI + final case class SessionBytes(session: SSLSession, bytes: ByteString) + extends SslTlsInbound + with scaladsl.ScalaSessionAPI /** * This is the supertype of all messages that the SslTls operator accepts on its @@ -189,11 +197,11 @@ object TLSProtocol { * on client authentication requirements while `clientAuth = Some(ClientAuth.None)` * switches off client authentication. */ - case class NegotiateNewSession( - enabledCipherSuites: Option[immutable.Seq[String]], - enabledProtocols: Option[immutable.Seq[String]], - clientAuth: Option[TLSClientAuth], - sslParameters: Option[SSLParameters]) extends SslTlsOutbound { + case class NegotiateNewSession(enabledCipherSuites: Option[immutable.Seq[String]], + enabledProtocols: Option[immutable.Seq[String]], + clientAuth: Option[TLSClientAuth], + sslParameters: Option[SSLParameters]) + extends SslTlsOutbound { /** * Java API: Make a copy of this message with the given `enabledCipherSuites`. @@ -219,6 +227,7 @@ object TLSProtocol { } object NegotiateNewSession extends NegotiateNewSession(None, None, None, None) { + /** * Java API: obtain the default value (which will leave the SSLEngine’s * settings unchanged). 
diff --git a/akka-stream/src/main/scala/akka/stream/StreamDetachedException.scala b/akka-stream/src/main/scala/akka/stream/StreamDetachedException.scala index 2cceaa3e89..7402d7cb3c 100644 --- a/akka-stream/src/main/scala/akka/stream/StreamDetachedException.scala +++ b/akka-stream/src/main/scala/akka/stream/StreamDetachedException.scala @@ -10,9 +10,7 @@ import scala.util.control.NoStackTrace * This exception signals that materialized value is already detached from stream. This usually happens * when stream is completed and an ActorSystem is shut down while materialized object is still available. */ -final class StreamDetachedException(message: String) - extends RuntimeException(message) - with NoStackTrace { +final class StreamDetachedException(message: String) extends RuntimeException(message) with NoStackTrace { def this() = this("Stream is terminated. Materialized value is detached.") } diff --git a/akka-stream/src/main/scala/akka/stream/StreamRefSettings.scala b/akka-stream/src/main/scala/akka/stream/StreamRefSettings.scala index fa939e38b1..ed39c95ff9 100644 --- a/akka-stream/src/main/scala/akka/stream/StreamRefSettings.scala +++ b/akka-stream/src/main/scala/akka/stream/StreamRefSettings.scala @@ -17,6 +17,7 @@ object StreamRefSettings { /** Java API */ def create(system: ActorSystem): StreamRefSettings = apply(system) + /** Scala API */ def apply(system: ActorSystem): StreamRefSettings = { apply(system.settings.config.getConfig("akka.stream.materializer.stream-ref")) @@ -24,14 +25,15 @@ object StreamRefSettings { /** Java API */ def create(c: Config): StreamRefSettings = apply(c) + /** Scala API */ def apply(c: Config): StreamRefSettings = { - StreamRefSettingsImpl( - bufferCapacity = c.getInt("buffer-capacity"), - demandRedeliveryInterval = c.getDuration("demand-redelivery-interval", TimeUnit.MILLISECONDS).millis, - subscriptionTimeout = c.getDuration("subscription-timeout", TimeUnit.MILLISECONDS).millis, - finalTerminationSignalDeadline = 
c.getDuration("final-termination-signal-deadline", TimeUnit.MILLISECONDS).millis - ) + StreamRefSettingsImpl(bufferCapacity = c.getInt("buffer-capacity"), + demandRedeliveryInterval = + c.getDuration("demand-redelivery-interval", TimeUnit.MILLISECONDS).millis, + subscriptionTimeout = c.getDuration("subscription-timeout", TimeUnit.MILLISECONDS).millis, + finalTerminationSignalDeadline = + c.getDuration("final-termination-signal-deadline", TimeUnit.MILLISECONDS).millis) } } @@ -53,4 +55,3 @@ trait StreamRefSettings { def withSubscriptionTimeout(value: FiniteDuration): StreamRefSettings def withTerminationReceivedBeforeCompletionLeeway(value: FiniteDuration): StreamRefSettings } - diff --git a/akka-stream/src/main/scala/akka/stream/StreamRefs.scala b/akka-stream/src/main/scala/akka/stream/StreamRefs.scala index 96eeb53d46..391cdf6118 100644 --- a/akka-stream/src/main/scala/akka/stream/StreamRefs.scala +++ b/akka-stream/src/main/scala/akka/stream/StreamRefs.scala @@ -14,6 +14,7 @@ import scala.language.implicitConversions * See full documentation on [[SinkRef]]. */ object SinkRef { + /** Implicitly converts a [[SinkRef]] to a [[Sink]]. The same can be achieved by calling `.sink` on the reference. */ implicit def convertRefToSink[T](sinkRef: SinkRef[T]): Sink[T, NotUsed] = sinkRef.sink() } @@ -35,6 +36,7 @@ trait SinkRef[In] { /** Scala API: Get [[Sink]] underlying to this source ref. */ def sink(): Sink[In, NotUsed] + /** Java API: Get [[javadsl.Sink]] underlying to this source ref. */ final def getSink(): javadsl.Sink[In, NotUsed] = sink().asJava } @@ -43,6 +45,7 @@ trait SinkRef[In] { * See full documentation on [[SourceRef]]. */ object SourceRef { + /** Implicitly converts a SourceRef to a Source. The same can be achieved by calling `.source` on the SourceRef itself. 
*/ implicit def convertRefToSource[T](ref: SourceRef[T]): Source[T, NotUsed] = ref.source @@ -61,8 +64,10 @@ object SourceRef { * For additional configuration see `reference.conf` as well as [[akka.stream.StreamRefAttributes]]. */ trait SourceRef[T] { + /** Scala API: Get [[Source]] underlying to this source ref. */ def source: Source[T, NotUsed] + /** Java API: Get [[javadsl.Source]] underlying to this source ref. */ final def getSource: javadsl.Source[T, NotUsed] = source.asJava } @@ -70,16 +75,17 @@ trait SourceRef[T] { // --- exceptions --- final case class TargetRefNotInitializedYetException() - extends IllegalStateException("Internal remote target actor ref not yet resolved, yet attempted to send messages to it. " + - "This should not happen due to proper flow-control, please open a ticket on the issue tracker: https://github.com/akka/akka") + extends IllegalStateException( + "Internal remote target actor ref not yet resolved, yet attempted to send messages to it. " + + "This should not happen due to proper flow-control, please open a ticket on the issue tracker: https://github.com/akka/akka") -final case class StreamRefSubscriptionTimeoutException(msg: String) - extends IllegalStateException(msg) +final case class StreamRefSubscriptionTimeoutException(msg: String) extends IllegalStateException(msg) final case class RemoteStreamRefActorTerminatedException(msg: String) extends RuntimeException(msg) final case class InvalidSequenceNumberException(expectedSeqNr: Long, gotSeqNr: Long, msg: String) - extends IllegalStateException(s"$msg (expected: $expectedSeqNr, got: $gotSeqNr). " + - s"In most cases this means that message loss on this connection has occurred and the stream will fail eagerly.") + extends IllegalStateException( + s"$msg (expected: $expectedSeqNr, got: $gotSeqNr). 
" + + s"In most cases this means that message loss on this connection has occurred and the stream will fail eagerly.") /** * Stream refs establish a connection between a local and remote actor, representing the origin and remote sides @@ -92,7 +98,8 @@ final case class InvalidSequenceNumberException(expectedSeqNr: Long, gotSeqNr: L * This is not meant as a security feature, but rather as plain sanity-check. */ final case class InvalidPartnerActorException(expectedRef: ActorRef, gotRef: ActorRef, msg: String) - extends IllegalStateException(s"$msg (expected: $expectedRef, got: $gotRef). " + - s"This may happen due to 'double-materialization' on the other side of this stream ref. " + - s"Do note that stream refs are one-shot references and have to be paired up in 1:1 pairs. " + - s"Multi-cast such as broadcast etc can be implemented by sharing multiple new stream references. ") + extends IllegalStateException( + s"$msg (expected: $expectedRef, got: $gotRef). " + + s"This may happen due to 'double-materialization' on the other side of this stream ref. " + + s"Do note that stream refs are one-shot references and have to be paired up in 1:1 pairs. " + + s"Multi-cast such as broadcast etc can be implemented by sharing multiple new stream references. 
") diff --git a/akka-stream/src/main/scala/akka/stream/StreamTcpException.scala b/akka-stream/src/main/scala/akka/stream/StreamTcpException.scala index 20f9ac4264..2bae757242 100644 --- a/akka-stream/src/main/scala/akka/stream/StreamTcpException.scala +++ b/akka-stream/src/main/scala/akka/stream/StreamTcpException.scala @@ -14,4 +14,3 @@ class BindFailedException extends StreamTcpException("bind failed") case object BindFailedException extends BindFailedException class ConnectionException(msg: String) extends StreamTcpException(msg) - diff --git a/akka-stream/src/main/scala/akka/stream/SubstreamCancelStrategy.scala b/akka-stream/src/main/scala/akka/stream/SubstreamCancelStrategy.scala index 83e30fffb1..af389f6f85 100644 --- a/akka-stream/src/main/scala/akka/stream/SubstreamCancelStrategy.scala +++ b/akka-stream/src/main/scala/akka/stream/SubstreamCancelStrategy.scala @@ -12,6 +12,7 @@ import SubstreamCancelStrategies._ sealed abstract class SubstreamCancelStrategy private[akka] object SubstreamCancelStrategies { + /** * INTERNAL API */ @@ -24,6 +25,7 @@ private[akka] object SubstreamCancelStrategies { } object SubstreamCancelStrategy { + /** * Cancel the stream of streams if any substream is cancelled. */ @@ -34,4 +36,3 @@ object SubstreamCancelStrategy { */ def drain: SubstreamCancelStrategy = Drain } - diff --git a/akka-stream/src/main/scala/akka/stream/TooManySubstreamsOpenException.scala b/akka-stream/src/main/scala/akka/stream/TooManySubstreamsOpenException.scala index 611eb25426..a7a5011603 100644 --- a/akka-stream/src/main/scala/akka/stream/TooManySubstreamsOpenException.scala +++ b/akka-stream/src/main/scala/akka/stream/TooManySubstreamsOpenException.scala @@ -11,6 +11,5 @@ import scala.util.control.NoStackTrace * A finite limit is imposed so that memory usage is controlled. 
*/ final class TooManySubstreamsOpenException - extends IllegalStateException("Cannot open a new substream as there are too many substreams open") - with NoStackTrace { -} + extends IllegalStateException("Cannot open a new substream as there are too many substreams open") + with NoStackTrace {} diff --git a/akka-stream/src/main/scala/akka/stream/Transformer.scala b/akka-stream/src/main/scala/akka/stream/Transformer.scala index 230558b78f..564fc8d375 100644 --- a/akka-stream/src/main/scala/akka/stream/Transformer.scala +++ b/akka-stream/src/main/scala/akka/stream/Transformer.scala @@ -7,6 +7,7 @@ package akka.stream import scala.collection.immutable private[akka] abstract class TransformerLike[-T, +U] { + /** * Invoked for each element to produce a (possibly empty) sequence of * output elements. @@ -46,4 +47,3 @@ private[akka] abstract class TransformerLike[-T, +U] { def cleanup(): Unit = () } - diff --git a/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala b/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala index 279df4ae24..762113a99a 100644 --- a/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala +++ b/akka-stream/src/main/scala/akka/stream/UniformFanInShape.scala @@ -20,10 +20,12 @@ class UniformFanInShape[-T, +O](val n: Int, _init: FanInShape.Init[O]) extends F def this(n: Int) = this(n, FanInShape.Name[O]("UniformFanIn")) def this(n: Int, name: String) = this(n, FanInShape.Name[O](name)) def this(outlet: Outlet[O], inlets: Array[Inlet[T]]) = this(inlets.length, FanInShape.Ports(outlet, inlets.toList)) - override protected def construct(init: FanInShape.Init[O @uncheckedVariance]): FanInShape[O] = new UniformFanInShape(n, init) + override protected def construct(init: FanInShape.Init[O @uncheckedVariance]): FanInShape[O] = + new UniformFanInShape(n, init) override def deepCopy(): UniformFanInShape[T, O] = super.deepCopy().asInstanceOf[UniformFanInShape[T, O]] - final override def inlets: immutable.Seq[Inlet[T 
@uncheckedVariance]] = super.inlets.asInstanceOf[immutable.Seq[Inlet[T]]] + final override def inlets: immutable.Seq[Inlet[T @uncheckedVariance]] = + super.inlets.asInstanceOf[immutable.Seq[Inlet[T]]] @deprecated("Use 'inlets' or 'in(id)' instead.", "2.5.5") def inSeq: immutable.IndexedSeq[Inlet[T @uncheckedVariance]] = _inSeq diff --git a/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala b/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala index 1dbce51b57..6bf18d0323 100644 --- a/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala +++ b/akka-stream/src/main/scala/akka/stream/UniformFanOutShape.scala @@ -20,10 +20,12 @@ class UniformFanOutShape[-I, +O](n: Int, _init: FanOutShape.Init[I @uncheckedVar def this(n: Int) = this(n, FanOutShape.Name[I]("UniformFanOut")) def this(n: Int, name: String) = this(n, FanOutShape.Name[I](name)) def this(inlet: Inlet[I], outlets: Array[Outlet[O]]) = this(outlets.length, FanOutShape.Ports(inlet, outlets.toList)) - override protected def construct(init: FanOutShape.Init[I @uncheckedVariance]): FanOutShape[I] = new UniformFanOutShape(n, init) + override protected def construct(init: FanOutShape.Init[I @uncheckedVariance]): FanOutShape[I] = + new UniformFanOutShape(n, init) override def deepCopy(): UniformFanOutShape[I, O] = super.deepCopy().asInstanceOf[UniformFanOutShape[I, O]] - final override def outlets: immutable.Seq[Outlet[O @uncheckedVariance]] = super.outlets.asInstanceOf[immutable.Seq[Outlet[O]]] + final override def outlets: immutable.Seq[Outlet[O @uncheckedVariance]] = + super.outlets.asInstanceOf[immutable.Seq[Outlet[O]]] @Deprecated @deprecated("use 'outlets' or 'out(id)' instead", "2.5.5") diff --git a/akka-stream/src/main/scala/akka/stream/WatchedActorTerminatedException.scala b/akka-stream/src/main/scala/akka/stream/WatchedActorTerminatedException.scala index 823332ccac..e8b50a57a4 100644 --- a/akka-stream/src/main/scala/akka/stream/WatchedActorTerminatedException.scala +++ 
b/akka-stream/src/main/scala/akka/stream/WatchedActorTerminatedException.scala @@ -11,4 +11,4 @@ import akka.actor.ActorRef * See `Flow.ask` and `Flow.watch`. */ final class WatchedActorTerminatedException(val watchingStageName: String, val ref: ActorRef) - extends RuntimeException(s"Actor watched by [$watchingStageName] has terminated! Was: $ref") + extends RuntimeException(s"Actor watched by [$watchingStageName] has terminated! Was: $ref") diff --git a/akka-stream/src/main/scala/akka/stream/actor/ActorPublisher.scala b/akka-stream/src/main/scala/akka/stream/actor/ActorPublisher.scala index 26affe6bcd..ec1570abf7 100644 --- a/akka-stream/src/main/scala/akka/stream/actor/ActorPublisher.scala +++ b/akka-stream/src/main/scala/akka/stream/actor/ActorPublisher.scala @@ -13,7 +13,9 @@ import concurrent.duration.FiniteDuration import akka.stream.impl.CancelledSubscription import akka.stream.impl.ReactiveStreamsCompliance._ -@deprecated("Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") +@deprecated( + "Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") object ActorPublisher { /** @@ -27,7 +29,9 @@ object ActorPublisher { * INTERNAL API */ private[akka] object Internal { - final case class Subscribe(subscriber: Subscriber[Any]) extends DeadLetterSuppression with NoSerializationVerificationNeeded + final case class Subscribe(subscriber: Subscriber[Any]) + extends DeadLetterSuppression + with NoSerializationVerificationNeeded sealed trait LifecycleState case object PreSubscriber extends LifecycleState @@ -42,6 +46,7 @@ object ActorPublisher { sealed abstract class ActorPublisherMessage extends DeadLetterSuppression object ActorPublisherMessage { + /** * This message is delivered to the [[ActorPublisher]] actor 
when the stream subscriber requests * more elements. @@ -49,6 +54,7 @@ object ActorPublisherMessage { */ final case class Request(n: Long) extends ActorPublisherMessage with NoSerializationVerificationNeeded { private var processed = false + /** * INTERNAL API: needed for stash support */ @@ -66,6 +72,7 @@ object ActorPublisherMessage { */ final case object Cancel extends Cancel with NoSerializationVerificationNeeded sealed abstract class Cancel extends ActorPublisherMessage + /** * Java API: get the singleton instance of the `Cancel` message */ @@ -75,8 +82,11 @@ object ActorPublisherMessage { * This message is delivered to the [[ActorPublisher]] actor in order to signal the exceeding of an subscription timeout. * Once the actor receives this message, this publisher will already be in canceled state, thus the actor should clean-up and stop itself. */ - final case object SubscriptionTimeoutExceeded extends SubscriptionTimeoutExceeded with NoSerializationVerificationNeeded + final case object SubscriptionTimeoutExceeded + extends SubscriptionTimeoutExceeded + with NoSerializationVerificationNeeded sealed abstract class SubscriptionTimeoutExceeded extends ActorPublisherMessage + /** * Java API: get the singleton instance of the `SubscriptionTimeoutExceeded` message */ @@ -125,7 +135,9 @@ object ActorPublisherMessage { * * @deprecated Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. 
*/ -@deprecated("Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") +@deprecated( + "Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") trait ActorPublisher[T] extends Actor { import ActorPublisher.Internal._ import ActorPublisherMessage._ @@ -210,7 +222,8 @@ trait ActorPublisher[T] extends Actor { case Active | PreSubscriber => lifecycleState = Completed if (subscriber ne null) // otherwise onComplete will be called when the subscription arrives - try tryOnComplete(subscriber) finally subscriber = null + try tryOnComplete(subscriber) + finally subscriber = null case Completed | CompleteThenStop => throw new IllegalStateException("onComplete must only be called once") case _: ErrorEmitted => @@ -231,7 +244,8 @@ trait ActorPublisher[T] extends Actor { case Active | PreSubscriber => lifecycleState = CompleteThenStop if (subscriber ne null) // otherwise onComplete will be called when the subscription arrives - try tryOnComplete(subscriber) finally context.stop(self) + try tryOnComplete(subscriber) + finally context.stop(self) case _ => onComplete() } @@ -243,13 +257,15 @@ trait ActorPublisher[T] extends Actor { case Active | PreSubscriber => lifecycleState = ErrorEmitted(cause, stop = false) if (subscriber ne null) // otherwise onError will be called when the subscription arrives - try tryOnError(subscriber, cause) finally subscriber = null + try tryOnError(subscriber, cause) + finally subscriber = null case _: ErrorEmitted => throw new IllegalStateException("onError must only be called once") case Completed | CompleteThenStop => throw new IllegalStateException("onError must not be called after onComplete") case Canceled => // drop } + /** * Terminate the stream with failure. 
After that you are not allowed to * call [[#onNext]], [[#onError]] and [[#onComplete]]. @@ -263,7 +279,8 @@ trait ActorPublisher[T] extends Actor { case Active | PreSubscriber => lifecycleState = ErrorEmitted(cause, stop = true) if (subscriber ne null) // otherwise onError will be called when the subscription arrives - try tryOnError(subscriber, cause) finally context.stop(self) + try tryOnError(subscriber, cause) + finally context.stop(self) case _ => onError(cause) } @@ -364,7 +381,7 @@ trait ActorPublisher[T] extends Actor { * INTERNAL API */ protected[akka] override def aroundPostRestart(reason: Throwable): Unit = { - state.get(self) foreach { s => + state.get(self).foreach { s => // restore previous state subscriber = s.subscriber.orNull demand = s.demand @@ -443,6 +460,7 @@ private[akka] class ActorPublisherState extends Extension { * Java API */ object UntypedActorPublisher { + /** * Java API: Create a [[org.reactivestreams.Publisher]] backed by a [[UntypedActorPublisher]] actor. It can be * attached to a [[org.reactivestreams.Subscriber]] or be used as an input source for a @@ -455,13 +473,16 @@ object UntypedActorPublisher { * Java API * @see [[akka.stream.actor.ActorPublisher]] */ -@deprecated("Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") +@deprecated( + "Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") abstract class UntypedActorPublisher[T] extends UntypedActor with ActorPublisher[T] /** * Java API compatible with lambda expressions */ object AbstractActorPublisher { + /** * Java API compatible with lambda expressions: Create a [[org.reactivestreams.Publisher]] * backed by a [[AbstractActorPublisher]] actor. 
It can be attached to a [[org.reactivestreams.Subscriber]] @@ -476,7 +497,9 @@ object AbstractActorPublisher { * * @deprecated Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. */ -@deprecated("Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") +@deprecated( + "Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") abstract class AbstractActorPublisher[T] extends AbstractActor with ActorPublisher[T] /** @@ -486,7 +509,9 @@ abstract class AbstractActorPublisher[T] extends AbstractActor with ActorPublish * * @deprecated Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. */ -@deprecated("Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") +@deprecated( + "Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") abstract class AbstractActorPublisherWithStash[T] extends AbstractActor with ActorPublisher[T] with Stash /** @@ -496,8 +521,13 @@ abstract class AbstractActorPublisherWithStash[T] extends AbstractActor with Act * * @deprecated Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. 
*/ -@deprecated("Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") -abstract class AbstractActorPublisherWithUnboundedStash[T] extends AbstractActor with ActorPublisher[T] with UnboundedStash +@deprecated( + "Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") +abstract class AbstractActorPublisherWithUnboundedStash[T] + extends AbstractActor + with ActorPublisher[T] + with UnboundedStash /** * Java API compatible with lambda expressions. @@ -506,5 +536,10 @@ abstract class AbstractActorPublisherWithUnboundedStash[T] extends AbstractActor * * @deprecated Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. */ -@deprecated("Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") -abstract class AbstractActorPublisherWithUnrestrictedStash[T] extends AbstractActor with ActorPublisher[T] with UnrestrictedStash +@deprecated( + "Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") +abstract class AbstractActorPublisherWithUnrestrictedStash[T] + extends AbstractActor + with ActorPublisher[T] + with UnrestrictedStash diff --git a/akka-stream/src/main/scala/akka/stream/actor/ActorSubscriber.scala b/akka-stream/src/main/scala/akka/stream/actor/ActorSubscriber.scala index ebe51335a2..64449af296 100644 --- a/akka-stream/src/main/scala/akka/stream/actor/ActorSubscriber.scala +++ b/akka-stream/src/main/scala/akka/stream/actor/ActorSubscriber.scala @@ 
-21,7 +21,8 @@ object ActorSubscriber { * INTERNAL API */ private[akka] final case class OnSubscribe(subscription: Subscription) - extends DeadLetterSuppression with NoSerializationVerificationNeeded + extends DeadLetterSuppression + with NoSerializationVerificationNeeded } @@ -42,6 +43,7 @@ object ActorSubscriberMessage { * An [[ActorSubscriber]] defines a `RequestStrategy` to control the stream back pressure. */ trait RequestStrategy { + /** * Invoked by the [[ActorSubscriber]] after each incoming message to * determine how many more elements to request from the stream. @@ -82,6 +84,7 @@ case object ZeroRequestStrategy extends RequestStrategy { } object WatermarkRequestStrategy { + /** * Create [[WatermarkRequestStrategy]] with `lowWatermark` as half of * the specified `highWatermark`. @@ -162,7 +165,9 @@ abstract class MaxInFlightRequestStrategy(max: Int) extends RequestStrategy { * * @deprecated Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. 
*/ -@deprecated("Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") +@deprecated( + "Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") trait ActorSubscriber extends Actor { import ActorSubscriber._ import ActorSubscriberMessage._ @@ -218,7 +223,7 @@ trait ActorSubscriber extends Actor { * INTERNAL API */ protected[akka] override def aroundPostRestart(reason: Throwable): Unit = { - state.get(self) foreach { s => + state.get(self).foreach { s => // restore previous state subscription = s.subscription requested = s.requested @@ -341,6 +346,7 @@ private[akka] class ActorSubscriberState extends Extension { * Java API */ object UntypedActorSubscriber { + /** * Java API: Attach a [[UntypedActorSubscriber]] actor as a [[org.reactivestreams.Subscriber]] * to a [[org.reactivestreams.Publisher]] or [[akka.stream.javadsl.Flow]]. @@ -354,7 +360,9 @@ object UntypedActorSubscriber { * * @deprecated Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. 
*/ -@deprecated("Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") +@deprecated( + "Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") abstract class UntypedActorSubscriber extends UntypedActor with ActorSubscriber /** @@ -362,8 +370,11 @@ abstract class UntypedActorSubscriber extends UntypedActor with ActorSubscriber * * @deprecated Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. */ -@deprecated("Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") +@deprecated( + "Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") object AbstractActorSubscriber { + /** * Java API compatible with lambda expressions: Attach a [[AbstractActorSubscriber]] actor * as a [[org.reactivestreams.Subscriber]] o a [[org.reactivestreams.Publisher]] or @@ -378,5 +389,7 @@ object AbstractActorSubscriber { * * @deprecated Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. 
*/ -@deprecated("Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") +@deprecated( + "Use `akka.stream.stage.GraphStage` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") abstract class AbstractActorSubscriber extends AbstractActor with ActorSubscriber diff --git a/akka-stream/src/main/scala/akka/stream/extra/Implicits.scala b/akka-stream/src/main/scala/akka/stream/extra/Implicits.scala index 5993ec95be..1fce009841 100644 --- a/akka-stream/src/main/scala/akka/stream/extra/Implicits.scala +++ b/akka-stream/src/main/scala/akka/stream/extra/Implicits.scala @@ -23,7 +23,8 @@ object Implicits { /** * Measures time from receiving the first element and completion events - one for each subscriber of this `Flow`. */ - def timed[O, Mat2](measuredOps: Source[I, Mat] => Source[O, Mat2], onComplete: FiniteDuration => Unit): Source[O, Mat2] = + def timed[O, Mat2](measuredOps: Source[I, Mat] => Source[O, Mat2], + onComplete: FiniteDuration => Unit): Source[O, Mat2] = Timed.timed[I, O, Mat, Mat2](source, measuredOps, onComplete) /** @@ -43,7 +44,8 @@ object Implicits { /** * Measures time from receiving the first element and completion events - one for each subscriber of this `Flow`. 
*/ - def timed[Out, Mat2](measuredOps: Flow[I, O, Mat] => Flow[I, Out, Mat2], onComplete: FiniteDuration => Unit): Flow[I, Out, Mat2] = + def timed[Out, Mat2](measuredOps: Flow[I, O, Mat] => Flow[I, Out, Mat2], + onComplete: FiniteDuration => Unit): Flow[I, Out, Mat2] = Timed.timed[I, O, Out, Mat, Mat2](flow, measuredOps, onComplete) /** diff --git a/akka-stream/src/main/scala/akka/stream/extra/Timed.scala b/akka-stream/src/main/scala/akka/stream/extra/Timed.scala index d606b65757..fd3e317a07 100644 --- a/akka-stream/src/main/scala/akka/stream/extra/Timed.scala +++ b/akka-stream/src/main/scala/akka/stream/extra/Timed.scala @@ -26,7 +26,9 @@ private[akka] trait TimedOps { * Measures time from receiving the first element and completion events - one for each subscriber of this `Flow`. */ @deprecated("Moved to the akka/akka-stream-contrib project", since = "2.4.5") - def timed[I, O, Mat, Mat2](source: Source[I, Mat], measuredOps: Source[I, Mat] => Source[O, Mat2], onComplete: FiniteDuration => Unit): Source[O, Mat2] = { + def timed[I, O, Mat, Mat2](source: Source[I, Mat], + measuredOps: Source[I, Mat] => Source[O, Mat2], + onComplete: FiniteDuration => Unit): Source[O, Mat2] = { val ctx = new TimedFlowContext val startTimed = Flow[I].via(new StartTimed(ctx)).named("startTimed") @@ -41,7 +43,9 @@ private[akka] trait TimedOps { * Measures time from receiving the first element and completion events - one for each subscriber of this `Flow`. */ @deprecated("Moved to the akka/akka-stream-contrib project", since = "2.4.5") - def timed[I, O, Out, Mat, Mat2](flow: Flow[I, O, Mat], measuredOps: Flow[I, O, Mat] => Flow[I, Out, Mat2], onComplete: FiniteDuration => Unit): Flow[I, Out, Mat2] = { + def timed[I, O, Out, Mat, Mat2](flow: Flow[I, O, Mat], + measuredOps: Flow[I, O, Mat] => Flow[I, Out, Mat2], + onComplete: FiniteDuration => Unit): Flow[I, Out, Mat2] = { // todo is there any other way to provide this for Flow, without duplicating impl? 
// they do share a super-type (FlowOps), but all operations of FlowOps return path dependant type val ctx = new TimedFlowContext @@ -67,7 +71,9 @@ private[akka] trait TimedIntervalBetweenOps { * Measures rolling interval between immediately subsequent `matching(o: O)` elements. */ @deprecated("Moved to the akka/akka-stream-contrib project", since = "2.4.5") - def timedIntervalBetween[O, Mat](source: Source[O, Mat], matching: O => Boolean, onInterval: FiniteDuration => Unit): Source[O, Mat] = { + def timedIntervalBetween[O, Mat](source: Source[O, Mat], + matching: O => Boolean, + onInterval: FiniteDuration => Unit): Source[O, Mat] = { val timedInterval = Flow[O].via(new TimedInterval[O](matching, onInterval)).named("timedInterval") source.via(timedInterval) } @@ -76,7 +82,9 @@ private[akka] trait TimedIntervalBetweenOps { * Measures rolling interval between immediately subsequent `matching(o: O)` elements. */ @deprecated("Moved to the akka/akka-stream-contrib project", since = "2.4.5") - def timedIntervalBetween[I, O, Mat](flow: Flow[I, O, Mat], matching: O => Boolean, onInterval: FiniteDuration => Unit): Flow[I, O, Mat] = { + def timedIntervalBetween[I, O, Mat](flow: Flow[I, O, Mat], + matching: O => Boolean, + onInterval: FiniteDuration => Unit): Flow[I, O, Mat] = { val timedInterval = Flow[O].via(new TimedInterval[O](matching, onInterval)).named("timedInterval") flow.via(timedInterval) } @@ -111,82 +119,87 @@ object Timed extends TimedOps with TimedIntervalBetweenOps { final class StartTimed[T](timedContext: TimedFlowContext) extends SimpleLinearGraphStage[T] { - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { - private var started = false + private var started = false - override def onPush(): Unit = { - if (!started) { - 
timedContext.start() - started = true + override def onPush(): Unit = { + if (!started) { + timedContext.start() + started = true + } + push(out, grab(in)) } - push(out, grab(in)) + + override def onPull(): Unit = pull(in) + + setHandlers(in, out, this) } - - override def onPull(): Unit = pull(in) - - setHandlers(in, out, this) - } } - final class StopTimed[T](timedContext: TimedFlowContext, _onComplete: FiniteDuration => Unit) extends SimpleLinearGraphStage[T] { + final class StopTimed[T](timedContext: TimedFlowContext, _onComplete: FiniteDuration => Unit) + extends SimpleLinearGraphStage[T] { - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { - override def onPush(): Unit = push(out, grab(in)) + override def onPush(): Unit = push(out, grab(in)) - override def onPull(): Unit = pull(in) + override def onPull(): Unit = pull(in) - override def onUpstreamFailure(cause: Throwable): Unit = { - stopTime() - failStage(cause) + override def onUpstreamFailure(cause: Throwable): Unit = { + stopTime() + failStage(cause) + } + + override def onUpstreamFinish(): Unit = { + stopTime() + completeStage() + } + + private def stopTime(): Unit = { + val d = timedContext.stop() + _onComplete(d) + } + + setHandlers(in, out, this) } - - override def onUpstreamFinish(): Unit = { - stopTime() - completeStage() - } - - private def stopTime(): Unit = { - val d = timedContext.stop() - _onComplete(d) - } - - setHandlers(in, out, this) - } } - final class TimedInterval[T](matching: T => Boolean, onInterval: FiniteDuration => Unit) extends SimpleLinearGraphStage[T] { + final class TimedInterval[T](matching: T => Boolean, onInterval: FiniteDuration => Unit) + extends SimpleLinearGraphStage[T] { - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic 
= new GraphStageLogic(shape) with InHandler with OutHandler { + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { - private var prevNanos = 0L - private var matched = 0L + private var prevNanos = 0L + private var matched = 0L - override def onPush(): Unit = { - val elem = grab(in) - if (matching(elem)) { - val d = updateInterval() + override def onPush(): Unit = { + val elem = grab(in) + if (matching(elem)) { + val d = updateInterval() - if (matched > 1) - onInterval(d) + if (matched > 1) + onInterval(d) + } + push(out, elem) } - push(out, elem) + + override def onPull(): Unit = pull(in) + + private def updateInterval(): FiniteDuration = { + matched += 1 + val nowNanos = System.nanoTime() + val d = nowNanos - prevNanos + prevNanos = nowNanos + d.nanoseconds + } + + setHandlers(in, out, this) } - override def onPull(): Unit = pull(in) - - private def updateInterval(): FiniteDuration = { - matched += 1 - val nowNanos = System.nanoTime() - val d = nowNanos - prevNanos - prevNanos = nowNanos - d.nanoseconds - } - - setHandlers(in, out, this) - } - } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala index ed559ad28a..3e5140c379 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorMaterializerImpl.scala @@ -29,16 +29,13 @@ import scala.concurrent.{ Await, ExecutionContextExecutor } @InternalApi def materialize[Mat](_runnableGraph: Graph[ClosedShape, Mat]): Mat /** INTERNAL API */ - @InternalApi def materialize[Mat]( - _runnableGraph: Graph[ClosedShape, Mat], - defaultAttributes: Attributes): Mat + @InternalApi def materialize[Mat](_runnableGraph: Graph[ClosedShape, Mat], defaultAttributes: Attributes): Mat /** INTERNAL API */ - @InternalApi private[akka] def materialize[Mat]( - graph: 
Graph[ClosedShape, Mat], - defaultAttributes: Attributes, - defaultPhase: Phase[Any], - phases: Map[IslandTag, Phase[Any]]): Mat + @InternalApi private[akka] def materialize[Mat](graph: Graph[ClosedShape, Mat], + defaultAttributes: Attributes, + defaultPhase: Phase[Any], + phases: Map[IslandTag, Phase[Any]]): Mat /** * INTERNAL API @@ -94,11 +91,16 @@ import scala.concurrent.{ Await, ExecutionContextExecutor } * * The default phases are left in-tact since we still respect `.async` and other tags that were marked within a sub-fused graph. */ -private[akka] class SubFusingActorMaterializerImpl(val delegate: ExtendedActorMaterializer, registerShell: GraphInterpreterShell => ActorRef) extends Materializer { +private[akka] class SubFusingActorMaterializerImpl(val delegate: ExtendedActorMaterializer, + registerShell: GraphInterpreterShell => ActorRef) + extends Materializer { val subFusingPhase = new Phase[Any] { - override def apply(settings: ActorMaterializerSettings, attributes: Attributes, - materializer: PhasedFusingActorMaterializer, islandName: String): PhaseIsland[Any] = { - new GraphStageIsland(settings, attributes, materializer, islandName, OptionVal(registerShell)).asInstanceOf[PhaseIsland[Any]] + override def apply(settings: ActorMaterializerSettings, + attributes: Attributes, + materializer: PhasedFusingActorMaterializer, + islandName: String): PhaseIsland[Any] = { + new GraphStageIsland(settings, attributes, materializer, islandName, OptionVal(registerShell)) + .asInstanceOf[PhaseIsland[Any]] } } @@ -110,7 +112,8 @@ private[akka] class SubFusingActorMaterializerImpl(val delegate: ExtendedActorMa materialize(runnable, am.defaultAttributes) case other => - throw new IllegalStateException(s"SubFusing only supported by [PhasedFusingActorMaterializer], " + + throw new IllegalStateException( + s"SubFusing only supported by [PhasedFusingActorMaterializer], " + s"yet was used with [${other.getClass.getName}]!") } @@ -123,7 +126,9 @@ private[akka] class 
SubFusingActorMaterializerImpl(val delegate: ExtendedActorMa override def scheduleOnce(delay: FiniteDuration, task: Runnable): Cancellable = delegate.scheduleOnce(delay, task) - override def schedulePeriodically(initialDelay: FiniteDuration, interval: FiniteDuration, task: Runnable): Cancellable = + override def schedulePeriodically(initialDelay: FiniteDuration, + interval: FiniteDuration, + task: Runnable): Cancellable = delegate.schedulePeriodically(initialDelay, interval, task) override def withNamePrefix(name: String): SubFusingActorMaterializerImpl = @@ -151,21 +156,24 @@ private[akka] class SubFusingActorMaterializerImpl(val delegate: ExtendedActorMa */ @InternalApi private[akka] object StreamSupervisor { def props(settings: ActorMaterializerSettings, haveShutDown: AtomicBoolean): Props = - Props(new StreamSupervisor(haveShutDown)).withDeploy(Deploy.local) - .withDispatcher(settings.dispatcher) + Props(new StreamSupervisor(haveShutDown)).withDeploy(Deploy.local).withDispatcher(settings.dispatcher) private[stream] val baseName = "StreamSupervisor" private val actorName = SeqActorName(baseName) def nextName(): String = actorName.next() final case class Materialize(props: Props, name: String) - extends DeadLetterSuppression with NoSerializationVerificationNeeded + extends DeadLetterSuppression + with NoSerializationVerificationNeeded /** Testing purpose */ case object GetChildren + /** Testing purpose */ final case class Children(children: Set[ActorRef]) + /** Testing purpose */ case object StopChildren + /** Testing purpose */ case object StoppedChildren } @@ -190,4 +198,3 @@ private[akka] class SubFusingActorMaterializerImpl(val delegate: ExtendedActorMa override def postStop(): Unit = haveShutDown.set(true) } - diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala index 6993d5c9a1..d6bc304b69 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala 
+++ b/akka-stream/src/main/scala/akka/stream/impl/ActorProcessor.scala @@ -28,8 +28,9 @@ import akka.event.Logging /** * INTERNAL API */ -@InternalApi private[akka] class ActorProcessor[I, O](impl: ActorRef) extends ActorPublisher[O](impl) - with Processor[I, O] { +@InternalApi private[akka] class ActorProcessor[I, O](impl: ActorRef) + extends ActorPublisher[O](impl) + with Processor[I, O] { override def onSubscribe(s: Subscription): Unit = { ReactiveStreamsCompliance.requireNonNullSubscription(s) impl ! OnSubscribe(s) @@ -48,7 +49,8 @@ import akka.event.Logging /** * INTERNAL API */ -@InternalApi private[akka] abstract class BatchingInputBuffer(val size: Int, val pump: Pump) extends DefaultInputTransferStates { +@InternalApi private[akka] abstract class BatchingInputBuffer(val size: Int, val pump: Pump) + extends DefaultInputTransferStates { if (size < 1) throw new IllegalArgumentException(s"buffer size must be positive (was: $size)") if ((size & (size - 1)) != 0) throw new IllegalArgumentException(s"buffer size must be a power of two (was: $size)") @@ -160,7 +162,8 @@ import akka.event.Logging /** * INTERNAL API */ -@InternalApi private[akka] class SimpleOutputs(val actor: ActorRef, val pump: Pump) extends DefaultOutputTransferStates { +@InternalApi private[akka] class SimpleOutputs(val actor: ActorRef, val pump: Pump) + extends DefaultOutputTransferStates { import ReactiveStreamsCompliance._ protected var exposedPublisher: ActorPublisher[Any] = _ @@ -210,7 +213,7 @@ import akka.event.Logging protected def createSubscription(): Subscription = new ActorSubscription(actor, subscriber) private def subscribePending(subscribers: Seq[Subscriber[Any]]): Unit = - subscribers foreach { sub => + subscribers.foreach { sub => if (subscriber eq null) { subscriber = sub tryOnSubscribe(subscriber, createSubscription()) @@ -249,10 +252,11 @@ import akka.event.Logging /** * INTERNAL API */ -@InternalApi private[akka] abstract class ActorProcessorImpl(attributes: Attributes, val 
settings: ActorMaterializerSettings) - extends Actor - with ActorLogging - with Pump { +@InternalApi private[akka] abstract class ActorProcessorImpl(attributes: Attributes, + val settings: ActorMaterializerSettings) + extends Actor + with ActorLogging + with Pump { protected val primaryInputs: Inputs = { val initialInputBufferSize = attributes.mandatoryAttribute[Attributes.InputBuffer].initial @@ -269,7 +273,7 @@ import akka.event.Logging final override def receive = new ExposedPublisherReceive(activeReceive, unhandled) { override def receiveExposedPublisher(ep: ExposedPublisher): Unit = { primaryOutputs.subreceive(ep) - context become activeReceive + context.become(activeReceive) } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala index bcdaf4abc6..5455659197 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorPublisher.scala @@ -76,7 +76,7 @@ import org.reactivestreams.Subscription shutdownReason = reason pendingSubscribers.getAndSet(null) match { case null => // already called earlier - case pending => pending foreach reportSubscribeFailure + case pending => pending.foreach(reportSubscribeFailure) } } @@ -100,7 +100,9 @@ import org.reactivestreams.Subscription /** * INTERNAL API */ -@InternalApi private[akka] class ActorSubscription[T]( final val impl: ActorRef, final val subscriber: Subscriber[_ >: T]) extends Subscription { +@InternalApi private[akka] class ActorSubscription[T](final val impl: ActorRef, + final val subscriber: Subscriber[_ >: T]) + extends Subscription { override def request(elements: Long): Unit = impl ! RequestMore(this, elements) override def cancel(): Unit = impl ! 
Cancel(this) } @@ -109,7 +111,8 @@ import org.reactivestreams.Subscription * INTERNAL API */ @InternalApi private[akka] class ActorSubscriptionWithCursor[T](_impl: ActorRef, _subscriber: Subscriber[_ >: T]) - extends ActorSubscription[T](_impl, _subscriber) with SubscriptionWithCursor[T] + extends ActorSubscription[T](_impl, _subscriber) + with SubscriptionWithCursor[T] /** * INTERNAL API @@ -120,7 +123,7 @@ import org.reactivestreams.Subscription if (children.isEmpty) { context.stop(self) } else { - context.children foreach context.watch + context.children.foreach(context.watch) context.become { case Terminated(_) => if (context.children.isEmpty) context.stop(self) case _ => // ignore all the rest, we’re practically dead @@ -128,4 +131,3 @@ import org.reactivestreams.Subscription } } } - diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala index 347996986b..bebaf24878 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefBackpressureSinkStage.scala @@ -16,14 +16,13 @@ import akka.stream.stage._ /** * INTERNAL API */ -@InternalApi private[akka] class ActorRefBackpressureSinkStage[In]( - ref: ActorRef, - messageAdapter: ActorRef => In => Any, - onInitMessage: ActorRef => Any, - ackMessage: Any, - onCompleteMessage: Any, - onFailureMessage: (Throwable) => Any) - extends GraphStage[SinkShape[In]] { +@InternalApi private[akka] class ActorRefBackpressureSinkStage[In](ref: ActorRef, + messageAdapter: ActorRef => In => Any, + onInitMessage: ActorRef => Any, + ackMessage: Any, + onCompleteMessage: Any, + onFailureMessage: (Throwable) => Any) + extends GraphStage[SinkShape[In]] { val in: Inlet[In] = Inlet[In]("ActorRefBackpressureSink.in") override def initialAttributes = DefaultAttributes.actorRefWithAck override val shape: SinkShape[In] = SinkShape(in) @@ 
-75,7 +74,7 @@ import akka.stream.stage._ } def onPush(): Unit = { - buffer offer grab(in) + buffer.offer(grab(in)) if (acknowledgementReceived) { dequeueAndSend() acknowledgementReceived = false diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala index fe451e6d4f..26e4da3589 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSinkActor.scala @@ -23,8 +23,11 @@ import akka.annotation.InternalApi /** * INTERNAL API */ -@InternalApi private[akka] class ActorRefSinkActor(ref: ActorRef, highWatermark: Int, onCompleteMessage: Any, onFailureMessage: Throwable => Any) - extends ActorSubscriber { +@InternalApi private[akka] class ActorRefSinkActor(ref: ActorRef, + highWatermark: Int, + onCompleteMessage: Any, + onFailureMessage: Throwable => Any) + extends ActorSubscriber { import ActorSubscriberMessage._ override val requestStrategy = WatermarkRequestStrategy(highWatermark) diff --git a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSourceActor.scala b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSourceActor.scala index d3452f8fa7..573bbfc1a7 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ActorRefSourceActor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ActorRefSourceActor.scala @@ -16,8 +16,11 @@ import akka.stream.ActorMaterializerSettings * INTERNAL API */ @InternalApi private[akka] object ActorRefSourceActor { - def props(completionMatcher: PartialFunction[Any, Unit], failureMatcher: PartialFunction[Any, Throwable], - bufferSize: Int, overflowStrategy: OverflowStrategy, settings: ActorMaterializerSettings) = { + def props(completionMatcher: PartialFunction[Any, Unit], + failureMatcher: PartialFunction[Any, Throwable], + bufferSize: Int, + overflowStrategy: OverflowStrategy, + settings: ActorMaterializerSettings) = { require(overflowStrategy != 
OverflowStrategies.Backpressure, "Backpressure overflowStrategy not supported") val maxFixedBufferSize = settings.maxFixedBufferSize Props(new ActorRefSourceActor(completionMatcher, failureMatcher, bufferSize, overflowStrategy, maxFixedBufferSize)) @@ -27,23 +30,23 @@ import akka.stream.ActorMaterializerSettings /** * INTERNAL API */ -@InternalApi private[akka] class ActorRefSourceActor( - completionMatcher: PartialFunction[Any, Unit], failureMatcher: PartialFunction[Any, Throwable], - bufferSize: Int, overflowStrategy: OverflowStrategy, maxFixedBufferSize: Int) - extends akka.stream.actor.ActorPublisher[Any] with ActorLogging { +@InternalApi private[akka] class ActorRefSourceActor(completionMatcher: PartialFunction[Any, Unit], + failureMatcher: PartialFunction[Any, Throwable], + bufferSize: Int, + overflowStrategy: OverflowStrategy, + maxFixedBufferSize: Int) + extends akka.stream.actor.ActorPublisher[Any] + with ActorLogging { import akka.stream.actor.ActorPublisherMessage._ // when bufferSize is 0 there the buffer is not used protected val buffer = if (bufferSize == 0) null else Buffer[Any](bufferSize, maxFixedBufferSize) - def receive = ({ - case Cancel => - context.stop(self) - }: Receive) - .orElse(requestElem) - .orElse(receiveFailure) - .orElse(receiveComplete) - .orElse(receiveElem) + def receive = + ({ + case Cancel => + context.stop(self) + }: Receive).orElse(requestElem).orElse(receiveFailure).orElse(receiveComplete).orElse(receiveElem) def receiveComplete: Receive = completionMatcher.andThen { _ => if (bufferSize == 0 || buffer.isEmpty) onCompleteThenStop() // will complete the stream successfully @@ -59,8 +62,7 @@ import akka.stream.ActorMaterializerSettings case _: Request => // totalDemand is tracked by super if (bufferSize != 0) - while (totalDemand > 0L && !buffer.isEmpty) - onNext(buffer.dequeue()) + while (totalDemand > 0L && !buffer.isEmpty) onNext(buffer.dequeue()) } def receiveElem: Receive = { @@ -71,29 +73,31 @@ import 
akka.stream.ActorMaterializerSettings log.debug("Dropping element because there is no downstream demand: [{}]", elem) else if (!buffer.isFull) buffer.enqueue(elem) - else overflowStrategy match { - case s: DropHead => - log.log(s.logLevel, "Dropping the head element because buffer is full and overflowStrategy is: [DropHead]") - buffer.dropHead() - buffer.enqueue(elem) - case s: DropTail => - log.log(s.logLevel, "Dropping the tail element because buffer is full and overflowStrategy is: [DropTail]") - buffer.dropTail() - buffer.enqueue(elem) - case s: DropBuffer => - log.log(s.logLevel, "Dropping all the buffered elements because buffer is full and overflowStrategy is: [DropBuffer]") - buffer.clear() - buffer.enqueue(elem) - case s: DropNew => - // do not enqueue new element if the buffer is full - log.log(s.logLevel, "Dropping the new element because buffer is full and overflowStrategy is: [DropNew]") - case s: Fail => - log.log(s.logLevel, "Failing because buffer is full and overflowStrategy is: [Fail]") - onErrorThenStop(BufferOverflowException(s"Buffer overflow (max capacity was: $bufferSize)!")) - case s: Backpressure => - // there is a precondition check in Source.actorRefSource factory method - log.log(s.logLevel, "Backpressuring because buffer is full and overflowStrategy is: [Backpressure]") - } + else + overflowStrategy match { + case s: DropHead => + log.log(s.logLevel, "Dropping the head element because buffer is full and overflowStrategy is: [DropHead]") + buffer.dropHead() + buffer.enqueue(elem) + case s: DropTail => + log.log(s.logLevel, "Dropping the tail element because buffer is full and overflowStrategy is: [DropTail]") + buffer.dropTail() + buffer.enqueue(elem) + case s: DropBuffer => + log.log(s.logLevel, + "Dropping all the buffered elements because buffer is full and overflowStrategy is: [DropBuffer]") + buffer.clear() + buffer.enqueue(elem) + case s: DropNew => + // do not enqueue new element if the buffer is full + log.log(s.logLevel, 
"Dropping the new element because buffer is full and overflowStrategy is: [DropNew]") + case s: Fail => + log.log(s.logLevel, "Failing because buffer is full and overflowStrategy is: [Fail]") + onErrorThenStop(BufferOverflowException(s"Buffer overflow (max capacity was: $bufferSize)!")) + case s: Backpressure => + // there is a precondition check in Source.actorRefSource factory method + log.log(s.logLevel, "Backpressuring because buffer is full and overflowStrategy is: [Backpressure]") + } } def drainBufferThenComplete: Receive = { @@ -107,14 +111,15 @@ import akka.stream.ActorMaterializerSettings case _: Request => // totalDemand is tracked by super - while (totalDemand > 0L && !buffer.isEmpty) - onNext(buffer.dequeue()) + while (totalDemand > 0L && !buffer.isEmpty) onNext(buffer.dequeue()) if (buffer.isEmpty) onCompleteThenStop() // will complete the stream successfully case elem if isActive => log.debug("Dropping element because Status.Success received already, " + - "only draining already buffered elements: [{}] (pending: [{}])", elem, buffer.used) + "only draining already buffered elements: [{}] (pending: [{}])", + elem, + buffer.used) } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/Buffers.scala b/akka-stream/src/main/scala/akka/stream/impl/Buffers.scala index c5c0f50dfd..f5efaa18e7 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Buffers.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Buffers.scala @@ -66,7 +66,8 @@ private[akka] object Buffer { else new ModuloFixedSizeBuffer(size) sealed abstract class FixedSizeBuffer[T](val capacity: Int) extends Buffer[T] { - override def toString = s"Buffer($capacity, $readIdx, $writeIdx)(${(readIdx until writeIdx).map(get).mkString(", ")})" + override def toString = + s"Buffer($capacity, $readIdx, $writeIdx)(${(readIdx until writeIdx).map(get).mkString(", ")})" private val buffer = new Array[AnyRef](capacity) protected var readIdx = 0L @@ -88,7 +89,8 @@ private[akka] object Buffer { // for 
the maintenance parameter see dropHead protected def toOffset(idx: Long, maintenance: Boolean): Int - private def put(idx: Long, elem: T, maintenance: Boolean): Unit = buffer(toOffset(idx, maintenance)) = elem.asInstanceOf[AnyRef] + private def put(idx: Long, elem: T, maintenance: Boolean): Unit = + buffer(toOffset(idx, maintenance)) = elem.asInstanceOf[AnyRef] private def get(idx: Long): T = buffer(toOffset(idx, false)).asInstanceOf[T] def peek(): T = get(readIdx) diff --git a/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala b/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala index c747da9f1f..3462b085c6 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/CompletedPublishers.scala @@ -69,7 +69,8 @@ import org.reactivestreams.{ Publisher, Subscriber, Subscription } @InternalApi private[akka] case object RejectAdditionalSubscribers extends Publisher[Nothing] { import ReactiveStreamsCompliance._ override def subscribe(subscriber: Subscriber[_ >: Nothing]): Unit = - try rejectAdditionalSubscriber(subscriber, "Publisher") catch { + try rejectAdditionalSubscriber(subscriber, "Publisher") + catch { case _: SpecViolation => // nothing we can do } def apply[T]: Publisher[T] = this.asInstanceOf[Publisher[T]] diff --git a/akka-stream/src/main/scala/akka/stream/impl/EmptySource.scala b/akka-stream/src/main/scala/akka/stream/impl/EmptySource.scala index 44a4d103d5..d53615d86c 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/EmptySource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/EmptySource.scala @@ -28,4 +28,3 @@ import akka.stream.stage._ override def toString = "EmptySource" } - diff --git a/akka-stream/src/main/scala/akka/stream/impl/ExposedPublisherReceive.scala b/akka-stream/src/main/scala/akka/stream/impl/ExposedPublisherReceive.scala index 85dbf9bf90..372a5e2958 100644 --- 
a/akka-stream/src/main/scala/akka/stream/impl/ExposedPublisherReceive.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ExposedPublisherReceive.scala @@ -10,7 +10,8 @@ import akka.annotation.InternalApi /** * INTERNAL API */ -@InternalApi private[akka] abstract class ExposedPublisherReceive(activeReceive: Actor.Receive, unhandled: Any => Unit) extends Actor.Receive { +@InternalApi private[akka] abstract class ExposedPublisherReceive(activeReceive: Actor.Receive, unhandled: Any => Unit) + extends Actor.Receive { private var stash = List.empty[Any] def isDefinedAt(o: Any): Boolean = true diff --git a/akka-stream/src/main/scala/akka/stream/impl/FailedSource.scala b/akka-stream/src/main/scala/akka/stream/impl/FailedSource.scala index ec8d7ed14f..f0ca7f177f 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/FailedSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/FailedSource.scala @@ -18,15 +18,16 @@ import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler } override protected def initialAttributes: Attributes = DefaultAttributes.failedSource - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with OutHandler { - override def onPull(): Unit = () + override def onPull(): Unit = () - override def preStart(): Unit = { - failStage(failure) + override def preStart(): Unit = { + failStage(failure) + } + setHandler(out, this) } - setHandler(out, this) - } override def toString = s"FailedSource(${failure.getClass.getName})" } diff --git a/akka-stream/src/main/scala/akka/stream/impl/FanIn.scala b/akka-stream/src/main/scala/akka/stream/impl/FanIn.scala index 17c9759bc4..3b0f2cff8c 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/FanIn.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/FanIn.scala @@ -15,10 +15,14 @@ import org.reactivestreams.{ Subscriber, 
Subscription } */ @InternalApi private[akka] object FanIn { - final case class OnError(id: Int, cause: Throwable) extends DeadLetterSuppression with NoSerializationVerificationNeeded + final case class OnError(id: Int, cause: Throwable) + extends DeadLetterSuppression + with NoSerializationVerificationNeeded final case class OnComplete(id: Int) extends DeadLetterSuppression with NoSerializationVerificationNeeded final case class OnNext(id: Int, e: Any) extends DeadLetterSuppression with NoSerializationVerificationNeeded - final case class OnSubscribe(id: Int, subscription: Subscription) extends DeadLetterSuppression with NoSerializationVerificationNeeded + final case class OnSubscribe(id: Int, subscription: Subscription) + extends DeadLetterSuppression + with NoSerializationVerificationNeeded final case class SubInput[T](impl: ActorRef, id: Int) extends Subscriber[T] { override def onError(cause: Throwable): Unit = { @@ -226,26 +230,27 @@ import org.reactivestreams.{ Subscriber, Subscription } } // FIXME: Eliminate re-wraps - def subreceive: SubReceive = new SubReceive({ - case OnSubscribe(id, subscription) => - inputs(id).subreceive(ActorSubscriber.OnSubscribe(subscription)) - case OnNext(id, elem) => - if (marked(id) && !pending(id)) markedPending += 1 - pending(id, on = true) - receivedInput = true - inputs(id).subreceive(ActorSubscriberMessage.OnNext(elem)) - case OnComplete(id) => - if (!pending(id)) { - if (marked(id) && !depleted(id)) markedDepleted += 1 - depleted(id, on = true) - onDepleted(id) - } - registerCompleted(id) - inputs(id).subreceive(ActorSubscriberMessage.OnComplete) - if (!receivedInput && isAllCompleted) onCompleteWhenNoInput() - case OnError(id, e) => - onError(id, e) - }) + def subreceive: SubReceive = + new SubReceive({ + case OnSubscribe(id, subscription) => + inputs(id).subreceive(ActorSubscriber.OnSubscribe(subscription)) + case OnNext(id, elem) => + if (marked(id) && !pending(id)) markedPending += 1 + pending(id, on = true) + 
receivedInput = true + inputs(id).subreceive(ActorSubscriberMessage.OnNext(elem)) + case OnComplete(id) => + if (!pending(id)) { + if (marked(id) && !depleted(id)) markedDepleted += 1 + depleted(id, on = true) + onDepleted(id) + } + registerCompleted(id) + inputs(id).subreceive(ActorSubscriberMessage.OnComplete) + if (!receivedInput && isAllCompleted) onCompleteWhenNoInput() + case OnError(id, e) => + onError(id, e) + }) } @@ -254,7 +259,10 @@ import org.reactivestreams.{ Subscriber, Subscription } /** * INTERNAL API */ -@DoNotInherit private[akka] class FanIn(val settings: ActorMaterializerSettings, val inputCount: Int) extends Actor with ActorLogging with Pump { +@DoNotInherit private[akka] class FanIn(val settings: ActorMaterializerSettings, val inputCount: Int) + extends Actor + with ActorLogging + with Pump { import FanIn._ protected val primaryOutputs: Outputs = new SimpleOutputs(self, this) @@ -292,4 +300,3 @@ import org.reactivestreams.{ Subscriber, Subscription } def receive = inputBunch.subreceive.orElse[Any, Unit](primaryOutputs.subreceive) } - diff --git a/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala b/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala index c48bb3e029..086b6a084a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/FanOut.scala @@ -16,9 +16,13 @@ import org.reactivestreams.Subscription */ @InternalApi private[akka] object FanOut { - final case class SubstreamRequestMore(id: Int, demand: Long) extends DeadLetterSuppression with NoSerializationVerificationNeeded + final case class SubstreamRequestMore(id: Int, demand: Long) + extends DeadLetterSuppression + with NoSerializationVerificationNeeded final case class SubstreamCancel(id: Int) extends DeadLetterSuppression with NoSerializationVerificationNeeded - final case class SubstreamSubscribePending(id: Int) extends DeadLetterSuppression with NoSerializationVerificationNeeded + final case class 
SubstreamSubscribePending(id: Int) + extends DeadLetterSuppression + with NoSerializationVerificationNeeded class SubstreamSubscription(val parent: ActorRef, val id: Int) extends Subscription { override def request(elements: Long): Unit = parent ! SubstreamRequestMore(id, elements) @@ -30,7 +34,9 @@ import org.reactivestreams.Subscription override def createSubscription(): Subscription = new SubstreamSubscription(actor, id) } - final case class ExposedPublishers(publishers: immutable.Seq[ActorPublisher[Any]]) extends DeadLetterSuppression with NoSerializationVerificationNeeded + final case class ExposedPublishers(publishers: immutable.Seq[ActorPublisher[Any]]) + extends DeadLetterSuppression + with NoSerializationVerificationNeeded class OutputBunch(outputCount: Int, impl: ActorRef, pump: Pump) { private var bunchCancelled = false @@ -215,32 +221,33 @@ import org.reactivestreams.Subscription } // FIXME: Eliminate re-wraps - def subreceive: SubReceive = new SubReceive({ - case ExposedPublishers(publishers) => - publishers.zip(outputs) foreach { - case (pub, output) => - output.subreceive(ExposedPublisher(pub)) - } + def subreceive: SubReceive = + new SubReceive({ + case ExposedPublishers(publishers) => + publishers.zip(outputs).foreach { + case (pub, output) => + output.subreceive(ExposedPublisher(pub)) + } - case SubstreamRequestMore(id, demand) => - if (demand < 1) // According to Reactive Streams Spec 3.9, with non-positive demand must yield onError - error(id, ReactiveStreamsCompliance.numberOfElementsInRequestMustBePositiveException) - else { - if (marked(id) && !pending(id)) markedPending += 1 - pending(id) = true - outputs(id).subreceive(RequestMore(null, demand)) - } - case SubstreamCancel(id) => - if (unmarkCancelled) { - unmarkOutput(id) - } - if (marked(id) && !cancelled(id)) markedCancelled += 1 - cancelled(id) = true - onCancel(id) - outputs(id).subreceive(Cancel(null)) - case SubstreamSubscribePending(id) => - outputs(id).subreceive(SubscribePending) - 
}) + case SubstreamRequestMore(id, demand) => + if (demand < 1) // According to Reactive Streams Spec 3.9, with non-positive demand must yield onError + error(id, ReactiveStreamsCompliance.numberOfElementsInRequestMustBePositiveException) + else { + if (marked(id) && !pending(id)) markedPending += 1 + pending(id) = true + outputs(id).subreceive(RequestMore(null, demand)) + } + case SubstreamCancel(id) => + if (unmarkCancelled) { + unmarkOutput(id) + } + if (marked(id) && !cancelled(id)) markedCancelled += 1 + cancelled(id) = true + onCancel(id) + outputs(id).subreceive(Cancel(null)) + case SubstreamSubscribePending(id) => + outputs(id).subreceive(SubscribePending) + }) } @@ -249,7 +256,10 @@ import org.reactivestreams.Subscription /** * INTERNAL API */ -@DoNotInherit private[akka] abstract class FanOut(val settings: ActorMaterializerSettings, val outputCount: Int) extends Actor with ActorLogging with Pump { +@DoNotInherit private[akka] abstract class FanOut(val settings: ActorMaterializerSettings, val outputCount: Int) + extends Actor + with ActorLogging + with Pump { import FanOut._ protected val outputBunch = new OutputBunch(outputCount, self, this) @@ -297,23 +307,25 @@ import org.reactivestreams.Subscription /** * INTERNAL API */ -@InternalApi private[akka] class Unzip(_settings: ActorMaterializerSettings) extends FanOut(_settings, outputCount = 2) { +@InternalApi private[akka] class Unzip(_settings: ActorMaterializerSettings) + extends FanOut(_settings, outputCount = 2) { outputBunch.markAllOutputs() - initialPhase(1, TransferPhase(primaryInputs.NeedsInput && outputBunch.AllOfMarkedOutputs) { () => - primaryInputs.dequeueInputElement() match { - case (a, b) => - outputBunch.enqueue(0, a) - outputBunch.enqueue(1, b) + initialPhase(1, + TransferPhase(primaryInputs.NeedsInput && outputBunch.AllOfMarkedOutputs) { () => + primaryInputs.dequeueInputElement() match { + case (a, b) => + outputBunch.enqueue(0, a) + outputBunch.enqueue(1, b) - case t: akka.japi.Pair[_, 
_] => - outputBunch.enqueue(0, t.first) - outputBunch.enqueue(1, t.second) + case t: akka.japi.Pair[_, _] => + outputBunch.enqueue(0, t.first) + outputBunch.enqueue(1, t.second) - case t => - throw new IllegalArgumentException( - s"Unable to unzip elements of type ${t.getClass.getName}, " + - s"can only handle Tuple2 and akka.japi.Pair!") - } - }) + case t => + throw new IllegalArgumentException( + s"Unable to unzip elements of type ${t.getClass.getName}, " + + s"can only handle Tuple2 and akka.japi.Pair!") + } + }) } diff --git a/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala b/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala index 9070233ee0..958fa99f68 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/FanoutProcessor.scala @@ -12,13 +12,12 @@ import org.reactivestreams.Subscriber /** * INTERNAL API */ -@DoNotInherit private[akka] abstract class FanoutOutputs( - val maxBufferSize: Int, - val initialBufferSize: Int, - self: ActorRef, - val pump: Pump) - extends DefaultOutputTransferStates - with SubscriberManagement[Any] { +@DoNotInherit private[akka] abstract class FanoutOutputs(val maxBufferSize: Int, + val initialBufferSize: Int, + self: ActorRef, + val pump: Pump) + extends DefaultOutputTransferStates + with SubscriberManagement[Any] { override type S = ActorSubscriptionWithCursor[_ >: Any] override def createSubscription(subscriber: Subscriber[_ >: Any]): S = @@ -62,7 +61,7 @@ import org.reactivestreams.Subscriber override protected def requestFromUpstream(elements: Long): Unit = downstreamBufferSpace += elements private def subscribePending(): Unit = - exposedPublisher.takePendingSubscribers() foreach registerSubscriber + exposedPublisher.takePendingSubscribers().foreach(registerSubscriber) override protected def shutdown(completed: Boolean): Unit = { if (exposedPublisher ne null) { @@ -104,11 +103,12 @@ import org.reactivestreams.Subscriber def 
props(attributes: Attributes, actorMaterializerSettings: ActorMaterializerSettings): Props = Props(new FanoutProcessorImpl(attributes, actorMaterializerSettings)).withDeploy(Deploy.local) } + /** * INTERNAL API */ @InternalApi private[akka] class FanoutProcessorImpl(attributes: Attributes, _settings: ActorMaterializerSettings) - extends ActorProcessorImpl(attributes, _settings) { + extends ActorProcessorImpl(attributes, _settings) { override val primaryOutputs: FanoutOutputs = { val inputBuffer = attributes.mandatoryAttribute[Attributes.InputBuffer] diff --git a/akka-stream/src/main/scala/akka/stream/impl/JavaStreamSource.scala b/akka-stream/src/main/scala/akka/stream/impl/JavaStreamSource.scala index eb507ab694..57bc145250 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/JavaStreamSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/JavaStreamSource.scala @@ -9,8 +9,9 @@ import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler } import akka.annotation.InternalApi /** INTERNAL API */ -@InternalApi private[stream] final class JavaStreamSource[T, S <: java.util.stream.BaseStream[T, S]](open: () => java.util.stream.BaseStream[T, S]) - extends GraphStage[SourceShape[T]] { +@InternalApi private[stream] final class JavaStreamSource[T, S <: java.util.stream.BaseStream[T, S]]( + open: () => java.util.stream.BaseStream[T, S]) + extends GraphStage[SourceShape[T]] { val out: Outlet[T] = Outlet("JavaStreamSource") override val shape: SourceShape[T] = SourceShape(out) diff --git a/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala b/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala index eaeb30b241..35094daa74 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/JsonObjectParser.scala @@ -101,8 +101,7 @@ import scala.annotation.switch private def seekObject(): Boolean = { completedObject = false val bufSize = buffer.size - while (pos != -1 && 
(pos < bufSize && pos < maximumObjectLength) && !completedObject) - proceed(buffer(pos)) + while (pos != -1 && (pos < bufSize && pos < maximumObjectLength) && !completedObject) proceed(buffer(pos)) if (pos >= maximumObjectLength) throw new FramingException(s"""JSON element exceeded maximumObjectLength ($maximumObjectLength bytes)!""") diff --git a/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala b/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala index e05cd4e35d..aa542e4923 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/LazySource.scala @@ -23,7 +23,8 @@ import scala.util.control.NonFatal /** * INTERNAL API */ -@InternalApi private[akka] final class LazySource[T, M](sourceFactory: () => Source[T, M]) extends GraphStageWithMaterializedValue[SourceShape[T], Future[M]] { +@InternalApi private[akka] final class LazySource[T, M](sourceFactory: () => Source[T, M]) + extends GraphStageWithMaterializedValue[SourceShape[T], Future[M]] { val out = Outlet[T]("LazySource.out") override val shape = SourceShape(out) diff --git a/akka-stream/src/main/scala/akka/stream/impl/MaybeSource.scala b/akka-stream/src/main/scala/akka/stream/impl/MaybeSource.scala index 523d3eb3c1..05ab3f5bcf 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/MaybeSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/MaybeSource.scala @@ -17,13 +17,15 @@ import scala.util.Try /** * INTERNAL API */ -@InternalApi private[akka] object MaybeSource extends GraphStageWithMaterializedValue[SourceShape[AnyRef], Promise[Option[AnyRef]]] { +@InternalApi private[akka] object MaybeSource + extends GraphStageWithMaterializedValue[SourceShape[AnyRef], Promise[Option[AnyRef]]] { val out = Outlet[AnyRef]("MaybeSource.out") override val shape = SourceShape(out) override protected def initialAttributes = DefaultAttributes.maybeSource - override def createLogicAndMaterializedValue(inheritedAttributes: 
Attributes): (GraphStageLogic, Promise[Option[AnyRef]]) = { + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (GraphStageLogic, Promise[Option[AnyRef]]) = { import scala.util.{ Success => ScalaSuccess, Failure => ScalaFailure } val promise = Promise[Option[AnyRef]]() val logic = new GraphStageLogic(shape) with OutHandler { @@ -37,9 +39,8 @@ import scala.util.Try handleCompletion(value) case None => // callback on future completion - promise.future.onComplete( - getAsyncCallback(handleCompletion).invoke - )(ExecutionContexts.sameThreadExecutionContext) + promise.future.onComplete(getAsyncCallback(handleCompletion).invoke)( + ExecutionContexts.sameThreadExecutionContext) } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/Messages.scala b/akka-stream/src/main/scala/akka/stream/impl/Messages.scala index 8e59beb555..87f0447250 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Messages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Messages.scala @@ -10,23 +10,27 @@ import akka.annotation.InternalApi /** * INTERNAL API */ -@InternalApi private[akka] case object SubscribePending extends DeadLetterSuppression with NoSerializationVerificationNeeded +@InternalApi private[akka] case object SubscribePending + extends DeadLetterSuppression + with NoSerializationVerificationNeeded /** * INTERNAL API */ @InternalApi private[akka] final case class RequestMore(subscription: ActorSubscription[_], demand: Long) - extends DeadLetterSuppression with NoSerializationVerificationNeeded + extends DeadLetterSuppression + with NoSerializationVerificationNeeded /** * INTERNAL API */ @InternalApi private[akka] final case class Cancel(subscription: ActorSubscription[_]) - extends DeadLetterSuppression with NoSerializationVerificationNeeded + extends DeadLetterSuppression + with NoSerializationVerificationNeeded /** * INTERNAL API */ @InternalApi private[akka] final case class ExposedPublisher(publisher: ActorPublisher[Any]) - extends 
DeadLetterSuppression with NoSerializationVerificationNeeded - + extends DeadLetterSuppression + with NoSerializationVerificationNeeded diff --git a/akka-stream/src/main/scala/akka/stream/impl/Modules.scala b/akka-stream/src/main/scala/akka/stream/impl/Modules.scala index 12ab6d1486..833f85f502 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Modules.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Modules.scala @@ -17,7 +17,8 @@ import akka.event.Logging /** * INTERNAL API */ -@DoNotInherit private[akka] abstract class SourceModule[+Out, +Mat](val shape: SourceShape[Out]) extends AtomicModule[SourceShape[Out], Mat] { +@DoNotInherit private[akka] abstract class SourceModule[+Out, +Mat](val shape: SourceShape[Out]) + extends AtomicModule[SourceShape[Out], Mat] { protected def label: String = Logging.simpleName(this) final override def toString: String = f"$label [${System.identityHashCode(this)}%08x]" @@ -39,7 +40,8 @@ import akka.event.Logging else shape.copy(out = Outlet(thatN + ".out")) } - override private[stream] def traversalBuilder = LinearTraversalBuilder.fromModule(this, attributes).makeIsland(SourceModuleIslandTag) + override private[stream] def traversalBuilder = + LinearTraversalBuilder.fromModule(this, attributes).makeIsland(SourceModuleIslandTag) } @@ -48,15 +50,18 @@ import akka.event.Logging * Holds a `Subscriber` representing the input side of the flow. * The `Subscriber` can later be connected to an upstream `Publisher`. 
*/ -@InternalApi private[akka] final class SubscriberSource[Out](val attributes: Attributes, shape: SourceShape[Out]) extends SourceModule[Out, Subscriber[Out]](shape) { +@InternalApi private[akka] final class SubscriberSource[Out](val attributes: Attributes, shape: SourceShape[Out]) + extends SourceModule[Out, Subscriber[Out]](shape) { override def create(context: MaterializationContext): (Publisher[Out], Subscriber[Out]) = { val processor = new VirtualProcessor[Out] (processor, processor) } - override protected def newInstance(shape: SourceShape[Out]): SourceModule[Out, Subscriber[Out]] = new SubscriberSource[Out](attributes, shape) - override def withAttributes(attr: Attributes): SourceModule[Out, Subscriber[Out]] = new SubscriberSource[Out](attr, amendShape(attr)) + override protected def newInstance(shape: SourceShape[Out]): SourceModule[Out, Subscriber[Out]] = + new SubscriberSource[Out](attributes, shape) + override def withAttributes(attr: Attributes): SourceModule[Out, Subscriber[Out]] = + new SubscriberSource[Out](attr, amendShape(attr)) } /** @@ -66,14 +71,19 @@ import akka.event.Logging * that mediate the flow of elements downstream and the propagation of * back-pressure upstream. 
*/ -@InternalApi private[akka] final class PublisherSource[Out](p: Publisher[Out], val attributes: Attributes, shape: SourceShape[Out]) extends SourceModule[Out, NotUsed](shape) { +@InternalApi private[akka] final class PublisherSource[Out](p: Publisher[Out], + val attributes: Attributes, + shape: SourceShape[Out]) + extends SourceModule[Out, NotUsed](shape) { override protected def label: String = s"PublisherSource($p)" override def create(context: MaterializationContext) = (p, NotUsed) - override protected def newInstance(shape: SourceShape[Out]): SourceModule[Out, NotUsed] = new PublisherSource[Out](p, attributes, shape) - override def withAttributes(attr: Attributes): SourceModule[Out, NotUsed] = new PublisherSource[Out](p, attr, amendShape(attr)) + override protected def newInstance(shape: SourceShape[Out]): SourceModule[Out, NotUsed] = + new PublisherSource[Out](p, attributes, shape) + override def withAttributes(attr: Attributes): SourceModule[Out, NotUsed] = + new PublisherSource[Out](p, attr, amendShape(attr)) } /** @@ -81,7 +91,10 @@ import akka.event.Logging * Creates and wraps an actor into [[org.reactivestreams.Publisher]] from the given `props`, * which should be [[akka.actor.Props]] for an [[akka.stream.actor.ActorPublisher]]. 
*/ -@InternalApi private[akka] final class ActorPublisherSource[Out](props: Props, val attributes: Attributes, shape: SourceShape[Out]) extends SourceModule[Out, ActorRef](shape) { +@InternalApi private[akka] final class ActorPublisherSource[Out](props: Props, + val attributes: Attributes, + shape: SourceShape[Out]) + extends SourceModule[Out, ActorRef](shape) { override def create(context: MaterializationContext) = { val publisherRef = ActorMaterializerHelper.downcast(context.materializer).actorOf(context, props) @@ -90,26 +103,28 @@ import akka.event.Logging override protected def newInstance(shape: SourceShape[Out]): SourceModule[Out, ActorRef] = new ActorPublisherSource[Out](props, attributes, shape) - override def withAttributes(attr: Attributes): SourceModule[Out, ActorRef] = new ActorPublisherSource(props, attr, amendShape(attr)) + override def withAttributes(attr: Attributes): SourceModule[Out, ActorRef] = + new ActorPublisherSource(props, attr, amendShape(attr)) } /** * INTERNAL API */ -@InternalApi private[akka] final class ActorRefSource[Out]( - completionMatcher: PartialFunction[Any, Unit], - failureMatcher: PartialFunction[Any, Throwable], - bufferSize: Int, overflowStrategy: OverflowStrategy, val attributes: Attributes, shape: SourceShape[Out]) - extends SourceModule[Out, ActorRef](shape) { +@InternalApi private[akka] final class ActorRefSource[Out](completionMatcher: PartialFunction[Any, Unit], + failureMatcher: PartialFunction[Any, Throwable], + bufferSize: Int, + overflowStrategy: OverflowStrategy, + val attributes: Attributes, + shape: SourceShape[Out]) + extends SourceModule[Out, ActorRef](shape) { override protected def label: String = s"ActorRefSource($bufferSize, $overflowStrategy)" override def create(context: MaterializationContext) = { val mat = ActorMaterializerHelper.downcast(context.materializer) - val ref = mat.actorOf(context, ActorRefSourceActor.props( - completionMatcher, - failureMatcher, - bufferSize, overflowStrategy, 
mat.settings)) + val ref = mat.actorOf( + context, + ActorRefSourceActor.props(completionMatcher, failureMatcher, bufferSize, overflowStrategy, mat.settings)) (akka.stream.actor.ActorPublisher[Out](ref), ref) } diff --git a/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala b/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala index 6a6551ce2b..3dfae62268 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/PhasedFusingActorMaterializer.scala @@ -8,7 +8,16 @@ import java.util import java.util.concurrent.atomic.AtomicBoolean import akka.NotUsed -import akka.actor.{ ActorContext, ActorRef, ActorRefFactory, ActorSystem, Cancellable, Deploy, ExtendedActorSystem, PoisonPill } +import akka.actor.{ + ActorContext, + ActorRef, + ActorRefFactory, + ActorSystem, + Cancellable, + Deploy, + ExtendedActorSystem, + PoisonPill +} import akka.annotation.{ DoNotInherit, InternalApi } import akka.dispatch.Dispatchers import akka.event.{ Logging, LoggingAdapter } @@ -35,33 +44,41 @@ import akka.util.OptionVal val Debug = false val DefaultPhase: Phase[Any] = new Phase[Any] { - override def apply(settings: ActorMaterializerSettings, effectiveAttributes: Attributes, - materializer: PhasedFusingActorMaterializer, islandName: String): PhaseIsland[Any] = - new GraphStageIsland(settings, effectiveAttributes, materializer, islandName, subflowFuser = OptionVal.None).asInstanceOf[PhaseIsland[Any]] + override def apply(settings: ActorMaterializerSettings, + effectiveAttributes: Attributes, + materializer: PhasedFusingActorMaterializer, + islandName: String): PhaseIsland[Any] = + new GraphStageIsland(settings, effectiveAttributes, materializer, islandName, subflowFuser = OptionVal.None) + .asInstanceOf[PhaseIsland[Any]] } val DefaultPhases: Map[IslandTag, Phase[Any]] = Map[IslandTag, Phase[Any]]( SinkModuleIslandTag -> new Phase[Any] { - override def 
apply(settings: ActorMaterializerSettings, effectiveAttributes: Attributes, + override def apply(settings: ActorMaterializerSettings, + effectiveAttributes: Attributes, materializer: PhasedFusingActorMaterializer, - islandName: String): PhaseIsland[Any] = + islandName: String): PhaseIsland[Any] = new SinkModulePhase(materializer, islandName).asInstanceOf[PhaseIsland[Any]] }, SourceModuleIslandTag -> new Phase[Any] { - override def apply(settings: ActorMaterializerSettings, effectiveAttributes: Attributes, + override def apply(settings: ActorMaterializerSettings, + effectiveAttributes: Attributes, materializer: PhasedFusingActorMaterializer, - islandName: String): PhaseIsland[Any] = + islandName: String): PhaseIsland[Any] = new SourceModulePhase(materializer, islandName).asInstanceOf[PhaseIsland[Any]] }, ProcessorModuleIslandTag -> new Phase[Any] { - override def apply(settings: ActorMaterializerSettings, effectiveAttributes: Attributes, + override def apply(settings: ActorMaterializerSettings, + effectiveAttributes: Attributes, materializer: PhasedFusingActorMaterializer, - islandName: String): PhaseIsland[Any] = + islandName: String): PhaseIsland[Any] = new ProcessorModulePhase().asInstanceOf[PhaseIsland[Any]] }, TlsModuleIslandTag -> new Phase[Any] { - def apply(settings: ActorMaterializerSettings, effectiveAttributes: Attributes, - materializer: PhasedFusingActorMaterializer, islandName: String): PhaseIsland[Any] = + def apply(settings: ActorMaterializerSettings, + effectiveAttributes: Attributes, + materializer: PhasedFusingActorMaterializer, + islandName: String): PhaseIsland[Any] = new TlsModulePhase(materializer, islandName).asInstanceOf[PhaseIsland[Any]] }, GraphStageTag -> DefaultPhase) @@ -71,17 +88,15 @@ import akka.util.OptionVal val system = actorSystemOf(context) val materializerSettings = ActorMaterializerSettings(system) - val streamSupervisor = context.actorOf( - StreamSupervisor.props(materializerSettings, haveShutDown), - 
StreamSupervisor.nextName()) + val streamSupervisor = + context.actorOf(StreamSupervisor.props(materializerSettings, haveShutDown), StreamSupervisor.nextName()) - PhasedFusingActorMaterializer( - system, - materializerSettings, - system.dispatchers, - streamSupervisor, - haveShutDown, - FlowNames(system).name.copy("flow")) + PhasedFusingActorMaterializer(system, + materializerSettings, + system.dispatchers, + streamSupervisor, + haveShutDown, + FlowNames(system).name.copy("flow")) } private def actorSystemOf(context: ActorRefFactory): ActorSystem = { @@ -90,19 +105,19 @@ import akka.util.OptionVal case c: ActorContext => c.system case null => throw new IllegalArgumentException("ActorRefFactory context must be defined") case _ => - throw new IllegalArgumentException(s"ActorRefFactory context must be an ActorSystem or ActorContext, got [${context.getClass.getName}]") + throw new IllegalArgumentException( + s"ActorRefFactory context must be an ActorSystem or ActorContext, got [${context.getClass.getName}]") } system } } -private final case class SegmentInfo( - globalislandOffset: Int, // The island to which the segment belongs - length: Int, // How many slots are contained by the segment - globalBaseOffset: Int, // The global slot where this segment starts - relativeBaseOffset: Int, // the local offset of the slot where this segment starts - phase: PhaseIsland[Any]) { +private final case class SegmentInfo(globalislandOffset: Int, // The island to which the segment belongs + length: Int, // How many slots are contained by the segment + globalBaseOffset: Int, // The global slot where this segment starts + relativeBaseOffset: Int, // the local offset of the slot where this segment starts + phase: PhaseIsland[Any]) { override def toString: String = s""" @@ -115,25 +130,27 @@ private final case class SegmentInfo( """.stripMargin } -private final case class ForwardWire( - islandGlobalOffset: Int, - from: OutPort, - toGlobalOffset: Int, - outStage: Any, - phase: 
PhaseIsland[Any]) { +private final case class ForwardWire(islandGlobalOffset: Int, + from: OutPort, + toGlobalOffset: Int, + outStage: Any, + phase: PhaseIsland[Any]) { - override def toString: String = s"ForwardWire(islandId = $islandGlobalOffset, from = $from, toGlobal = $toGlobalOffset, phase = $phase)" + override def toString: String = + s"ForwardWire(islandId = $islandGlobalOffset, from = $from, toGlobal = $toGlobalOffset, phase = $phase)" } -private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOffset: Int, skippedSlots: Int, phase: PhaseIsland[Any]) +private final case class SavedIslandData(islandGlobalOffset: Int, + lastVisitedOffset: Int, + skippedSlots: Int, + phase: PhaseIsland[Any]) -@InternalApi private[akka] class IslandTracking( - val phases: Map[IslandTag, Phase[Any]], - val settings: ActorMaterializerSettings, - attributes: Attributes, - defaultPhase: Phase[Any], - val materializer: PhasedFusingActorMaterializer, - islandNamePrefix: String) { +@InternalApi private[akka] class IslandTracking(val phases: Map[IslandTag, Phase[Any]], + val settings: ActorMaterializerSettings, + attributes: Attributes, + defaultPhase: Phase[Any], + val materializer: PhasedFusingActorMaterializer, + islandNamePrefix: String) { import PhasedFusingActorMaterializer.Debug @@ -171,12 +188,11 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff if (length > 0) { // We just finished a segment by entering an island. 
- val previousSegment = SegmentInfo( - globalislandOffset = currentIslandGlobalOffset, - length = currentGlobalOffset - currentSegmentGlobalOffset, - globalBaseOffset = currentSegmentGlobalOffset, - relativeBaseOffset = currentSegmentGlobalOffset - currentIslandGlobalOffset - currentIslandSkippedSlots, - currentPhase) + val previousSegment = SegmentInfo(globalislandOffset = currentIslandGlobalOffset, + length = currentGlobalOffset - currentSegmentGlobalOffset, + globalBaseOffset = currentSegmentGlobalOffset, + relativeBaseOffset = currentSegmentGlobalOffset - currentIslandGlobalOffset - currentIslandSkippedSlots, + currentPhase) // Segment tracking is by demand, we only allocate this list if it is used. // If there are no islands, then there is no need to track segments @@ -192,7 +208,8 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff completeSegment() val previousPhase = currentPhase val previousIslandOffset = currentIslandGlobalOffset - islandStateStack.add(SavedIslandData(previousIslandOffset, currentGlobalOffset, currentIslandSkippedSlots, previousPhase)) + islandStateStack.add( + SavedIslandData(previousIslandOffset, currentGlobalOffset, currentIslandSkippedSlots, previousPhase)) currentPhase = phases(tag)(settings, attributes, materializer, nextIslandName()) activePhases.add(currentPhase) @@ -252,10 +269,12 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff if (forwardWire ne null) { // The forward wire ends up in the same island if (forwardWire.phase eq currentPhase) { - if (Debug) println(s" in-island forward wiring from port ${forwardWire.from} wired to local slot = $localInSlot") + if (Debug) + println(s" in-island forward wiring from port ${forwardWire.from} wired to local slot = $localInSlot") forwardWire.phase.assignPort(forwardWire.from, localInSlot, forwardWire.outStage) } else { - if (Debug) println(s" cross island forward wiring from port ${forwardWire.from} wired to local slot = 
$localInSlot") + if (Debug) + println(s" cross island forward wiring from port ${forwardWire.from} wired to local slot = $localInSlot") val publisher = forwardWire.phase.createPublisher(forwardWire.from, forwardWire.outStage) currentPhase.takePublisher(localInSlot, publisher) } @@ -278,7 +297,9 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff if (absoluteOffset >= currentSegmentGlobalOffset) { // Wiring is in the same segment, no complex lookup needed val localInSlot = absoluteOffset - currentIslandGlobalOffset - currentIslandSkippedSlots - if (Debug) println(s" in-segment wiring to local ($absoluteOffset - $currentIslandGlobalOffset - $currentIslandSkippedSlots) = $localInSlot") + if (Debug) + println( + s" in-segment wiring to local ($absoluteOffset - $currentIslandGlobalOffset - $currentIslandSkippedSlots) = $localInSlot") currentPhase.assignPort(out, localInSlot, logic) } else { // Wiring is cross-segment, but we don't know if it is cross-island or not yet @@ -315,12 +336,11 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff forwardWires = new java.util.ArrayList[ForwardWire](8) } - val forwardWire = ForwardWire( - islandGlobalOffset = currentIslandGlobalOffset, - from = out, - toGlobalOffset = absoluteOffset, - logic, - currentPhase) + val forwardWire = ForwardWire(islandGlobalOffset = currentIslandGlobalOffset, + from = out, + toGlobalOffset = absoluteOffset, + logic, + currentPhase) if (Debug) println(s" wiring is forward, recording $forwardWire") forwardWires.add(forwardWire) @@ -343,24 +363,26 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff /** * INTERNAL API */ -@InternalApi private[akka] case class PhasedFusingActorMaterializer( - system: ActorSystem, - override val settings: ActorMaterializerSettings, - dispatchers: Dispatchers, - supervisor: ActorRef, - haveShutDown: AtomicBoolean, - flowNames: SeqActorName) extends ExtendedActorMaterializer { 
+@InternalApi private[akka] case class PhasedFusingActorMaterializer(system: ActorSystem, + override val settings: ActorMaterializerSettings, + dispatchers: Dispatchers, + supervisor: ActorRef, + haveShutDown: AtomicBoolean, + flowNames: SeqActorName) + extends ExtendedActorMaterializer { import PhasedFusingActorMaterializer._ private val _logger = Logging.getLogger(system, this) override def logger: LoggingAdapter = _logger if (settings.fuzzingMode && !system.settings.config.hasPath("akka.stream.secret-test-fuzzing-warning-disable")) { - _logger.warning("Fuzzing mode is enabled on this system. If you see this warning on your production system then " + + _logger.warning( + "Fuzzing mode is enabled on this system. If you see this warning on your production system then " + "set akka.stream.materializer.debug.fuzzing-mode to off.") } if (!settings.autoFusing) { - _logger.warning("Deprecated setting auto-fusing set to false. Since Akka 2.5.0 it does not have any effect " + + _logger.warning( + "Deprecated setting auto-fusing set to false. 
Since Akka 2.5.0 it does not have any effect " + "and streams are always fused.") } @@ -385,11 +407,9 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff val defaultAttributes: Attributes = { Attributes( Attributes.InputBuffer(settings.initialInputBufferSize, settings.maxInputBufferSize) :: - ActorAttributes.SupervisionStrategy(settings.supervisionDecider) :: - ActorAttributes.Dispatcher( - if (settings.dispatcher == Deploy.NoDispatcherGiven) Dispatchers.DefaultDispatcherId - else settings.dispatcher - ) :: Nil) + ActorAttributes.SupervisionStrategy(settings.supervisionDecider) :: + ActorAttributes.Dispatcher(if (settings.dispatcher == Deploy.NoDispatcherGiven) Dispatchers.DefaultDispatcherId + else settings.dispatcher) :: Nil) } override lazy val executionContext: ExecutionContextExecutor = dispatchers.lookup(settings.dispatcher match { @@ -397,7 +417,9 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff case other => other }) - override def schedulePeriodically(initialDelay: FiniteDuration, interval: FiniteDuration, task: Runnable): Cancellable = + override def schedulePeriodically(initialDelay: FiniteDuration, + interval: FiniteDuration, + task: Runnable): Cancellable = system.scheduler.schedule(initialDelay, interval, task)(executionContext) override def scheduleOnce(delay: FiniteDuration, task: Runnable): Cancellable = @@ -406,22 +428,23 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff override def materialize[Mat](_runnableGraph: Graph[ClosedShape, Mat]): Mat = materialize(_runnableGraph, defaultAttributes) - override def materialize[Mat]( - _runnableGraph: Graph[ClosedShape, Mat], - defaultAttributes: Attributes): Mat = - materialize( - _runnableGraph, - defaultAttributes, - PhasedFusingActorMaterializer.DefaultPhase, - PhasedFusingActorMaterializer.DefaultPhases) + override def materialize[Mat](_runnableGraph: Graph[ClosedShape, Mat], defaultAttributes: 
Attributes): Mat = + materialize(_runnableGraph, + defaultAttributes, + PhasedFusingActorMaterializer.DefaultPhase, + PhasedFusingActorMaterializer.DefaultPhases) - override def materialize[Mat]( - graph: Graph[ClosedShape, Mat], - defaultAttributes: Attributes, - defaultPhase: Phase[Any], - phases: Map[IslandTag, Phase[Any]]): Mat = { + override def materialize[Mat](graph: Graph[ClosedShape, Mat], + defaultAttributes: Attributes, + defaultPhase: Phase[Any], + phases: Map[IslandTag, Phase[Any]]): Mat = { if (isShutdown) throw new IllegalStateException("Trying to materialize stream after materializer has been shutdown") - val islandTracking = new IslandTracking(phases, settings, defaultAttributes, defaultPhase, this, islandNamePrefix = createFlowName() + "-") + val islandTracking = new IslandTracking(phases, + settings, + defaultAttributes, + defaultPhase, + this, + islandNamePrefix = createFlowName() + "-") var current: Traversal = graph.traversalBuilder.traversal @@ -512,7 +535,9 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff } - private def wireInlets(islandTracking: IslandTracking, mod: StreamLayout.AtomicModule[Shape, Any], logic: Any): Unit = { + private def wireInlets(islandTracking: IslandTracking, + mod: StreamLayout.AtomicModule[Shape, Any], + logic: Any): Unit = { val inlets = mod.shape.inlets if (inlets.nonEmpty) { if (Shape.hasOnePort(inlets)) { @@ -528,8 +553,11 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff } } - private def wireOutlets(islandTracking: IslandTracking, mod: StreamLayout.AtomicModule[Shape, Any], logic: Any, - stageGlobalOffset: Int, outToSlot: Array[Int]): Unit = { + private def wireOutlets(islandTracking: IslandTracking, + mod: StreamLayout.AtomicModule[Shape, Any], + logic: Any, + stageGlobalOffset: Int, + outToSlot: Array[Int]): Unit = { val outlets = mod.shape.outlets if (outlets.nonEmpty) { if (Shape.hasOnePort(outlets)) { @@ -564,11 +592,10 @@ private final 
case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff * INTERNAL API */ @DoNotInherit private[akka] trait Phase[M] { - def apply( - settings: ActorMaterializerSettings, - effectiveAttributes: Attributes, - materializer: PhasedFusingActorMaterializer, - islandName: String): PhaseIsland[M] + def apply(settings: ActorMaterializerSettings, + effectiveAttributes: Attributes, + materializer: PhasedFusingActorMaterializer, + islandName: String): PhaseIsland[M] } /** @@ -600,12 +627,12 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff /** * INTERNAL API */ -@InternalApi private[akka] final class GraphStageIsland( - settings: ActorMaterializerSettings, - effectiveAttributes: Attributes, - materializer: PhasedFusingActorMaterializer, - islandName: String, - subflowFuser: OptionVal[GraphInterpreterShell => ActorRef]) extends PhaseIsland[GraphStageLogic] { +@InternalApi private[akka] final class GraphStageIsland(settings: ActorMaterializerSettings, + effectiveAttributes: Attributes, + materializer: PhasedFusingActorMaterializer, + islandName: String, + subflowFuser: OptionVal[GraphInterpreterShell => ActorRef]) + extends PhaseIsland[GraphStageLogic] { // TODO: remove these private val logicArrayType = Array.empty[GraphStageLogic] private[this] val logics = new util.ArrayList[GraphStageLogic](16) @@ -615,12 +642,7 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff private var outConnections: List[Connection] = Nil private var fullIslandName: OptionVal[String] = OptionVal.None - val shell = new GraphInterpreterShell( - connections = null, - logics = null, - settings, - effectiveAttributes, - materializer) + val shell = new GraphInterpreterShell(connections = null, logics = null, settings, effectiveAttributes, materializer) override def name: String = "Fusing GraphStages phase" @@ -737,8 +759,9 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff 
fuseIntoExistingInterpreter(shell) case _ => - - val props = ActorGraphInterpreter.props(shell).withDispatcher(ActorAttributes.Dispatcher.resolve(effectiveAttributes, settings)) + val props = ActorGraphInterpreter + .props(shell) + .withDispatcher(ActorAttributes.Dispatcher.resolve(effectiveAttributes, settings)) val actorName = fullIslandName match { case OptionVal.Some(n) => n @@ -763,7 +786,8 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff if (isIn) s"in port id [$missingHandlerIdx]" else s"out port id [$missingHandlerIdx]" } - throw new IllegalStateException(s"No handler defined in stage [${logic.originalStage.getOrElse(logic).toString}] for $portLabel." + + throw new IllegalStateException( + s"No handler defined in stage [${logic.originalStage.getOrElse(logic).toString}] for $portLabel." + " All inlets and outlets must be assigned a handler with setHandler in the constructor of your graph stage logic.") } @@ -778,14 +802,15 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff /** * INTERNAL API */ -@InternalApi private[akka] final class SourceModulePhase( - materializer: PhasedFusingActorMaterializer, - islandName: String) extends PhaseIsland[Publisher[Any]] { +@InternalApi private[akka] final class SourceModulePhase(materializer: PhasedFusingActorMaterializer, + islandName: String) + extends PhaseIsland[Publisher[Any]] { override def name: String = s"SourceModule phase" override def materializeAtomic(mod: AtomicModule[Shape, Any], attributes: Attributes): (Publisher[Any], Any) = { - mod.asInstanceOf[SourceModule[Any, Any]].create(MaterializationContext(materializer, attributes, - islandName + "-" + attributes.nameOrDefault())) + mod + .asInstanceOf[SourceModule[Any, Any]] + .create(MaterializationContext(materializer, attributes, islandName + "-" + attributes.nameOrDefault())) } override def assignPort(in: InPort, slot: Int, logic: Publisher[Any]): Unit = () @@ -809,14 +834,15 @@ private final 
case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff * INTERNAL API */ @InternalApi private[akka] final class SinkModulePhase(materializer: PhasedFusingActorMaterializer, islandName: String) - extends PhaseIsland[AnyRef] { + extends PhaseIsland[AnyRef] { override def name: String = s"SinkModule phase" var subscriberOrVirtualPublisher: AnyRef = _ override def materializeAtomic(mod: AtomicModule[Shape, Any], attributes: Attributes): (AnyRef, Any) = { val subAndMat = - mod.asInstanceOf[SinkModule[Any, Any]].create(MaterializationContext(materializer, attributes, - islandName + "-" + attributes.nameOrDefault())) + mod + .asInstanceOf[SinkModule[Any, Any]] + .create(MaterializationContext(materializer, attributes, islandName + "-" + attributes.nameOrDefault())) subscriberOrVirtualPublisher = subAndMat._1 (subscriberOrVirtualPublisher, subAndMat._2) @@ -848,8 +874,7 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff /** * INTERNAL API */ -@InternalApi private[akka] final class ProcessorModulePhase() - extends PhaseIsland[Processor[Any, Any]] { +@InternalApi private[akka] final class ProcessorModulePhase() extends PhaseIsland[Processor[Any, Any]] { override def name: String = "ProcessorModulePhase" private[this] var processor: Processor[Any, Any] = _ @@ -876,7 +901,8 @@ private final case class SavedIslandData(islandGlobalOffset: Int, lastVisitedOff /** * INTERNAL API */ -@InternalApi private[akka] final class TlsModulePhase(materializer: PhasedFusingActorMaterializer, islandName: String) extends PhaseIsland[NotUsed] { +@InternalApi private[akka] final class TlsModulePhase(materializer: PhasedFusingActorMaterializer, islandName: String) + extends PhaseIsland[NotUsed] { def name: String = "TlsModulePhase" var tlsActor: ActorRef = _ diff --git a/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala b/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala index ad04fca781..1332f72acd 100644 --- 
a/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/QueueSource.scala @@ -30,7 +30,8 @@ import scala.concurrent.{ Future, Promise } /** * INTERNAL API */ -@InternalApi private[akka] final class QueueSource[T](maxBuffer: Int, overflowStrategy: OverflowStrategy) extends GraphStageWithMaterializedValue[SourceShape[T], SourceQueueWithComplete[T]] { +@InternalApi private[akka] final class QueueSource[T](maxBuffer: Int, overflowStrategy: OverflowStrategy) + extends GraphStageWithMaterializedValue[SourceShape[T], SourceQueueWithComplete[T]] { import QueueSource._ val out = Outlet[T]("queueSource.out") @@ -62,37 +63,43 @@ import scala.concurrent.{ Future, Promise } private def bufferElem(offer: Offer[T]): Unit = { if (!buffer.isFull) { enqueueAndSuccess(offer) - } else overflowStrategy match { - case s: DropHead => - log.log(s.logLevel, "Dropping the head element because buffer is full and overflowStrategy is: [DropHead]") - buffer.dropHead() - enqueueAndSuccess(offer) - case s: DropTail => - log.log(s.logLevel, "Dropping the tail element because buffer is full and overflowStrategy is: [DropTail]") - buffer.dropTail() - enqueueAndSuccess(offer) - case s: DropBuffer => - log.log(s.logLevel, "Dropping all the buffered elements because buffer is full and overflowStrategy is: [DropBuffer]") - buffer.clear() - enqueueAndSuccess(offer) - case s: DropNew => - log.log(s.logLevel, "Dropping the new element because buffer is full and overflowStrategy is: [DropNew]") - offer.promise.success(QueueOfferResult.Dropped) - case s: Fail => - log.log(s.logLevel, "Failing because buffer is full and overflowStrategy is: [Fail]") - val bufferOverflowException = BufferOverflowException(s"Buffer overflow (max capacity was: $maxBuffer)!") - offer.promise.success(QueueOfferResult.Failure(bufferOverflowException)) - completion.failure(bufferOverflowException) - failStage(bufferOverflowException) - case s: Backpressure => - 
log.log(s.logLevel, "Backpressuring because buffer is full and overflowStrategy is: [Backpressure]") - pendingOffer match { - case Some(_) => - offer.promise.failure(new IllegalStateException("You have to wait for previous offer to be resolved to send another request")) - case None => - pendingOffer = Some(offer) - } - } + } else + overflowStrategy match { + case s: DropHead => + log.log(s.logLevel, + "Dropping the head element because buffer is full and overflowStrategy is: [DropHead]") + buffer.dropHead() + enqueueAndSuccess(offer) + case s: DropTail => + log.log(s.logLevel, + "Dropping the tail element because buffer is full and overflowStrategy is: [DropTail]") + buffer.dropTail() + enqueueAndSuccess(offer) + case s: DropBuffer => + log.log(s.logLevel, + "Dropping all the buffered elements because buffer is full and overflowStrategy is: [DropBuffer]") + buffer.clear() + enqueueAndSuccess(offer) + case s: DropNew => + log.log(s.logLevel, "Dropping the new element because buffer is full and overflowStrategy is: [DropNew]") + offer.promise.success(QueueOfferResult.Dropped) + case s: Fail => + log.log(s.logLevel, "Failing because buffer is full and overflowStrategy is: [Fail]") + val bufferOverflowException = BufferOverflowException(s"Buffer overflow (max capacity was: $maxBuffer)!") + offer.promise.success(QueueOfferResult.Failure(bufferOverflowException)) + completion.failure(bufferOverflowException) + failStage(bufferOverflowException) + case s: Backpressure => + log.log(s.logLevel, "Backpressuring because buffer is full and overflowStrategy is: [Backpressure]") + pendingOffer match { + case Some(_) => + offer.promise.failure( + new IllegalStateException( + "You have to wait for previous offer to be resolved to send another request")) + case None => + pendingOffer = Some(offer) + } + } } private val callback = getAsyncCallback[Input[T]] { @@ -108,24 +115,28 @@ import scala.concurrent.{ Future, Promise } promise.success(QueueOfferResult.Enqueued) } else if 
(pendingOffer.isEmpty) pendingOffer = Some(offer) - else overflowStrategy match { - case s @ (_: DropHead | _: DropBuffer) => - log.log(s.logLevel, "Dropping element because buffer is full and overflowStrategy is: [{}]", s) - pendingOffer.get.promise.success(QueueOfferResult.Dropped) - pendingOffer = Some(offer) - case s @ (_: DropTail | _: DropNew) => - log.log(s.logLevel, "Dropping element because buffer is full and overflowStrategy is: [{}]", s) - promise.success(QueueOfferResult.Dropped) - case s: Fail => - log.log(s.logLevel, "Failing because buffer is full and overflowStrategy is: [Fail]") - val bufferOverflowException = BufferOverflowException(s"Buffer overflow (max capacity was: $maxBuffer)!") - promise.success(QueueOfferResult.Failure(bufferOverflowException)) - completion.failure(bufferOverflowException) - failStage(bufferOverflowException) - case s: Backpressure => - log.log(s.logLevel, "Failing because buffer is full and overflowStrategy is: [Backpressure]") - promise.failure(new IllegalStateException("You have to wait for previous offer to be resolved to send another request")) - } + else + overflowStrategy match { + case s @ (_: DropHead | _: DropBuffer) => + log.log(s.logLevel, "Dropping element because buffer is full and overflowStrategy is: [{}]", s) + pendingOffer.get.promise.success(QueueOfferResult.Dropped) + pendingOffer = Some(offer) + case s @ (_: DropTail | _: DropNew) => + log.log(s.logLevel, "Dropping element because buffer is full and overflowStrategy is: [{}]", s) + promise.success(QueueOfferResult.Dropped) + case s: Fail => + log.log(s.logLevel, "Failing because buffer is full and overflowStrategy is: [Fail]") + val bufferOverflowException = + BufferOverflowException(s"Buffer overflow (max capacity was: $maxBuffer)!") + promise.success(QueueOfferResult.Failure(bufferOverflowException)) + completion.failure(bufferOverflowException) + failStage(bufferOverflowException) + case s: Backpressure => + log.log(s.logLevel, "Failing because 
buffer is full and overflowStrategy is: [Backpressure]") + promise.failure( + new IllegalStateException( + "You have to wait for previous offer to be resolved to send another request")) + } case Completion => if (maxBuffer != 0 && buffer.nonEmpty || pendingOffer.nonEmpty) terminating = true @@ -183,7 +194,8 @@ import scala.concurrent.{ Future, Promise } override def watchCompletion() = completion.future override def offer(element: T): Future[QueueOfferResult] = { val p = Promise[QueueOfferResult] - callback.invokeWithFeedback(Offer(element, p)) + callback + .invokeWithFeedback(Offer(element, p)) .onComplete { case scala.util.Success(_) => case scala.util.Failure(e) => p.tryFailure(e) @@ -203,7 +215,8 @@ import scala.concurrent.{ Future, Promise } /** * INTERNAL API */ -@InternalApi private[akka] final class SourceQueueAdapter[T](delegate: SourceQueueWithComplete[T]) extends akka.stream.javadsl.SourceQueueWithComplete[T] { +@InternalApi private[akka] final class SourceQueueAdapter[T](delegate: SourceQueueWithComplete[T]) + extends akka.stream.javadsl.SourceQueueWithComplete[T] { def offer(elem: T): CompletionStage[QueueOfferResult] = delegate.offer(elem).toJava def watchCompletion(): CompletionStage[Done] = delegate.watchCompletion().toJava def complete(): Unit = delegate.complete() diff --git a/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala b/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala index bf4c7a4d02..c6183b916a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ReactiveStreamsCompliance.scala @@ -79,47 +79,60 @@ import org.reactivestreams.{ Subscriber, Subscription } sealed trait SpecViolation extends Throwable @SerialVersionUID(1L) - final class SignalThrewException(message: String, cause: Throwable) extends IllegalStateException(message, cause) with SpecViolation + final class SignalThrewException(message: String, 
cause: Throwable) + extends IllegalStateException(message, cause) + with SpecViolation final def tryOnError[T](subscriber: Subscriber[T], error: Throwable): Unit = error match { - case sv: SpecViolation => throw new IllegalStateException("It is not legal to try to signal onError with a SpecViolation", sv) + case sv: SpecViolation => + throw new IllegalStateException("It is not legal to try to signal onError with a SpecViolation", sv) case other => - try subscriber.onError(other) catch { + try subscriber.onError(other) + catch { case NonFatal(t) => throw new SignalThrewException(subscriber + ".onError", t) } } final def tryOnNext[T](subscriber: Subscriber[T], element: T): Unit = { requireNonNullElement(element) - try subscriber.onNext(element) catch { + try subscriber.onNext(element) + catch { case NonFatal(t) => throw new SignalThrewException(subscriber + ".onNext", t) } } final def tryOnSubscribe[T](subscriber: Subscriber[T], subscription: Subscription): Unit = { - try subscriber.onSubscribe(subscription) catch { + try subscriber.onSubscribe(subscription) + catch { case NonFatal(t) => throw new SignalThrewException(subscriber + ".onSubscribe", t) } } final def tryOnComplete[T](subscriber: Subscriber[T]): Unit = { - try subscriber.onComplete() catch { + try subscriber.onComplete() + catch { case NonFatal(t) => throw new SignalThrewException(subscriber + ".onComplete", t) } } final def tryRequest(subscription: Subscription, demand: Long): Unit = { - if (subscription eq null) throw new IllegalStateException("Subscription must be not null on request() call, rule 1.3") - try subscription.request(demand) catch { - case NonFatal(t) => throw new SignalThrewException("It is illegal to throw exceptions from request(), rule 3.16", t) + if (subscription eq null) + throw new IllegalStateException("Subscription must be not null on request() call, rule 1.3") + try subscription.request(demand) + catch { + case NonFatal(t) => + throw new SignalThrewException("It is illegal to 
throw exceptions from request(), rule 3.16", t) } } final def tryCancel(subscription: Subscription): Unit = { - if (subscription eq null) throw new IllegalStateException("Subscription must be not null on cancel() call, rule 1.3") - try subscription.cancel() catch { - case NonFatal(t) => throw new SignalThrewException("It is illegal to throw exceptions from cancel(), rule 3.15", t) + if (subscription eq null) + throw new IllegalStateException("Subscription must be not null on cancel() call, rule 1.3") + try subscription.cancel() + catch { + case NonFatal(t) => + throw new SignalThrewException("It is illegal to throw exceptions from cancel(), rule 3.15", t) } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala b/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala index 10b3be64fa..7910aa39c9 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/ResizableMultiReaderRingBuffer.scala @@ -15,16 +15,13 @@ import akka.annotation.InternalApi * Contrary to many other ring buffer implementations this one does not automatically overwrite the oldest * elements, rather, if full, the buffer tries to grow and rejects further writes if max capacity is reached. 
*/ -@InternalApi private[akka] class ResizableMultiReaderRingBuffer[T]( - initialSize: Int, // constructor param, not field - maxSize: Int, // constructor param, not field - val cursors: Cursors) { - require( - Integer.lowestOneBit(maxSize) == maxSize && 0 < maxSize && maxSize <= Int.MaxValue / 2, - "maxSize must be a power of 2 that is > 0 and < Int.MaxValue/2") - require( - Integer.lowestOneBit(initialSize) == initialSize && 0 < initialSize && initialSize <= maxSize, - "initialSize must be a power of 2 that is > 0 and <= maxSize") +@InternalApi private[akka] class ResizableMultiReaderRingBuffer[T](initialSize: Int, // constructor param, not field + maxSize: Int, // constructor param, not field + val cursors: Cursors) { + require(Integer.lowestOneBit(maxSize) == maxSize && 0 < maxSize && maxSize <= Int.MaxValue / 2, + "maxSize must be a power of 2 that is > 0 and < Int.MaxValue/2") + require(Integer.lowestOneBit(initialSize) == initialSize && 0 < initialSize && initialSize <= maxSize, + "initialSize must be a power of 2 that is > 0 and <= maxSize") private[this] val maxSizeBit = Integer.numberOfTrailingZeros(maxSize) private[this] var array = new Array[Any](initialSize) diff --git a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala index eef1d0a7a3..1acc0deec8 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Sinks.scala @@ -34,7 +34,8 @@ import akka.util.ccompat._ /** * INTERNAL API */ -@DoNotInherit private[akka] abstract class SinkModule[-In, Mat](val shape: SinkShape[In]) extends AtomicModule[SinkShape[In], Mat] { +@DoNotInherit private[akka] abstract class SinkModule[-In, Mat](val shape: SinkShape[In]) + extends AtomicModule[SinkShape[In], Mat] { /** * Create the Subscriber or VirtualPublisher that consumes the incoming @@ -74,7 +75,8 @@ import akka.util.ccompat._ * elements to fill the internal buffers it will assert 
back-pressure until * a subscriber connects and creates demand for elements to be emitted. */ -@InternalApi private[akka] class PublisherSink[In](val attributes: Attributes, shape: SinkShape[In]) extends SinkModule[In, Publisher[In]](shape) { +@InternalApi private[akka] class PublisherSink[In](val attributes: Attributes, shape: SinkShape[In]) + extends SinkModule[In, Publisher[In]](shape) { /* * This method is the reason why SinkModule.create may return something that is @@ -86,17 +88,17 @@ import akka.util.ccompat._ (proc, proc) } - override protected def newInstance(shape: SinkShape[In]): SinkModule[In, Publisher[In]] = new PublisherSink[In](attributes, shape) - override def withAttributes(attr: Attributes): SinkModule[In, Publisher[In]] = new PublisherSink[In](attr, amendShape(attr)) + override protected def newInstance(shape: SinkShape[In]): SinkModule[In, Publisher[In]] = + new PublisherSink[In](attributes, shape) + override def withAttributes(attr: Attributes): SinkModule[In, Publisher[In]] = + new PublisherSink[In](attr, amendShape(attr)) } /** * INTERNAL API */ -@InternalApi private[akka] final class FanoutPublisherSink[In]( - val attributes: Attributes, - shape: SinkShape[In]) - extends SinkModule[In, Publisher[In]](shape) { +@InternalApi private[akka] final class FanoutPublisherSink[In](val attributes: Attributes, shape: SinkShape[In]) + extends SinkModule[In, Publisher[In]](shape) { override def create(context: MaterializationContext): (Subscriber[In], Publisher[In]) = { val actorMaterializer = ActorMaterializerHelper.downcast(context.materializer) @@ -120,21 +122,29 @@ import akka.util.ccompat._ * INTERNAL API * Attaches a subscriber to this stream. 
*/ -@InternalApi private[akka] final class SubscriberSink[In](subscriber: Subscriber[In], val attributes: Attributes, shape: SinkShape[In]) extends SinkModule[In, NotUsed](shape) { +@InternalApi private[akka] final class SubscriberSink[In](subscriber: Subscriber[In], + val attributes: Attributes, + shape: SinkShape[In]) + extends SinkModule[In, NotUsed](shape) { override def create(context: MaterializationContext) = (subscriber, NotUsed) - override protected def newInstance(shape: SinkShape[In]): SinkModule[In, NotUsed] = new SubscriberSink[In](subscriber, attributes, shape) - override def withAttributes(attr: Attributes): SinkModule[In, NotUsed] = new SubscriberSink[In](subscriber, attr, amendShape(attr)) + override protected def newInstance(shape: SinkShape[In]): SinkModule[In, NotUsed] = + new SubscriberSink[In](subscriber, attributes, shape) + override def withAttributes(attr: Attributes): SinkModule[In, NotUsed] = + new SubscriberSink[In](subscriber, attr, amendShape(attr)) } /** * INTERNAL API * A sink that immediately cancels its upstream upon materialization. 
*/ -@InternalApi private[akka] final class CancelSink(val attributes: Attributes, shape: SinkShape[Any]) extends SinkModule[Any, NotUsed](shape) { - override def create(context: MaterializationContext): (Subscriber[Any], NotUsed) = (new CancellingSubscriber[Any], NotUsed) - override protected def newInstance(shape: SinkShape[Any]): SinkModule[Any, NotUsed] = new CancelSink(attributes, shape) +@InternalApi private[akka] final class CancelSink(val attributes: Attributes, shape: SinkShape[Any]) + extends SinkModule[Any, NotUsed](shape) { + override def create(context: MaterializationContext): (Subscriber[Any], NotUsed) = + (new CancellingSubscriber[Any], NotUsed) + override protected def newInstance(shape: SinkShape[Any]): SinkModule[Any, NotUsed] = + new CancelSink(attributes, shape) override def withAttributes(attr: Attributes): SinkModule[Any, NotUsed] = new CancelSink(attr, amendShape(attr)) } @@ -143,23 +153,31 @@ import akka.util.ccompat._ * Creates and wraps an actor into [[org.reactivestreams.Subscriber]] from the given `props`, * which should be [[akka.actor.Props]] for an [[akka.stream.actor.ActorSubscriber]]. 
*/ -@InternalApi private[akka] final class ActorSubscriberSink[In](props: Props, val attributes: Attributes, shape: SinkShape[In]) extends SinkModule[In, ActorRef](shape) { +@InternalApi private[akka] final class ActorSubscriberSink[In](props: Props, + val attributes: Attributes, + shape: SinkShape[In]) + extends SinkModule[In, ActorRef](shape) { override def create(context: MaterializationContext) = { val subscriberRef = ActorMaterializerHelper.downcast(context.materializer).actorOf(context, props) (akka.stream.actor.ActorSubscriber[In](subscriberRef), subscriberRef) } - override protected def newInstance(shape: SinkShape[In]): SinkModule[In, ActorRef] = new ActorSubscriberSink[In](props, attributes, shape) - override def withAttributes(attr: Attributes): SinkModule[In, ActorRef] = new ActorSubscriberSink[In](props, attr, amendShape(attr)) + override protected def newInstance(shape: SinkShape[In]): SinkModule[In, ActorRef] = + new ActorSubscriberSink[In](props, attributes, shape) + override def withAttributes(attr: Attributes): SinkModule[In, ActorRef] = + new ActorSubscriberSink[In](props, attr, amendShape(attr)) } /** * INTERNAL API */ -@InternalApi private[akka] final class ActorRefSink[In](ref: ActorRef, onCompleteMessage: Any, onFailureMessage: Throwable => Any, +@InternalApi private[akka] final class ActorRefSink[In](ref: ActorRef, + onCompleteMessage: Any, + onFailureMessage: Throwable => Any, val attributes: Attributes, - shape: SinkShape[In]) extends SinkModule[In, NotUsed](shape) { + shape: SinkShape[In]) + extends SinkModule[In, NotUsed](shape) { override def create(context: MaterializationContext) = { val actorMaterializer = ActorMaterializerHelper.downcast(context.materializer) @@ -179,7 +197,8 @@ import akka.util.ccompat._ /** * INTERNAL API */ -@InternalApi private[akka] final class TakeLastStage[T](n: Int) extends GraphStageWithMaterializedValue[SinkShape[T], Future[immutable.Seq[T]]] { +@InternalApi private[akka] final class TakeLastStage[T](n: 
Int) + extends GraphStageWithMaterializedValue[SinkShape[T], Future[immutable.Seq[T]]] { if (n <= 0) throw new IllegalArgumentException("requirement failed: n must be greater than 0") @@ -226,7 +245,8 @@ import akka.util.ccompat._ /** * INTERNAL API */ -@InternalApi private[akka] final class HeadOptionStage[T] extends GraphStageWithMaterializedValue[SinkShape[T], Future[Option[T]]] { +@InternalApi private[akka] final class HeadOptionStage[T] + extends GraphStageWithMaterializedValue[SinkShape[T], Future[Option[T]]] { val in: Inlet[T] = Inlet("headOption.in") @@ -266,7 +286,8 @@ import akka.util.ccompat._ /** * INTERNAL API */ -@InternalApi private[akka] final class SeqStage[T, That](implicit cbf: Factory[T, That with immutable.Iterable[_]]) extends GraphStageWithMaterializedValue[SinkShape[T], Future[That]] { +@InternalApi private[akka] final class SeqStage[T, That](implicit cbf: Factory[T, That with immutable.Iterable[_]]) + extends GraphStageWithMaterializedValue[SinkShape[T], Future[That]] { val in = Inlet[T]("seq.in") override def toString: String = "SeqStage" @@ -321,7 +342,8 @@ import akka.util.ccompat._ /** * INTERNAL API */ -@InternalApi private[akka] final class QueueSink[T]() extends GraphStageWithMaterializedValue[SinkShape[T], SinkQueueWithCancel[T]] { +@InternalApi private[akka] final class QueueSink[T]() + extends GraphStageWithMaterializedValue[SinkShape[T], SinkQueueWithCancel[T]] { type Requested[E] = Promise[Option[E]] val in = Inlet[T]("queueSink.in") @@ -349,16 +371,19 @@ import akka.util.ccompat._ } private val callback = getAsyncCallback[Output[T]] { - case QueueSink.Pull(pullPromise) => currentRequest match { - case Some(_) => - pullPromise.failure(new IllegalStateException("You have to wait for previous future to be resolved to send another request")) - case None => - if (buffer.isEmpty) currentRequest = Some(pullPromise) - else { - if (buffer.used == maxBuffer) tryPull(in) - sendDownstream(pullPromise) - } - } + case 
QueueSink.Pull(pullPromise) => + currentRequest match { + case Some(_) => + pullPromise.failure( + new IllegalStateException( + "You have to wait for previous future to be resolved to send another request")) + case None => + if (buffer.isEmpty) currentRequest = Some(pullPromise) + else { + if (buffer.used == maxBuffer) tryPull(in) + sendDownstream(pullPromise) + } + } case QueueSink.Cancel => completeStage() } @@ -395,8 +420,10 @@ import akka.util.ccompat._ // SinkQueueWithCancel impl override def pull(): Future[Option[T]] = { val p = Promise[Option[T]] - callback.invokeWithFeedback(Pull(p)) - .failed.foreach { + callback + .invokeWithFeedback(Pull(p)) + .failed + .foreach { case NonFatal(e) => p.tryFailure(e) case _ => () }(akka.dispatch.ExecutionContexts.sameThreadExecutionContext) @@ -414,7 +441,8 @@ import akka.util.ccompat._ /** * INTERNAL API */ -@InternalApi private[akka] final class SinkQueueAdapter[T](delegate: SinkQueueWithCancel[T]) extends akka.stream.javadsl.SinkQueueWithCancel[T] { +@InternalApi private[akka] final class SinkQueueAdapter[T](delegate: SinkQueueWithCancel[T]) + extends akka.stream.javadsl.SinkQueueWithCancel[T] { import akka.dispatch.ExecutionContexts.{ sameThreadExecutionContext => same } def pull(): CompletionStage[Optional[T]] = delegate.pull().map(_.asJava)(same).toJava def cancel(): Unit = delegate.cancel() @@ -459,7 +487,8 @@ import akka.util.ccompat._ /** * INTERNAL API */ -@InternalApi final private[stream] class LazySink[T, M](sinkFactory: T => Future[Sink[T, M]]) extends GraphStageWithMaterializedValue[SinkShape[T], Future[Option[M]]] { +@InternalApi final private[stream] class LazySink[T, M](sinkFactory: T => Future[Sink[T, M]]) + extends GraphStageWithMaterializedValue[SinkShape[T], Future[Option[M]]] { val in = Inlet[T]("lazySink.in") override def initialAttributes = DefaultAttributes.lazySink override val shape: SinkShape[T] = SinkShape.of(in) @@ -541,22 +570,23 @@ import akka.util.ccompat._ // The stage must not be shut 
down automatically; it is completed when maybeCompleteStage decides setKeepGoing(true) - setHandler(in, new InHandler { - override def onPush(): Unit = { - subOutlet.push(grab(in)) - } - override def onUpstreamFinish(): Unit = { - if (firstElementPushed) { - subOutlet.complete() - maybeCompleteStage() - } - } - override def onUpstreamFailure(ex: Throwable): Unit = { - // propagate exception irrespective if the cached element has been pushed or not - subOutlet.fail(ex) - maybeCompleteStage() - } - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + subOutlet.push(grab(in)) + } + override def onUpstreamFinish(): Unit = { + if (firstElementPushed) { + subOutlet.complete() + maybeCompleteStage() + } + } + override def onUpstreamFailure(ex: Throwable): Unit = { + // propagate exception irrespective if the cached element has been pushed or not + subOutlet.fail(ex) + maybeCompleteStage() + } + }) subOutlet.setHandler(new OutHandler { override def onPull(): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala index b580e00f79..55b8e7fe5c 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/StreamLayout.scala @@ -47,10 +47,10 @@ import scala.util.control.NonFatal } final case class Both(subscriber: Subscriber[Any]) extends HasActualSubscriber - final case class Establishing( - subscriber: Subscriber[Any], - onCompleteBuffered: Boolean = false, - onErrorBuffered: OptionVal[Throwable] = OptionVal.None) extends HasActualSubscriber + final case class Establishing(subscriber: Subscriber[Any], + onCompleteBuffered: Boolean = false, + onErrorBuffered: OptionVal[Throwable] = OptionVal.None) + extends HasActualSubscriber object Establishing { def create(s: Subscriber[_]) = Establishing(s.asInstanceOf[Subscriber[Any]]) } @@ -129,7 +129,8 @@ import scala.util.control.NonFatal if 
(VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode(null).subscribe.rec($s) -> sub") if (!compareAndSet(null, s)) rec(sub) case subscription: Subscription => - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode($subscription).subscribe.rec($s) -> Establishing(sub)") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode($subscription).subscribe.rec($s) -> Establishing(sub)") val establishing = Establishing(sub, false) if (compareAndSet(subscription, establishing)) establishSubscription(establishing, subscription) else rec(sub) @@ -138,7 +139,8 @@ import scala.util.control.NonFatal if (compareAndSet(pub, Inert)) pub.subscribe(sub) else rec(sub) case other => - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode($other).subscribe.rec($s): rejectAdditionalSubscriber") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode($other).subscribe.rec($s): rejectAdditionalSubscriber") rejectAdditionalSubscriber(sub, "VirtualProcessor") } } @@ -154,17 +156,20 @@ import scala.util.control.NonFatal @tailrec def rec(obj: AnyRef): Unit = { get() match { case null => - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode(null).onSubscribe.rec($obj) -> ${obj.getClass}") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode(null).onSubscribe.rec($obj) -> ${obj.getClass}") if (!compareAndSet(null, obj)) rec(obj) case subscriber: Subscriber[_] => obj match { case subscription: Subscription => - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode($subscriber).onSubscribe.rec($obj) -> Establishing") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode($subscriber).onSubscribe.rec($obj) -> Establishing") val establishing = Establishing.create(subscriber) if (compareAndSet(subscriber, establishing)) establishSubscription(establishing, subscription) else rec(obj) case pub: Publisher[_] => - if (VirtualProcessor.Debug) 
println(s"VirtualPublisher#$hashCode($subscriber).onSubscribe.rec($obj) -> INert") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode($subscriber).onSubscribe.rec($obj) -> INert") getAndSet(Inert) match { case Inert => // nothing to be done case _ => pub.subscribe(subscriber.asInstanceOf[Subscriber[Any]]) @@ -192,7 +197,8 @@ import scala.util.control.NonFatal // while we were establishing some stuff could have happened // most likely case, nobody changed it while we where establishing - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode.establishSubscription.rec($establishing) -> Both") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode.establishSubscription.rec($establishing) -> Both") if (compareAndSet(establishing, Both(establishing.subscriber))) { // cas won - life is good // Requests will be only allowed once onSubscribe has returned to avoid reentering on an onNext before @@ -203,13 +209,15 @@ import scala.util.control.NonFatal get() match { case Establishing(sub, _, OptionVal.Some(error)) => // there was an onError while establishing - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode.establishSubscription.rec(Establishing(buffered-error) -> Inert") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode.establishSubscription.rec(Establishing(buffered-error) -> Inert") tryOnError(sub, error) set(Inert) case Establishing(sub, true, _) => // there was on onComplete while we were establishing - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode.establishSubscription.rec(Establishing(buffered-complete) -> Inert") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode.establishSubscription.rec(Establishing(buffered-complete) -> Inert") tryOnComplete(sub) set(Inert) @@ -217,7 +225,8 @@ import scala.util.control.NonFatal tryCancel(subscription) case other => - throw new IllegalStateException(s"Unexpected state while establishing: [$other], if this ever happens 
it is a bug.") + throw new IllegalStateException( + s"Unexpected state while establishing: [$other], if this ever happens it is a bug.") } } @@ -238,13 +247,16 @@ import scala.util.control.NonFatal @tailrec def rec(ex: Throwable): Unit = get() match { case null => - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode(null).onError(${ex.getMessage}) -> ErrorPublisher") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode(null).onError(${ex.getMessage}) -> ErrorPublisher") if (!compareAndSet(null, ErrorPublisher(ex, "failed-VirtualProcessor"))) rec(ex) case s: Subscription => - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode($s).onError(${ex.getMessage}) -> ErrorPublisher") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode($s).onError(${ex.getMessage}) -> ErrorPublisher") if (!compareAndSet(s, ErrorPublisher(ex, "failed-VirtualProcessor"))) rec(ex) case Both(s) => - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode(Both($s)).onError(${ex.getMessage}) -> ErrorPublisher") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode(Both($s)).onError(${ex.getMessage}) -> ErrorPublisher") set(Inert) tryOnError(s, ex) case s: Subscriber[_] => // spec violation @@ -258,7 +270,9 @@ import scala.util.control.NonFatal if (!compareAndSet(est, est.copy(onErrorBuffered = OptionVal.Some(ex)))) rec(ex) case other => // spec violation or cancellation race, but nothing we can do - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode($other).onError(${ex.getMessage}). spec violation or cancellation race") + if (VirtualProcessor.Debug) + println( + s"VirtualPublisher#$hashCode($other).onError(${ex.getMessage}). 
spec violation or cancellation race") } val ex = if (t == null) exceptionMustNotBeNullException else t @@ -275,7 +289,7 @@ import scala.util.control.NonFatal case s: Subscription => if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode($s).onComplete -> EmptyPublisher") if (!compareAndSet(s, EmptyPublisher)) onComplete() - case _@ Both(s) => + case _ @Both(s) => if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode($s).onComplete -> Inert") set(Inert) tryOnComplete(s) @@ -284,7 +298,8 @@ import scala.util.control.NonFatal set(Inert) EmptyPublisher.subscribe(s) case est @ Establishing(_, false, OptionVal.None) => - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode($est).onComplete -> Establishing with buffered complete") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode($est).onComplete -> Establishing with buffered complete") if (!est.onCompleteBuffered && !compareAndSet(est, est.copy(onCompleteBuffered = true))) onComplete() case other => if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode($other).onComplete spec violation") @@ -298,10 +313,15 @@ import scala.util.control.NonFatal if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode.onNext(null)") @tailrec def rec(): Unit = get() match { - case x @ (null | _: Subscription) => if (!compareAndSet(x, ErrorPublisher(ex, "failed-VirtualProcessor"))) rec() - case s: Subscriber[_] => try s.onError(ex) catch { case NonFatal(_) => } finally set(Inert) - case Both(s) => try s.onError(ex) catch { case NonFatal(_) => } finally set(Inert) - case _ => // spec violation or cancellation race, but nothing we can do + case x @ (null | _: Subscription) => + if (!compareAndSet(x, ErrorPublisher(ex, "failed-VirtualProcessor"))) rec() + case s: Subscriber[_] => + try s.onError(ex) + catch { case NonFatal(_) => } finally set(Inert) + case Both(s) => + try s.onError(ex) + catch { case NonFatal(_) => } finally set(Inert) + case _ => // spec violation or 
cancellation race, but nothing we can do } rec() throw ex // must throw NPE, rule 2:13 @@ -311,17 +331,20 @@ import scala.util.control.NonFatal case h: HasActualSubscriber => val s = h.subscriber try { - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode(${h.getClass.getName}($s)).onNext($t).rec()") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode(${h.getClass.getName}($s)).onNext($t).rec()") s.onNext(t) } catch { case NonFatal(e) => - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode(Both($s)).onNext($t) threw, spec violation -> Inert") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode(Both($s)).onNext($t) threw, spec violation -> Inert") set(Inert) throw new IllegalStateException("Subscriber threw exception, this is in violation of rule 2:13", e) } case s: Subscriber[_] => // spec violation - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode($s).onNext($t).rec(): spec violation -> Inert") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode($s).onNext($t).rec(): spec violation -> Inert") val ex = new IllegalStateException(noDemand) getAndSet(Inert) match { case Inert => // nothing to be done @@ -332,7 +355,8 @@ import scala.util.control.NonFatal if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode(Inert|Publisher).onNext($t).rec(): nop") // nothing to be done case other => - if (VirtualProcessor.Debug) println(s"VirtualPublisher#$hashCode($other).onNext($t).rec() -> ErrorPublisher") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#$hashCode($other).onNext($t).rec() -> ErrorPublisher") val pub = ErrorPublisher(new IllegalStateException(noDemand), "failed-VirtualPublisher") if (!compareAndSet(other, pub)) rec() else throw pub.t @@ -353,12 +377,15 @@ import scala.util.control.NonFatal // Extdending AtomicReference to make the hot memory location share the same cache line with the Subscription private class WrappedSubscription(real: Subscription) - 
extends AtomicReference[WrappedSubscription.SubscriptionState](WrappedSubscription.NoBufferedDemand) with Subscription { + extends AtomicReference[WrappedSubscription.SubscriptionState](WrappedSubscription.NoBufferedDemand) + with Subscription { import WrappedSubscription._ // Release def ungateDemandAndRequestBuffered(): Unit = { - if (VirtualProcessor.Debug) println(s"VirtualPublisher#${VirtualProcessor.this.hashCode}.WrappedSubscription($real).ungateDemandAndRequestBuffered") + if (VirtualProcessor.Debug) + println( + s"VirtualPublisher#${VirtualProcessor.this.hashCode}.WrappedSubscription($real).ungateDemandAndRequestBuffered") // Ungate demand val requests = getAndSet(PassThrough).demand // And request buffered demand @@ -367,7 +394,8 @@ import scala.util.control.NonFatal override def request(n: Long): Unit = { if (n < 1) { - if (VirtualProcessor.Debug) println(s"VirtualPublisher#${VirtualProcessor.this.hashCode}.WrappedSubscription($real).request($n)") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#${VirtualProcessor.this.hashCode}.WrappedSubscription($real).request($n)") tryCancel(real) VirtualProcessor.this.getAndSet(Inert) match { case Both(subscriber) => rejectDueToNonPositiveDemand(subscriber) @@ -384,10 +412,14 @@ import scala.util.control.NonFatal @tailrec def bufferDemand(n: Long): Unit = { val current = get() if (current eq PassThrough) { - if (VirtualProcessor.Debug) println(s"VirtualPublisher#${VirtualProcessor.this.hashCode}WrappedSubscription($real).bufferDemand($n) passthrough") + if (VirtualProcessor.Debug) + println( + s"VirtualPublisher#${VirtualProcessor.this.hashCode}WrappedSubscription($real).bufferDemand($n) passthrough") real.request(n) } else if (!compareAndSet(current, Buffering(current.demand + n))) { - if (VirtualProcessor.Debug) println(s"VirtualPublisher#${VirtualProcessor.this.hashCode}WrappedSubscription($real).bufferDemand($n) buffering") + if (VirtualProcessor.Debug) + println( + 
s"VirtualPublisher#${VirtualProcessor.this.hashCode}WrappedSubscription($real).bufferDemand($n) buffering") bufferDemand(n) } } @@ -395,7 +427,8 @@ import scala.util.control.NonFatal } } override def cancel(): Unit = { - if (VirtualProcessor.Debug) println(s"VirtualPublisher#${VirtualProcessor.this.hashCode}WrappedSubscription.cancel() -> Inert") + if (VirtualProcessor.Debug) + println(s"VirtualPublisher#${VirtualProcessor.this.hashCode}WrappedSubscription.cancel() -> Inert") VirtualProcessor.this.set(Inert) real.cancel() } @@ -453,7 +486,8 @@ import scala.util.control.NonFatal pub.asInstanceOf[Publisher[r]].subscribe(sub) case p: Publisher[_] => - throw new IllegalStateException(s"internal error, already registered [$p], yet attempted to register 2nd publisher [$pub]!") + throw new IllegalStateException( + s"internal error, already registered [$p], yet attempted to register 2nd publisher [$pub]!") case unexpected => throw new IllegalStateException(s"internal error, unexpected state: $unexpected") @@ -467,8 +501,9 @@ import scala.util.control.NonFatal * INTERNAL API */ @InternalApi private[akka] final case class ProcessorModule[In, Out, Mat]( - val createProcessor: () => (Processor[In, Out], Mat), - attributes: Attributes = DefaultAttributes.processor) extends StreamLayout.AtomicModule[FlowShape[In, Out], Mat] { + val createProcessor: () => (Processor[In, Out], Mat), + attributes: Attributes = DefaultAttributes.processor) + extends StreamLayout.AtomicModule[FlowShape[In, Out], Mat] { val inPort = Inlet[In]("ProcessorModule.in") val outPort = Outlet[Out]("ProcessorModule.out") override val shape = new FlowShape(inPort, outPort) diff --git a/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala b/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala index 1bbd3f3408..c64f989fa6 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala +++ 
b/akka-stream/src/main/scala/akka/stream/impl/StreamSubscriptionTimeout.scala @@ -85,19 +85,26 @@ import scala.util.control.NoStackTrace target match { case p: Processor[_, _] => log.debug("Cancelling {} Processor's publisher and subscriber sides (after {} ms)", p, millis) - handleSubscriptionTimeout(target, new SubscriptionTimeoutException(s"Publisher was not attached to upstream within deadline ($millis) ms") with NoStackTrace) + handleSubscriptionTimeout( + target, + new SubscriptionTimeoutException(s"Publisher was not attached to upstream within deadline ($millis) ms") + with NoStackTrace) case p: Publisher[_] => log.debug("Cancelling {} (after: {} ms)", p, millis) - handleSubscriptionTimeout(target, new SubscriptionTimeoutException(s"Publisher ($p) you are trying to subscribe to has been shut-down " + - s"because exceeding it's subscription-timeout.") with NoStackTrace) + handleSubscriptionTimeout(target, + new SubscriptionTimeoutException( + s"Publisher ($p) you are trying to subscribe to has been shut-down " + + s"because exceeding it's subscription-timeout.") with NoStackTrace) } } private def warn(target: Publisher[_], timeout: FiniteDuration): Unit = { log.warning( "Timed out {} detected (after {} ms)! 
You should investigate if you either cancel or consume all {} instances", - target, timeout.toMillis, target.getClass.getCanonicalName) + target, + timeout.toMillis, + target.getClass.getCanonicalName) } /** diff --git a/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala index 67ff8d94a2..b80593bd5f 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/SubFlowImpl.scala @@ -15,7 +15,7 @@ import language.higherKinds * INTERNAL API */ @InternalApi private[akka] object SubFlowImpl { - trait MergeBack[In, F[+_]] { + trait MergeBack[In, F[+ _]] { def apply[T](f: Flow[In, T, NotUsed], breadth: Int): F[T] } } @@ -23,11 +23,10 @@ import language.higherKinds /** * INTERNAL API */ -@InternalApi private[akka] class SubFlowImpl[In, Out, Mat, F[+_], C]( - val subFlow: Flow[In, Out, NotUsed], - mergeBackFunction: SubFlowImpl.MergeBack[In, F], - finishFunction: Sink[In, NotUsed] => C) - extends SubFlow[Out, Mat, F, C] { +@InternalApi private[akka] class SubFlowImpl[In, Out, Mat, F[+ _], C](val subFlow: Flow[In, Out, NotUsed], + mergeBackFunction: SubFlowImpl.MergeBack[In, F], + finishFunction: Sink[In, NotUsed] => C) + extends SubFlow[Out, Mat, F, C] { override def via[T, Mat2](flow: Graph[FlowShape[Out, T], Mat2]): Repr[T] = new SubFlowImpl[In, T, Mat, F, C](subFlow.via(flow), mergeBackFunction, finishFunction) diff --git a/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala b/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala index b18d40ebc7..68d0c96f6c 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/SubscriberManagement.scala @@ -154,7 +154,8 @@ private[akka] trait SubscriberManagement[T] extends ResizableMultiReaderRingBuff case head :: tail => maxRequested(tail, math.max(head.totalDemand, result)) case _ => 
result } - val desired = Math.min(Int.MaxValue, Math.min(maxRequested(subscriptions), buffer.maxAvailable) - pendingFromUpstream).toInt + val desired = + Math.min(Int.MaxValue, Math.min(maxRequested(subscriptions), buffer.maxAvailable) - pendingFromUpstream).toInt if (desired > 0) { pendingFromUpstream += desired requestFromUpstream(desired) @@ -222,10 +223,11 @@ private[akka] trait SubscriberManagement[T] extends ResizableMultiReaderRingBuff * Register a new subscriber. */ protected def registerSubscriber(subscriber: Subscriber[_ >: T]): Unit = endOfStream match { - case NotReached if subscriptions.exists(_.subscriber == subscriber) => ReactiveStreamsCompliance.rejectDuplicateSubscriber(subscriber) - case NotReached => addSubscription(subscriber) + case NotReached if subscriptions.exists(_.subscriber == subscriber) => + ReactiveStreamsCompliance.rejectDuplicateSubscriber(subscriber) + case NotReached => addSubscription(subscriber) case Completed if buffer.nonEmpty => addSubscription(subscriber) - case eos => eos(subscriber) + case eos => eos(subscriber) } private def addSubscription(subscriber: Subscriber[_ >: T]): Unit = { @@ -267,4 +269,3 @@ private[akka] trait SubscriberManagement[T] extends ResizableMultiReaderRingBuff } // else ignore, we need to be idempotent } } - diff --git a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala index 6b362cf2c8..a438ccfb27 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Throttle.scala @@ -23,13 +23,12 @@ import scala.concurrent.duration.{ FiniteDuration, _ } /** * INTERNAL API */ -@InternalApi private[akka] class Throttle[T]( - val cost: Int, - val per: FiniteDuration, - val maximumBurst: Int, - val costCalculation: (T) => Int, - val mode: ThrottleMode) - extends SimpleLinearGraphStage[T] { +@InternalApi private[akka] class Throttle[T](val cost: Int, + val per: FiniteDuration, + val 
maximumBurst: Int, + val costCalculation: (T) => Int, + val mode: ThrottleMode) + extends SimpleLinearGraphStage[T] { require(cost > 0, "cost must be > 0") require(per.toNanos > 0, "per time must be > 0") require(per.toNanos >= cost, "Rates larger than 1 unit / nanosecond are not supported") diff --git a/akka-stream/src/main/scala/akka/stream/impl/Timers.scala b/akka-stream/src/main/scala/akka/stream/impl/Timers.scala index 3c3c6a71e0..ac95c69a07 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Timers.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Timers.scala @@ -35,7 +35,8 @@ import scala.concurrent.duration.{ Duration, FiniteDuration } import scala.concurrent.duration._ if (timeout > 1.second) 1.second else { - FiniteDuration(math.min(math.max(timeout.toNanos / 8, 100.millis.toNanos), timeout.toNanos / 2), TimeUnit.NANOSECONDS) + FiniteDuration(math.min(math.max(timeout.toNanos / 8, 100.millis.toNanos), timeout.toNanos / 2), + TimeUnit.NANOSECONDS) } } @@ -213,7 +214,8 @@ import scala.concurrent.duration.{ Duration, FiniteDuration } } - final class IdleInject[I, O >: I](val timeout: FiniteDuration, val inject: () => O) extends GraphStage[FlowShape[I, O]] { + final class IdleInject[I, O >: I](val timeout: FiniteDuration, val inject: () => O) + extends GraphStage[FlowShape[I, O]] { val in: Inlet[I] = Inlet("IdleInject.in") val out: Outlet[O] = Outlet("IdleInject.out") diff --git a/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala b/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala index 104fcd552d..4475eb5f1a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Transfer.scala @@ -130,7 +130,8 @@ import akka.annotation.InternalApi /** * INTERNAL API */ -@InternalApi private[akka] case class WaitingForUpstreamSubscription(remaining: Int, andThen: TransferPhase) extends TransferState { +@InternalApi private[akka] case class WaitingForUpstreamSubscription(remaining: 
Int, andThen: TransferPhase) + extends TransferState { def isReady = false def isCompleted = false } @@ -190,8 +191,8 @@ import akka.annotation.InternalApi final def isPumpFinished: Boolean = transferState.isCompleted - protected final val completedPhase = TransferPhase(Completed) { - () => throw new IllegalStateException("The action of completed phase must be never executed") + protected final val completedPhase = TransferPhase(Completed) { () => + throw new IllegalStateException("The action of completed phase must be never executed") } // Exchange input buffer elements and output buffer "requests" until one of them becomes empty. diff --git a/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala b/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala index 4b2aef36e0..13326f3d44 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/TraversalBuilder.scala @@ -120,7 +120,8 @@ import akka.stream.impl.fusing.GraphStages.SingleSource * * See the `TraversalTestUtils` class and the `testMaterialize` method for a simple example. 
*/ -@InternalApi private[akka] final case class MaterializeAtomic(module: AtomicModule[Shape, Any], outToSlots: Array[Int]) extends Traversal { +@InternalApi private[akka] final case class MaterializeAtomic(module: AtomicModule[Shape, Any], outToSlots: Array[Int]) + extends Traversal { override def toString: String = s"MaterializeAtomic($module, ${outToSlots.mkString("[", ", ", "]")})" override def rewireFirstTo(relativeOffset: Int): Traversal = copy(outToSlots = Array(relativeOffset)) @@ -160,7 +161,8 @@ import akka.stream.impl.fusing.GraphStages.SingleSource /** * INTERNAL API */ -@InternalApi private[akka] final case class Compose(composer: AnyFunction2, reverse: Boolean = false) extends MaterializedValueOp { +@InternalApi private[akka] final case class Compose(composer: AnyFunction2, reverse: Boolean = false) + extends MaterializedValueOp { def apply(arg1: Any, arg2: Any): Any = { if (reverse) composer.asInstanceOf[(Any, Any) => Any](arg2, arg1) @@ -262,11 +264,10 @@ import akka.stream.impl.fusing.GraphStages.SingleSource Attributes.none) b } else { - AtomicTraversalBuilder( - module, - new Array[Int](module.shape.outlets.size), - module.shape.outlets.size, - Attributes.none) + AtomicTraversalBuilder(module, + new Array[Int](module.shape.outlets.size), + module.shape.outlets.size, + Attributes.none) } // important to use setAttributes because it will create island for async (dispatcher attribute) builder.setAttributes(attributes) @@ -284,16 +285,17 @@ import akka.stream.impl.fusing.GraphStages.SingleSource var nextStep: Traversal = EmptyTraversal current match { - case PushNotUsed => prindent("push NotUsed") - case Pop => prindent("pop mat") - case _: Transform => prindent("transform mat") - case Compose(_, false) => prindent("compose mat") - case Compose(_, true) => prindent("compose reversed mat") - case PushAttributes(attr) => prindent("push attr " + attr) - case PopAttributes => prindent("pop attr") - case EnterIsland(tag) => prindent("enter island " + tag) 
- case ExitIsland => prindent("exit island") - case MaterializeAtomic(mod, outToSlots) => prindent("materialize " + mod + " " + outToSlots.mkString("[", ", ", "]")) + case PushNotUsed => prindent("push NotUsed") + case Pop => prindent("pop mat") + case _: Transform => prindent("transform mat") + case Compose(_, false) => prindent("compose mat") + case Compose(_, true) => prindent("compose reversed mat") + case PushAttributes(attr) => prindent("push attr " + attr) + case PopAttributes => prindent("pop attr") + case EnterIsland(tag) => prindent("enter island " + tag) + case ExitIsland => prindent("exit island") + case MaterializeAtomic(mod, outToSlots) => + prindent("materialize " + mod + " " + outToSlots.mkString("[", ", ", "]")) case Concat(first, next) => printTraversal(first, indent + 1) nextStep = next @@ -482,21 +484,20 @@ import akka.stream.impl.fusing.GraphStages.SingleSource * Returned by [[CompositeTraversalBuilder]] once all output ports of a subgraph has been wired. * See comments in akka.stream.impl.package for more details. 
*/ -@InternalApi private[akka] final case class CompletedTraversalBuilder( - traversalSoFar: Traversal, - inSlots: Int, - inToOffset: Map[InPort, Int], - attributes: Attributes, - islandTag: OptionVal[IslandTag] = OptionVal.None) extends TraversalBuilder { +@InternalApi private[akka] final case class CompletedTraversalBuilder(traversalSoFar: Traversal, + inSlots: Int, + inToOffset: Map[InPort, Int], + attributes: Attributes, + islandTag: OptionVal[IslandTag] = OptionVal.None) + extends TraversalBuilder { override def add(submodule: TraversalBuilder, shape: Shape, combineMat: AnyFunction2): TraversalBuilder = { val key = new BuilderKey - CompositeTraversalBuilder( - reverseBuildSteps = key :: Nil, - inSlots = inSlots, - inOffsets = inToOffset, - pendingBuilders = Map(key -> this), - attributes = attributes).add(submodule, shape, combineMat) + CompositeTraversalBuilder(reverseBuildSteps = key :: Nil, + inSlots = inSlots, + inOffsets = inToOffset, + pendingBuilders = Map(key -> this), + attributes = attributes).add(submodule, shape, combineMat) } override def traversal: Traversal = { @@ -517,7 +518,8 @@ import akka.stream.impl.fusing.GraphStages.SingleSource override def isTraversalComplete: Boolean = true override def wire(out: OutPort, in: InPort): TraversalBuilder = - throw new UnsupportedOperationException(s"Cannot wire ports in a completed builder. ${out.mappedTo} ~> ${in.mappedTo}") + throw new UnsupportedOperationException( + s"Cannot wire ports in a completed builder. ${out.mappedTo} ~> ${in.mappedTo}") override def internalSetAttributes(attributes: Attributes): TraversalBuilder = copy(attributes = attributes) @@ -547,11 +549,11 @@ import akka.stream.impl.fusing.GraphStages.SingleSource * outToSlot array which will be then embedded in a [[MaterializeAtomic]] Traversal step. * See comments in akka.stream.impl.package for more details. 
*/ -@InternalApi private[akka] final case class AtomicTraversalBuilder( - module: AtomicModule[Shape, Any], - outToSlot: Array[Int], - unwiredOuts: Int, - attributes: Attributes) extends TraversalBuilder { +@InternalApi private[akka] final case class AtomicTraversalBuilder(module: AtomicModule[Shape, Any], + outToSlot: Array[Int], + unwiredOuts: Int, + attributes: Attributes) + extends TraversalBuilder { override def add(submodule: TraversalBuilder, shape: Shape, combineMat: AnyFunction2): TraversalBuilder = { // TODO: Use automatically a linear builder if applicable @@ -590,11 +592,10 @@ import akka.stream.impl.fusing.GraphStages.SingleSource else if (Shape.hasOnePort(inlets)) new Map1(inlets.head, inlets.head.id) else inlets.iterator.map(in => in.asInstanceOf[InPort] -> in.id).toMap } - CompletedTraversalBuilder( - traversalSoFar = MaterializeAtomic(module, newOutToSlot), - inSlots, - inToOffset, - attributes) + CompletedTraversalBuilder(traversalSoFar = MaterializeAtomic(module, newOutToSlot), + inSlots, + inToOffset, + attributes) } else copy(outToSlot = newOutToSlot, unwiredOuts = newUnwiredOuts) } @@ -611,22 +612,33 @@ import akka.stream.impl.fusing.GraphStages.SingleSource @InternalApi private[akka] object LinearTraversalBuilder { // TODO: Remove - private val cachedEmptyLinear = LinearTraversalBuilder(OptionVal.None, OptionVal.None, 0, 0, PushNotUsed, OptionVal.None, Attributes.none) + private val cachedEmptyLinear = + LinearTraversalBuilder(OptionVal.None, OptionVal.None, 0, 0, PushNotUsed, OptionVal.None, Attributes.none) private[this] final val wireBackward: Array[Int] = Array(-1) private[this] final val noWire: Array[Int] = Array() def empty(attributes: Attributes = Attributes.none): LinearTraversalBuilder = if (attributes eq Attributes.none) cachedEmptyLinear - else LinearTraversalBuilder(OptionVal.None, OptionVal.None, 0, 0, PushNotUsed, OptionVal.None, attributes, EmptyTraversal) + else + LinearTraversalBuilder(OptionVal.None, + OptionVal.None, + 0, 
+ 0, + PushNotUsed, + OptionVal.None, + attributes, + EmptyTraversal) /** * Create a traversal builder specialized for linear graphs. This is designed to be much faster and lightweight * than its generic counterpart. It can be freely mixed with the generic builder in both ways. */ def fromModule(module: AtomicModule[Shape, Any], attributes: Attributes): LinearTraversalBuilder = { - if (module.shape.inlets.size > 1) throw new IllegalStateException("Modules with more than one input port cannot be linear.") - if (module.shape.outlets.size > 1) throw new IllegalStateException("Modules with more than one output port cannot be linear.") + if (module.shape.inlets.size > 1) + throw new IllegalStateException("Modules with more than one input port cannot be linear.") + if (module.shape.outlets.size > 1) + throw new IllegalStateException("Modules with more than one output port cannot be linear.") TraversalBuilder.initShape(module.shape) val inPortOpt = OptionVal(module.shape.inlets.headOption.orNull) @@ -634,14 +646,13 @@ import akka.stream.impl.fusing.GraphStages.SingleSource val wiring = if (outPortOpt.isDefined) wireBackward else noWire - LinearTraversalBuilder( - inPortOpt, - outPortOpt, - inOffset = 0, - if (inPortOpt.isDefined) 1 else 0, - traversalSoFar = MaterializeAtomic(module, wiring), - pendingBuilder = OptionVal.None, - attributes) + LinearTraversalBuilder(inPortOpt, + outPortOpt, + inOffset = 0, + if (inPortOpt.isDefined) 1 else 0, + traversalSoFar = MaterializeAtomic(module, wiring), + pendingBuilder = OptionVal.None, + attributes) } def addMatCompose(t: Traversal, matCompose: AnyFunction2): Traversal = { @@ -655,10 +666,9 @@ import akka.stream.impl.fusing.GraphStages.SingleSource t.concat(Compose(matCompose, reverse = true)) } - def fromBuilder( - traversalBuilder: TraversalBuilder, - shape: Shape, - combine: AnyFunction2 = Keep.right): LinearTraversalBuilder = { + def fromBuilder(traversalBuilder: TraversalBuilder, + shape: Shape, + combine: AnyFunction2 = 
Keep.right): LinearTraversalBuilder = { traversalBuilder match { case linear: LinearTraversalBuilder => if (combine eq Keep.right) linear @@ -671,14 +681,13 @@ import akka.stream.impl.fusing.GraphStages.SingleSource case OptionVal.None => 0 } - LinearTraversalBuilder( - inPort = OptionVal(inOpt.orNull), - outPort = OptionVal.None, - inOffset = inOffs, - inSlots = completed.inSlots, - completed.traversal.concat(addMatCompose(PushNotUsed, combine)), - pendingBuilder = OptionVal.None, - Attributes.none) + LinearTraversalBuilder(inPort = OptionVal(inOpt.orNull), + outPort = OptionVal.None, + inOffset = inOffs, + inSlots = completed.inSlots, + completed.traversal.concat(addMatCompose(PushNotUsed, combine)), + pendingBuilder = OptionVal.None, + Attributes.none) case composite => val inOpt = OptionVal(shape.inlets.headOption.orNull) @@ -688,15 +697,14 @@ import akka.stream.impl.fusing.GraphStages.SingleSource case OptionVal.None => 0 } - LinearTraversalBuilder( - inPort = OptionVal(inOpt.orNull), - outPort = OptionVal.Some(out), - inOffset = inOffs, - inSlots = composite.inSlots, - addMatCompose(PushNotUsed, combine), - pendingBuilder = OptionVal.Some(composite), - Attributes.none, - beforeBuilder = EmptyTraversal) + LinearTraversalBuilder(inPort = OptionVal(inOpt.orNull), + outPort = OptionVal.Some(out), + inOffset = inOffs, + inSlots = composite.inSlots, + addMatCompose(PushNotUsed, combine), + pendingBuilder = OptionVal.Some(composite), + Attributes.none, + beforeBuilder = EmptyTraversal) } } @@ -713,21 +721,22 @@ import akka.stream.impl.fusing.GraphStages.SingleSource * -1 relative offset to something else (see rewireLastOutTo). * See comments in akka.stream.impl.package for more details. 
*/ -@InternalApi private[akka] final case class LinearTraversalBuilder( - inPort: OptionVal[InPort], - outPort: OptionVal[OutPort], - inOffset: Int, - override val inSlots: Int, - traversalSoFar: Traversal, - pendingBuilder: OptionVal[TraversalBuilder], - attributes: Attributes, - beforeBuilder: Traversal = EmptyTraversal, - islandTag: OptionVal[IslandTag] = OptionVal.None) extends TraversalBuilder { +@InternalApi private[akka] final case class LinearTraversalBuilder(inPort: OptionVal[InPort], + outPort: OptionVal[OutPort], + inOffset: Int, + override val inSlots: Int, + traversalSoFar: Traversal, + pendingBuilder: OptionVal[TraversalBuilder], + attributes: Attributes, + beforeBuilder: Traversal = EmptyTraversal, + islandTag: OptionVal[IslandTag] = OptionVal.None) + extends TraversalBuilder { protected def isEmpty: Boolean = inSlots == 0 && outPort.isEmpty override def add(submodule: TraversalBuilder, shape: Shape, combineMat: AnyFunction2): TraversalBuilder = { - throw new UnsupportedOperationException("LinearTraversal does not support free-form addition. Add it into a" + + throw new UnsupportedOperationException( + "LinearTraversal does not support free-form addition. 
Add it into a" + "composite builder instead and add the second module to that.") } @@ -778,21 +787,18 @@ import akka.stream.impl.fusing.GraphStages.SingleSource if (outPort.contains(out) && inPort.contains(in)) { pendingBuilder match { case OptionVal.Some(composite) => - copy( - inPort = OptionVal.None, - outPort = OptionVal.None, - traversalSoFar = - applyIslandAndAttributes( - beforeBuilder.concat( - composite - .assign(out, inOffset - composite.offsetOfModule(out)) - .traversal).concat(traversalSoFar)), - pendingBuilder = OptionVal.None, beforeBuilder = EmptyTraversal) + copy(inPort = OptionVal.None, + outPort = OptionVal.None, + traversalSoFar = applyIslandAndAttributes( + beforeBuilder + .concat(composite.assign(out, inOffset - composite.offsetOfModule(out)).traversal) + .concat(traversalSoFar)), + pendingBuilder = OptionVal.None, + beforeBuilder = EmptyTraversal) case OptionVal.None => - copy( - inPort = OptionVal.None, - outPort = OptionVal.None, - traversalSoFar = rewireLastOutTo(traversalSoFar, inOffset)) + copy(inPort = OptionVal.None, + outPort = OptionVal.None, + traversalSoFar = rewireLastOutTo(traversalSoFar, inOffset)) } } else throw new IllegalArgumentException(s"The ports $in and $out cannot be accessed in this builder.") @@ -821,21 +827,13 @@ import akka.stream.impl.fusing.GraphStages.SingleSource if (outPort.contains(out)) { pendingBuilder match { case OptionVal.Some(composite) => - copy( - outPort = OptionVal.None, - traversalSoFar = - applyIslandAndAttributes( - beforeBuilder.concat( - composite - .assign(out, relativeSlot) - .traversal - .concat(traversalSoFar))), - pendingBuilder = OptionVal.None, - beforeBuilder = EmptyTraversal) + copy(outPort = OptionVal.None, + traversalSoFar = applyIslandAndAttributes( + beforeBuilder.concat(composite.assign(out, relativeSlot).traversal.concat(traversalSoFar))), + pendingBuilder = OptionVal.None, + beforeBuilder = EmptyTraversal) case OptionVal.None => - copy( - outPort = OptionVal.None, - traversalSoFar 
= rewireLastOutTo(traversalSoFar, relativeSlot)) + copy(outPort = OptionVal.None, traversalSoFar = rewireLastOutTo(traversalSoFar, relativeSlot)) } } else throw new IllegalArgumentException(s"Port $out cannot be assigned in this builder") @@ -856,15 +854,15 @@ import akka.stream.impl.fusing.GraphStages.SingleSource def append(toAppend: LinearTraversalBuilder, matCompose: AnyFunction2): LinearTraversalBuilder = { if (toAppend.isEmpty) { - copy( - traversalSoFar = PushNotUsed.concat(LinearTraversalBuilder.addMatCompose(traversalSoFar, matCompose))) + copy(traversalSoFar = PushNotUsed.concat(LinearTraversalBuilder.addMatCompose(traversalSoFar, matCompose))) } else if (this.isEmpty) { toAppend.copy( traversalSoFar = toAppend.traversalSoFar.concat(LinearTraversalBuilder.addMatCompose(traversal, matCompose))) } else { if (outPort.isDefined) { if (toAppend.inPort.isEmpty) - throw new IllegalArgumentException("Appended linear module must have an unwired input port because there is a dangling output.") + throw new IllegalArgumentException( + "Appended linear module must have an unwired input port because there is a dangling output.") /* * To understand how append works, first the general structure of the LinearTraversalBuilder must be @@ -981,9 +979,7 @@ import akka.stream.impl.fusing.GraphStages.SingleSource * * (remember that this is the _reverse_ of the Flow DSL order) */ - beforeBuilder - .concat(compositeTraversal) - .concat(traversalSoFar) + beforeBuilder.concat(compositeTraversal).concat(traversalSoFar) } /* @@ -1004,18 +1000,19 @@ import akka.stream.impl.fusing.GraphStages.SingleSource * This is the simple case, when the other is purely linear. We just concatenate the traversals * and do some bookkeeping. 
*/ - LinearTraversalBuilder( - inPort = inPort, - outPort = toAppend.outPort, - inSlots = inSlots + toAppend.inSlots, // we have now more input ports than before - // the inOffset of _this_ gets shifted by toAppend.inSlots, because the traversal of toAppend is _prepended_ - inOffset = inOffset + toAppend.inSlots, - // Build in reverse so it yields a more efficient layout for left-to-right building - traversalSoFar = toAppend.applyIslandAndAttributes(toAppend.traversalSoFar).concat(finalTraversalForThis), - pendingBuilder = OptionVal.None, - attributes = Attributes.none, // attributes are none for the new enclosing builder - beforeBuilder = EmptyTraversal, // no need for beforeBuilder as there are no composites - islandTag = OptionVal.None // islandTag is reset for the new enclosing builder + LinearTraversalBuilder(inPort = inPort, + outPort = toAppend.outPort, + inSlots = inSlots + toAppend.inSlots, // we have now more input ports than before + // the inOffset of _this_ gets shifted by toAppend.inSlots, because the traversal of toAppend is _prepended_ + inOffset = inOffset + toAppend.inSlots, + // Build in reverse so it yields a more efficient layout for left-to-right building + traversalSoFar = toAppend + .applyIslandAndAttributes(toAppend.traversalSoFar) + .concat(finalTraversalForThis), + pendingBuilder = OptionVal.None, + attributes = Attributes.none, // attributes are none for the new enclosing builder + beforeBuilder = EmptyTraversal, // no need for beforeBuilder as there are no composites + islandTag = OptionVal.None // islandTag is reset for the new enclosing builder ) case OptionVal.Some(_) => @@ -1032,7 +1029,7 @@ import akka.stream.impl.fusing.GraphStages.SingleSource // First prepare island enter and exit if tags are present toAppend.islandTag match { - case OptionVal.None => // Nothing changes + case OptionVal.None => // Nothing changes case OptionVal.Some(tag) => // Enter the island just before the appended builder (keeping the toAppend.beforeBuilder 
steps) newBeforeTraversal = EnterIsland(tag).concat(newBeforeTraversal) @@ -1056,20 +1053,19 @@ import akka.stream.impl.fusing.GraphStages.SingleSource // Finally add the already completed part of toAppend to newTraversalSoFar newTraversalSoFar = toAppend.traversalSoFar.concat(newTraversalSoFar) - LinearTraversalBuilder( - inPort = inPort, - outPort = toAppend.outPort, - inSlots = inSlots + toAppend.inSlots, // we have now more input ports than before - // the inOffset of _this_ gets shifted by toAppend.inSlots, because the traversal of toAppend is _prepended_ - inOffset = inOffset + toAppend.inSlots, - // Build in reverse so it yields a more efficient layout for left-to-right building. We cannot - // apply the full traversal, only the completed part of it - traversalSoFar = newTraversalSoFar, - // Last composite of toAppend is still pending - pendingBuilder = toAppend.pendingBuilder, - attributes = Attributes.none, // attributes are none for the new enclosing builder - beforeBuilder = newBeforeTraversal, // no need for beforeBuilder as there are no composites - islandTag = OptionVal.None // islandTag is reset for the new enclosing builder + LinearTraversalBuilder(inPort = inPort, + outPort = toAppend.outPort, + inSlots = inSlots + toAppend.inSlots, // we have now more input ports than before + // the inOffset of _this_ gets shifted by toAppend.inSlots, because the traversal of toAppend is _prepended_ + inOffset = inOffset + toAppend.inSlots, + // Build in reverse so it yields a more efficient layout for left-to-right building. 
We cannot + // apply the full traversal, only the completed part of it + traversalSoFar = newTraversalSoFar, + // Last composite of toAppend is still pending + pendingBuilder = toAppend.pendingBuilder, + attributes = Attributes.none, // attributes are none for the new enclosing builder + beforeBuilder = newBeforeTraversal, // no need for beforeBuilder as there are no composites + islandTag = OptionVal.None // islandTag is reset for the new enclosing builder ) } } else throw new Exception("should this happen?") @@ -1088,8 +1084,9 @@ import akka.stream.impl.fusing.GraphStages.SingleSource */ override def makeIsland(islandTag: IslandTag): LinearTraversalBuilder = this.islandTag match { - case OptionVal.Some(_) => this // Wrapping with an island, then immediately re-wrapping makes the second island empty, so can be omitted - case OptionVal.None => copy(islandTag = OptionVal.Some(islandTag)) + case OptionVal.Some(_) => + this // Wrapping with an island, then immediately re-wrapping makes the second island empty, so can be omitted + case OptionVal.None => copy(islandTag = OptionVal.Some(islandTag)) } } @@ -1141,16 +1138,17 @@ import akka.stream.impl.fusing.GraphStages.SingleSource * @param unwiredOuts Number of output ports that have not yet been wired/assigned */ @InternalApi private[akka] final case class CompositeTraversalBuilder( - finalSteps: Traversal = EmptyTraversal, - reverseBuildSteps: List[TraversalBuildStep] = AppendTraversal(PushNotUsed) :: Nil, - inSlots: Int = 0, - inOffsets: Map[InPort, Int] = Map.empty, - inBaseOffsetForOut: Map[OutPort, Int] = Map.empty, - pendingBuilders: Map[BuilderKey, TraversalBuilder] = Map.empty, - outOwners: Map[OutPort, BuilderKey] = Map.empty, - unwiredOuts: Int = 0, - attributes: Attributes, - islandTag: OptionVal[IslandTag] = OptionVal.None) extends TraversalBuilder { + finalSteps: Traversal = EmptyTraversal, + reverseBuildSteps: List[TraversalBuildStep] = AppendTraversal(PushNotUsed) :: Nil, + inSlots: Int = 0, + inOffsets: 
Map[InPort, Int] = Map.empty, + inBaseOffsetForOut: Map[OutPort, Int] = Map.empty, + pendingBuilders: Map[BuilderKey, TraversalBuilder] = Map.empty, + outOwners: Map[OutPort, BuilderKey] = Map.empty, + unwiredOuts: Int = 0, + attributes: Attributes, + islandTag: OptionVal[IslandTag] = OptionVal.None) + extends TraversalBuilder { override def toString: String = s""" @@ -1200,11 +1198,7 @@ import akka.stream.impl.fusing.GraphStages.SingleSource // The CompleteTraversalBuilder only keeps the minimum amount of necessary information that is needed for it // to be embedded in a larger graph, making partial graph reuse much more efficient. - CompletedTraversalBuilder( - traversalSoFar = finalTraversal, - inSlots, - inOffsets, - attributes) + CompletedTraversalBuilder(traversalSoFar = finalTraversal, inSlots, inOffsets, attributes) } else this } @@ -1224,19 +1218,17 @@ import akka.stream.impl.fusing.GraphStages.SingleSource // final traversal (remember, input ports are assigned in traversal order of modules, and the inOffsets // and inBaseOffseForOut Maps are updated when adding a module; we must respect addition order). 
- copy( - inBaseOffsetForOut = inBaseOffsetForOut - out, - outOwners = outOwners - out, - // TODO Optimize Map access - pendingBuilders = pendingBuilders.updated(builderKey, result), - // pendingBuilders = pendingBuilders - builderKey, - unwiredOuts = unwiredOuts - 1) + copy(inBaseOffsetForOut = inBaseOffsetForOut - out, + outOwners = outOwners - out, + // TODO Optimize Map access + pendingBuilders = pendingBuilders.updated(builderKey, result), + // pendingBuilders = pendingBuilders - builderKey, + unwiredOuts = unwiredOuts - 1) } else { // Update structures with result - copy( - inBaseOffsetForOut = inBaseOffsetForOut - out, - unwiredOuts = unwiredOuts - 1, - pendingBuilders = pendingBuilders.updated(builderKey, result)) + copy(inBaseOffsetForOut = inBaseOffsetForOut - out, + unwiredOuts = unwiredOuts - 1, + pendingBuilders = pendingBuilders.updated(builderKey, result)) } // If we have no more unconnected outputs, we can finally build the Traversal and shed most of the auxiliary data. @@ -1250,22 +1242,22 @@ import akka.stream.impl.fusing.GraphStages.SingleSource val newBuildSteps = if (combineMat == Keep.left) { AppendTraversal(Pop) :: - builderKey :: - reverseBuildSteps + builderKey :: + reverseBuildSteps } else if (combineMat == Keep.right) { builderKey :: - AppendTraversal(Pop) :: - reverseBuildSteps + AppendTraversal(Pop) :: + reverseBuildSteps } else if (combineMat == Keep.none) { AppendTraversal(PushNotUsed) :: - AppendTraversal(Pop) :: - AppendTraversal(Pop) :: - builderKey :: - reverseBuildSteps + AppendTraversal(Pop) :: + AppendTraversal(Pop) :: + builderKey :: + reverseBuildSteps } else { AppendTraversal(Compose(combineMat)) :: - builderKey :: - reverseBuildSteps + builderKey :: + reverseBuildSteps } val added = if (submodule.isTraversalComplete) { @@ -1282,11 +1274,10 @@ import akka.stream.impl.fusing.GraphStages.SingleSource newInOffsets = newInOffsets.updated(in, inSlots + submodule.offsetOf(in.mappedTo)) } - copy( - reverseBuildSteps = 
newBuildSteps, - inSlots = inSlots + submodule.inSlots, - pendingBuilders = pendingBuilders.updated(builderKey, submodule), - inOffsets = newInOffsets) + copy(reverseBuildSteps = newBuildSteps, + inSlots = inSlots + submodule.inSlots, + pendingBuilders = pendingBuilders.updated(builderKey, submodule), + inOffsets = newInOffsets) } else { // Added module have unwired outputs. @@ -1313,14 +1304,13 @@ import akka.stream.impl.fusing.GraphStages.SingleSource newOutOwners = newOutOwners.updated(out, builderKey) } - copy( - reverseBuildSteps = newBuildSteps, - inSlots = inSlots + submodule.inSlots, - inOffsets = newInOffsets, - inBaseOffsetForOut = newBaseOffsetsForOut, - outOwners = newOutOwners, - pendingBuilders = pendingBuilders.updated(builderKey, submodule), - unwiredOuts = unwiredOuts + submodule.unwiredOuts) + copy(reverseBuildSteps = newBuildSteps, + inSlots = inSlots + submodule.inSlots, + inOffsets = newInOffsets, + inBaseOffsetForOut = newBaseOffsetsForOut, + outOwners = newOutOwners, + pendingBuilders = pendingBuilders.updated(builderKey, submodule), + unwiredOuts = unwiredOuts + submodule.unwiredOuts) } added.completeIfPossible @@ -1337,7 +1327,8 @@ import akka.stream.impl.fusing.GraphStages.SingleSource override def makeIsland(islandTag: IslandTag): TraversalBuilder = { this.islandTag match { case OptionVal.None => copy(islandTag = OptionVal(islandTag)) - case _ => this // Wrapping with an island, then immediately re-wrapping makes the second island empty, so can be omitted + case _ => + this // Wrapping with an island, then immediately re-wrapping makes the second island empty, so can be omitted } } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/Unfold.scala b/akka-stream/src/main/scala/akka/stream/impl/Unfold.scala index b215fb9dfd..544e724a1b 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/Unfold.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/Unfold.scala @@ -38,7 +38,8 @@ import scala.util.{ Failure, Success, Try } /** * 
INTERNAL API */ -@InternalApi private[akka] final class UnfoldAsync[S, E](s: S, f: S => Future[Option[(S, E)]]) extends GraphStage[SourceShape[E]] { +@InternalApi private[akka] final class UnfoldAsync[S, E](s: S, f: S => Future[Option[(S, E)]]) + extends GraphStage[SourceShape[E]] { val out: Outlet[E] = Outlet("UnfoldAsync.out") override val shape: SourceShape[E] = SourceShape(out) override def initialAttributes: Attributes = DefaultAttributes.unfoldAsync @@ -58,8 +59,7 @@ import scala.util.{ Failure, Success, Try } asyncHandler = ac.invoke } - def onPull(): Unit = f(state).onComplete(asyncHandler)( - akka.dispatch.ExecutionContexts.sameThreadExecutionContext) + def onPull(): Unit = f(state).onComplete(asyncHandler)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext) setHandler(out, this) } diff --git a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala index e1551c849a..6c2f2958d7 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSource.scala @@ -16,10 +16,10 @@ import scala.util.control.NonFatal /** * INTERNAL API */ -@InternalApi private[akka] final class UnfoldResourceSource[T, S]( - create: () => S, - readData: (S) => Option[T], - close: (S) => Unit) extends GraphStage[SourceShape[T]] { +@InternalApi private[akka] final class UnfoldResourceSource[T, S](create: () => S, + readData: (S) => Option[T], + close: (S) => Unit) + extends GraphStage[SourceShape[T]] { val out = Outlet[T]("UnfoldResourceSource.out") override val shape = SourceShape(out) override def initialAttributes: Attributes = DefaultAttributes.unfoldResourceSource diff --git a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala index 94c8601503..8411999d6a 100644 --- 
a/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/UnfoldResourceSourceAsync.scala @@ -19,10 +19,10 @@ import scala.util.control.NonFatal /** * INTERNAL API */ -@InternalApi private[akka] final class UnfoldResourceSourceAsync[T, S]( - create: () => Future[S], - readData: (S) => Future[Option[T]], - close: (S) => Future[Done]) extends GraphStage[SourceShape[T]] { +@InternalApi private[akka] final class UnfoldResourceSourceAsync[T, S](create: () => Future[S], + readData: (S) => Future[Option[T]], + close: (S) => Future[Done]) + extends GraphStage[SourceShape[T]] { val out = Outlet[T]("UnfoldResourceSourceAsync.out") override val shape = SourceShape(out) override def initialAttributes: Attributes = DefaultAttributes.unfoldResourceSourceAsync @@ -40,32 +40,34 @@ import scala.util.control.NonFatal }.invokeWithFeedback _ private val errorHandler: PartialFunction[Throwable, Unit] = { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => - failStage(ex) - case Supervision.Restart => restartResource() - case Supervision.Resume => onPull() - } + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => + failStage(ex) + case Supervision.Restart => restartResource() + case Supervision.Resume => onPull() + } } private val readCallback = getAsyncCallback[Try[Option[T]]] { - case Success(data) => data match { - case Some(d) => push(out, d) - case None => - // end of resource reached, lets close it - state match { - case Some(resource) => - close(resource).onComplete(getAsyncCallback[Try[Done]] { - case Success(Done) => completeStage() - case Failure(ex) => failStage(ex) - }.invoke) - state = None + case Success(data) => + data match { + case Some(d) => push(out, d) + case None => + // end of resource reached, lets close it + state match { + case Some(resource) => + close(resource).onComplete(getAsyncCallback[Try[Done]] { + case Success(Done) => completeStage() + case 
Failure(ex) => failStage(ex) + }.invoke) + state = None - case None => - // cannot happen, but for good measure - throw new IllegalStateException("Reached end of data but there is no open resource") - } - } + case None => + // cannot happen, but for good measure + throw new IllegalStateException("Reached end of data but there is no open resource") + } + } case Failure(t) => errorHandler(t) }.invoke _ diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala index 557cf8b9e0..62078d4c9a 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/ActorGraphInterpreter.scala @@ -55,11 +55,12 @@ import scala.util.control.NonFatal def props(shell: GraphInterpreterShell): Props = Props(new ActorGraphInterpreter(shell)).withDeploy(Deploy.local) - class BatchingActorInputBoundary( - size: Int, - shell: GraphInterpreterShell, - publisher: Publisher[Any], - internalPortName: String) extends UpstreamBoundaryStageLogic[Any] with OutHandler { + class BatchingActorInputBoundary(size: Int, + shell: GraphInterpreterShell, + publisher: Publisher[Any], + internalPortName: String) + extends UpstreamBoundaryStageLogic[Any] + with OutHandler { final case class OnError(shell: GraphInterpreterShell, cause: Throwable) extends SimpleBoundaryEvent { override def execute(): Unit = { @@ -116,27 +117,26 @@ import scala.util.control.NonFatal def setActor(actor: ActorRef): Unit = this.actor = actor override def preStart(): Unit = { - publisher.subscribe( - new Subscriber[Any] { - override def onError(t: Throwable): Unit = { - ReactiveStreamsCompliance.requireNonNullException(t) - actor ! OnError(shell, t) - } + publisher.subscribe(new Subscriber[Any] { + override def onError(t: Throwable): Unit = { + ReactiveStreamsCompliance.requireNonNullException(t) + actor ! 
OnError(shell, t) + } - override def onSubscribe(s: Subscription): Unit = { - ReactiveStreamsCompliance.requireNonNullSubscription(s) - actor ! OnSubscribe(shell, s) - } + override def onSubscribe(s: Subscription): Unit = { + ReactiveStreamsCompliance.requireNonNullSubscription(s) + actor ! OnSubscribe(shell, s) + } - override def onComplete(): Unit = { - actor ! OnComplete(shell) - } + override def onComplete(): Unit = { + actor ! OnComplete(shell) + } - override def onNext(t: Any): Unit = { - ReactiveStreamsCompliance.requireNonNullElement(t) - actor ! OnNext(shell, t) - } - }) + override def onNext(t: Any): Unit = { + ReactiveStreamsCompliance.requireNonNullElement(t) + actor ! OnNext(shell, t) + } + }) } private def dequeue(): Any = { @@ -240,7 +240,8 @@ import scala.util.control.NonFatal case s: SpecViolation => shell.tryAbort(s) } - override def toString: String = s"BatchingActorInputBoundary(forPort=$internalPortName, fill=$inputBufferElements/$size, completed=$upstreamCompleted, canceled=$downstreamCanceled)" + override def toString: String = + s"BatchingActorInputBoundary(forPort=$internalPortName, fill=$inputBufferElements/$size, completed=$upstreamCompleted, canceled=$downstreamCanceled)" } final case class SubscribePending(boundary: ActorOutputBoundary) extends SimpleBoundaryEvent { @@ -253,7 +254,8 @@ import scala.util.control.NonFatal final case class RequestMore(boundary: ActorOutputBoundary, demand: Long) extends SimpleBoundaryEvent { override def execute(): Unit = { - if (GraphInterpreter.Debug) println(s"${boundary.shell.interpreter.Name} request $demand port=${boundary.internalPortName}") + if (GraphInterpreter.Debug) + println(s"${boundary.shell.interpreter.Name} request $demand port=${boundary.internalPortName}") boundary.requestMore(demand) } override def shell: GraphInterpreterShell = boundary.shell @@ -261,7 +263,8 @@ import scala.util.control.NonFatal } final case class Cancel(boundary: ActorOutputBoundary) extends SimpleBoundaryEvent { 
override def execute(): Unit = { - if (GraphInterpreter.Debug) println(s"${boundary.shell.interpreter.Name} cancel port=${boundary.internalPortName}") + if (GraphInterpreter.Debug) + println(s"${boundary.shell.interpreter.Name} cancel port=${boundary.internalPortName}") boundary.cancel() } @@ -269,7 +272,8 @@ import scala.util.control.NonFatal override def logic: GraphStageLogic = boundary } - private[stream] class OutputBoundaryPublisher(boundary: ActorOutputBoundary, internalPortName: String) extends Publisher[Any] { + private[stream] class OutputBoundaryPublisher(boundary: ActorOutputBoundary, internalPortName: String) + extends Publisher[Any] { import ReactiveStreamsCompliance._ // The subscriber of an subscription attempt is first placed in this list of pending subscribers. @@ -308,7 +312,7 @@ import scala.util.control.NonFatal shutdownReason = OptionVal(reason.orNull) pendingSubscribers.getAndSet(null) match { case null => // already called earlier - case pending => pending foreach reportSubscribeFailure + case pending => pending.foreach(reportSubscribeFailure) } } @@ -331,7 +335,8 @@ import scala.util.control.NonFatal } private[stream] class ActorOutputBoundary(val shell: GraphInterpreterShell, val internalPortName: String) - extends DownstreamBoundaryStageLogic[Any] with InHandler { + extends DownstreamBoundaryStageLogic[Any] + with InHandler { val in: Inlet[Any] = Inlet[Any]("UpstreamBoundary:" + internalPortName) in.id = 0 @@ -398,7 +403,7 @@ import scala.util.control.NonFatal } def subscribePending(): Unit = - publisher.takePendingSubscribers() foreach { sub => + publisher.takePendingSubscribers().foreach { sub => if (subscriber eq null) { subscriber = sub val subscription = new Subscription { @@ -433,7 +438,8 @@ import scala.util.control.NonFatal cancel(in) } - override def toString: String = s"ActorOutputBoundary(port=$internalPortName, demand=$downstreamDemand, finished=$downstreamCompleted)" + override def toString: String = + 
s"ActorOutputBoundary(port=$internalPortName, demand=$downstreamDemand, finished=$downstreamCompleted)" } } @@ -441,12 +447,11 @@ import scala.util.control.NonFatal /** * INTERNAL API */ -@InternalApi private[akka] final class GraphInterpreterShell( - var connections: Array[Connection], - var logics: Array[GraphStageLogic], - settings: ActorMaterializerSettings, - attributes: Attributes, - val mat: ExtendedActorMaterializer) { +@InternalApi private[akka] final class GraphInterpreterShell(var connections: Array[Connection], + var logics: Array[GraphStageLogic], + settings: ActorMaterializerSettings, + attributes: Attributes, + val mat: ExtendedActorMaterializer) { import ActorGraphInterpreter._ @@ -457,12 +462,12 @@ import scala.util.control.NonFatal * @param promise Will be completed upon processing the event, or failed if processing the event throws * if the event isn't ever processed the promise (the operator stops) is failed elsewhere */ - final case class AsyncInput( - shell: GraphInterpreterShell, - logic: GraphStageLogic, - evt: Any, - promise: Promise[Done], - handler: (Any) => Unit) extends BoundaryEvent { + final case class AsyncInput(shell: GraphInterpreterShell, + logic: GraphStageLogic, + evt: Any, + promise: Promise[Done], + handler: (Any) => Unit) + extends BoundaryEvent { override def execute(eventLimit: Int): Int = { if (!waitingForShutdown) { interpreter.runAsyncInput(logic, evt, promise, handler) @@ -488,7 +493,8 @@ import scala.util.control.NonFatal override def execute(eventLimit: Int): Int = { if (waitingForShutdown) { subscribesPending = 0 - tryAbort(new TimeoutException("Streaming actor has been already stopped processing (normally), but not all of its " + + tryAbort( + new TimeoutException("Streaming actor has been already stopped processing (normally), but not all of its " + s"inputs or outputs have been subscribed in [${settings.subscriptionTimeoutSettings.timeout}}]. 
Aborting actor now.")) } 0 @@ -497,8 +503,8 @@ import scala.util.control.NonFatal private var enqueueToShortCircuit: (Any) => Unit = _ - lazy val interpreter: GraphInterpreter = new GraphInterpreter(mat, log, logics, connections, - (logic, event, promise, handler) => { + lazy val interpreter: GraphInterpreter = + new GraphInterpreter(mat, log, logics, connections, (logic, event, promise, handler) => { val asyncInput = AsyncInput(this, logic, event, promise, handler) val currentInterpreter = GraphInterpreter.currentInterpreterOrNull if (currentInterpreter == null || (currentInterpreter.context ne self)) @@ -533,7 +539,10 @@ import scala.util.control.NonFatal private var resumeScheduled = false def isInitialized: Boolean = self != null - def init(self: ActorRef, subMat: SubFusingActorMaterializerImpl, enqueueToShortCircuit: (Any) => Unit, eventLimit: Int): Int = { + def init(self: ActorRef, + subMat: SubFusingActorMaterializerImpl, + enqueueToShortCircuit: (Any) => Unit, + eventLimit: Int): Int = { this.self = self this.enqueueToShortCircuit = enqueueToShortCircuit var i = 0 @@ -637,12 +646,10 @@ import scala.util.control.NonFatal def toSnapshot: InterpreterSnapshot = { if (!isInitialized) - UninitializedInterpreterImpl( - logics.zipWithIndex.map { - case (logic, idx) => - LogicSnapshotImpl(idx, logic.originalStage.getOrElse(logic).toString, logic.attributes) - }.toVector - ) + UninitializedInterpreterImpl(logics.zipWithIndex.map { + case (logic, idx) => + LogicSnapshotImpl(idx, logic.originalStage.getOrElse(logic).toString, logic.attributes) + }.toVector) else interpreter.toSnapshot } } @@ -650,7 +657,9 @@ import scala.util.control.NonFatal /** * INTERNAL API */ -@InternalApi private[akka] final class ActorGraphInterpreter(_initial: GraphInterpreterShell) extends Actor with ActorLogging { +@InternalApi private[akka] final class ActorGraphInterpreter(_initial: GraphInterpreterShell) + extends Actor + with ActorLogging { import ActorGraphInterpreter._ var 
activeInterpreters = Set.empty[GraphInterpreterShell] @@ -660,7 +669,8 @@ import scala.util.control.NonFatal def tryInit(shell: GraphInterpreterShell): Boolean = try { currentLimit = shell.init(self, subFusingMaterializerImpl, enqueueToShortCircuit, currentLimit) - if (GraphInterpreter.Debug) println(s"registering new shell in ${_initial}\n ${shell.toString.replace("\n", "\n ")}") + if (GraphInterpreter.Debug) + println(s"registering new shell in ${_initial}\n ${shell.toString.replace("\n", "\n ")}") if (shell.isTerminated) false else { activeInterpreters += shell @@ -713,11 +723,11 @@ import scala.util.control.NonFatal } private def shortCircuitBatch(): Unit = { - while (!shortCircuitBuffer.isEmpty && currentLimit > 0 && activeInterpreters.nonEmpty) - shortCircuitBuffer.poll() match { - case b: BoundaryEvent => processEvent(b) - case Resume => finishShellRegistration() - } + while (!shortCircuitBuffer.isEmpty && currentLimit > 0 && activeInterpreters.nonEmpty) shortCircuitBuffer + .poll() match { + case b: BoundaryEvent => processEvent(b) + case Resume => finishShellRegistration() + } if (!shortCircuitBuffer.isEmpty && currentLimit == 0) self ! Resume } @@ -748,11 +758,11 @@ import scala.util.control.NonFatal if (shortCircuitBuffer != null) shortCircuitBatch() case Snapshot => - sender() ! StreamSnapshotImpl( - self.path, - activeInterpreters.map(shell => shell.toSnapshot.asInstanceOf[RunningInterpreter]).toSeq, - newShells.map(shell => shell.toSnapshot.asInstanceOf[UninitializedInterpreter]) - ) + sender() ! 
StreamSnapshotImpl(self.path, + activeInterpreters + .map(shell => shell.toSnapshot.asInstanceOf[RunningInterpreter]) + .toSeq, + newShells.map(shell => shell.toSnapshot.asInstanceOf[UninitializedInterpreter])) } override def postStop(): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala index 6abef26342..d819777bbb 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphInterpreter.scala @@ -24,6 +24,7 @@ import akka.stream.snapshot._ * (See the class for the documentation of the internals) */ @InternalApi private[akka] object GraphInterpreter { + /** * Compile time constant, enable it for debug logging to the console. */ @@ -77,12 +78,11 @@ import akka.stream.snapshot._ * @param inHandler The handler that contains the callback for input events. * @param outHandler The handler that contains the callback for output events. */ - final class Connection( - var id: Int, - var inOwner: GraphStageLogic, - var outOwner: GraphStageLogic, - var inHandler: InHandler, - var outHandler: OutHandler) { + final class Connection(var id: Int, + var inOwner: GraphStageLogic, + var outOwner: GraphStageLogic, + var inHandler: InHandler, + var outHandler: OutHandler) { var portState: Int = InReady var slot: Any = Empty } @@ -188,13 +188,13 @@ import akka.stream.snapshot._ * edge of a balance is pulled, dissolving the original cycle). 
*/ @InternalApi private[akka] final class GraphInterpreter( - val materializer: Materializer, - val log: LoggingAdapter, - val logics: Array[GraphStageLogic], // Array of stage logics - val connections: Array[GraphInterpreter.Connection], - val onAsyncInput: (GraphStageLogic, Any, Promise[Done], (Any) => Unit) => Unit, - val fuzzingMode: Boolean, - val context: ActorRef) { + val materializer: Materializer, + val log: LoggingAdapter, + val logics: Array[GraphStageLogic], // Array of stage logics + val connections: Array[GraphInterpreter.Connection], + val onAsyncInput: (GraphStageLogic, Any, Promise[Done], (Any) => Unit) => Unit, + val fuzzingMode: Boolean, + val context: ActorRef) { import GraphInterpreter._ @@ -331,7 +331,8 @@ import akka.stream.snapshot._ * true. */ def execute(eventLimit: Int): Int = { - if (Debug) println(s"$Name ---------------- EXECUTE $queueStatus (running=$runningStages, shutdown=$shutdownCounters)") + if (Debug) + println(s"$Name ---------------- EXECUTE $queueStatus (running=$runningStages, shutdown=$shutdownCounters)") val currentInterpreterHolder = _currentInterpreter.get() val previousInterpreter = currentInterpreterHolder(0) currentInterpreterHolder(0) = this @@ -484,7 +485,9 @@ import akka.stream.snapshot._ // CANCEL } else if ((code & (OutClosed | InClosed)) == InClosed) { activeStage = connection.outOwner - if (Debug) println(s"$Name CANCEL ${inOwnerName(connection)} -> ${outOwnerName(connection)} (${connection.outHandler}) [${outLogicName(connection)}]") + if (Debug) + println( + s"$Name CANCEL ${inOwnerName(connection)} -> ${outOwnerName(connection)} (${connection.outHandler}) [${outLogicName(connection)}]") connection.portState |= OutClosed completeConnection(connection.outOwner.stageId) connection.outHandler.onDownstreamFinish() @@ -493,7 +496,9 @@ import akka.stream.snapshot._ if ((code & Pushing) == 0) { // Normal completion (no push pending) - if (Debug) println(s"$Name COMPLETE ${outOwnerName(connection)} -> 
${inOwnerName(connection)} (${connection.inHandler}) [${inLogicName(connection)}]") + if (Debug) + println( + s"$Name COMPLETE ${outOwnerName(connection)} -> ${inOwnerName(connection)} (${connection.inHandler}) [${inLogicName(connection)}]") connection.portState |= InClosed activeStage = connection.inOwner completeConnection(connection.inOwner.stageId) @@ -509,14 +514,18 @@ import akka.stream.snapshot._ } private def processPush(connection: Connection): Unit = { - if (Debug) println(s"$Name PUSH ${outOwnerName(connection)} -> ${inOwnerName(connection)}, ${connection.slot} (${connection.inHandler}) [${inLogicName(connection)}]") + if (Debug) + println( + s"$Name PUSH ${outOwnerName(connection)} -> ${inOwnerName(connection)}, ${connection.slot} (${connection.inHandler}) [${inLogicName(connection)}]") activeStage = connection.inOwner connection.portState ^= PushEndFlip connection.inHandler.onPush() } private def processPull(connection: Connection): Unit = { - if (Debug) println(s"$Name PULL ${inOwnerName(connection)} -> ${outOwnerName(connection)} (${connection.outHandler}) [${outLogicName(connection)}]") + if (Debug) + println( + s"$Name PULL ${inOwnerName(connection)} -> ${outOwnerName(connection)} (${connection.outHandler}) [${outLogicName(connection)}]") activeStage = connection.outOwner connection.portState ^= PullEndFlip connection.outHandler.onPull() @@ -537,7 +546,9 @@ import akka.stream.snapshot._ } def enqueue(connection: Connection): Unit = { - if (Debug) if (queueTail - queueHead > mask) new Exception(s"$Name internal queue full ($queueStatus) + $connection").printStackTrace() + if (Debug) + if (queueTail - queueHead > mask) + new Exception(s"$Name internal queue full ($queueStatus) + $connection").printStackTrace() eventQueue(queueTail & mask) = connection queueTail += 1 } @@ -650,26 +661,23 @@ import akka.stream.snapshot._ LogicSnapshotImpl(idx, label, logic.attributes) } val logicIndexes = logics.zipWithIndex.map { case (stage, idx) => stage -> idx 
}.toMap - val connectionSnapshots = connections.filter(_ != null) - .map { connection => - ConnectionSnapshotImpl( - connection.id, - logicSnapshots(logicIndexes(connection.inOwner)), - logicSnapshots(logicIndexes(connection.outOwner)), - connection.portState match { - case InReady => ConnectionSnapshot.ShouldPull - case OutReady => ConnectionSnapshot.ShouldPush - case x if (x | InClosed | OutClosed) == (InClosed | OutClosed) => ConnectionSnapshot.Closed - } - ) - } + val connectionSnapshots = connections.filter(_ != null).map { connection => + ConnectionSnapshotImpl(connection.id, + logicSnapshots(logicIndexes(connection.inOwner)), + logicSnapshots(logicIndexes(connection.outOwner)), + connection.portState match { + case InReady => ConnectionSnapshot.ShouldPull + case OutReady => ConnectionSnapshot.ShouldPush + case x if (x | InClosed | OutClosed) == (InClosed | OutClosed) => + ConnectionSnapshot.Closed + }) + } - RunningInterpreterImpl( - logicSnapshots.toVector, - connectionSnapshots.toVector, - queueStatus, - runningStages, - shutdownCounter.toList.map(n => logicSnapshots(n))) + RunningInterpreterImpl(logicSnapshots.toVector, + connectionSnapshots.toVector, + queueStatus, + runningStages, + shutdownCounter.toList.map(n => logicSnapshots(n))) } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala index b749dfcdcf..f7f6361cb8 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/GraphStages.scala @@ -29,9 +29,10 @@ import scala.concurrent.{ Future, Promise } */ // TODO: Fix variance issues @InternalApi private[akka] final case class GraphStageModule[+S <: Shape @uncheckedVariance, +M]( - shape: S, - attributes: Attributes, - stage: GraphStageWithMaterializedValue[S, M]) extends AtomicModule[S, M] { + shape: S, + attributes: Attributes, + stage: GraphStageWithMaterializedValue[S, 
M]) + extends AtomicModule[S, M] { override def withAttributes(attributes: Attributes): AtomicModule[S, M] = if (attributes ne this.attributes) new GraphStageModule(shape, attributes, stage) @@ -59,13 +60,14 @@ import scala.concurrent.{ Future, Promise } private object Identity extends SimpleLinearGraphStage[Any] { override def initialAttributes = DefaultAttributes.identityOp - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - def onPush(): Unit = push(out, grab(in)) - def onPull(): Unit = pull(in) + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + def onPush(): Unit = push(out, grab(in)) + def onPull(): Unit = pull(in) - setHandler(in, this) - setHandler(out, this) - } + setHandler(in, this) + setHandler(out, this) + } override def toString = "Identity" } @@ -78,32 +80,33 @@ import scala.concurrent.{ Future, Promise } @InternalApi private[akka] final class Detacher[T] extends SimpleLinearGraphStage[T] { override def initialAttributes = DefaultAttributes.detacher - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { - def onPush(): Unit = { - if (isAvailable(out)) { - push(out, grab(in)) - tryPull(in) + def onPush(): Unit = { + if (isAvailable(out)) { + push(out, grab(in)) + tryPull(in) + } } - } - override def onUpstreamFinish(): Unit = { - if (!isAvailable(in)) completeStage() - } - - def onPull(): Unit = { - if (isAvailable(in)) { - push(out, grab(in)) - if (isClosed(in)) completeStage() - else pull(in) + override def onUpstreamFinish(): Unit = { + if (!isAvailable(in)) completeStage() } + + def onPull(): Unit = { + if (isAvailable(in)) { + push(out, grab(in)) + 
if (isClosed(in)) completeStage() + else pull(in) + } + } + + setHandlers(in, out, this) + + override def preStart(): Unit = tryPull(in) } - setHandlers(in, out, this) - - override def preStart(): Unit = tryPull(in) - } - override def toString = "Detacher" } @@ -216,7 +219,7 @@ import scala.concurrent.{ Future, Promise } new MonitorFlow[T] final class TickSource[T](val initialDelay: FiniteDuration, val interval: FiniteDuration, val tick: T) - extends GraphStageWithMaterializedValue[SourceShape[T], Cancellable] { + extends GraphStageWithMaterializedValue[SourceShape[T], Cancellable] { override val shape = SourceShape(Outlet[T]("TickSource.out")) val out = shape.out override def initialAttributes: Attributes = DefaultAttributes.tickSource @@ -273,9 +276,8 @@ import scala.concurrent.{ Future, Promise } override def toString: String = "SingleSource" } - final class FutureFlattenSource[T, M]( - futureSource: Future[Graph[SourceShape[T], M]]) - extends GraphStageWithMaterializedValue[SourceShape[T], Future[M]] { + final class FutureFlattenSource[T, M](futureSource: Future[Graph[SourceShape[T], M]]) + extends GraphStageWithMaterializedValue[SourceShape[T], Future[M]] { ReactiveStreamsCompliance.requireNonNullElement(futureSource) val out: Outlet[T] = Outlet("FutureFlattenSource.out") @@ -300,21 +302,23 @@ import scala.concurrent.{ Future, Promise } } // initial handler (until future completes) - setHandler(out, new OutHandler { - def onPull(): Unit = {} + setHandler(out, + new OutHandler { + def onPull(): Unit = {} - override def onDownstreamFinish(): Unit = { - if (!materialized.isCompleted) { - // we used to try to materialize the "inner" source here just to get - // the materialized value, but that is not safe and may cause the graph shell - // to leak/stay alive after the stage completes + override def onDownstreamFinish(): Unit = { + if (!materialized.isCompleted) { + // we used to try to materialize the "inner" source here just to get + // the materialized value, 
but that is not safe and may cause the graph shell + // to leak/stay alive after the stage completes - materialized.tryFailure(new StreamDetachedException("Stream cancelled before Source Future completed")) - } + materialized.tryFailure( + new StreamDetachedException("Stream cancelled before Source Future completed")) + } - super.onDownstreamFinish() - } - }) + super.onDownstreamFinish() + } + }) def onPush(): Unit = push(out, sinkIn.grab()) @@ -329,24 +333,26 @@ import scala.concurrent.{ Future, Promise } if (!sinkIn.isClosed) sinkIn.cancel() def onFutureSourceCompleted(result: Try[Graph[SourceShape[T], M]]): Unit = { - result.map { graph => - val runnable = Source.fromGraph(graph).toMat(sinkIn.sink)(Keep.left) - val matVal = interpreter.subFusingMaterializer.materialize(runnable, defaultAttributes = attr) - materialized.success(matVal) + result + .map { graph => + val runnable = Source.fromGraph(graph).toMat(sinkIn.sink)(Keep.left) + val matVal = interpreter.subFusingMaterializer.materialize(runnable, defaultAttributes = attr) + materialized.success(matVal) - setHandler(out, this) - sinkIn.setHandler(this) + setHandler(out, this) + sinkIn.setHandler(this) + + if (isAvailable(out)) { + sinkIn.pull() + } - if (isAvailable(out)) { - sinkIn.pull() } - - }.recover { - case t => - sinkIn.cancel() - materialized.failure(t) - failStage(t) - } + .recover { + case t => + sinkIn.cancel() + materialized.failure(t) + failStage(t) + } } } @@ -450,4 +456,3 @@ import scala.concurrent.{ Future, Promise } } } - diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala index 2168bd10cd..df5f8d2483 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/Ops.scala @@ -51,10 +51,11 @@ import akka.util.OptionVal try { push(out, f(grab(in))) } catch { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - 
case _ => pull(in) - } + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case _ => pull(in) + } } } @@ -85,10 +86,11 @@ import akka.util.OptionVal pull(in) } } catch { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - case _ => pull(in) - } + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case _ => pull(in) + } } } @@ -101,7 +103,8 @@ import akka.util.OptionVal /** * INTERNAL API */ -@InternalApi private[akka] final case class TakeWhile[T](p: T => Boolean, inclusive: Boolean = false) extends SimpleLinearGraphStage[T] { +@InternalApi private[akka] final case class TakeWhile[T](p: T => Boolean, inclusive: Boolean = false) + extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.takeWhile override def toString: String = "TakeWhile" @@ -122,10 +125,11 @@ import akka.util.OptionVal completeStage() } } catch { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - case _ => pull(in) - } + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case _ => pull(in) + } } } @@ -141,36 +145,38 @@ import akka.util.OptionVal @InternalApi private[akka] final case class DropWhile[T](p: T => Boolean) extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.dropWhile - def createLogic(inheritedAttributes: Attributes) = new SupervisedGraphStageLogic(inheritedAttributes, shape) with InHandler with OutHandler { - override def onPush(): Unit = { - val elem = grab(in) - withSupervision(() => p(elem)) match { - case Some(flag) if flag => pull(in) - case Some(flag) if !flag => - push(out, elem) - setHandler(in, rest) - case None => // do nothing + def createLogic(inheritedAttributes: Attributes) = + new SupervisedGraphStageLogic(inheritedAttributes, shape) with InHandler with OutHandler { + override def onPush(): Unit = { + val elem 
= grab(in) + withSupervision(() => p(elem)) match { + case Some(flag) if flag => pull(in) + case Some(flag) if !flag => + push(out, elem) + setHandler(in, rest) + case None => // do nothing + } } + + def rest = new InHandler { + def onPush() = push(out, grab(in)) + } + + override def onResume(t: Throwable): Unit = if (!hasBeenPulled(in)) pull(in) + + override def onPull(): Unit = pull(in) + + setHandlers(in, out, this) } - def rest = new InHandler { - def onPush() = push(out, grab(in)) - } - - override def onResume(t: Throwable): Unit = if (!hasBeenPulled(in)) pull(in) - - override def onPull(): Unit = pull(in) - - setHandlers(in, out, this) - } - override def toString = "DropWhile" } /** * INTERNAL API */ -@DoNotInherit private[akka] abstract class SupervisedGraphStageLogic(inheritedAttributes: Attributes, shape: Shape) extends GraphStageLogic(shape) { +@DoNotInherit private[akka] abstract class SupervisedGraphStageLogic(inheritedAttributes: Attributes, shape: Shape) + extends GraphStageLogic(shape) { private lazy val decider = inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider def withSupervision[T](f: () => T): Option[T] = @@ -203,79 +209,84 @@ private[stream] object Collect { /** * INTERNAL API */ -@InternalApi private[akka] final case class Collect[In, Out](pf: PartialFunction[In, Out]) extends GraphStage[FlowShape[In, Out]] { +@InternalApi private[akka] final case class Collect[In, Out](pf: PartialFunction[In, Out]) + extends GraphStage[FlowShape[In, Out]] { val in = Inlet[In]("Collect.in") val out = Outlet[Out]("Collect.out") override val shape = FlowShape(in, out) override def initialAttributes: Attributes = DefaultAttributes.collect - def createLogic(inheritedAttributes: Attributes) = new SupervisedGraphStageLogic(inheritedAttributes, shape) with InHandler with OutHandler { + def createLogic(inheritedAttributes: Attributes) = + new SupervisedGraphStageLogic(inheritedAttributes, shape) with InHandler with OutHandler { - import 
Collect.NotApplied + import Collect.NotApplied - val wrappedPf = () => pf.applyOrElse(grab(in), NotApplied) + val wrappedPf = () => pf.applyOrElse(grab(in), NotApplied) - override def onPush(): Unit = withSupervision(wrappedPf) match { - case Some(result) => result match { - case NotApplied => pull(in) - case result: Out @unchecked => push(out, result) + override def onPush(): Unit = withSupervision(wrappedPf) match { + case Some(result) => + result match { + case NotApplied => pull(in) + case result: Out @unchecked => push(out, result) + } + case None => //do nothing } - case None => //do nothing + + override def onResume(t: Throwable): Unit = if (!hasBeenPulled(in)) pull(in) + + override def onPull(): Unit = pull(in) + + setHandlers(in, out, this) } - override def onResume(t: Throwable): Unit = if (!hasBeenPulled(in)) pull(in) - - override def onPull(): Unit = pull(in) - - setHandlers(in, out, this) - } - override def toString = "Collect" } /** * INTERNAL API */ -@InternalApi private[akka] final case class Recover[T](pf: PartialFunction[Throwable, T]) extends SimpleLinearGraphStage[T] { +@InternalApi private[akka] final case class Recover[T](pf: PartialFunction[Throwable, T]) + extends SimpleLinearGraphStage[T] { override protected def initialAttributes: Attributes = DefaultAttributes.recover - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { - import Collect.NotApplied + import Collect.NotApplied - var recovered: Option[T] = None + var recovered: Option[T] = None - override def onPush(): Unit = { - push(out, grab(in)) - } - - override def onPull(): Unit = { - recovered match { - case Some(elem) => - push(out, elem) - completeStage() - case None => - pull(in) + override def onPush(): Unit = { + push(out, grab(in)) } - } - override def 
onUpstreamFailure(ex: Throwable): Unit = { - pf.applyOrElse(ex, NotApplied) match { - case NotApplied => failStage(ex) - case result: T @unchecked => { - if (isAvailable(out)) { - push(out, result) + override def onPull(): Unit = { + recovered match { + case Some(elem) => + push(out, elem) completeStage() - } else { - recovered = Some(result) + case None => + pull(in) + } + } + + override def onUpstreamFailure(ex: Throwable): Unit = { + pf.applyOrElse(ex, NotApplied) match { + case NotApplied => failStage(ex) + case result: T @unchecked => { + if (isAvailable(out)) { + push(out, result) + completeStage() + } else { + recovered = Some(result) + } } } } - } - setHandlers(in, out, this) - } + setHandlers(in, out, this) + } } /** @@ -285,7 +296,8 @@ private[stream] object Collect { * it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover * would log the `t2` error. */ -@InternalApi private[akka] final case class MapError[T](f: PartialFunction[Throwable, Throwable]) extends SimpleLinearGraphStage[T] { +@InternalApi private[akka] final case class MapError[T](f: PartialFunction[Throwable, Throwable]) + extends SimpleLinearGraphStage[T] { override def createLogic(attr: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { override def onPush(): Unit = push(out, grab(in)) @@ -306,24 +318,25 @@ private[stream] object Collect { @InternalApi private[akka] final case class Take[T](count: Long) extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.take - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - private var left: Long = count + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + private var left: Long = count - override def onPush(): Unit = { - if (left > 0) { - 
push(out, grab(in)) - left -= 1 + override def onPush(): Unit = { + if (left > 0) { + push(out, grab(in)) + left -= 1 + } + if (left <= 0) completeStage() } - if (left <= 0) completeStage() - } - override def onPull(): Unit = { - if (left > 0) pull(in) - else completeStage() - } + override def onPull(): Unit = { + if (left > 0) pull(in) + else completeStage() + } - setHandlers(in, out, this) - } + setHandlers(in, out, this) + } override def toString: String = "Take" } @@ -334,28 +347,30 @@ private[stream] object Collect { @InternalApi private[akka] final case class Drop[T](count: Long) extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.drop - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - private var left: Long = count + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + private var left: Long = count - override def onPush(): Unit = { - if (left > 0) { - left -= 1 - pull(in) - } else push(out, grab(in)) + override def onPush(): Unit = { + if (left > 0) { + left -= 1 + pull(in) + } else push(out, grab(in)) + } + + override def onPull(): Unit = pull(in) + + setHandlers(in, out, this) } - override def onPull(): Unit = pull(in) - - setHandlers(in, out, this) - } - override def toString: String = "Drop" } /** * INTERNAL API */ -@InternalApi private[akka] final case class Scan[In, Out](zero: Out, f: (Out, In) => Out) extends GraphStage[FlowShape[In, Out]] { +@InternalApi private[akka] final case class Scan[In, Out](zero: Out, f: (Out, In) => Out) + extends GraphStage[FlowShape[In, Out]] { override val shape = FlowShape[In, Out](Inlet("Scan.in"), Outlet("Scan.out")) override def initialAttributes: Attributes = DefaultAttributes.scan @@ -368,7 +383,7 @@ private[stream] object Collect { private var aggregator = zero private lazy val decider = 
inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider - import Supervision.{ Stop, Resume, Restart } + import Supervision.{ Restart, Resume, Stop } import shape.{ in, out } // Initial behavior makes sure that the zero gets flushed if upstream is empty @@ -379,16 +394,18 @@ private[stream] object Collect { } }) - setHandler(in, new InHandler { - override def onPush(): Unit = () + setHandler(in, + new InHandler { + override def onPush(): Unit = () - override def onUpstreamFinish(): Unit = setHandler(out, new OutHandler { - override def onPull(): Unit = { - push(out, aggregator) - completeStage() - } - }) - }) + override def onUpstreamFinish(): Unit = + setHandler(out, new OutHandler { + override def onPull(): Unit = { + push(out, aggregator) + completeStage() + } + }) + }) override def onPull(): Unit = pull(in) @@ -397,13 +414,14 @@ private[stream] object Collect { aggregator = f(aggregator, grab(in)) push(out, aggregator) } catch { - case NonFatal(ex) => decider(ex) match { - case Resume => if (!hasBeenPulled(in)) pull(in) - case Stop => failStage(ex) - case Restart => - aggregator = zero - push(out, aggregator) - } + case NonFatal(ex) => + decider(ex) match { + case Resume => if (!hasBeenPulled(in)) pull(in) + case Stop => failStage(ex) + case Restart => + aggregator = zero + push(out, aggregator) + } } } } @@ -412,7 +430,8 @@ private[stream] object Collect { /** * INTERNAL API */ -@InternalApi private[akka] final case class ScanAsync[In, Out](zero: Out, f: (Out, In) => Future[Out]) extends GraphStage[FlowShape[In, Out]] { +@InternalApi private[akka] final case class ScanAsync[In, Out](zero: Out, f: (Out, In) => Future[Out]) + extends GraphStage[FlowShape[In, Out]] { import akka.dispatch.ExecutionContexts @@ -444,12 +463,13 @@ private[stream] object Collect { setHandlers(in, out, self) } - override def onUpstreamFinish(): Unit = setHandler(out, new OutHandler { - override def onPull(): Unit = { - push(out, current) - completeStage() - } - }) + 
override def onUpstreamFinish(): Unit = + setHandler(out, new OutHandler { + override def onPull(): Unit = { + push(out, current) + completeStage() + } + }) } private def onRestart(t: Throwable): Unit = { @@ -531,7 +551,8 @@ private[stream] object Collect { /** * INTERNAL API */ -@InternalApi private[akka] final case class Fold[In, Out](zero: Out, f: (Out, In) => Out) extends GraphStage[FlowShape[In, Out]] { +@InternalApi private[akka] final case class Fold[In, Out](zero: Out, f: (Out, In) => Out) + extends GraphStage[FlowShape[In, Out]] { val in = Inlet[In]("Fold.in") val out = Outlet[Out]("Fold.out") @@ -553,11 +574,12 @@ private[stream] object Collect { try { aggregator = f(aggregator, elem) } catch { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - case Supervision.Restart => aggregator = zero - case _ => () - } + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case Supervision.Restart => aggregator = zero + case _ => () + } } finally { if (!isClosed(in)) pull(in) } @@ -586,7 +608,8 @@ private[stream] object Collect { /** * INTERNAL API */ -@InternalApi private[akka] final class FoldAsync[In, Out](zero: Out, f: (Out, In) => Future[Out]) extends GraphStage[FlowShape[In, Out]] { +@InternalApi private[akka] final class FoldAsync[In, Out](zero: Out, f: (Out, In) => Future[Out]) + extends GraphStage[FlowShape[In, Out]] { import akka.dispatch.ExecutionContexts @@ -644,17 +667,18 @@ private[stream] object Collect { aggregating = f(aggregator, grab(in)) handleAggregatingValue() } catch { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - case supervision => { - supervision match { - case Supervision.Restart => onRestart(ex) - case _ => () // just ignore on Resume - } + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case supervision => { + supervision match { + case Supervision.Restart => onRestart(ex) + case _ => () // 
just ignore on Resume + } - tryPull(in) + tryPull(in) + } } - } } } @@ -681,7 +705,8 @@ private[stream] object Collect { /** * INTERNAL API */ -@InternalApi private[akka] final case class Intersperse[T](start: Option[T], inject: T, end: Option[T]) extends SimpleLinearGraphStage[T] { +@InternalApi private[akka] final case class Intersperse[T](start: Option[T], inject: T, end: Option[T]) + extends SimpleLinearGraphStage[T] { ReactiveStreamsCompliance.requireNonNullElement(inject) if (start.isDefined) ReactiveStreamsCompliance.requireNonNullElement(start.get) if (end.isDefined) ReactiveStreamsCompliance.requireNonNullElement(end.get) @@ -729,86 +754,90 @@ private[stream] object Collect { override protected val initialAttributes: Attributes = DefaultAttributes.grouped - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - private val buf = { - val b = Vector.newBuilder[T] - b.sizeHint(n) - b - } - var left = n + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + private val buf = { + val b = Vector.newBuilder[T] + b.sizeHint(n) + b + } + var left = n - override def onPush(): Unit = { - buf += grab(in) - left -= 1 - if (left == 0) { - val elements = buf.result() - buf.clear() - left = n - push(out, elements) - } else { + override def onPush(): Unit = { + buf += grab(in) + left -= 1 + if (left == 0) { + val elements = buf.result() + buf.clear() + left = n + push(out, elements) + } else { + pull(in) + } + } + + override def onPull(): Unit = { pull(in) } - } - override def onPull(): Unit = { - pull(in) - } - - override def onUpstreamFinish(): Unit = { - // This means the buf is filled with some elements but not enough (left < n) to group together. - // Since the upstream has finished we have to push them to downstream though. 
- if (left < n) { - val elements = buf.result() - buf.clear() - left = n - push(out, elements) + override def onUpstreamFinish(): Unit = { + // This means the buf is filled with some elements but not enough (left < n) to group together. + // Since the upstream has finished we have to push them to downstream though. + if (left < n) { + val elements = buf.result() + buf.clear() + left = n + push(out, elements) + } + completeStage() } - completeStage() - } - setHandlers(in, out, this) - } + setHandlers(in, out, this) + } } /** * INTERNAL API */ -@InternalApi private[akka] final case class LimitWeighted[T](val n: Long, val costFn: T => Long) extends SimpleLinearGraphStage[T] { +@InternalApi private[akka] final case class LimitWeighted[T](val n: Long, val costFn: T => Long) + extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.limitWeighted - def createLogic(inheritedAttributes: Attributes) = new SupervisedGraphStageLogic(inheritedAttributes, shape) with InHandler with OutHandler { - private var left = n + def createLogic(inheritedAttributes: Attributes) = + new SupervisedGraphStageLogic(inheritedAttributes, shape) with InHandler with OutHandler { + private var left = n - override def onPush(): Unit = { - val elem = grab(in) - withSupervision(() => costFn(elem)) match { - case Some(weight) => - left -= weight - if (left >= 0) push(out, elem) else failStage(new StreamLimitReachedException(n)) - case None => //do nothing + override def onPush(): Unit = { + val elem = grab(in) + withSupervision(() => costFn(elem)) match { + case Some(weight) => + left -= weight + if (left >= 0) push(out, elem) else failStage(new StreamLimitReachedException(n)) + case None => //do nothing + } } + + override def onResume(t: Throwable): Unit = if (!hasBeenPulled(in)) pull(in) + + override def onRestart(t: Throwable): Unit = { + left = n + if (!hasBeenPulled(in)) pull(in) + } + + override def onPull(): Unit = pull(in) + + setHandlers(in, out, 
this) } - override def onResume(t: Throwable): Unit = if (!hasBeenPulled(in)) pull(in) - - override def onRestart(t: Throwable): Unit = { - left = n - if (!hasBeenPulled(in)) pull(in) - } - - override def onPull(): Unit = pull(in) - - setHandlers(in, out, this) - } - override def toString = "LimitWeighted" } /** * INTERNAL API */ -@InternalApi private[akka] final case class Sliding[T](val n: Int, val step: Int) extends GraphStage[FlowShape[T, immutable.Seq[T]]] { +@InternalApi private[akka] final case class Sliding[T](val n: Int, val step: Int) + extends GraphStage[FlowShape[T, immutable.Seq[T]]] { require(n > 0, "n must be greater than 0") require(step > 0, "step must be greater than 0") @@ -818,264 +847,288 @@ private[stream] object Collect { override protected val initialAttributes: Attributes = DefaultAttributes.sliding - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - private var buf = Vector.empty[T] + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + private var buf = Vector.empty[T] - override def onPush(): Unit = { - buf :+= grab(in) - if (buf.size < n) { - pull(in) - } else if (buf.size == n) { - push(out, buf) - } else if (step <= n) { - buf = buf.drop(step) - if (buf.size == n) { + override def onPush(): Unit = { + buf :+= grab(in) + if (buf.size < n) { + pull(in) + } else if (buf.size == n) { push(out, buf) - } else pull(in) - } else if (step > n) { - if (buf.size == step) { + } else if (step <= n) { buf = buf.drop(step) + if (buf.size == n) { + push(out, buf) + } else pull(in) + } else if (step > n) { + if (buf.size == step) { + buf = buf.drop(step) + } + pull(in) } + } + + override def onPull(): Unit = { pull(in) } - } - override def onPull(): Unit = { - pull(in) - } + override def onUpstreamFinish(): Unit = { - override def onUpstreamFinish(): Unit = { - - // We can 
finish current stage directly if: - // 1. the buf is empty or - // 2. when the step size is greater than the sliding size (step > n) and current stage is in between - // two sliding (ie. buf.size >= n && buf.size < step). - // - // Otherwise it means there is still a not finished sliding so we have to push them before finish current stage. - if (buf.size < n && buf.size > 0) { - push(out, buf) + // We can finish current stage directly if: + // 1. the buf is empty or + // 2. when the step size is greater than the sliding size (step > n) and current stage is in between + // two sliding (ie. buf.size >= n && buf.size < step). + // + // Otherwise it means there is still a not finished sliding so we have to push them before finish current stage. + if (buf.size < n && buf.size > 0) { + push(out, buf) + } + completeStage() } - completeStage() - } - this.setHandlers(in, out, this) - } + this.setHandlers(in, out, this) + } } /** * INTERNAL API */ -@InternalApi private[akka] final case class Buffer[T](size: Int, overflowStrategy: OverflowStrategy) extends SimpleLinearGraphStage[T] { +@InternalApi private[akka] final case class Buffer[T](size: Int, overflowStrategy: OverflowStrategy) + extends SimpleLinearGraphStage[T] { - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler with StageLogging { - override protected def logSource: Class[_] = classOf[Buffer[_]] + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler with StageLogging { + override protected def logSource: Class[_] = classOf[Buffer[_]] - private var buffer: BufferImpl[T] = _ + private var buffer: BufferImpl[T] = _ - val enqueueAction: T => Unit = - overflowStrategy match { - case s: DropHead => elem => - if (buffer.isFull) { - log.log(s.logLevel, "Dropping the head element because buffer is full and overflowStrategy is: [DropHead]") - 
buffer.dropHead() - } - buffer.enqueue(elem) - pull(in) - case s: DropTail => elem => - if (buffer.isFull) { - log.log(s.logLevel, "Dropping the tail element because buffer is full and overflowStrategy is: [DropTail]") - buffer.dropTail() - } - buffer.enqueue(elem) - pull(in) - case s: DropBuffer => elem => - if (buffer.isFull) { - log.log(s.logLevel, "Dropping all the buffered elements because buffer is full and overflowStrategy is: [DropBuffer]") - buffer.clear() - } - buffer.enqueue(elem) - pull(in) - case s: DropNew => elem => - if (!buffer.isFull) buffer.enqueue(elem) - else log.log(s.logLevel, "Dropping the new element because buffer is full and overflowStrategy is: [DropNew]") - pull(in) - case s: Backpressure => elem => - buffer.enqueue(elem) - if (!buffer.isFull) pull(in) - else log.log(s.logLevel, "Backpressuring because buffer is full and overflowStrategy is: [Backpressure]") - case s: Fail => elem => - if (buffer.isFull) { - log.log(s.logLevel, "Failing because buffer is full and overflowStrategy is: [Fail]") - failStage(BufferOverflowException(s"Buffer overflow (max capacity was: $size)!")) - } else { - buffer.enqueue(elem) - pull(in) - } - } + val enqueueAction: T => Unit = + overflowStrategy match { + case s: DropHead => + elem => + if (buffer.isFull) { + log.log(s.logLevel, + "Dropping the head element because buffer is full and overflowStrategy is: [DropHead]") + buffer.dropHead() + } + buffer.enqueue(elem) + pull(in) + case s: DropTail => + elem => + if (buffer.isFull) { + log.log(s.logLevel, + "Dropping the tail element because buffer is full and overflowStrategy is: [DropTail]") + buffer.dropTail() + } + buffer.enqueue(elem) + pull(in) + case s: DropBuffer => + elem => + if (buffer.isFull) { + log.log( + s.logLevel, + "Dropping all the buffered elements because buffer is full and overflowStrategy is: [DropBuffer]") + buffer.clear() + } + buffer.enqueue(elem) + pull(in) + case s: DropNew => + elem => + if (!buffer.isFull) buffer.enqueue(elem) + 
else + log.log(s.logLevel, + "Dropping the new element because buffer is full and overflowStrategy is: [DropNew]") + pull(in) + case s: Backpressure => + elem => + buffer.enqueue(elem) + if (!buffer.isFull) pull(in) + else log.log(s.logLevel, "Backpressuring because buffer is full and overflowStrategy is: [Backpressure]") + case s: Fail => + elem => + if (buffer.isFull) { + log.log(s.logLevel, "Failing because buffer is full and overflowStrategy is: [Fail]") + failStage(BufferOverflowException(s"Buffer overflow (max capacity was: $size)!")) + } else { + buffer.enqueue(elem) + pull(in) + } + } - override def preStart(): Unit = { - buffer = BufferImpl(size, materializer) - pull(in) - } - - override def onPush(): Unit = { - val elem = grab(in) - // If out is available, then it has been pulled but no dequeued element has been delivered. - // It means the buffer at this moment is definitely empty, - // so we just push the current element to out, then pull. - if (isAvailable(out)) { - push(out, elem) + override def preStart(): Unit = { + buffer = BufferImpl(size, materializer) pull(in) - } else { - enqueueAction(elem) } - } - override def onPull(): Unit = { - if (buffer.nonEmpty) push(out, buffer.dequeue()) - if (isClosed(in)) { + override def onPush(): Unit = { + val elem = grab(in) + // If out is available, then it has been pulled but no dequeued element has been delivered. + // It means the buffer at this moment is definitely empty, + // so we just push the current element to out, then pull. 
+ if (isAvailable(out)) { + push(out, elem) + pull(in) + } else { + enqueueAction(elem) + } + } + + override def onPull(): Unit = { + if (buffer.nonEmpty) push(out, buffer.dequeue()) + if (isClosed(in)) { + if (buffer.isEmpty) completeStage() + } else if (!hasBeenPulled(in)) { + pull(in) + } + } + + override def onUpstreamFinish(): Unit = { if (buffer.isEmpty) completeStage() - } else if (!hasBeenPulled(in)) { - pull(in) } - } - override def onUpstreamFinish(): Unit = { - if (buffer.isEmpty) completeStage() + setHandlers(in, out, this) } - setHandlers(in, out, this) - } - } /** * INTERNAL API */ -@InternalApi private[akka] final case class Batch[In, Out](val max: Long, val costFn: In => Long, val seed: In => Out, val aggregate: (Out, In) => Out) - extends GraphStage[FlowShape[In, Out]] { +@InternalApi private[akka] final case class Batch[In, Out](val max: Long, + val costFn: In => Long, + val seed: In => Out, + val aggregate: (Out, In) => Out) + extends GraphStage[FlowShape[In, Out]] { val in = Inlet[In]("Batch.in") val out = Outlet[Out]("Batch.out") override val shape: FlowShape[In, Out] = FlowShape.of(in, out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { - lazy val decider = inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider + lazy val decider = inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider - private var agg: Out = null.asInstanceOf[Out] - private var left: Long = max - private var pending: In = null.asInstanceOf[In] + private var agg: Out = null.asInstanceOf[Out] + private var left: Long = max + private var pending: In = null.asInstanceOf[In] - private def flush(): Unit = { - if (agg != null) { - push(out, agg) - left = max - } - if (pending != null) { - try { - agg = seed(pending) - left -= 
costFn(pending) - pending = null.asInstanceOf[In] - } catch { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - case Supervision.Restart => restartState() - case Supervision.Resume => - pending = null.asInstanceOf[In] - } + private def flush(): Unit = { + if (agg != null) { + push(out, agg) + left = max } - } else { - agg = null.asInstanceOf[Out] - } - } - - override def preStart() = pull(in) - - def onPush(): Unit = { - val elem = grab(in) - val cost = costFn(elem) - - if (agg == null) { - try { - agg = seed(elem) - left -= cost - } catch { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - case Supervision.Restart => - restartState() - case Supervision.Resume => - } - } - } else if (left < cost) { - pending = elem - } else { - try { - agg = aggregate(agg, elem) - left -= cost - } catch { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - case Supervision.Restart => - restartState() - case Supervision.Resume => - } - } - } - - if (isAvailable(out)) flush() - if (pending == null) pull(in) - } - - override def onUpstreamFinish(): Unit = { - if (agg == null) completeStage() - } - - def onPull(): Unit = { - if (agg == null) { - if (isClosed(in)) completeStage() - else if (!hasBeenPulled(in)) pull(in) - } else if (isClosed(in)) { - push(out, agg) - if (pending == null) completeStage() - else { + if (pending != null) { try { agg = seed(pending) + left -= costFn(pending) + pending = null.asInstanceOf[In] } catch { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - case Supervision.Resume => - case Supervision.Restart => - restartState() - if (!hasBeenPulled(in)) pull(in) - } + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case Supervision.Restart => restartState() + case Supervision.Resume => + pending = null.asInstanceOf[In] + } } - pending = null.asInstanceOf[In] + } else { + agg = 
null.asInstanceOf[Out] } - } else { - flush() - if (!hasBeenPulled(in)) pull(in) } - } + override def preStart() = pull(in) - private def restartState(): Unit = { - agg = null.asInstanceOf[Out] - left = max - pending = null.asInstanceOf[In] - } + def onPush(): Unit = { + val elem = grab(in) + val cost = costFn(elem) - setHandlers(in, out, this) - } + if (agg == null) { + try { + agg = seed(elem) + left -= cost + } catch { + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case Supervision.Restart => + restartState() + case Supervision.Resume => + } + } + } else if (left < cost) { + pending = elem + } else { + try { + agg = aggregate(agg, elem) + left -= cost + } catch { + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case Supervision.Restart => + restartState() + case Supervision.Resume => + } + } + } + + if (isAvailable(out)) flush() + if (pending == null) pull(in) + } + + override def onUpstreamFinish(): Unit = { + if (agg == null) completeStage() + } + + def onPull(): Unit = { + if (agg == null) { + if (isClosed(in)) completeStage() + else if (!hasBeenPulled(in)) pull(in) + } else if (isClosed(in)) { + push(out, agg) + if (pending == null) completeStage() + else { + try { + agg = seed(pending) + } catch { + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case Supervision.Resume => + case Supervision.Restart => + restartState() + if (!hasBeenPulled(in)) pull(in) + } + } + pending = null.asInstanceOf[In] + } + } else { + flush() + if (!hasBeenPulled(in)) pull(in) + } + + } + + private def restartState(): Unit = { + agg = null.asInstanceOf[Out] + left = max + pending = null.asInstanceOf[In] + } + + setHandlers(in, out, this) + } } /** * INTERNAL API */ -@InternalApi private[akka] final class Expand[In, Out](val extrapolate: In => Iterator[Out]) extends GraphStage[FlowShape[In, Out]] { +@InternalApi private[akka] final class Expand[In, Out](val 
extrapolate: In => Iterator[Out]) + extends GraphStage[FlowShape[In, Out]] { private val in = Inlet[In]("expand.in") private val out = Outlet[Out]("expand.out") @@ -1131,10 +1184,7 @@ private[stream] object Collect { */ @InternalApi private[akka] object MapAsync { - final class Holder[T]( - var elem: Try[T], - val cb: AsyncCallback[Holder[T]] - ) extends (Try[T] => Unit) { + final class Holder[T](var elem: Try[T], val cb: AsyncCallback[Holder[T]]) extends (Try[T] => Unit) { // To support both fail-fast when the supervision directive is Stop // and not calling the decider multiple times (#23888) we need to cache the decider result and re-use that @@ -1170,7 +1220,7 @@ private[stream] object Collect { * INTERNAL API */ @InternalApi private[akka] final case class MapAsync[In, Out](parallelism: Int, f: In => Future[Out]) - extends GraphStage[FlowShape[In, Out]] { + extends GraphStage[FlowShape[In, Out]] { import MapAsync._ @@ -1209,7 +1259,7 @@ private[stream] object Collect { buffer.enqueue(holder) future.value match { - case None => future.onComplete(holder)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext) + case None => future.onComplete(holder)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext) case Some(v) => // #20217 the future is already here, optimization: avoid scheduling it on the dispatcher and // run the logic directly on this thread @@ -1217,7 +1267,7 @@ private[stream] object Collect { v match { // this optimization also requires us to stop the stage to fail fast if the decider says so: case Failure(ex) if holder.supervisionDirectiveFor(decider, ex) == Supervision.Stop => failStage(ex) - case _ => pushNextIfPossible() + case _ => pushNextIfPossible() } } @@ -1249,7 +1299,7 @@ private[stream] object Collect { // this could happen if we are looping in pushNextIfPossible and end up on a failed future before the // onComplete callback has run case Supervision.Stop => failStage(ex) - case _ => + case _ => // try next element 
pushNextIfPossible() } @@ -1268,7 +1318,7 @@ private[stream] object Collect { * INTERNAL API */ @InternalApi private[akka] final case class MapAsyncUnordered[In, Out](parallelism: Int, f: In => Future[Out]) - extends GraphStage[FlowShape[In, Out]] { + extends GraphStage[FlowShape[In, Out]] { private val in = Inlet[In]("MapAsyncUnordered.in") private val out = Outlet[Out]("MapAsyncUnordered.out") @@ -1372,10 +1422,8 @@ private[stream] object Collect { /** * INTERNAL API */ -@InternalApi private[akka] final case class Log[T]( - name: String, - extract: T => Any, - logAdapter: Option[LoggingAdapter]) extends SimpleLinearGraphStage[T] { +@InternalApi private[akka] final case class Log[T](name: String, extract: T => Any, logAdapter: Option[LoggingAdapter]) + extends SimpleLinearGraphStage[T] { override def toString = "Log" @@ -1398,8 +1446,10 @@ private[stream] object Collect { val mat = try ActorMaterializerHelper.downcast(materializer) catch { case ex: Exception => - throw new RuntimeException("Log stage can only provide LoggingAdapter when used with ActorMaterializer! " + - "Provide a LoggingAdapter explicitly or use the actor based flow materializer.", ex) + throw new RuntimeException( + "Log stage can only provide LoggingAdapter when used with ActorMaterializer! 
" + + "Provide a LoggingAdapter explicitly or use the actor based flow materializer.", + ex) } Logging(mat.system, mat)(fromMaterializer) @@ -1414,10 +1464,11 @@ private[stream] object Collect { push(out, elem) } catch { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - case _ => pull(in) - } + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case _ => pull(in) + } } } @@ -1427,8 +1478,12 @@ private[stream] object Collect { if (isEnabled(logLevels.onFailure)) logLevels.onFailure match { case Logging.ErrorLevel => log.error(cause, "[{}] Upstream failed.", name) - case level => log.log(level, "[{}] Upstream failed, cause: {}: {}", name, Logging.simpleName(cause - .getClass), cause.getMessage) + case level => + log.log(level, + "[{}] Upstream failed, cause: {}: {}", + name, + Logging.simpleName(cause.getClass), + cause.getMessage) } super.onUpstreamFailure(cause) @@ -1479,8 +1534,8 @@ private[stream] object Collect { private final val DefaultLoggerName = "akka.stream.Log" private final val OffInt = LogLevels.Off.asInt - private final val DefaultLogLevels = LogLevels(onElement = Logging.DebugLevel, onFinish = Logging - .DebugLevel, onFailure = Logging.ErrorLevel) + private final val DefaultLogLevels = + LogLevels(onElement = Logging.DebugLevel, onFinish = Logging.DebugLevel, onFailure = Logging.ErrorLevel) } /** @@ -1503,7 +1558,10 @@ private[stream] object Collect { /** * INTERNAL API */ -@InternalApi private[akka] final class GroupedWeightedWithin[T](val maxWeight: Long, costFn: T => Long, val interval: FiniteDuration) extends GraphStage[FlowShape[T, immutable.Seq[T]]] { +@InternalApi private[akka] final class GroupedWeightedWithin[T](val maxWeight: Long, + costFn: T => Long, + val interval: FiniteDuration) + extends GraphStage[FlowShape[T, immutable.Seq[T]]] { require(maxWeight > 0, "maxWeight must be greater than 0") require(interval > Duration.Zero) @@ -1514,244 +1572,248 @@ private[stream] 
object Collect { val shape = FlowShape(in, out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) with InHandler with OutHandler { + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new TimerGraphStageLogic(shape) with InHandler with OutHandler { - private val buf: VectorBuilder[T] = new VectorBuilder - private var pending: T = null.asInstanceOf[T] - private var pendingWeight: Long = 0L - // True if: - // - buf is nonEmpty - // AND - // - (timer fired - // OR - // totalWeight >= maxWeight - // OR - // pending != null - // OR - // upstream completed) - private var pushEagerly = false - private var groupEmitted = true - private var finished = false - private var totalWeight = 0L - private var hasElements = false + private val buf: VectorBuilder[T] = new VectorBuilder + private var pending: T = null.asInstanceOf[T] + private var pendingWeight: Long = 0L + // True if: + // - buf is nonEmpty + // AND + // - (timer fired + // OR + // totalWeight >= maxWeight + // OR + // pending != null + // OR + // upstream completed) + private var pushEagerly = false + private var groupEmitted = true + private var finished = false + private var totalWeight = 0L + private var hasElements = false - override def preStart() = { - schedulePeriodically(GroupedWeightedWithin.groupedWeightedWithinTimer, interval) - pull(in) - } + override def preStart() = { + schedulePeriodically(GroupedWeightedWithin.groupedWeightedWithinTimer, interval) + pull(in) + } - private def nextElement(elem: T): Unit = { - groupEmitted = false - val cost = costFn(elem) - if (cost < 0L) failStage(new IllegalArgumentException(s"Negative weight [$cost] for element [$elem] is not allowed")) - else { - hasElements = true - if (totalWeight + cost <= maxWeight) { - buf += elem - totalWeight += cost - - if (totalWeight < maxWeight) pull(in) - else { - // `totalWeight >= maxWeight` which means that downstream can get the next group. 
- if (!isAvailable(out)) { - // We should emit group when downstream becomes available - pushEagerly = true - // we want to pull anyway, since we allow for zero weight elements - // but since `emitGroup()` will pull internally (by calling `startNewGroup()`) - // we also have to pull if downstream hasn't yet requested an element. - pull(in) - } else { - schedulePeriodically(GroupedWeightedWithin.groupedWeightedWithinTimer, interval) - emitGroup() - } - } - } else { - //we have a single heavy element that weighs more than the limit - if (totalWeight == 0L) { + private def nextElement(elem: T): Unit = { + groupEmitted = false + val cost = costFn(elem) + if (cost < 0L) + failStage(new IllegalArgumentException(s"Negative weight [$cost] for element [$elem] is not allowed")) + else { + hasElements = true + if (totalWeight + cost <= maxWeight) { buf += elem totalWeight += cost - pushEagerly = true + + if (totalWeight < maxWeight) pull(in) + else { + // `totalWeight >= maxWeight` which means that downstream can get the next group. + if (!isAvailable(out)) { + // We should emit group when downstream becomes available + pushEagerly = true + // we want to pull anyway, since we allow for zero weight elements + // but since `emitGroup()` will pull internally (by calling `startNewGroup()`) + // we also have to pull if downstream hasn't yet requested an element. 
+ pull(in) + } else { + schedulePeriodically(GroupedWeightedWithin.groupedWeightedWithinTimer, interval) + emitGroup() + } + } } else { - pending = elem - pendingWeight = cost + //we have a single heavy element that weighs more than the limit + if (totalWeight == 0L) { + buf += elem + totalWeight += cost + pushEagerly = true + } else { + pending = elem + pendingWeight = cost + } + schedulePeriodically(GroupedWeightedWithin.groupedWeightedWithinTimer, interval) + tryCloseGroup() } - schedulePeriodically(GroupedWeightedWithin.groupedWeightedWithinTimer, interval) - tryCloseGroup() } } - } - private def tryCloseGroup(): Unit = { - if (isAvailable(out)) emitGroup() - else if (pending != null || finished) pushEagerly = true - } - - private def emitGroup(): Unit = { - groupEmitted = true - push(out, buf.result()) - buf.clear() - if (!finished) startNewGroup() - else if (pending != null) emit(out, Vector(pending), () => completeStage()) - else completeStage() - } - - private def startNewGroup(): Unit = { - if (pending != null) { - totalWeight = pendingWeight - pendingWeight = 0L - buf += pending - pending = null.asInstanceOf[T] - groupEmitted = false - } else { - totalWeight = 0L - hasElements = false + private def tryCloseGroup(): Unit = { + if (isAvailable(out)) emitGroup() + else if (pending != null || finished) pushEagerly = true } - pushEagerly = false - if (isAvailable(in)) nextElement(grab(in)) - else if (!hasBeenPulled(in)) pull(in) + + private def emitGroup(): Unit = { + groupEmitted = true + push(out, buf.result()) + buf.clear() + if (!finished) startNewGroup() + else if (pending != null) emit(out, Vector(pending), () => completeStage()) + else completeStage() + } + + private def startNewGroup(): Unit = { + if (pending != null) { + totalWeight = pendingWeight + pendingWeight = 0L + buf += pending + pending = null.asInstanceOf[T] + groupEmitted = false + } else { + totalWeight = 0L + hasElements = false + } + pushEagerly = false + if (isAvailable(in)) 
nextElement(grab(in)) + else if (!hasBeenPulled(in)) pull(in) + } + + override def onPush(): Unit = { + if (pending == null) nextElement(grab(in)) // otherwise keep the element for next round + } + + override def onPull(): Unit = if (pushEagerly) emitGroup() + + override def onUpstreamFinish(): Unit = { + finished = true + if (groupEmitted) completeStage() + else tryCloseGroup() + } + + override protected def onTimer(timerKey: Any) = if (hasElements) { + if (isAvailable(out)) emitGroup() + else pushEagerly = true + } + + setHandlers(in, out, this) } - - override def onPush(): Unit = { - if (pending == null) nextElement(grab(in)) // otherwise keep the element for next round - } - - override def onPull(): Unit = if (pushEagerly) emitGroup() - - override def onUpstreamFinish(): Unit = { - finished = true - if (groupEmitted) completeStage() - else tryCloseGroup() - } - - override protected def onTimer(timerKey: Any) = if (hasElements) { - if (isAvailable(out)) emitGroup() - else pushEagerly = true - } - - setHandlers(in, out, this) - } } /** * INTERNAL API */ -@InternalApi private[akka] final class Delay[T](val d: FiniteDuration, val strategy: DelayOverflowStrategy) extends SimpleLinearGraphStage[T] { +@InternalApi private[akka] final class Delay[T](val d: FiniteDuration, val strategy: DelayOverflowStrategy) + extends SimpleLinearGraphStage[T] { private[this] def timerName = "DelayedTimer" final val DelayPrecisionMS = 10 override def initialAttributes: Attributes = DefaultAttributes.delay - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) with InHandler with OutHandler { - val size = inheritedAttributes.mandatoryAttribute[InputBuffer].max + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new TimerGraphStageLogic(shape) with InHandler with OutHandler { + val size = inheritedAttributes.mandatoryAttribute[InputBuffer].max - val delayMillis = d.toMillis + val delayMillis = d.toMillis 
- var buffer: BufferImpl[(Long, T)] = _ // buffer has pairs timestamp with upstream element + var buffer: BufferImpl[(Long, T)] = _ // buffer has pairs timestamp with upstream element - override def preStart(): Unit = buffer = BufferImpl(size, materializer) + override def preStart(): Unit = buffer = BufferImpl(size, materializer) - val onPushWhenBufferFull: () => Unit = strategy match { - case EmitEarly => - () => { - if (!isTimerActive(timerName)) - push(out, buffer.dequeue()._2) - else { - cancelTimer(timerName) - onTimer(timerName) + val onPushWhenBufferFull: () => Unit = strategy match { + case EmitEarly => + () => { + if (!isTimerActive(timerName)) + push(out, buffer.dequeue()._2) + else { + cancelTimer(timerName) + onTimer(timerName) + } + grabAndPull() } - grabAndPull() - } - case _: DropHead => - () => { - buffer.dropHead() - grabAndPull() - } - case _: DropTail => - () => { - buffer.dropTail() - grabAndPull() - } - case _: DropNew => - () => { - grab(in) - if (!isTimerActive(timerName)) scheduleOnce(timerName, d) - } - case _: DropBuffer => - () => { - buffer.clear() - grabAndPull() - } - case _: Fail => - () => { - failStage(BufferOverflowException(s"Buffer overflow for delay operator (max capacity was: $size)!")) - } - case _: Backpressure => - () => { - throw new IllegalStateException("Delay buffer must never overflow in Backpressure mode") - } - } + case _: DropHead => + () => { + buffer.dropHead() + grabAndPull() + } + case _: DropTail => + () => { + buffer.dropTail() + grabAndPull() + } + case _: DropNew => + () => { + grab(in) + if (!isTimerActive(timerName)) scheduleOnce(timerName, d) + } + case _: DropBuffer => + () => { + buffer.clear() + grabAndPull() + } + case _: Fail => + () => { + failStage(BufferOverflowException(s"Buffer overflow for delay operator (max capacity was: $size)!")) + } + case _: Backpressure => + () => { + throw new IllegalStateException("Delay buffer must never overflow in Backpressure mode") + } + } - def onPush(): Unit = { 
- if (buffer.isFull) - onPushWhenBufferFull() - else { - grabAndPull() - if (!isTimerActive(timerName)) { - scheduleOnce(timerName, d) + def onPush(): Unit = { + if (buffer.isFull) + onPushWhenBufferFull() + else { + grabAndPull() + if (!isTimerActive(timerName)) { + scheduleOnce(timerName, d) + } } } - } - def pullCondition: Boolean = - !strategy.isBackpressure || buffer.used < size + def pullCondition: Boolean = + !strategy.isBackpressure || buffer.used < size - def grabAndPull(): Unit = { - buffer.enqueue((System.nanoTime(), grab(in))) - if (pullCondition) pull(in) - } + def grabAndPull(): Unit = { + buffer.enqueue((System.nanoTime(), grab(in))) + if (pullCondition) pull(in) + } - override def onUpstreamFinish(): Unit = - completeIfReady() + override def onUpstreamFinish(): Unit = + completeIfReady() - def onPull(): Unit = { - if (!isTimerActive(timerName) && !buffer.isEmpty) { - val waitTime = nextElementWaitTime() - if (waitTime < 0) { + def onPull(): Unit = { + if (!isTimerActive(timerName) && !buffer.isEmpty) { + val waitTime = nextElementWaitTime() + if (waitTime < 0) { + push(out, buffer.dequeue()._2) + } else { + scheduleOnce(timerName, Math.max(DelayPrecisionMS, waitTime).millis) + } + } + + if (!isClosed(in) && !hasBeenPulled(in) && pullCondition) + pull(in) + + completeIfReady() + } + + setHandler(in, this) + setHandler(out, this) + + def completeIfReady(): Unit = if (isClosed(in) && buffer.isEmpty) completeStage() + + def nextElementWaitTime(): Long = { + delayMillis - NANOSECONDS.toMillis(System.nanoTime() - buffer.peek()._1) + } + + final override protected def onTimer(key: Any): Unit = { + if (isAvailable(out)) push(out, buffer.dequeue()._2) - } else { - scheduleOnce(timerName, Math.max(DelayPrecisionMS, waitTime).millis) + + if (!buffer.isEmpty) { + val waitTime = nextElementWaitTime() + if (waitTime > DelayPrecisionMS) + scheduleOnce(timerName, waitTime.millis) } + completeIfReady() } - - if (!isClosed(in) && !hasBeenPulled(in) && pullCondition) 
- pull(in) - - completeIfReady() } - setHandler(in, this) - setHandler(out, this) - - def completeIfReady(): Unit = if (isClosed(in) && buffer.isEmpty) completeStage() - - def nextElementWaitTime(): Long = { - delayMillis - NANOSECONDS.toMillis(System.nanoTime() - buffer.peek()._1) - } - - final override protected def onTimer(key: Any): Unit = { - if (isAvailable(out)) - push(out, buffer.dequeue()._2) - - if (!buffer.isEmpty) { - val waitTime = nextElementWaitTime() - if (waitTime > DelayPrecisionMS) - scheduleOnce(timerName, waitTime.millis) - } - completeIfReady() - } - } - override def toString = "Delay" } @@ -1760,17 +1822,18 @@ private[stream] object Collect { */ @InternalApi private[akka] final class TakeWithin[T](val timeout: FiniteDuration) extends SimpleLinearGraphStage[T] { - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) with InHandler with OutHandler { - def onPush(): Unit = push(out, grab(in)) + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new TimerGraphStageLogic(shape) with InHandler with OutHandler { + def onPush(): Unit = push(out, grab(in)) - def onPull(): Unit = pull(in) + def onPull(): Unit = pull(in) - setHandlers(in, out, this) + setHandlers(in, out, this) - final override protected def onTimer(key: Any): Unit = completeStage() + final override protected def onTimer(key: Any): Unit = completeStage() - override def preStart(): Unit = scheduleOnce("TakeWithinTimer", timeout) - } + override def preStart(): Unit = scheduleOnce("TakeWithinTimer", timeout) + } override def toString = "TakeWithin" } @@ -1779,29 +1842,30 @@ private[stream] object Collect { * INTERNAL API */ @InternalApi private[akka] final class DropWithin[T](val timeout: FiniteDuration) extends SimpleLinearGraphStage[T] { - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { + override def 
createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { - private val startNanoTime = System.nanoTime() - private val timeoutInNano = timeout.toNanos + private val startNanoTime = System.nanoTime() + private val timeoutInNano = timeout.toNanos - def onPush(): Unit = { - if (System.nanoTime() - startNanoTime <= timeoutInNano) { - pull(in) - } else { - push(out, grab(in)) - // change the in handler to avoid System.nanoTime call after timeout - setHandler(in, new InHandler { - def onPush() = push(out, grab(in)) - }) + def onPush(): Unit = { + if (System.nanoTime() - startNanoTime <= timeoutInNano) { + pull(in) + } else { + push(out, grab(in)) + // change the in handler to avoid System.nanoTime call after timeout + setHandler(in, new InHandler { + def onPush() = push(out, grab(in)) + }) + } } + + def onPull(): Unit = pull(in) + + setHandlers(in, out, this) + } - def onPull(): Unit = pull(in) - - setHandlers(in, out, this) - - } - override def toString = "DropWithin" } @@ -1811,57 +1875,59 @@ private[stream] object Collect { @InternalApi private[akka] final class Reduce[T](val f: (T, T) => T) extends SimpleLinearGraphStage[T] { override def initialAttributes: Attributes = DefaultAttributes.reduce - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { self => - override def toString = s"Reduce.Logic(aggregator=$aggregator)" + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { self => + override def toString = s"Reduce.Logic(aggregator=$aggregator)" - var aggregator: T = _ + var aggregator: T = _ - private def decider = - inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider + private def decider = + inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider - def setInitialInHandler(): Unit = { - // Initial 
input handler - setHandler(in, new InHandler { - override def onPush(): Unit = { - aggregator = grab(in) - pull(in) - setHandler(in, self) - } + def setInitialInHandler(): Unit = { + // Initial input handler + setHandler(in, new InHandler { + override def onPush(): Unit = { + aggregator = grab(in) + pull(in) + setHandler(in, self) + } - override def onUpstreamFinish(): Unit = - failStage(new NoSuchElementException("reduce over empty stream")) - }) - } - - override def onPush(): Unit = { - val elem = grab(in) - try { - aggregator = f(aggregator, elem) - } catch { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - case Supervision.Restart => - aggregator = _: T - setInitialInHandler() - case _ => () - - } - } finally { - if (!isClosed(in)) pull(in) + override def onUpstreamFinish(): Unit = + failStage(new NoSuchElementException("reduce over empty stream")) + }) } + + override def onPush(): Unit = { + val elem = grab(in) + try { + aggregator = f(aggregator, elem) + } catch { + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case Supervision.Restart => + aggregator = _: T + setInitialInHandler() + case _ => () + + } + } finally { + if (!isClosed(in)) pull(in) + } + } + + override def onPull(): Unit = pull(in) + + override def onUpstreamFinish(): Unit = { + push(out, aggregator) + completeStage() + } + + setInitialInHandler() + setHandler(out, self) } - override def onPull(): Unit = pull(in) - - override def onUpstreamFinish(): Unit = { - push(out, aggregator) - completeStage() - } - - setInitialInHandler() - setHandler(out, self) - } - override def toString = "Reduce" } @@ -1870,7 +1936,9 @@ private[stream] object Collect { */ @InternalApi private[stream] object RecoverWith -@InternalApi private[akka] final class RecoverWith[T, M](val maximumRetries: Int, val pf: PartialFunction[Throwable, Graph[SourceShape[T], M]]) extends SimpleLinearGraphStage[T] { +@InternalApi private[akka] final class 
RecoverWith[T, M](val maximumRetries: Int, + val pf: PartialFunction[Throwable, Graph[SourceShape[T], M]]) + extends SimpleLinearGraphStage[T] { override def initialAttributes = DefaultAttributes.recoverWith @@ -1923,7 +1991,8 @@ private[stream] object Collect { /** * INTERNAL API */ -@InternalApi private[akka] final class StatefulMapConcat[In, Out](val f: () => In => immutable.Iterable[Out]) extends GraphStage[FlowShape[In, Out]] { +@InternalApi private[akka] final class StatefulMapConcat[In, Out](val f: () => In => immutable.Iterable[Out]) + extends GraphStage[FlowShape[In, Out]] { val in = Inlet[In]("StatefulMapConcat.in") val out = Outlet[Out]("StatefulMapConcat.out") override val shape = FlowShape(in, out) @@ -1962,18 +2031,19 @@ private[stream] object Collect { catch handleException private def handleException: Catcher[Unit] = { - case NonFatal(ex) => decider(ex) match { - case Supervision.Stop => failStage(ex) - case Supervision.Resume => - if (isClosed(in)) completeStage() - else if (!hasBeenPulled(in)) pull(in) - case Supervision.Restart => - if (isClosed(in)) completeStage() - else { - restartState() - if (!hasBeenPulled(in)) pull(in) - } - } + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => failStage(ex) + case Supervision.Resume => + if (isClosed(in)) completeStage() + else if (!hasBeenPulled(in)) pull(in) + case Supervision.Restart => + if (isClosed(in)) completeStage() + else { + restartState() + if (!hasBeenPulled(in)) pull(in) + } + } } private def restartState(): Unit = { @@ -1990,7 +2060,7 @@ private[stream] object Collect { * INTERNAL API */ @InternalApi final private[akka] class LazyFlow[I, O, M](flowFactory: I => Future[Flow[I, O, M]]) - extends GraphStageWithMaterializedValue[FlowShape[I, O], Future[Option[M]]] { + extends GraphStageWithMaterializedValue[FlowShape[I, O], Future[Option[M]]] { val in = Inlet[I]("lazyFlow.in") val out = Outlet[O]("lazyFlow.out") @@ -2081,7 +2151,8 @@ private[stream] object Collect { val 
subInlet = new SubSinkInlet[O]("LazyFlowSubSink") val subOutlet = new SubSourceOutlet[I]("LazyFlowSubSource") - val matVal = Source.fromGraph(subOutlet.source) + val matVal = Source + .fromGraph(subOutlet.source) .viaMat(flow)(Keep.right) .toMat(subInlet.sink)(Keep.left) .run()(interpreter.subFusingMaterializer) @@ -2104,22 +2175,23 @@ private[stream] object Collect { // The stage must not be shut down automatically; it is completed when maybeCompleteStage decides setKeepGoing(true) - setHandler(in, new InHandler { - override def onPush(): Unit = { - subOutlet.push(grab(in)) - } - override def onUpstreamFinish(): Unit = { - if (firstElementPushed) { - subOutlet.complete() - maybeCompleteStage() - } - } - override def onUpstreamFailure(ex: Throwable): Unit = { - // propagate exception irrespective if the cached element has been pushed or not - subOutlet.fail(ex) - maybeCompleteStage() - } - }) + setHandler(in, + new InHandler { + override def onPush(): Unit = { + subOutlet.push(grab(in)) + } + override def onUpstreamFinish(): Unit = { + if (firstElementPushed) { + subOutlet.complete() + maybeCompleteStage() + } + } + override def onUpstreamFailure(ex: Throwable): Unit = { + // propagate exception irrespective if the cached element has been pushed or not + subOutlet.fail(ex) + maybeCompleteStage() + } + }) setHandler(out, new OutHandler { override def onPull(): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala b/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala index a97f85bf41..1723b0fd4e 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/fusing/StreamOfStreams.scala @@ -31,7 +31,8 @@ import akka.stream.impl.fusing.GraphStages.SingleSource /** * INTERNAL API */ -@InternalApi private[akka] final class FlattenMerge[T, M](val breadth: Int) extends GraphStage[FlowShape[Graph[SourceShape[T], M], T]] { +@InternalApi 
private[akka] final class FlattenMerge[T, M](val breadth: Int) + extends GraphStage[FlowShape[Graph[SourceShape[T], M], T]] { private val in = Inlet[Graph[SourceShape[T], M]]("flatten.in") private val out = Outlet[T]("flatten.out") @@ -135,14 +136,18 @@ import akka.stream.impl.fusing.GraphStages.SingleSource /** * INTERNAL API */ -@InternalApi private[akka] final class PrefixAndTail[T](val n: Int) extends GraphStage[FlowShape[T, (immutable.Seq[T], Source[T, NotUsed])]] { +@InternalApi private[akka] final class PrefixAndTail[T](val n: Int) + extends GraphStage[FlowShape[T, (immutable.Seq[T], Source[T, NotUsed])]] { val in: Inlet[T] = Inlet("PrefixAndTail.in") val out: Outlet[(immutable.Seq[T], Source[T, NotUsed])] = Outlet("PrefixAndTail.out") override val shape: FlowShape[T, (immutable.Seq[T], Source[T, NotUsed])] = FlowShape(in, out) override def initialAttributes = DefaultAttributes.prefixAndTail - private final class PrefixAndTailLogic(_shape: Shape) extends TimerGraphStageLogic(_shape) with OutHandler with InHandler { + private final class PrefixAndTailLogic(_shape: Shape) + extends TimerGraphStageLogic(_shape) + with OutHandler + with InHandler { private var left = if (n < 0) 0 else n private var builder = Vector.newBuilder[T] @@ -164,7 +169,9 @@ import akka.stream.impl.fusing.GraphStages.SingleSource case StreamSubscriptionTimeoutTerminationMode.NoopTermination => // do nothing case StreamSubscriptionTimeoutTerminationMode.WarnTermination => - materializer.logger.warning("Substream subscription timeout triggered after {} in prefixAndTail({}).", timeout, n) + materializer.logger.warning("Substream subscription timeout triggered after {} in prefixAndTail({}).", + timeout, + n) } } @@ -182,7 +189,8 @@ import akka.stream.impl.fusing.GraphStages.SingleSource } private def openSubstream(): Source[T, NotUsed] = { - val timeout = ActorMaterializerHelper.downcast(interpreter.materializer).settings.subscriptionTimeoutSettings.timeout + val timeout = + 
ActorMaterializerHelper.downcast(interpreter.materializer).settings.subscriptionTimeoutSettings.timeout tailSource = new SubSourceOutlet[T]("TailSource") tailSource.setHandler(subHandler) setKeepGoing(true) @@ -243,188 +251,199 @@ import akka.stream.impl.fusing.GraphStages.SingleSource /** * INTERNAL API */ -@InternalApi private[akka] final class GroupBy[T, K](val maxSubstreams: Int, val keyFor: T => K, val allowClosedSubstreamRecreation: Boolean = false) extends GraphStage[FlowShape[T, Source[T, NotUsed]]] { +@InternalApi private[akka] final class GroupBy[T, K](val maxSubstreams: Int, + val keyFor: T => K, + val allowClosedSubstreamRecreation: Boolean = false) + extends GraphStage[FlowShape[T, Source[T, NotUsed]]] { val in: Inlet[T] = Inlet("GroupBy.in") val out: Outlet[Source[T, NotUsed]] = Outlet("GroupBy.out") override val shape: FlowShape[T, Source[T, NotUsed]] = FlowShape(in, out) override def initialAttributes = DefaultAttributes.groupBy - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) with OutHandler with InHandler { - parent => + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new TimerGraphStageLogic(shape) with OutHandler with InHandler { + parent => - lazy val decider = inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider - private val activeSubstreamsMap = new java.util.HashMap[Any, SubstreamSource]() - private val closedSubstreams = if (allowClosedSubstreamRecreation) Collections.unmodifiableSet(Collections.emptySet[Any]) else new java.util.HashSet[Any]() - private var timeout: FiniteDuration = _ - private var substreamWaitingToBePushed: Option[SubstreamSource] = None - private var nextElementKey: K = null.asInstanceOf[K] - private var nextElementValue: T = null.asInstanceOf[T] - private var _nextId = 0 - private val substreamsJustStared = new java.util.HashSet[Any]() - private var firstPushCounter: Int = 0 + lazy val decider = 
inheritedAttributes.mandatoryAttribute[SupervisionStrategy].decider + private val activeSubstreamsMap = new java.util.HashMap[Any, SubstreamSource]() + private val closedSubstreams = + if (allowClosedSubstreamRecreation) Collections.unmodifiableSet(Collections.emptySet[Any]) + else new java.util.HashSet[Any]() + private var timeout: FiniteDuration = _ + private var substreamWaitingToBePushed: Option[SubstreamSource] = None + private var nextElementKey: K = null.asInstanceOf[K] + private var nextElementValue: T = null.asInstanceOf[T] + private var _nextId = 0 + private val substreamsJustStared = new java.util.HashSet[Any]() + private var firstPushCounter: Int = 0 - private val tooManySubstreamsOpenException = new TooManySubstreamsOpenException + private val tooManySubstreamsOpenException = new TooManySubstreamsOpenException - private def nextId(): Long = { _nextId += 1; _nextId } + private def nextId(): Long = { _nextId += 1; _nextId } - private def hasNextElement = nextElementKey != null + private def hasNextElement = nextElementKey != null - private def clearNextElement(): Unit = { - nextElementKey = null.asInstanceOf[K] - nextElementValue = null.asInstanceOf[T] - } - - private def tryCompleteAll(): Boolean = - if (activeSubstreamsMap.isEmpty || (!hasNextElement && firstPushCounter == 0)) { - for (value <- activeSubstreamsMap.values().asScala) value.complete() - completeStage() - true - } else false - - private def tryCancel(): Boolean = - // if there's no active substreams or there's only one but it's not been pushed yet - if (activeSubstreamsMap.isEmpty || (activeSubstreamsMap.size == substreamWaitingToBePushed.size)) { - completeStage() - true - } else false - - private def fail(ex: Throwable): Unit = { - for (value <- activeSubstreamsMap.values().asScala) value.fail(ex) - failStage(ex) - } - - private def needToPull: Boolean = - !(hasBeenPulled(in) || isClosed(in) || hasNextElement || substreamWaitingToBePushed.nonEmpty) - - override def preStart(): Unit = - 
timeout = ActorMaterializerHelper.downcast(interpreter.materializer).settings.subscriptionTimeoutSettings.timeout - - override def onPull(): Unit = { - substreamWaitingToBePushed match { - case Some(substreamSource) => - push(out, Source.fromGraph(substreamSource.source)) - scheduleOnce(substreamSource.key, timeout) - substreamWaitingToBePushed = None - case None => - if (hasNextElement) { - val subSubstreamSource = activeSubstreamsMap.get(nextElementKey) - if (subSubstreamSource.isAvailable) { - subSubstreamSource.push(nextElementValue) - clearNextElement() - } - } else if (!hasBeenPulled(in)) tryPull(in) - } - } - - override def onUpstreamFailure(ex: Throwable): Unit = fail(ex) - - override def onUpstreamFinish(): Unit = if (!tryCompleteAll()) setKeepGoing(true) - - override def onDownstreamFinish(): Unit = if (!tryCancel()) setKeepGoing(true) - - override def onPush(): Unit = try { - val elem = grab(in) - val key = keyFor(elem) - require(key != null, "Key cannot be null") - val substreamSource = activeSubstreamsMap.get(key) - if (substreamSource != null) { - if (substreamSource.isAvailable) substreamSource.push(elem) - else { - nextElementKey = key - nextElementValue = elem - } - } else { - if (activeSubstreamsMap.size + closedSubstreams.size == maxSubstreams) - throw tooManySubstreamsOpenException - else if (closedSubstreams.contains(key) && !hasBeenPulled(in)) - pull(in) - else runSubstream(key, elem) - } - } catch { - case NonFatal(ex) => - decider(ex) match { - case Supervision.Stop => fail(ex) - case Supervision.Resume | Supervision.Restart => if (!hasBeenPulled(in)) pull(in) - } - } - - private def runSubstream(key: K, value: T): Unit = { - val substreamSource = new SubstreamSource("GroupBySource " + nextId, key, value) - activeSubstreamsMap.put(key, substreamSource) - firstPushCounter += 1 - if (isAvailable(out)) { - push(out, Source.fromGraph(substreamSource.source)) - scheduleOnce(key, timeout) - substreamWaitingToBePushed = None - } else { - 
setKeepGoing(true) - substreamsJustStared.add(substreamSource) - substreamWaitingToBePushed = Some(substreamSource) - } - } - - override protected def onTimer(timerKey: Any): Unit = { - val substreamSource = activeSubstreamsMap.get(timerKey) - if (substreamSource != null) { - if (!allowClosedSubstreamRecreation) { - closedSubstreams.add(timerKey) - } - activeSubstreamsMap.remove(timerKey) - if (isClosed(in)) tryCompleteAll() - } - } - - setHandlers(in, out, this) - - private class SubstreamSource(name: String, val key: K, var firstElement: T) extends SubSourceOutlet[T](name) with OutHandler { - def firstPush(): Boolean = firstElement != null - def hasNextForSubSource = hasNextElement && nextElementKey == key - private def completeSubStream(): Unit = { - complete() - activeSubstreamsMap.remove(key) - if (!allowClosedSubstreamRecreation) { - closedSubstreams.add(key) - } + private def clearNextElement(): Unit = { + nextElementKey = null.asInstanceOf[K] + nextElementValue = null.asInstanceOf[T] } - private def tryCompleteHandler(): Unit = { - if (parent.isClosed(in) && !hasNextForSubSource) { - completeSubStream() - tryCompleteAll() - } + private def tryCompleteAll(): Boolean = + if (activeSubstreamsMap.isEmpty || (!hasNextElement && firstPushCounter == 0)) { + for (value <- activeSubstreamsMap.values().asScala) value.complete() + completeStage() + true + } else false + + private def tryCancel(): Boolean = + // if there's no active substreams or there's only one but it's not been pushed yet + if (activeSubstreamsMap.isEmpty || (activeSubstreamsMap.size == substreamWaitingToBePushed.size)) { + completeStage() + true + } else false + + private def fail(ex: Throwable): Unit = { + for (value <- activeSubstreamsMap.values().asScala) value.fail(ex) + failStage(ex) } + private def needToPull: Boolean = + !(hasBeenPulled(in) || isClosed(in) || hasNextElement || substreamWaitingToBePushed.nonEmpty) + + override def preStart(): Unit = + timeout = + 
ActorMaterializerHelper.downcast(interpreter.materializer).settings.subscriptionTimeoutSettings.timeout + override def onPull(): Unit = { - cancelTimer(key) - if (firstPush) { - firstPushCounter -= 1 - push(firstElement) - firstElement = null.asInstanceOf[T] - substreamsJustStared.remove(this) - if (substreamsJustStared.isEmpty) setKeepGoing(false) - } else if (hasNextForSubSource) { - push(nextElementValue) - clearNextElement() - } else if (needToPull) pull(in) - - tryCompleteHandler() + substreamWaitingToBePushed match { + case Some(substreamSource) => + push(out, Source.fromGraph(substreamSource.source)) + scheduleOnce(substreamSource.key, timeout) + substreamWaitingToBePushed = None + case None => + if (hasNextElement) { + val subSubstreamSource = activeSubstreamsMap.get(nextElementKey) + if (subSubstreamSource.isAvailable) { + subSubstreamSource.push(nextElementValue) + clearNextElement() + } + } else if (!hasBeenPulled(in)) tryPull(in) + } } - override def onDownstreamFinish(): Unit = { - if (hasNextElement && nextElementKey == key) clearNextElement() - if (firstPush()) firstPushCounter -= 1 - completeSubStream() - if (parent.isClosed(out)) tryCancel() - if (parent.isClosed(in)) tryCompleteAll() else if (needToPull) pull(in) + override def onUpstreamFailure(ex: Throwable): Unit = fail(ex) + + override def onUpstreamFinish(): Unit = if (!tryCompleteAll()) setKeepGoing(true) + + override def onDownstreamFinish(): Unit = if (!tryCancel()) setKeepGoing(true) + + override def onPush(): Unit = + try { + val elem = grab(in) + val key = keyFor(elem) + require(key != null, "Key cannot be null") + val substreamSource = activeSubstreamsMap.get(key) + if (substreamSource != null) { + if (substreamSource.isAvailable) substreamSource.push(elem) + else { + nextElementKey = key + nextElementValue = elem + } + } else { + if (activeSubstreamsMap.size + closedSubstreams.size == maxSubstreams) + throw tooManySubstreamsOpenException + else if (closedSubstreams.contains(key) && 
!hasBeenPulled(in)) + pull(in) + else runSubstream(key, elem) + } + } catch { + case NonFatal(ex) => + decider(ex) match { + case Supervision.Stop => fail(ex) + case Supervision.Resume | Supervision.Restart => if (!hasBeenPulled(in)) pull(in) + } + } + + private def runSubstream(key: K, value: T): Unit = { + val substreamSource = new SubstreamSource("GroupBySource " + nextId, key, value) + activeSubstreamsMap.put(key, substreamSource) + firstPushCounter += 1 + if (isAvailable(out)) { + push(out, Source.fromGraph(substreamSource.source)) + scheduleOnce(key, timeout) + substreamWaitingToBePushed = None + } else { + setKeepGoing(true) + substreamsJustStared.add(substreamSource) + substreamWaitingToBePushed = Some(substreamSource) + } } - setHandler(this) + override protected def onTimer(timerKey: Any): Unit = { + val substreamSource = activeSubstreamsMap.get(timerKey) + if (substreamSource != null) { + if (!allowClosedSubstreamRecreation) { + closedSubstreams.add(timerKey) + } + activeSubstreamsMap.remove(timerKey) + if (isClosed(in)) tryCompleteAll() + } + } + + setHandlers(in, out, this) + + private class SubstreamSource(name: String, val key: K, var firstElement: T) + extends SubSourceOutlet[T](name) + with OutHandler { + def firstPush(): Boolean = firstElement != null + def hasNextForSubSource = hasNextElement && nextElementKey == key + private def completeSubStream(): Unit = { + complete() + activeSubstreamsMap.remove(key) + if (!allowClosedSubstreamRecreation) { + closedSubstreams.add(key) + } + } + + private def tryCompleteHandler(): Unit = { + if (parent.isClosed(in) && !hasNextForSubSource) { + completeSubStream() + tryCompleteAll() + } + } + + override def onPull(): Unit = { + cancelTimer(key) + if (firstPush) { + firstPushCounter -= 1 + push(firstElement) + firstElement = null.asInstanceOf[T] + substreamsJustStared.remove(this) + if (substreamsJustStared.isEmpty) setKeepGoing(false) + } else if (hasNextForSubSource) { + push(nextElementValue) + 
clearNextElement() + } else if (needToPull) pull(in) + + tryCompleteHandler() + } + + override def onDownstreamFinish(): Unit = { + if (hasNextElement && nextElementKey == key) clearNextElement() + if (firstPush()) firstPushCounter -= 1 + completeSubStream() + if (parent.isClosed(out)) tryCancel() + if (parent.isClosed(in)) tryCompleteAll() else if (needToPull) pull(in) + } + + setHandler(this) + } } - } override def toString: String = "GroupBy" } + /** * INTERNAL API */ @@ -437,17 +456,22 @@ import akka.stream.impl.fusing.GraphStages.SingleSource /** Splits after the current element. The current element will be the last element in the current substream. */ case object SplitAfter extends SplitDecision - def when[T](p: T => Boolean, substreamCancelStrategy: SubstreamCancelStrategy): Graph[FlowShape[T, Source[T, NotUsed]], NotUsed] = + def when[T](p: T => Boolean, + substreamCancelStrategy: SubstreamCancelStrategy): Graph[FlowShape[T, Source[T, NotUsed]], NotUsed] = new Split(Split.SplitBefore, p, substreamCancelStrategy) - def after[T](p: T => Boolean, substreamCancelStrategy: SubstreamCancelStrategy): Graph[FlowShape[T, Source[T, NotUsed]], NotUsed] = + def after[T](p: T => Boolean, + substreamCancelStrategy: SubstreamCancelStrategy): Graph[FlowShape[T, Source[T, NotUsed]], NotUsed] = new Split(Split.SplitAfter, p, substreamCancelStrategy) } /** * INTERNAL API */ -@InternalApi private[akka] final class Split[T](val decision: Split.SplitDecision, val p: T => Boolean, val substreamCancelStrategy: SubstreamCancelStrategy) extends GraphStage[FlowShape[T, Source[T, NotUsed]]] { +@InternalApi private[akka] final class Split[T](val decision: Split.SplitDecision, + val p: T => Boolean, + val substreamCancelStrategy: SubstreamCancelStrategy) + extends GraphStage[FlowShape[T, Source[T, NotUsed]]] { val in: Inlet[T] = Inlet("Split.in") val out: Outlet[Source[T, NotUsed]] = Outlet("Split.out") override val shape: FlowShape[T, Source[T, NotUsed]] = FlowShape(in, out) @@ -471,19 
+495,20 @@ import akka.stream.impl.fusing.GraphStages.SingleSource timeout = ActorMaterializerHelper.downcast(interpreter.materializer).settings.subscriptionTimeoutSettings.timeout } - setHandler(out, new OutHandler { - override def onPull(): Unit = { - if (substreamSource eq null) { - //can be already pulled from substream in case split after - if (!hasBeenPulled(in)) pull(in) - } else if (substreamWaitingToBePushed) pushSubstreamSource() - } + setHandler(out, + new OutHandler { + override def onPull(): Unit = { + if (substreamSource eq null) { + //can be already pulled from substream in case split after + if (!hasBeenPulled(in)) pull(in) + } else if (substreamWaitingToBePushed) pushSubstreamSource() + } - override def onDownstreamFinish(): Unit = { - // If the substream is already cancelled or it has not been handed out, we can go away - if ((substreamSource eq null) || substreamWaitingToBePushed || substreamCancelled) completeStage() - } - }) + override def onDownstreamFinish(): Unit = { + // If the substream is already cancelled or it has not been handed out, we can go away + if ((substreamSource eq null) || substreamWaitingToBePushed || substreamCancelled) completeStage() + } + }) val initInHandler = new InHandler { override def onPush(): Unit = { @@ -618,6 +643,7 @@ import akka.stream.impl.fusing.GraphStages.SingleSource */ @InternalApi private[stream] object SubSink { sealed trait State + /** Not yet materialized and no command has been scheduled */ case object Uninitialized extends State @@ -627,6 +653,7 @@ import akka.stream.impl.fusing.GraphStages.SingleSource // preallocated instances for both commands /** A RequestOne command was scheduled before materialization */ case object RequestOneScheduledBeforeMaterialization extends CommandScheduledBeforeMaterialization(RequestOne) + /** A Cancel command was scheduled before materialization */ case object CancelScheduledBeforeMaterialization extends CommandScheduledBeforeMaterialization(Cancel) @@ -643,7 +670,7 
@@ import akka.stream.impl.fusing.GraphStages.SingleSource * INTERNAL API */ @InternalApi private[stream] final class SubSink[T](name: String, externalCallback: ActorSubscriberMessage => Unit) - extends GraphStage[SinkShape[T]] { + extends GraphStage[SinkShape[T]] { import SubSink._ private val in = Inlet[T](s"SubSink($name).in") @@ -670,7 +697,8 @@ import akka.stream.impl.fusing.GraphStages.SingleSource dispatchCommand(RequestOneScheduledBeforeMaterialization) case cmd: CommandScheduledBeforeMaterialization => - throw new IllegalStateException(s"${newState.command} on subsink is illegal when ${cmd.command} is still pending") + throw new IllegalStateException( + s"${newState.command} on subsink is illegal when ${cmd.command} is still pending") } override def createLogic(attr: Attributes) = new GraphStageLogic(shape) with InHandler { @@ -712,8 +740,10 @@ import akka.stream.impl.fusing.GraphStages.SingleSource /** * INTERNAL API */ -@InternalApi private[akka] final class SubSource[T](name: String, private[fusing] val externalCallback: AsyncCallback[SubSink.Command]) - extends GraphStage[SourceShape[T]] { +@InternalApi private[akka] final class SubSource[T]( + name: String, + private[fusing] val externalCallback: AsyncCallback[SubSink.Command]) + extends GraphStage[SourceShape[T]] { import SubSink._ val out: Outlet[T] = Outlet(s"SubSource($name).out") @@ -743,7 +773,9 @@ import akka.stream.impl.fusing.GraphStages.SingleSource } def timeout(d: FiniteDuration): Boolean = - status.compareAndSet(null, ActorSubscriberMessage.OnError(new SubscriptionTimeoutException(s"Substream Source has not been materialized in $d"))) + status.compareAndSet(null, + ActorSubscriberMessage.OnError( + new SubscriptionTimeoutException(s"Substream Source has not been materialized in $d"))) override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with OutHandler { setHandler(out, this) @@ -753,7 +785,8 @@ import akka.stream.impl.fusing.GraphStages.SingleSource case 
null => if (!status.compareAndSet(null, cb)) setCB(cb) case ActorSubscriberMessage.OnComplete => completeStage() case ActorSubscriberMessage.OnError(ex) => failStage(ex) - case _: AsyncCallback[_] => failStage(new IllegalStateException("Substream Source cannot be materialized more than once")) + case _: AsyncCallback[_] => + failStage(new IllegalStateException("Substream Source cannot be materialized more than once")) } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala b/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala index 868afdc3f8..6b3ea625e4 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/ByteStringParser.scala @@ -98,10 +98,10 @@ import scala.util.control.{ NoStackTrace, NonFatal } @tailrec private def doParse(remainingRecursions: Int = recursionLimit): Unit = if (remainingRecursions == 0) failStage( - new IllegalStateException(s"Parsing logic didn't produce result after $recursionLimit steps. " + + new IllegalStateException( + s"Parsing logic didn't produce result after $recursionLimit steps. " + "Aborting processing to avoid infinite cycles. 
In the unlikely case that the parsing logic " + - "needs more recursion, override ParsingLogic.recursionLimit.") - ) + "needs more recursion, override ParsingLogic.recursionLimit.")) else { val recurse = doParseInner() if (recurse) doParse(remainingRecursions - 1) @@ -159,12 +159,10 @@ import scala.util.control.{ NoStackTrace, NonFatal } * @param acceptUpstreamFinish - if true - stream will complete when received `onUpstreamFinish`, if "false" * - onTruncation will be called */ - case class ParseResult[+T]( - result: Option[T], - nextStep: ParseStep[T], - acceptUpstreamFinish: Boolean = true) + case class ParseResult[+T](result: Option[T], nextStep: ParseStep[T], acceptUpstreamFinish: Boolean = true) trait ParseStep[+T] { + /** * Must return true when NeedMoreData will clean buffer. If returns false - next pulled * data will be appended to existing data in buffer @@ -215,11 +213,11 @@ import scala.util.control.{ NoStackTrace, NonFatal } } else throw NeedMoreData def readShortLE(): Int = readByte() | (readByte() << 8) def readIntLE(): Int = readShortLE() | (readShortLE() << 16) - def readLongLE(): Long = (readIntLE() & 0xffffffffL) | ((readIntLE() & 0xffffffffL) << 32) + def readLongLE(): Long = (readIntLE() & 0XFFFFFFFFL) | ((readIntLE() & 0XFFFFFFFFL) << 32) def readShortBE(): Int = (readByte() << 8) | readByte() def readIntBE(): Int = (readShortBE() << 16) | readShortBE() - def readLongBE(): Long = ((readIntBE() & 0xffffffffL) << 32) | (readIntBE() & 0xffffffffL) + def readLongBE(): Long = ((readIntBE() & 0XFFFFFFFFL) << 32) | (readIntBE() & 0XFFFFFFFFL) def skip(numBytes: Int): Unit = if (off + numBytes <= input.length) off += numBytes diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/FileSubscriber.scala b/akka-stream/src/main/scala/akka/stream/impl/io/FileSubscriber.scala index b7e0bede18..831daad971 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/FileSubscriber.scala +++ 
b/akka-stream/src/main/scala/akka/stream/impl/io/FileSubscriber.scala @@ -20,7 +20,11 @@ import scala.util.{ Failure, Success, Try } /** INTERNAL API */ @InternalApi private[akka] object FileSubscriber { - def props(f: Path, completionPromise: Promise[IOResult], bufSize: Int, startPosition: Long, openOptions: Set[OpenOption]) = { + def props(f: Path, + completionPromise: Promise[IOResult], + bufSize: Int, + startPosition: Long, + openOptions: Set[OpenOption]) = { require(bufSize > 0, "buffer size must be > 0") require(startPosition >= 0, s"startPosition must be >= 0 (was $startPosition)") Props(classOf[FileSubscriber], f, completionPromise, bufSize, startPosition, openOptions).withDeploy(Deploy.local) @@ -28,9 +32,13 @@ import scala.util.{ Failure, Success, Try } } /** INTERNAL API */ -@InternalApi private[akka] class FileSubscriber(f: Path, completionPromise: Promise[IOResult], bufSize: Int, startPosition: Long, openOptions: Set[OpenOption]) - extends akka.stream.actor.ActorSubscriber - with ActorLogging { +@InternalApi private[akka] class FileSubscriber(f: Path, + completionPromise: Promise[IOResult], + bufSize: Int, + startPosition: Long, + openOptions: Set[OpenOption]) + extends akka.stream.actor.ActorSubscriber + with ActorLogging { override protected val requestStrategy = WatermarkRequestStrategy(highWatermark = bufSize) @@ -38,18 +46,19 @@ import scala.util.{ Failure, Success, Try } private var bytesWritten: Long = 0 - override def preStart(): Unit = try { - chan = FileChannel.open(f, openOptions.asJava) - if (startPosition > 0) { - chan.position(startPosition) - } + override def preStart(): Unit = + try { + chan = FileChannel.open(f, openOptions.asJava) + if (startPosition > 0) { + chan.position(startPosition) + } - super.preStart() - } catch { - case ex: Exception => - closeAndComplete(Failure(ex)) - cancel() - } + super.preStart() + } catch { + case ex: Exception => + closeAndComplete(Failure(ex)) + cancel() + } def receive = { case 
ActorSubscriberMessage.OnNext(bytes: ByteString) => @@ -82,12 +91,14 @@ import scala.util.{ Failure, Success, Try } if (chan ne null) chan.close() completionPromise.tryComplete(result) } catch { - case closingException: Exception => result match { - case Success(ioResult) => - val statusWithClosingException = ioResult.status.transform(_ => Failure(closingException), ex => Failure(closingException.initCause(ex))) - completionPromise.trySuccess(ioResult.copy(status = statusWithClosingException)) - case Failure(ex) => completionPromise.tryFailure(closingException.initCause(ex)) - } + case closingException: Exception => + result match { + case Success(ioResult) => + val statusWithClosingException = + ioResult.status.transform(_ => Failure(closingException), ex => Failure(closingException.initCause(ex))) + completionPromise.trySuccess(ioResult.copy(status = statusWithClosingException)) + case Failure(ex) => completionPromise.tryFailure(closingException.initCause(ex)) + } } } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/IOSinks.scala b/akka-stream/src/main/scala/akka/stream/impl/io/IOSinks.scala index f9ffba6543..647d748ea3 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/IOSinks.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/IOSinks.scala @@ -21,8 +21,12 @@ import scala.concurrent.{ Future, Promise } * Creates simple synchronous Sink which writes all incoming elements to the given file * (creating it before hand if necessary). 
*/ -@InternalApi private[akka] final class FileSink(f: Path, startPosition: Long, options: immutable.Set[OpenOption], val attributes: Attributes, shape: SinkShape[ByteString]) - extends SinkModule[ByteString, Future[IOResult]](shape) { +@InternalApi private[akka] final class FileSink(f: Path, + startPosition: Long, + options: immutable.Set[OpenOption], + val attributes: Attributes, + shape: SinkShape[ByteString]) + extends SinkModule[ByteString, Future[IOResult]](shape) { override protected def label: String = s"FileSink($f, $options)" @@ -50,8 +54,11 @@ import scala.concurrent.{ Future, Promise } * INTERNAL API * Creates simple synchronous Sink which writes all incoming elements to the output stream. */ -@InternalApi private[akka] final class OutputStreamSink(createOutput: () => OutputStream, val attributes: Attributes, shape: SinkShape[ByteString], autoFlush: Boolean) - extends SinkModule[ByteString, Future[IOResult]](shape) { +@InternalApi private[akka] final class OutputStreamSink(createOutput: () => OutputStream, + val attributes: Attributes, + shape: SinkShape[ByteString], + autoFlush: Boolean) + extends SinkModule[ByteString, Future[IOResult]](shape) { override def create(context: MaterializationContext) = { val materializer = ActorMaterializerHelper.downcast(context.materializer) diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala b/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala index 3977dd41ae..bc0c062a51 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/IOSources.scala @@ -27,7 +27,6 @@ import scala.util.{ Failure, Success, Try } /** * INTERNAL API */ - private[akka] object FileSource { val completionHandler = new CompletionHandler[Integer, Try[Int] => Unit] { @@ -47,7 +46,7 @@ private[akka] object FileSource { * Creates simple asynchronous Source backed by the given file. 
*/ private[akka] final class FileSource(path: Path, chunkSize: Int, startPosition: Long) - extends GraphStageWithMaterializedValue[SourceShape[ByteString], Future[IOResult]] { + extends GraphStageWithMaterializedValue[SourceShape[ByteString], Future[IOResult]] { require(chunkSize > 0, "chunkSize must be greater than 0") val out = Outlet[ByteString]("FileSource.out") @@ -90,9 +89,7 @@ private[akka] final class FileSource(path: Path, chunkSize: Int, startPosition: availableChunks = readAhead(maxReadAhead, availableChunks) //if already read something and try if (availableChunks.nonEmpty) { - emitMultiple(out, availableChunks.iterator, - () => if (eofEncountered) success() else setHandler(out, handler) - ) + emitMultiple(out, availableChunks.iterator, () => if (eofEncountered) success() else setHandler(out, handler)) availableChunks = Vector.empty[ByteString] } else if (eofEncountered) success() } @@ -105,7 +102,8 @@ private[akka] final class FileSource(path: Path, chunkSize: Int, startPosition: /** BLOCKING I/O READ */ @tailrec def readAhead(maxChunks: Int, chunks: Vector[ByteString]): Vector[ByteString] = if (chunks.size < maxChunks && !eofEncountered) { - val readBytes = try channel.read(buffer, position) catch { + val readBytes = try channel.read(buffer, position) + catch { case NonFatal(ex) => failStage(ex) ioResultPromise.trySuccess(IOResult(position, Failure(ex))) @@ -146,8 +144,11 @@ private[akka] final class FileSource(path: Path, chunkSize: Int, startPosition: * INTERNAL API * Source backed by the given input stream. 
*/ -@InternalApi private[akka] final class InputStreamSource(createInputStream: () => InputStream, chunkSize: Int, val attributes: Attributes, shape: SourceShape[ByteString]) - extends SourceModule[ByteString, Future[IOResult]](shape) { +@InternalApi private[akka] final class InputStreamSource(createInputStream: () => InputStream, + chunkSize: Int, + val attributes: Attributes, + shape: SourceShape[ByteString]) + extends SourceModule[ByteString, Future[IOResult]](shape) { override def create(context: MaterializationContext) = { val materializer = ActorMaterializerHelper.downcast(context.materializer) val ioResultPromise = Promise[IOResult]() @@ -155,9 +156,7 @@ private[akka] final class FileSource(path: Path, chunkSize: Int, startPosition: val pub = try { val is = createInputStream() // can throw, i.e. FileNotFound - val props = InputStreamPublisher - .props(is, ioResultPromise, chunkSize) - .withDispatcher(Dispatcher.resolve(context)) + val props = InputStreamPublisher.props(is, ioResultPromise, chunkSize).withDispatcher(Dispatcher.resolve(context)) val ref = materializer.actorOf(context, props) akka.stream.actor.ActorPublisher[ByteString](ref) diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamPublisher.scala b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamPublisher.scala index 45dba3f745..ca06a19908 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamPublisher.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamPublisher.scala @@ -29,9 +29,11 @@ import scala.util.{ Failure, Success } } /** INTERNAL API */ -@InternalApi private[akka] class InputStreamPublisher(is: InputStream, completionPromise: Promise[IOResult], chunkSize: Int) - extends akka.stream.actor.ActorPublisher[ByteString] - with ActorLogging { +@InternalApi private[akka] class InputStreamPublisher(is: InputStream, + completionPromise: Promise[IOResult], + chunkSize: Int) + extends akka.stream.actor.ActorPublisher[ByteString] + with 
ActorLogging { // TODO possibly de-duplicate with FilePublisher? @@ -52,26 +54,27 @@ import scala.util.{ Failure, Success } if (totalDemand > 0 && isActive) self ! Continue } - def readAndEmit(): Unit = if (totalDemand > 0) try { - // blocking read - val readBytes = is.read(arr) + def readAndEmit(): Unit = + if (totalDemand > 0) try { + // blocking read + val readBytes = is.read(arr) - readBytes match { - case -1 => - // had nothing to read into this chunk - log.debug("No more bytes available to read (got `-1` from `read`)") - onCompleteThenStop() + readBytes match { + case -1 => + // had nothing to read into this chunk + log.debug("No more bytes available to read (got `-1` from `read`)") + onCompleteThenStop() - case _ => - readBytesTotal += readBytes + case _ => + readBytesTotal += readBytes - // emit immediately, as this is the only chance to do it before we might block again - onNext(ByteString.fromArray(arr, 0, readBytes)) + // emit immediately, as this is the only chance to do it before we might block again + onNext(ByteString.fromArray(arr, 0, readBytes)) + } + } catch { + case ex: Exception => + onErrorThenStop(ex) } - } catch { - case ex: Exception => - onErrorThenStop(ex) - } override def postStop(): Unit = { super.postStop() diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala index 39c7aeeeb8..3392978c70 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/InputStreamSinkStage.scala @@ -39,7 +39,8 @@ private[stream] object InputStreamSinkStage { /** * INTERNAL API */ -@InternalApi final private[stream] class InputStreamSinkStage(readTimeout: FiniteDuration) extends GraphStageWithMaterializedValue[SinkShape[ByteString], InputStream] { +@InternalApi final private[stream] class InputStreamSinkStage(readTimeout: FiniteDuration) + extends 
GraphStageWithMaterializedValue[SinkShape[ByteString], InputStream] { val in = Inlet[ByteString]("InputStreamSink.in") override def initialAttributes: Attributes = DefaultAttributes.inputStreamSink @@ -110,11 +111,10 @@ private[stream] object InputStreamSinkStage { * INTERNAL API * InputStreamAdapter that interacts with InputStreamSinkStage */ -@InternalApi private[akka] class InputStreamAdapter( - sharedBuffer: BlockingQueue[StreamToAdapterMessage], - sendToStage: (AdapterToStageMessage) => Unit, - readTimeout: FiniteDuration) - extends InputStream { +@InternalApi private[akka] class InputStreamAdapter(sharedBuffer: BlockingQueue[StreamToAdapterMessage], + sendToStage: (AdapterToStageMessage) => Unit, + readTimeout: FiniteDuration) + extends InputStream { var isInitialized = false var isActive = true @@ -194,8 +194,7 @@ private[stream] object InputStreamSinkStage { } @tailrec - private[this] def getData(arr: Array[Byte], begin: Int, length: Int, - gotBytes: Int): Int = { + private[this] def getData(arr: Array[Byte], begin: Int, length: Int, gotBytes: Int): Int = { grabDataChunk() match { case Some(data) => val size = data.size diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala index 978b95311a..7362a5c2dd 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSourceStage.scala @@ -24,7 +24,8 @@ private[stream] object OutputStreamSourceStage { case object Close extends AdapterToStageMessage } -final private[stream] class OutputStreamSourceStage(writeTimeout: FiniteDuration) extends GraphStageWithMaterializedValue[SourceShape[ByteString], OutputStream] { +final private[stream] class OutputStreamSourceStage(writeTimeout: FiniteDuration) + extends GraphStageWithMaterializedValue[SourceShape[ByteString], OutputStream] { val out = 
Outlet[ByteString]("OutputStreamSource.out") override def initialAttributes = DefaultAttributes.outputStreamSource override val shape: SourceShape[ByteString] = SourceShape.of(out) @@ -56,8 +57,7 @@ final private[stream] class OutputStreamSourceStage(writeTimeout: FiniteDuration } setHandler(out, new OutHandler { - override def onPull(): Unit = { - } + override def onPull(): Unit = {} }) } @@ -66,11 +66,10 @@ final private[stream] class OutputStreamSourceStage(writeTimeout: FiniteDuration } } -private[akka] class OutputStreamAdapter( - unfulfilledDemand: Semaphore, - sendToStage: AsyncCallback[AdapterToStageMessage], - writeTimeout: FiniteDuration) - extends OutputStream { +private[akka] class OutputStreamAdapter(unfulfilledDemand: Semaphore, + sendToStage: AsyncCallback[AdapterToStageMessage], + writeTimeout: FiniteDuration) + extends OutputStream { @scala.throws(classOf[IOException]) private[this] def sendData(data: ByteString): Unit = { diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSubscriber.scala b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSubscriber.scala index f034749d2c..09b435b0f2 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSubscriber.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/OutputStreamSubscriber.scala @@ -26,9 +26,12 @@ import scala.util.{ Failure, Success } } /** INTERNAL API */ -@InternalApi private[akka] class OutputStreamSubscriber(os: OutputStream, completionPromise: Promise[IOResult], bufSize: Int, autoFlush: Boolean) - extends akka.stream.actor.ActorSubscriber - with ActorLogging { +@InternalApi private[akka] class OutputStreamSubscriber(os: OutputStream, + completionPromise: Promise[IOResult], + bufSize: Int, + autoFlush: Boolean) + extends akka.stream.actor.ActorSubscriber + with ActorLogging { override protected val requestStrategy = WatermarkRequestStrategy(highWatermark = bufSize) diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala 
b/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala index 85503cd1ab..18042dfd03 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TLSActor.scala @@ -30,11 +30,11 @@ import scala.util.{ Failure, Success, Try } @InternalApi private[stream] object TLSActor { def props( - maxInputBufferSize: Int, - createSSLEngine: ActorSystem => SSLEngine, // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 - verifySession: (ActorSystem, SSLSession) => Try[Unit], // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 - closing: TLSClosing, - tracing: Boolean = false): Props = + maxInputBufferSize: Int, + createSSLEngine: ActorSystem => SSLEngine, // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 + verifySession: (ActorSystem, SSLSession) => Try[Unit], // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 + closing: TLSClosing, + tracing: Boolean = false): Props = Props(new TLSActor(maxInputBufferSize, createSSLEngine, verifySession, closing, tracing)).withDeploy(Deploy.local) final val TransportIn = 0 @@ -48,12 +48,14 @@ import scala.util.{ Failure, Success, Try } * INTERNAL API. 
*/ @InternalApi private[stream] class TLSActor( - maxInputBufferSize: Int, - createSSLEngine: ActorSystem => SSLEngine, // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 - verifySession: (ActorSystem, SSLSession) => Try[Unit], // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 - closing: TLSClosing, - tracing: Boolean) - extends Actor with ActorLogging with Pump { + maxInputBufferSize: Int, + createSSLEngine: ActorSystem => SSLEngine, // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 + verifySession: (ActorSystem, SSLSession) => Try[Unit], // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 + closing: TLSClosing, + tracing: Boolean) + extends Actor + with ActorLogging + with Pump { import TLSActor._ @@ -154,7 +156,8 @@ import scala.util.{ Failure, Success, Try } // The engine could also be instantiated in ActorMaterializerImpl but if creation fails // during materialization it would be worse than failing later on. 
val engine = - try createSSLEngine(context.system) catch { case NonFatal(ex) => fail(ex, closeTransport = true); throw ex } + try createSSLEngine(context.system) + catch { case NonFatal(ex) => fail(ex, closeTransport = true); throw ex } engine.beginHandshake() lastHandshakeStatus = engine.getHandshakeStatus @@ -349,7 +352,10 @@ import scala.util.{ Failure, Success, Try } private def doWrap(): Unit = { val result = engine.wrap(userInBuffer, transportOutBuffer) lastHandshakeStatus = result.getHandshakeStatus - if (tracing) log.debug(s"wrap: status=${result.getStatus} handshake=$lastHandshakeStatus remaining=${userInBuffer.remaining} out=${transportOutBuffer.position()}") + if (tracing) + log.debug( + s"wrap: status=${result.getStatus} handshake=$lastHandshakeStatus remaining=${userInBuffer.remaining} out=${transportOutBuffer + .position()}") if (lastHandshakeStatus == FINISHED) handshakeFinished() runDelegatedTasks() result.getStatus match { @@ -369,7 +375,10 @@ import scala.util.{ Failure, Success, Try } val result = engine.unwrap(transportInBuffer, userOutBuffer) if (ignoreOutput) userOutBuffer.clear() lastHandshakeStatus = result.getHandshakeStatus - if (tracing) log.debug(s"unwrap: status=${result.getStatus} handshake=$lastHandshakeStatus remaining=${transportInBuffer.remaining} out=${userOutBuffer.position()}") + if (tracing) + log.debug( + s"unwrap: status=${result.getStatus} handshake=$lastHandshakeStatus remaining=${transportInBuffer.remaining} out=${userOutBuffer + .position()}") runDelegatedTasks() result.getStatus match { case OK => @@ -459,8 +468,8 @@ import scala.util.{ Failure, Success, Try } */ @InternalApi private[akka] object TlsUtils { def applySessionParameters(engine: SSLEngine, sessionParameters: NegotiateNewSession): Unit = { - sessionParameters.enabledCipherSuites foreach (cs => engine.setEnabledCipherSuites(cs.toArray)) - sessionParameters.enabledProtocols foreach (p => engine.setEnabledProtocols(p.toArray)) + 
sessionParameters.enabledCipherSuites.foreach(cs => engine.setEnabledCipherSuites(cs.toArray)) + sessionParameters.enabledProtocols.foreach(p => engine.setEnabledProtocols(p.toArray)) sessionParameters.clientAuth match { case Some(TLSClientAuth.None) => engine.setNeedClientAuth(false) case Some(TLSClientAuth.Want) => engine.setWantClientAuth(true) diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala index 5486e49307..b5c666c1a9 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TcpStages.scala @@ -30,16 +30,15 @@ import scala.concurrent.{ Future, Promise } /** * INTERNAL API */ -@InternalApi private[stream] class ConnectionSourceStage( - val tcpManager: ActorRef, - val endpoint: InetSocketAddress, - val backlog: Int, - val options: immutable.Traversable[SocketOption], - val halfClose: Boolean, - val idleTimeout: Duration, - val bindShutdownTimeout: FiniteDuration, - val ioSettings: IOSettings) - extends GraphStageWithMaterializedValue[SourceShape[StreamTcp.IncomingConnection], Future[StreamTcp.ServerBinding]] { +@InternalApi private[stream] class ConnectionSourceStage(val tcpManager: ActorRef, + val endpoint: InetSocketAddress, + val backlog: Int, + val options: immutable.Traversable[SocketOption], + val halfClose: Boolean, + val idleTimeout: Duration, + val bindShutdownTimeout: FiniteDuration, + val ioSettings: IOSettings) + extends GraphStageWithMaterializedValue[SourceShape[StreamTcp.IncomingConnection], Future[StreamTcp.ServerBinding]] { import ConnectionSourceStage._ val out: Outlet[StreamTcp.IncomingConnection] = Outlet("IncomingConnections.out") @@ -47,7 +46,8 @@ import scala.concurrent.{ Future, Promise } val shape: SourceShape[StreamTcp.IncomingConnection] = SourceShape(out) // TODO: Timeout on bind - override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): 
(GraphStageLogic, Future[ServerBinding]) = { + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (GraphStageLogic, Future[ServerBinding]) = { val bindingPromise = Promise[ServerBinding] val logic = new TimerGraphStageLogic(shape) { @@ -97,7 +97,8 @@ import scala.concurrent.{ Future, Promise } if (unbindStarted) { unbindCompleted() } else { - val ex = new IllegalStateException("IO Listener actor terminated unexpectedly for remote endpoint [" + + val ex = new IllegalStateException( + "IO Listener actor terminated unexpectedly for remote endpoint [" + endpoint.getHostString + ":" + endpoint.getPort + "]") unbindPromise.tryFailure(ex) failStage(ex) @@ -118,8 +119,13 @@ import scala.concurrent.{ Future, Promise } connectionFlowsAwaitingInitialization.incrementAndGet() val tcpFlow = - Flow.fromGraph(new IncomingConnectionStage(connection, connected.remoteAddress, halfClose, ioSettings, - () => connectionFlowsAwaitingInitialization.decrementAndGet())) + Flow + .fromGraph( + new IncomingConnectionStage(connection, + connected.remoteAddress, + halfClose, + ioSettings, + () => connectionFlowsAwaitingInitialization.decrementAndGet())) .via(detacher[ByteString]) // must read ahead for proper completions // FIXME: Previous code was wrong, must add new tests @@ -128,10 +134,7 @@ import scala.concurrent.{ Future, Promise } case _ => tcpFlow } - StreamTcp.IncomingConnection( - connected.localAddress, - connected.remoteAddress, - handler) + StreamTcp.IncomingConnection(connected.localAddress, connected.remoteAddress, handler) } private def tryUnbind(): Unit = { @@ -182,14 +185,15 @@ private[stream] object ConnectionSourceStage { def halfClose: Boolean def ioSettings: IOSettings } - case class Outbound( - manager: ActorRef, - connectCmd: Connect, - localAddressPromise: Promise[InetSocketAddress], - halfClose: Boolean, - ioSettings: IOSettings) extends TcpRole + case class Outbound(manager: ActorRef, + connectCmd: Connect, + localAddressPromise: 
Promise[InetSocketAddress], + halfClose: Boolean, + ioSettings: IOSettings) + extends TcpRole - case class Inbound(connection: ActorRef, halfClose: Boolean, ioSettings: IOSettings, registerCallback: () => Unit) extends TcpRole + case class Inbound(connection: ActorRef, halfClose: Boolean, ioSettings: IOSettings, registerCallback: () => Unit) + extends TcpRole /* * This is a *non-detached* design, i.e. this does not prefetch itself any of the inputs. It relies on downstream @@ -198,7 +202,10 @@ private[stream] object ConnectionSourceStage { * to attach an extra, fused buffer to the end of this flow. Keeping this stage non-detached makes it much simpler and * easier to maintain and understand. */ - class TcpStreamLogic(val shape: FlowShape[ByteString, ByteString], val role: TcpRole, remoteAddress: InetSocketAddress) extends GraphStageLogic(shape) { + class TcpStreamLogic(val shape: FlowShape[ByteString, ByteString], + val role: TcpRole, + remoteAddress: InetSocketAddress) + extends GraphStageLogic(shape) { implicit def self: ActorRef = stageActor.ref private def bytesIn = shape.in @@ -235,8 +242,9 @@ private[stream] object ConnectionSourceStage { val sender = evt._1 val msg = evt._2 msg match { - case Terminated(_) => fail(new StreamTcpException("The IO manager actor (TCP) has terminated. Stopping now.")) - case f @ CommandFailed(cmd) => fail(new StreamTcpException(s"Tcp command [$cmd] failed${f.causedByString}").initCause(f.cause.orNull)) + case Terminated(_) => fail(new StreamTcpException("The IO manager actor (TCP) has terminated. 
Stopping now.")) + case f @ CommandFailed(cmd) => + fail(new StreamTcpException(s"Tcp command [$cmd] failed${f.causedByString}").initCause(f.cause.orNull)) case c: Connected => role.asInstanceOf[Outbound].localAddressPromise.success(c.localAddress) connection = sender @@ -275,13 +283,14 @@ private[stream] object ConnectionSourceStage { if (!isClosed(bytesIn) && !hasBeenPulled(bytesIn)) pull(bytesIn) - case Terminated(_) => fail(new StreamTcpException("The connection actor has terminated. Stopping now.")) - case f @ CommandFailed(cmd) => fail(new StreamTcpException(s"Tcp command [$cmd] failed${f.causedByString}").initCause(f.cause.orNull)) - case ErrorClosed(cause) => fail(new StreamTcpException(s"The connection closed with error: $cause")) - case Aborted => fail(new StreamTcpException("The connection has been aborted")) - case Closed => completeStage() - case ConfirmedClosed => completeStage() - case PeerClosed => complete(bytesOut) + case Terminated(_) => fail(new StreamTcpException("The connection actor has terminated. Stopping now.")) + case f @ CommandFailed(cmd) => + fail(new StreamTcpException(s"Tcp command [$cmd] failed${f.causedByString}").initCause(f.cause.orNull)) + case ErrorClosed(cause) => fail(new StreamTcpException(s"The connection closed with error: $cause")) + case Aborted => fail(new StreamTcpException("The connection has been aborted")) + case Closed => completeStage() + case ConfirmedClosed => completeStage() + case PeerClosed => complete(bytesOut) } } @@ -318,37 +327,38 @@ private[stream] object ConnectionSourceStage { } } - setHandler(bytesIn, new InHandler { - override def onPush(): Unit = { - val elem = grab(bytesIn) - ReactiveStreamsCompliance.requireNonNullElement(elem) - if (writeInProgress) { - writeBuffer = writeBuffer ++ elem - } else { - connection ! 
Write(writeBuffer ++ elem, WriteAck) - writeInProgress = true - writeBuffer = ByteString.empty - } - if (writeBuffer.size < writeBufferSize) - pull(bytesIn) + setHandler(bytesIn, + new InHandler { + override def onPush(): Unit = { + val elem = grab(bytesIn) + ReactiveStreamsCompliance.requireNonNullElement(elem) + if (writeInProgress) { + writeBuffer = writeBuffer ++ elem + } else { + connection ! Write(writeBuffer ++ elem, WriteAck) + writeInProgress = true + writeBuffer = ByteString.empty + } + if (writeBuffer.size < writeBufferSize) + pull(bytesIn) - } + } - override def onUpstreamFinish(): Unit = - closeConnection() + override def onUpstreamFinish(): Unit = + closeConnection() - override def onUpstreamFailure(ex: Throwable): Unit = { - if (connection != null) { - if (interpreter.log.isDebugEnabled) { - val msg = "Aborting tcp connection to {} because of upstream failure: {}" + override def onUpstreamFailure(ex: Throwable): Unit = { + if (connection != null) { + if (interpreter.log.isDebugEnabled) { + val msg = "Aborting tcp connection to {} because of upstream failure: {}" - if (ex.getStackTrace.isEmpty) interpreter.log.debug(msg, remoteAddress, ex) - else interpreter.log.debug(msg + "\n{}", remoteAddress, ex, ex.getStackTrace.mkString("\n")) - } - connection ! Abort - } else fail(ex) - } - }) + if (ex.getStackTrace.isEmpty) interpreter.log.debug(msg, remoteAddress, ex) + else interpreter.log.debug(msg + "\n{}", remoteAddress, ex, ex.getStackTrace.mkString("\n")) + } + connection ! 
Abort + } else fail(ex) + } + }) /** Fail stage and report to localAddressPromise if still possible */ private def fail(ex: Throwable): Unit = { @@ -372,9 +382,12 @@ private[stream] object ConnectionSourceStage { /** * INTERNAL API */ -@InternalApi private[akka] class IncomingConnectionStage( - connection: ActorRef, remoteAddress: InetSocketAddress, halfClose: Boolean, ioSettings: IOSettings, registerCallback: () => Unit) - extends GraphStage[FlowShape[ByteString, ByteString]] { +@InternalApi private[akka] class IncomingConnectionStage(connection: ActorRef, + remoteAddress: InetSocketAddress, + halfClose: Boolean, + ioSettings: IOSettings, + registerCallback: () => Unit) + extends GraphStage[FlowShape[ByteString, ByteString]] { import TcpConnectionStage._ private val hasBeenCreated = new AtomicBoolean(false) @@ -397,16 +410,14 @@ private[stream] object ConnectionSourceStage { /** * INTERNAL API */ -@InternalApi private[stream] class OutgoingConnectionStage( - manager: ActorRef, - remoteAddress: InetSocketAddress, - localAddress: Option[InetSocketAddress] = None, - options: immutable.Traversable[SocketOption] = Nil, - halfClose: Boolean = true, - connectTimeout: Duration = Duration.Inf, - ioSettings: IOSettings) - - extends GraphStageWithMaterializedValue[FlowShape[ByteString, ByteString], Future[StreamTcp.OutgoingConnection]] { +@InternalApi private[stream] class OutgoingConnectionStage(manager: ActorRef, + remoteAddress: InetSocketAddress, + localAddress: Option[InetSocketAddress] = None, + options: immutable.Traversable[SocketOption] = Nil, + halfClose: Boolean = true, + connectTimeout: Duration = Duration.Inf, + ioSettings: IOSettings) + extends GraphStageWithMaterializedValue[FlowShape[ByteString, ByteString], Future[StreamTcp.OutgoingConnection]] { import TcpConnectionStage._ val bytesIn: Inlet[ByteString] = Inlet("OutgoingTCP.in") @@ -414,7 +425,8 @@ private[stream] object ConnectionSourceStage { override def initialAttributes = 
Attributes.name("OutgoingConnection") val shape: FlowShape[ByteString, ByteString] = FlowShape(bytesIn, bytesOut) - override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[StreamTcp.OutgoingConnection]) = { + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (GraphStageLogic, Future[StreamTcp.OutgoingConnection]) = { // FIXME: A method like this would make soo much sense on Duration (i.e. toOption) val connTimeout = connectTimeout match { case x: FiniteDuration => Some(x) @@ -422,15 +434,16 @@ private[stream] object ConnectionSourceStage { } val localAddressPromise = Promise[InetSocketAddress] - val logic = new TcpStreamLogic(shape, Outbound( - manager, - Connect(remoteAddress, localAddress, options, connTimeout, pullMode = true), - localAddressPromise, - halfClose, - ioSettings), - remoteAddress) + val logic = new TcpStreamLogic(shape, + Outbound(manager, + Connect(remoteAddress, localAddress, options, connTimeout, pullMode = true), + localAddressPromise, + halfClose, + ioSettings), + remoteAddress) - (logic, localAddressPromise.future.map(OutgoingConnection(remoteAddress, _))(ExecutionContexts.sameThreadExecutionContext)) + (logic, + localAddressPromise.future.map(OutgoingConnection(remoteAddress, _))(ExecutionContexts.sameThreadExecutionContext)) } override def toString = s"TCP-to($remoteAddress)" @@ -438,7 +451,9 @@ private[stream] object ConnectionSourceStage { /** INTERNAL API */ @InternalApi private[akka] object TcpIdleTimeout { - def apply(idleTimeout: FiniteDuration, remoteAddress: Option[InetSocketAddress]): BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = { + def apply( + idleTimeout: FiniteDuration, + remoteAddress: Option[InetSocketAddress]): BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = { val connectionToString = remoteAddress match { case Some(address) => s" on connection to [$address]" case _ => "" @@ -448,13 +463,14 @@ 
private[stream] object ConnectionSourceStage { BidiFlow.fromFlows( Flow[ByteString].mapError { case t: TimeoutException => - new TcpIdleTimeoutException(s"TCP idle-timeout encountered$connectionToString, no bytes passed in the last $idleTimeout", idleTimeout) + new TcpIdleTimeoutException( + s"TCP idle-timeout encountered$connectionToString, no bytes passed in the last $idleTimeout", + idleTimeout) }, - Flow[ByteString] - ) + Flow[ByteString]) val fromNetTimeout: BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = toNetTimeout.reversed // now the bottom flow transforms the exception, the top one doesn't (since that one is "fromNet") - fromNetTimeout atop BidiFlow.bidirectionalIdleTimeout[ByteString, ByteString](idleTimeout) atop toNetTimeout + fromNetTimeout.atop(BidiFlow.bidirectionalIdleTimeout[ByteString, ByteString](idleTimeout)).atop(toNetTimeout) } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala b/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala index 0ab0ada0e4..2850c4dc3e 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/TlsModule.scala @@ -20,20 +20,24 @@ import scala.util.Try /** * INTERNAL API. 
*/ -@InternalApi private[stream] final case class TlsModule(plainIn: Inlet[SslTlsOutbound], plainOut: Outlet[SslTlsInbound], - cipherIn: Inlet[ByteString], cipherOut: Outlet[ByteString], - shape: BidiShape[SslTlsOutbound, ByteString, ByteString, SslTlsInbound], - attributes: Attributes, - createSSLEngine: ActorSystem => SSLEngine, // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 - verifySession: (ActorSystem, SSLSession) => Try[Unit], // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 - closing: TLSClosing) - extends AtomicModule[BidiShape[SslTlsOutbound, ByteString, ByteString, SslTlsInbound], NotUsed] { +@InternalApi private[stream] final case class TlsModule( + plainIn: Inlet[SslTlsOutbound], + plainOut: Outlet[SslTlsInbound], + cipherIn: Inlet[ByteString], + cipherOut: Outlet[ByteString], + shape: BidiShape[SslTlsOutbound, ByteString, ByteString, SslTlsInbound], + attributes: Attributes, + createSSLEngine: ActorSystem => SSLEngine, // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 + verifySession: (ActorSystem, SSLSession) => Try[Unit], // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 + closing: TLSClosing) + extends AtomicModule[BidiShape[SslTlsOutbound, ByteString, ByteString, SslTlsInbound], NotUsed] { override def withAttributes(att: Attributes): TlsModule = copy(attributes = att) override def toString: String = f"TlsModule($closing) [${System.identityHashCode(this)}%08x]" - override private[stream] def traversalBuilder = TraversalBuilder.atomic(this, attributes).makeIsland(TlsModuleIslandTag) + override private[stream] def traversalBuilder = + TraversalBuilder.atomic(this, attributes).makeIsland(TlsModuleIslandTag) } /** @@ -41,10 +45,10 @@ import scala.util.Try */ @InternalApi private[stream] object TlsModule { def apply( - attributes: Attributes, - createSSLEngine: ActorSystem => SSLEngine, // ActorSystem is only needed to support the 
AkkaSSLConfig legacy, see #21753 - verifySession: (ActorSystem, SSLSession) => Try[Unit], // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 - closing: TLSClosing): TlsModule = { + attributes: Attributes, + createSSLEngine: ActorSystem => SSLEngine, // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 + verifySession: (ActorSystem, SSLSession) => Try[Unit], // ActorSystem is only needed to support the AkkaSSLConfig legacy, see #21753 + closing: TLSClosing): TlsModule = { val name = attributes.nameOrDefault(s"StreamTls()") val cipherIn = Inlet[ByteString](s"$name.cipherIn") val cipherOut = Outlet[ByteString](s"$name.cipherOut") diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/CompressionUtils.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/CompressionUtils.scala index ae6a7b64db..1072fa0069 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/CompressionUtils.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/compression/CompressionUtils.scala @@ -14,33 +14,35 @@ import akka.util.ByteString /** INTERNAL API */ @InternalApi private[stream] object CompressionUtils { + /** * Creates a flow from a compressor constructor. 
*/ def compressorFlow(newCompressor: () => Compressor): Flow[ByteString, ByteString, NotUsed] = Flow.fromGraph { new SimpleLinearGraphStage[ByteString] { - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - val compressor = newCompressor() + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + val compressor = newCompressor() - override def onPush(): Unit = { - val data = compressor.compressAndFlush(grab(in)) - if (data.nonEmpty) push(out, data) - else pull(in) + override def onPush(): Unit = { + val data = compressor.compressAndFlush(grab(in)) + if (data.nonEmpty) push(out, data) + else pull(in) + } + + override def onPull(): Unit = pull(in) + + override def onUpstreamFinish(): Unit = { + val data = compressor.finish() + if (data.nonEmpty) emit(out, data) + completeStage() + } + + override def postStop(): Unit = compressor.close() + + setHandlers(in, out, this) } - - override def onPull(): Unit = pull(in) - - override def onUpstreamFinish(): Unit = { - val data = compressor.finish() - if (data.nonEmpty) emit(out, data) - completeStage() - } - - override def postStop(): Unit = compressor.close() - - setHandlers(in, out, this) - } } } } diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/Compressor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/Compressor.scala index 5af096f149..c28a8ea71d 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/Compressor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/compression/Compressor.scala @@ -13,6 +13,7 @@ import akka.util.ByteString * A stateful object representing ongoing compression. */ @InternalApi private[akka] abstract class Compressor { + /** * Compresses the given input and returns compressed data. 
The implementation * can and will choose to buffer output data to improve compression. Use @@ -34,6 +35,7 @@ import akka.util.ByteString /** Combines `compress` + `flush` */ def compressAndFlush(input: ByteString): ByteString + /** Combines `compress` + `finish` */ def compressAndFinish(input: ByteString): ByteString diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateCompressor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateCompressor.scala index 18571d75bd..0c341371f5 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateCompressor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateCompressor.scala @@ -12,7 +12,8 @@ import akka.util.{ ByteString, ByteStringBuilder } import scala.annotation.tailrec /** INTERNAL API */ -@InternalApi private[akka] class DeflateCompressor(level: Int = Deflater.BEST_COMPRESSION, nowrap: Boolean = false) extends Compressor { +@InternalApi private[akka] class DeflateCompressor(level: Int = Deflater.BEST_COMPRESSION, nowrap: Boolean = false) + extends Compressor { import DeflateCompressor._ protected lazy val deflater = new Deflater(level, nowrap) @@ -68,7 +69,9 @@ import scala.annotation.tailrec val MinBufferSize = 1024 @tailrec - def drainDeflater(deflater: Deflater, buffer: Array[Byte], result: ByteStringBuilder = new ByteStringBuilder()): ByteString = { + def drainDeflater(deflater: Deflater, + buffer: Array[Byte], + result: ByteStringBuilder = new ByteStringBuilder()): ByteString = { val len = deflater.deflate(buffer) if (len > 0) { result ++= ByteString.fromArray(buffer, 0, len) diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateDecompressor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateDecompressor.scala index a610787026..9824842793 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateDecompressor.scala +++ 
b/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateDecompressor.scala @@ -11,7 +11,7 @@ import akka.stream.Attributes /** INTERNAL API */ @InternalApi private[akka] class DeflateDecompressor(maxBytesPerChunk: Int, nowrap: Boolean) - extends DeflateDecompressorBase(maxBytesPerChunk) { + extends DeflateDecompressorBase(maxBytesPerChunk) { def this(maxBytesPerChunk: Int) = this(maxBytesPerChunk, false) // for binary compatibility @@ -28,4 +28,3 @@ import akka.stream.Attributes startWith(inflating) } } - diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateDecompressorBase.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateDecompressorBase.scala index 038ccdb098..8b8a9f723d 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateDecompressorBase.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/compression/DeflateDecompressorBase.scala @@ -13,7 +13,7 @@ import akka.util.ByteString /** INTERNAL API */ @InternalApi private[akka] abstract class DeflateDecompressorBase(maxBytesPerChunk: Int) - extends ByteStringParser[ByteString] { + extends ByteStringParser[ByteString] { abstract class DecompressorParsingLogic extends ParsingLogic { val inflater: Inflater diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipCompressor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipCompressor.scala index b17bcb5fca..09e40c2af1 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipCompressor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipCompressor.scala @@ -10,7 +10,8 @@ import akka.annotation.InternalApi import akka.util.ByteString /** INTERNAL API */ -@InternalApi private[akka] class GzipCompressor(compressionLevel: Int = Deflater.BEST_COMPRESSION) extends DeflateCompressor(compressionLevel, true) { +@InternalApi private[akka] class GzipCompressor(compressionLevel: Int = 
Deflater.BEST_COMPRESSION) + extends DeflateCompressor(compressionLevel, true) { override protected lazy val deflater = new Deflater(compressionLevel, true) private val checkSum = new CRC32 // CRC32 of uncompressed data private var headerSent = false @@ -21,7 +22,8 @@ import akka.util.ByteString header() ++ super.compressWithBuffer(input, buffer) } override protected def flushWithBuffer(buffer: Array[Byte]): ByteString = header() ++ super.flushWithBuffer(buffer) - override protected def finishWithBuffer(buffer: Array[Byte]): ByteString = header() ++ super.finishWithBuffer(buffer) ++ trailer() + override protected def finishWithBuffer(buffer: Array[Byte]): ByteString = + header() ++ super.finishWithBuffer(buffer) ++ trailer() private def updateCrc(input: ByteString): Unit = { checkSum.update(input.toArray) diff --git a/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala b/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala index 0b6f7aac22..b9f285bdee 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/io/compression/GzipDecompressor.scala @@ -14,7 +14,7 @@ import akka.util.ByteString /** INTERNAL API */ @InternalApi private[akka] class GzipDecompressor(maxBytesPerChunk: Int) - extends DeflateDecompressorBase(maxBytesPerChunk) { + extends DeflateDecompressorBase(maxBytesPerChunk) { override def createLogic(attr: Attributes) = new DecompressorParsingLogic { override val inflater: Inflater = new Inflater(true) @@ -70,8 +70,7 @@ import akka.util.ByteString /** INTERNAL API */ @InternalApi private[akka] object GzipDecompressor { // RFC 1952: http://tools.ietf.org/html/rfc1952 section 2.2 - private[impl] val Header = ByteString( - 0x1F, // ID1 + private[impl] val Header = ByteString(0x1F, // ID1 0x8B, // ID2 8, // CM = Deflate 0, // FLG @@ -81,5 +80,5 @@ import akka.util.ByteString 0, // MTIME 4 0, // XFL 0 // OS - ) + ) 
} diff --git a/akka-stream/src/main/scala/akka/stream/impl/package.scala b/akka-stream/src/main/scala/akka/stream/impl/package.scala index e3bd6eabd7..b477c02256 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/package.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/package.scala @@ -460,6 +460,4 @@ package akka.stream * debugging if everything is wired to the right thing. * */ -package object impl { - -} +package object impl {} diff --git a/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala index e23e8a909a..f5ce7c2284 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/streamref/SinkRefImpl.scala @@ -31,9 +31,8 @@ private[stream] final case class SinkRefImpl[In](initialPartnerRef: ActorRef) ex * the ref. */ @InternalApi -private[stream] final class SinkRefStageImpl[In] private[akka] ( - val initialPartnerRef: OptionVal[ActorRef] -) extends GraphStageWithMaterializedValue[SinkShape[In], Future[SourceRef[In]]] { +private[stream] final class SinkRefStageImpl[In] private[akka] (val initialPartnerRef: OptionVal[ActorRef]) + extends GraphStageWithMaterializedValue[SinkShape[In], Future[SourceRef[In]]] { val in: Inlet[In] = Inlet[In](s"${Logging.simpleName(getClass)}($initialRefName).in") override def shape: SinkShape[In] = SinkShape.of(in) @@ -55,8 +54,8 @@ private[stream] final class SinkRefStageImpl[In] private[akka] ( import StreamRefAttributes._ private[this] lazy val settings = ActorMaterializerHelper.downcast(materializer).settings.streamRefSettings - private[this] lazy val subscriptionTimeout = inheritedAttributes - .get[StreamRefAttributes.SubscriptionTimeout](SubscriptionTimeout(settings.subscriptionTimeout)) + private[this] lazy val subscriptionTimeout = inheritedAttributes.get[StreamRefAttributes.SubscriptionTimeout]( + 
SubscriptionTimeout(settings.subscriptionTimeout)) // end of settings --- override protected lazy val stageActorName: String = streamRefsMaster.nextSinkRefStageName() @@ -90,8 +89,9 @@ private[stream] final class SinkRefStageImpl[In] private[akka] ( initialPartnerRef match { case OptionVal.Some(ref) => // this will set the `partnerRef` - observeAndValidateSender(ref, "Illegal initialPartnerRef! This may be a bug, please report your " + - "usage and complete stack trace on the issue tracker: https://github.com/akka/akka") + observeAndValidateSender(ref, + "Illegal initialPartnerRef! This may be a bug, please report your " + + "usage and complete stack trace on the issue tracker: https://github.com/akka/akka") tryPull() case OptionVal.None => // only schedule timeout timer if partnerRef has not been resolved yet (i.e. if this instance of the Actor @@ -99,7 +99,9 @@ private[stream] final class SinkRefStageImpl[In] private[akka] ( scheduleOnce(SubscriptionTimeoutTimerKey, subscriptionTimeout.timeout) } - log.debug("Created SinkRef, pointing to remote Sink receiver: {}, local worker: {}", initialPartnerRef, self.ref) + log.debug("Created SinkRef, pointing to remote Sink receiver: {}, local worker: {}", + initialPartnerRef, + self.ref) promise.success(SourceRefImpl(self.ref)) } @@ -113,8 +115,10 @@ private[stream] final class SinkRefStageImpl[In] private[akka] ( case OptionVal.Some(_ /* known to be Success*/ ) => completeStage() // other side has terminated (in response to a completion message) so we can safely terminate case OptionVal.None => - failStage(RemoteStreamRefActorTerminatedException(s"Remote target receiver of data $partnerRef terminated. " + - s"Local stream terminating, message loss (on remote side) may have happened.")) + failStage( + RemoteStreamRefActorTerminatedException( + s"Remote target receiver of data $partnerRef terminated. 
" + + s"Local stream terminating, message loss (on remote side) may have happened.")) } case (sender, StreamRefsProtocol.CumulativeDemand(d)) => @@ -123,7 +127,9 @@ private[stream] final class SinkRefStageImpl[In] private[akka] ( if (remoteCumulativeDemandReceived < d) { remoteCumulativeDemandReceived = d - log.debug("Received cumulative demand [{}], consumable demand: [{}]", StreamRefsProtocol.CumulativeDemand(d), remoteCumulativeDemandReceived - remoteCumulativeDemandConsumed) + log.debug("Received cumulative demand [{}], consumable demand: [{}]", + StreamRefsProtocol.CumulativeDemand(d), + remoteCumulativeDemandReceived - remoteCumulativeDemandConsumed) } tryPull() @@ -146,7 +152,7 @@ private[stream] final class SinkRefStageImpl[In] private[akka] ( val ex = StreamRefSubscriptionTimeoutException( // we know the future has been competed by now, since it is in preStart s"[$stageActorName] Remote side did not subscribe (materialize) handed out Source reference [${promise.future.value}], " + - s"within subscription timeout: ${PrettyDuration.format(subscriptionTimeout.timeout)}!") + s"within subscription timeout: ${PrettyDuration.format(subscriptionTimeout.timeout)}!") throw ex } @@ -194,7 +200,9 @@ private[stream] final class SinkRefStageImpl[In] private[akka] ( completedBeforeRemoteConnected match { case OptionVal.Some(scala.util.Failure(ex)) => - log.warning("Stream already terminated with exception before remote side materialized, sending failure: {}", ex) + log.warning( + "Stream already terminated with exception before remote side materialized, sending failure: {}", + ex) partner ! 
StreamRefsProtocol.RemoteStreamFailure(ex.getMessage) finishedWithAwaitingPartnerTermination = OptionVal(Failure(ex)) setKeepGoing(true) // we will terminate once partner ref has Terminated (to avoid racing Terminated with completion message) diff --git a/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala index 601521a946..29e7a301ec 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/streamref/SourceRefImpl.scala @@ -31,9 +31,8 @@ private[stream] final case class SourceRefImpl[T](initialPartnerRef: ActorRef) e * If it is none, then we are the side creating the ref. */ @InternalApi -private[stream] final class SourceRefStageImpl[Out]( - val initialPartnerRef: OptionVal[ActorRef] -) extends GraphStageWithMaterializedValue[SourceShape[Out], Future[SinkRef[Out]]] { stage => +private[stream] final class SourceRefStageImpl[Out](val initialPartnerRef: OptionVal[ActorRef]) + extends GraphStageWithMaterializedValue[SourceShape[Out], Future[SinkRef[Out]]] { stage => val out: Outlet[Out] = Outlet[Out](s"${Logging.simpleName(getClass)}.out") override def shape = SourceShape.of(out) @@ -44,7 +43,8 @@ private[stream] final class SourceRefStageImpl[Out]( case _ => "" } - override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[SinkRef[Out]]) = { + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (GraphStageLogic, Future[SinkRef[Out]]) = { val promise = Promise[SinkRefImpl[Out]]() val logic = new TimerGraphStageLogic(shape) with StageLogging with OutHandler { @@ -54,8 +54,8 @@ private[stream] final class SourceRefStageImpl[Out]( import StreamRefAttributes._ private[this] lazy val settings = ActorMaterializerHelper.downcast(materializer).settings.streamRefSettings - private[this] lazy val subscriptionTimeout = 
inheritedAttributes - .get[StreamRefAttributes.SubscriptionTimeout](SubscriptionTimeout(settings.subscriptionTimeout)) + private[this] lazy val subscriptionTimeout = inheritedAttributes.get[StreamRefAttributes.SubscriptionTimeout]( + SubscriptionTimeout(settings.subscriptionTimeout)) // end of settings --- override protected lazy val stageActorName: String = streamRefsMaster.nextSourceRefStageName() @@ -73,7 +73,8 @@ private[stream] final class SourceRefStageImpl[Out]( private var localCumulativeDemand: Long = 0L private var localRemainingRequested: Int = 0 - private var receiveBuffer: FixedSizeBuffer.FixedSizeBuffer[Out] = _ // initialized in preStart since depends on settings + private var receiveBuffer + : FixedSizeBuffer.FixedSizeBuffer[Out] = _ // initialized in preStart since depends on settings private var requestStrategy: RequestStrategy = _ // initialized in preStart since depends on receiveBuffer's size // end of demand management --- @@ -90,7 +91,8 @@ private[stream] final class SourceRefStageImpl[Out]( self = getStageActor(initialReceive) log.debug("[{}] Allocated receiver: {}", stageActorName, self.ref) if (initialPartnerRef.isDefined) // this will set the partnerRef - observeAndValidateSender(initialPartnerRef.get, "Illegal initialPartnerRef! This would be a bug in the SourceRef usage or impl.") + observeAndValidateSender(initialPartnerRef.get, + "Illegal initialPartnerRef! 
This would be a bug in the SourceRef usage or impl.") promise.success(SinkRefImpl(self.ref)) @@ -131,7 +133,7 @@ private[stream] final class SourceRefStageImpl[Out]( val ex = StreamRefSubscriptionTimeoutException( // we know the future has been competed by now, since it is in preStart s"[$stageActorName] Remote side did not subscribe (materialize) handed out Sink reference [${promise.future.value}]," + - s"within subscription timeout: ${PrettyDuration.format(subscriptionTimeout.timeout)}!") + s"within subscription timeout: ${PrettyDuration.format(subscriptionTimeout.timeout)}!") throw ex // this will also log the exception, unlike failStage; this should fail rarely, but would be good to have it "loud" @@ -141,7 +143,8 @@ private[stream] final class SourceRefStageImpl[Out]( scheduleDemandRedelivery() case TerminationDeadlineTimerKey => - failStage(RemoteStreamRefActorTerminatedException(s"Remote partner [$partnerRef] has terminated unexpectedly and no clean completion/failure message was received " + + failStage(RemoteStreamRefActorTerminatedException( + s"Remote partner [$partnerRef] has terminated unexpectedly and no clean completion/failure message was received " + "(possible reasons: network partition or subscription timeout triggered termination of partner). Tearing down.")) } @@ -187,8 +190,10 @@ private[stream] final class SourceRefStageImpl[Out]( case _ => // this should not have happened! It should be impossible that we watched some other actor - failStage(RemoteStreamRefActorTerminatedException(s"Received UNEXPECTED Terminated($ref) message! " + - s"This actor was NOT our trusted remote partner, which was: $getPartnerRef. Tearing down.")) + failStage( + RemoteStreamRefActorTerminatedException( + s"Received UNEXPECTED Terminated($ref) message! " + + s"This actor was NOT our trusted remote partner, which was: $getPartnerRef. 
Tearing down.")) } } @@ -203,7 +208,8 @@ private[stream] final class SourceRefStageImpl[Out]( if (receiveBuffer.isEmpty && isAvailable(out)) { push(out, payload) } else if (receiveBuffer.isFull) { - throw new IllegalStateException(s"Attempted to overflow buffer! " + + throw new IllegalStateException( + s"Attempted to overflow buffer! " + s"Capacity: ${receiveBuffer.capacity}, incoming element: $payload, " + s"localRemainingRequested: $localRemainingRequested, localCumulativeDemand: $localCumulativeDemand") } else { @@ -245,4 +251,3 @@ private[stream] final class SourceRefStageImpl[Out]( override def toString: String = s"${Logging.simpleName(getClass)}($initialRefName)}" } - diff --git a/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefSettingsImpl.scala b/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefSettingsImpl.scala index 2e24735f90..a28ebd7484 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefSettingsImpl.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefSettingsImpl.scala @@ -13,16 +13,18 @@ import scala.concurrent.duration.FiniteDuration /** INTERNAL API */ @InternalApi private[akka] final case class StreamRefSettingsImpl private ( - override val bufferCapacity: Int, - override val demandRedeliveryInterval: FiniteDuration, - override val subscriptionTimeout: FiniteDuration, - override val finalTerminationSignalDeadline: FiniteDuration -) extends StreamRefSettings { + override val bufferCapacity: Int, + override val demandRedeliveryInterval: FiniteDuration, + override val subscriptionTimeout: FiniteDuration, + override val finalTerminationSignalDeadline: FiniteDuration) + extends StreamRefSettings { override def withBufferCapacity(value: Int): StreamRefSettings = copy(bufferCapacity = value) - override def withDemandRedeliveryInterval(value: FiniteDuration): StreamRefSettings = copy(demandRedeliveryInterval = value) + override def withDemandRedeliveryInterval(value: 
FiniteDuration): StreamRefSettings = + copy(demandRedeliveryInterval = value) override def withSubscriptionTimeout(value: FiniteDuration): StreamRefSettings = copy(subscriptionTimeout = value) - override def withTerminationReceivedBeforeCompletionLeeway(value: FiniteDuration): StreamRefSettings = copy(finalTerminationSignalDeadline = value) + override def withTerminationReceivedBeforeCompletionLeeway(value: FiniteDuration): StreamRefSettings = + copy(finalTerminationSignalDeadline = value) override def productPrefix: String = Logging.simpleName(classOf[StreamRefSettings]) diff --git a/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefsProtocol.scala b/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefsProtocol.scala index ce70b7468d..ab0a9502c7 100644 --- a/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefsProtocol.scala +++ b/akka-stream/src/main/scala/akka/stream/impl/streamref/StreamRefsProtocol.scala @@ -15,6 +15,7 @@ private[akka] sealed trait StreamRefsProtocol /** INTERNAL API */ @InternalApi private[akka] object StreamRefsProtocol { + /** * Sequenced `Subscriber#onNext` equivalent. * The receiving end of these messages MUST fail the stream if it observes gaps in the sequence, @@ -23,7 +24,9 @@ private[akka] object StreamRefsProtocol { * Sequence numbers start from `0`. */ @InternalApi - private[akka] final case class SequencedOnNext[T](seqNr: Long, payload: T) extends StreamRefsProtocol with DeadLetterSuppression { + private[akka] final case class SequencedOnNext[T](seqNr: Long, payload: T) + extends StreamRefsProtocol + with DeadLetterSuppression { if (payload == null) throw ReactiveStreamsCompliance.elementMustNotBeNullException } @@ -31,7 +34,9 @@ private[akka] object StreamRefsProtocol { * INTERNAL API: Initial message sent to remote side to establish partnership between origin and remote stream refs. 
*/ @InternalApi - private[akka] final case class OnSubscribeHandshake(targetRef: ActorRef) extends StreamRefsProtocol with DeadLetterSuppression + private[akka] final case class OnSubscribeHandshake(targetRef: ActorRef) + extends StreamRefsProtocol + with DeadLetterSuppression /** * INTERNAL API: Sent to a the receiver side of a stream ref, once the sending side of the SinkRef gets signalled a Failure. diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala index 6ea03a4185..1d242d1a18 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/BidiFlow.scala @@ -46,10 +46,9 @@ object BidiFlow { * }}} * */ - def fromFlowsMat[I1, O1, I2, O2, M1, M2, M]( - flow1: Graph[FlowShape[I1, O1], M1], - flow2: Graph[FlowShape[I2, O2], M2], - combine: function.Function2[M1, M2, M]): BidiFlow[I1, O1, I2, O2, M] = { + def fromFlowsMat[I1, O1, I2, O2, M1, M2, M](flow1: Graph[FlowShape[I1, O1], M1], + flow2: Graph[FlowShape[I2, O2], M2], + combine: function.Function2[M1, M2, M]): BidiFlow[I1, O1, I2, O2, M] = { new BidiFlow(scaladsl.BidiFlow.fromFlowsMat(flow1, flow2)(combinerToScala(combine))) } @@ -71,16 +70,16 @@ object BidiFlow { * }}} * */ - def fromFlows[I1, O1, I2, O2, M1, M2]( - flow1: Graph[FlowShape[I1, O1], M1], - flow2: Graph[FlowShape[I2, O2], M2]): BidiFlow[I1, O1, I2, O2, NotUsed] = + def fromFlows[I1, O1, I2, O2, M1, M2](flow1: Graph[FlowShape[I1, O1], M1], + flow2: Graph[FlowShape[I2, O2], M2]): BidiFlow[I1, O1, I2, O2, NotUsed] = new BidiFlow(scaladsl.BidiFlow.fromFlows(flow1, flow2)) /** * Create a BidiFlow where the top and bottom flows are just one simple mapping * operator each, expressed by the two functions. 
*/ - def fromFunctions[I1, O1, I2, O2](top: function.Function[I1, O1], bottom: function.Function[I2, O2]): BidiFlow[I1, O1, I2, O2, NotUsed] = + def fromFunctions[I1, O1, I2, O2](top: function.Function[I1, O1], + bottom: function.Function[I2, O2]): BidiFlow[I1, O1, I2, O2, NotUsed] = new BidiFlow(scaladsl.BidiFlow.fromFunctions(top.apply _, bottom.apply _)) /** @@ -112,7 +111,8 @@ object BidiFlow { } } -final class BidiFlow[I1, O1, I2, O2, Mat](delegate: scaladsl.BidiFlow[I1, O1, I2, O2, Mat]) extends Graph[BidiShape[I1, O1, I2, O2], Mat] { +final class BidiFlow[I1, O1, I2, O2, Mat](delegate: scaladsl.BidiFlow[I1, O1, I2, O2, Mat]) + extends Graph[BidiShape[I1, O1, I2, O2], Mat] { override def traversalBuilder = delegate.traversalBuilder override def shape = delegate.shape @@ -159,7 +159,8 @@ final class BidiFlow[I1, O1, I2, O2, Mat](delegate: scaladsl.BidiFlow[I1, O1, I2 * The `combine` function is used to compose the materialized values of this flow and that * flow into the materialized value of the resulting BidiFlow. */ - def atop[OO1, II2, Mat2, M](bidi: BidiFlow[O1, OO1, II2, I2, Mat2], combine: function.Function2[Mat, Mat2, M]): BidiFlow[I1, OO1, II2, O2, M] = + def atop[OO1, II2, Mat2, M](bidi: BidiFlow[O1, OO1, II2, I2, Mat2], + combine: function.Function2[Mat, Mat2, M]): BidiFlow[I1, OO1, II2, O2, M] = new BidiFlow(delegate.atopMat(bidi.asScala)(combinerToScala(combine))) /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Compression.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Compression.scala index fa17ad42b2..b41a8f3d4c 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Compression.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Compression.scala @@ -9,6 +9,7 @@ import akka.stream.scaladsl import akka.util.ByteString object Compression { + /** * Creates a Flow that decompresses gzip-compressed stream of data. 
* diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala b/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala index c0a23fe4d4..218ec3cf3e 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/FileIO.scala @@ -9,7 +9,7 @@ import java.nio.file.{ OpenOption, Path } import java.util import java.util.concurrent.CompletionStage -import akka.stream.{ IOResult, javadsl, scaladsl } +import akka.stream.{ javadsl, scaladsl, IOResult } import akka.util.ByteString import scala.collection.JavaConverters._ @@ -113,7 +113,9 @@ object FileIO { * @param options File open options, see [[java.nio.file.StandardOpenOption]] * @param startPosition startPosition the start position to read from, defaults to 0 */ - def toPath[Opt <: OpenOption](f: Path, options: util.Set[Opt], startPosition: Long): javadsl.Sink[ByteString, CompletionStage[IOResult]] = + def toPath[Opt <: OpenOption](f: Path, + options: util.Set[Opt], + startPosition: Long): javadsl.Sink[ByteString, CompletionStage[IOResult]] = new Sink(scaladsl.FileIO.toPath(f, options.asScala.toSet, startPosition).toCompletionStage()) /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala index dfc7d1620d..653148caec 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Flow.scala @@ -7,7 +7,7 @@ package akka.stream.javadsl import akka.util.{ ConstantFun, Timeout } import akka.{ Done, NotUsed } import akka.event.LoggingAdapter -import akka.japi.{ Pair, Util, function } +import akka.japi.{ function, Pair, Util } import akka.stream._ import org.reactivestreams.Processor @@ -34,7 +34,8 @@ object Flow { def fromProcessor[I, O](processorFactory: function.Creator[Processor[I, O]]): javadsl.Flow[I, O, NotUsed] = new Flow(scaladsl.Flow.fromProcessor(() => processorFactory.create())) - def 
fromProcessorMat[I, O, Mat](processorFactory: function.Creator[Pair[Processor[I, O], Mat]]): javadsl.Flow[I, O, Mat] = + def fromProcessorMat[I, O, Mat]( + processorFactory: function.Creator[Pair[Processor[I, O], Mat]]): javadsl.Flow[I, O, Mat] = new Flow(scaladsl.Flow.fromProcessorMat { () => val javaPair = processorFactory.create() (javaPair.first, javaPair.second) @@ -111,9 +112,9 @@ object Flow { * The `combine` function is used to compose the materialized values of the `sink` and `source` * into the materialized value of the resulting [[Flow]]. */ - def fromSinkAndSourceMat[I, O, M1, M2, M]( - sink: Graph[SinkShape[I], M1], source: Graph[SourceShape[O], M2], - combine: function.Function2[M1, M2, M]): Flow[I, O, M] = + def fromSinkAndSourceMat[I, O, M1, M2, M](sink: Graph[SinkShape[I], M1], + source: Graph[SourceShape[O], M2], + combine: function.Function2[M1, M2, M]): Flow[I, O, M] = new Flow(scaladsl.Flow.fromSinkAndSourceMat(sink, source)(combinerToScala(combine))) /** @@ -176,7 +177,8 @@ object Flow { * * See also [[fromSinkAndSourceCoupledMat]] when access to materialized values of the parameters is needed. */ - def fromSinkAndSourceCoupled[I, O](sink: Graph[SinkShape[I], _], source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] = + def fromSinkAndSourceCoupled[I, O](sink: Graph[SinkShape[I], _], + source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] = new Flow(scaladsl.Flow.fromSinkAndSourceCoupled(sink, source)) /** @@ -203,9 +205,9 @@ object Flow { * The `combine` function is used to compose the materialized values of the `sink` and `source` * into the materialized value of the resulting [[Flow]]. 
*/ - def fromSinkAndSourceCoupledMat[I, O, M1, M2, M]( - sink: Graph[SinkShape[I], M1], source: Graph[SourceShape[O], M2], - combine: function.Function2[M1, M2, M]): Flow[I, O, M] = + def fromSinkAndSourceCoupledMat[I, O, M1, M2, M](sink: Graph[SinkShape[I], M1], + source: Graph[SourceShape[O], M2], + combine: function.Function2[M1, M2, M]): Flow[I, O, M] = new Flow(scaladsl.Flow.fromSinkAndSourceCoupledMat(sink, source)(combinerToScala(combine))) /** @@ -223,11 +225,14 @@ object Flow { * '''Cancels when''' downstream cancels */ @Deprecated - @deprecated("Use lazyInitAsync instead. (lazyInitAsync returns a flow with a more useful materialized value.)", "2.5.12") - def lazyInit[I, O, M](flowFactory: function.Function[I, CompletionStage[Flow[I, O, M]]], fallback: function.Creator[M]): Flow[I, O, M] = { + @deprecated("Use lazyInitAsync instead. (lazyInitAsync returns a flow with a more useful materialized value.)", + "2.5.12") + def lazyInit[I, O, M](flowFactory: function.Function[I, CompletionStage[Flow[I, O, M]]], + fallback: function.Creator[M]): Flow[I, O, M] = { import scala.compat.java8.FutureConverters._ val sflow = scaladsl.Flow - .fromGraph(new LazyFlow[I, O, M](t => flowFactory.apply(t).toScala.map(_.asScala)(ExecutionContexts.sameThreadExecutionContext))) + .fromGraph(new LazyFlow[I, O, M](t => + flowFactory.apply(t).toScala.map(_.asScala)(ExecutionContexts.sameThreadExecutionContext))) .mapMaterializedValue(_ => fallback.create()) new Flow(sflow) } @@ -248,13 +253,21 @@ object Flow { * * '''Cancels when''' downstream cancels */ - def lazyInitAsync[I, O, M](flowFactory: function.Creator[CompletionStage[Flow[I, O, M]]]): Flow[I, O, CompletionStage[Optional[M]]] = { + def lazyInitAsync[I, O, M]( + flowFactory: function.Creator[CompletionStage[Flow[I, O, M]]]): Flow[I, O, CompletionStage[Optional[M]]] = { import scala.compat.java8.FutureConverters._ - val sflow = scaladsl.Flow.lazyInitAsync(() => 
flowFactory.create().toScala.map(_.asScala)(ExecutionContexts.sameThreadExecutionContext)) - .mapMaterializedValue(fut => fut.map(_.fold[Optional[M]](Optional.empty())(m => Optional.ofNullable(m)))(ExecutionContexts.sameThreadExecutionContext).toJava) + val sflow = scaladsl.Flow + .lazyInitAsync(() => flowFactory.create().toScala.map(_.asScala)(ExecutionContexts.sameThreadExecutionContext)) + .mapMaterializedValue( + fut => + fut + .map(_.fold[Optional[M]](Optional.empty())(m => Optional.ofNullable(m)))( + ExecutionContexts.sameThreadExecutionContext) + .toJava) new Flow(sflow) } + /** * Upcast a stream of elements to a stream of supertypes of that element. Useful in combination with * fan-in operators where you do not want to pay the cost of casting each element in a `map`. @@ -326,7 +339,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. */ - def viaMat[T, M, M2](flow: Graph[FlowShape[Out, T], M], combine: function.Function2[Mat, M, M2]): javadsl.Flow[In, T, M2] = + def viaMat[T, M, M2](flow: Graph[FlowShape[Out, T], M], + combine: function.Function2[Mat, M, M2]): javadsl.Flow[In, T, M2] = new Flow(delegate.viaMat(flow)(combinerToScala(combine))) /** @@ -406,7 +420,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. 
*/ - def joinMat[M, M2](flow: Graph[FlowShape[Out, In], M], combine: function.Function2[Mat, M, M2]): javadsl.RunnableGraph[M2] = + def joinMat[M, M2](flow: Graph[FlowShape[Out, In], M], + combine: function.Function2[Mat, M, M2]): javadsl.RunnableGraph[M2] = RunnableGraph.fromGraph(delegate.joinMat(flow)(combinerToScala(combine))) /** @@ -450,7 +465,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * See also [[viaMat]] when access to materialized values of the parameter is needed. */ - def joinMat[I2, O2, Mat2, M](bidi: Graph[BidiShape[Out, O2, I2, In], Mat2], combine: function.Function2[Mat, Mat2, M]): Flow[I2, O2, M] = + def joinMat[I2, O2, Mat2, M](bidi: Graph[BidiShape[Out, O2, I2, In], Mat2], + combine: function.Function2[Mat, Mat2, M]): Flow[I2, O2, M] = new Flow(delegate.joinMat(bidi)(combinerToScala(combine))) /** @@ -462,7 +478,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * @tparam T materialized type of given Source * @tparam U materialized type of given Sink */ - def runWith[T, U](source: Graph[SourceShape[In], T], sink: Graph[SinkShape[Out], U], materializer: Materializer): akka.japi.Pair[T, U] = { + def runWith[T, U](source: Graph[SourceShape[In], T], + sink: Graph[SinkShape[Out], U], + materializer: Materializer): akka.japi.Pair[T, U] = { val (som, sim) = delegate.runWith(source, sink)(materializer) akka.japi.Pair(som, sim) } @@ -532,7 +550,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Cancels when''' downstream cancels */ def mapConcat[T](f: function.Function[Out, java.lang.Iterable[T]]): javadsl.Flow[In, T, Mat] = - new Flow(delegate.mapConcat { elem => Util.immutableSeq(f(elem)) }) + new Flow(delegate.mapConcat { elem => + Util.immutableSeq(f(elem)) + }) /** * Transform each input element into an `Iterable` of output elements that is @@ -560,7 +580,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) 
extends Gr * * '''Cancels when''' downstream cancels */ - def statefulMapConcat[T](f: function.Creator[function.Function[Out, java.lang.Iterable[T]]]): javadsl.Flow[In, T, Mat] = + def statefulMapConcat[T]( + f: function.Creator[function.Function[Out, java.lang.Iterable[T]]]): javadsl.Flow[In, T, Mat] = new Flow(delegate.statefulMapConcat { () => val fun = f.create() elem => Util.immutableSeq(fun(elem)) @@ -923,7 +944,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * See also [[FlowOps.scan]] */ def scanAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): javadsl.Flow[In, T, Mat] = - new Flow(delegate.scanAsync(zero) { (out, in) => f(out, in).toScala }) + new Flow(delegate.scanAsync(zero) { (out, in) => + f(out, in).toScala + }) /** * Similar to `scan` but only emits its result when the upstream completes, @@ -970,7 +993,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * '''Cancels when''' downstream cancels */ - def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): javadsl.Flow[In, T, Mat] = new Flow(delegate.foldAsync(zero) { (out, in) => f(out, in).toScala }) + def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): javadsl.Flow[In, T, Mat] = + new Flow(delegate.foldAsync(zero) { (out, in) => + f(out, in).toScala + }) /** * Similar to `fold` but uses first element as zero element. 
@@ -1118,7 +1144,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: FiniteDuration): javadsl.Flow[In, java.util.List[Out], Mat] = + def groupedWeightedWithin(maxWeight: Long, + costFn: function.Function[Out, java.lang.Long], + d: FiniteDuration): javadsl.Flow[In, java.util.List[Out], Mat] = new Flow(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava)) /** @@ -1139,7 +1167,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise * IllegalArgumentException is thrown. */ - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: java.time.Duration): javadsl.Flow[In, java.util.List[Out], Mat] = + def groupedWeightedWithin(maxWeight: Long, + costFn: function.Function[Out, java.lang.Long], + d: java.time.Duration): javadsl.Flow[In, java.util.List[Out], Mat] = groupedWeightedWithin(maxWeight, costFn, d.asScala) /** @@ -1267,7 +1297,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * See also [[Flow.limit]], [[Flow.limitWeighted]] */ - def takeWhile(p: function.Predicate[Out], inclusive: Boolean): javadsl.Flow[In, Out, Mat] = new Flow(delegate.takeWhile(p.test, inclusive)) + def takeWhile(p: function.Predicate[Out], inclusive: Boolean): javadsl.Flow[In, Out, Mat] = + new Flow(delegate.takeWhile(p.test, inclusive)) /** * Terminate processing (and cancel the upstream publisher) after predicate @@ -1412,7 +1443,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Cancels when''' downstream cancels * */ - def recoverWith(clazz: Class[_ <: Throwable], supplier: Supplier[Graph[SourceShape[Out], 
NotUsed]]): javadsl.Flow[In, Out, Mat] = + def recoverWith(clazz: Class[_ <: Throwable], + supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] = recoverWith { case elem if clazz.isInstance(elem) => supplier.get() } @@ -1442,7 +1474,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * @param attempts Maximum number of retries or -1 to retry indefinitely * @param pf Receives the failure cause and returns the new Source to be materialized if any */ - def recoverWithRetries(attempts: Int, pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] = + def recoverWithRetries(attempts: Int, + pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.recoverWithRetries(attempts, pf)) /** @@ -1471,7 +1504,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * @param clazz the class object of the failure cause * @param supplier supply the new Source to be materialized */ - def recoverWithRetries(attempts: Int, clazz: Class[_ <: Throwable], supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] = + def recoverWithRetries(attempts: Int, + clazz: Class[_ <: Throwable], + supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): javadsl.Flow[In, Out, Mat] = recoverWithRetries(attempts, { case elem if clazz.isInstance(elem) => supplier.get() }) @@ -1571,7 +1606,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate * */ - def conflateWithSeed[S](seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] = + def conflateWithSeed[S](seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] = new 
Flow(delegate.conflateWithSeed(seed.apply)(aggregate.apply)) /** @@ -1627,7 +1663,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new aggregate */ - def batch[S](max: Long, seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] = + def batch[S](max: Long, + seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] = new Flow(delegate.batch(max, seed.apply)(aggregate.apply)) /** @@ -1658,7 +1696,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new batch */ - def batchWeighted[S](max: Long, costFn: function.Function[Out, java.lang.Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] = + def batchWeighted[S](max: Long, + costFn: function.Function[Out, java.lang.Long], + seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): javadsl.Flow[In, S, Mat] = new Flow(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply)) /** @@ -1714,7 +1755,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * on the original, to be emitted in case downstream signals demand. 
* @see [[#expand]] */ - def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]]): javadsl.Flow[In, Out, Mat] = + def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]]) + : javadsl.Flow[In, Out, Mat] = new Flow(delegate.extrapolate(in => extrapolator(in).asScala)) /** @@ -1742,7 +1784,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * @param initial The initial element to be emitted, in case upstream is able to stall the entire stream. * @see [[#expand]] */ - def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]], initial: Out @uncheckedVariance): javadsl.Flow[In, Out, Mat] = + def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]], + initial: Out @uncheckedVariance): javadsl.Flow[In, Out, Mat] = new Flow(delegate.extrapolate(in => extrapolator(in).asScala, Some(initial))) /** @@ -1846,7 +1889,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * @param allowClosedSubstreamRecreation enables recreation of already closed substreams if elements with their * corresponding keys arrive after completion */ - def groupBy[K](maxSubstreams: Int, f: function.Function[Out, K], allowClosedSubstreamRecreation: Boolean): SubFlow[In, Out, Mat] = + def groupBy[K](maxSubstreams: Int, + f: function.Function[Out, K], + allowClosedSubstreamRecreation: Boolean): SubFlow[In, Out, Mat] = new SubFlow(delegate.groupBy(maxSubstreams, f.apply, allowClosedSubstreamRecreation)) /** @@ -2058,7 +2103,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#concat]] */ - def concatMat[M, M2](that: Graph[SourceShape[Out], M], matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] = + def concatMat[M, M2](that: Graph[SourceShape[Out], M], + 
matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] = new Flow(delegate.concatMat(that)(combinerToScala(matF))) /** @@ -2097,7 +2143,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#prepend]] */ - def prependMat[M, M2](that: Graph[SourceShape[Out], M], matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] = + def prependMat[M, M2](that: Graph[SourceShape[Out], M], + matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] = new Flow(delegate.prependMat(that)(combinerToScala(matF))) /** @@ -2135,9 +2182,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#orElse]] */ - def orElseMat[M2, M3]( - secondary: Graph[SourceShape[Out], M2], - matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] = + def orElseMat[M2, M3](secondary: Graph[SourceShape[Out], M2], + matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] = new Flow(delegate.orElseMat(secondary)(combinerToScala(matF))) /** @@ -2168,9 +2214,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#alsoTo]] */ - def alsoToMat[M2, M3]( - that: Graph[SinkShape[Out], M2], - matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] = + def alsoToMat[M2, M3](that: Graph[SinkShape[Out], M2], + matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] = new Flow(delegate.alsoToMat(that)(combinerToScala(matF))) /** @@ -2197,7 +2242,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. 
*/ - def divertToMat[M2, M3](that: Graph[SinkShape[Out], M2], when: function.Predicate[Out], matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] = + def divertToMat[M2, M3](that: Graph[SinkShape[Out], M2], + when: function.Predicate[Out], + matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] = new Flow(delegate.divertToMat(that, when.test)(combinerToScala(matF))) /** @@ -2216,7 +2263,6 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * '''Cancels when''' downstream cancels */ - def wireTap(that: Graph[SinkShape[Out], _]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.wireTap(that)) @@ -2232,9 +2278,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#wireTap]] */ - def wireTapMat[M2, M3]( - that: Graph[SinkShape[Out], M2], - matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] = + def wireTapMat[M2, M3](that: Graph[SinkShape[Out], M2], + matF: function.Function2[Mat, M2, M3]): javadsl.Flow[In, Out, M3] = new Flow(delegate.wireTapMat(that)(combinerToScala(matF))) /** @@ -2302,7 +2347,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#interleave]] */ - def interleaveMat[M, M2](that: Graph[SourceShape[Out], M], segmentSize: Int, + def interleaveMat[M, M2](that: Graph[SourceShape[Out], M], + segmentSize: Int, matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] = interleaveMat(that, segmentSize, eagerClose = false, matF) @@ -2322,7 +2368,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#interleave]] */ - def interleaveMat[M, M2](that: Graph[SourceShape[Out], M], segmentSize: Int, eagerClose: Boolean, + def interleaveMat[M, M2](that: Graph[SourceShape[Out], M], + segmentSize: Int, + eagerClose: Boolean, matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] = new Flow(delegate.interleaveMat(that, segmentSize, 
eagerClose)(combinerToScala(matF))) @@ -2365,9 +2413,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#merge]] */ - def mergeMat[M, M2]( - that: Graph[SourceShape[Out], M], - matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] = + def mergeMat[M, M2](that: Graph[SourceShape[Out], M], + matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out, M2] = mergeMat(that, matF, eagerComplete = false) /** @@ -2379,10 +2426,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#merge]] */ - def mergeMat[M, M2]( - that: Graph[SourceShape[Out], M], - matF: function.Function2[Mat, M, M2], - eagerComplete: Boolean): javadsl.Flow[In, Out, M2] = + def mergeMat[M, M2](that: Graph[SourceShape[Out], M], + matF: function.Function2[Mat, M, M2], + eagerComplete: Boolean): javadsl.Flow[In, Out, M2] = new Flow(delegate.mergeMat(that, eagerComplete)(combinerToScala(matF))) /** @@ -2415,7 +2461,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#mergeSorted]]. 
*/ - def mergeSortedMat[Mat2, Mat3](that: Graph[SourceShape[Out], Mat2], comp: Comparator[Out], + def mergeSortedMat[Mat2, Mat3](that: Graph[SourceShape[Out], Mat2], + comp: Comparator[Out], matF: function.Function2[Mat, Mat2, Mat3]): javadsl.Flow[In, Out, Mat3] = new Flow(delegate.mergeSortedMat(that)(combinerToScala(matF))(Ordering.comparatorToOrdering(comp))) @@ -2441,18 +2488,18 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#zip]] */ - def zipMat[T, M, M2]( - that: Graph[SourceShape[T], M], - matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] = - this.viaMat(Flow.fromGraph(GraphDSL.create( - that, - new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] { - def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { - val zip: FanInShape2[Out, T, Out Pair T] = b.add(Zip.create[Out, T]) - b.from(s).toInlet(zip.in1) - FlowShape(zip.in0, zip.out) - } - })), matF) + def zipMat[T, M, M2](that: Graph[SourceShape[T], M], + matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] = + this.viaMat( + Flow.fromGraph( + GraphDSL.create(that, new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] { + def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { + val zip: FanInShape2[Out, T, Out Pair T] = b.add(Zip.create[Out, T]) + b.from(s).toInlet(zip.in1) + FlowShape(zip.in0, zip.out) + } + })), + matF) /** * Combine the elements of 2 streams into a stream of tuples, picking always the latest element of each. 
@@ -2481,18 +2528,18 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#zipLatest]] */ - def zipLatestMat[T, M, M2]( - that: Graph[SourceShape[T], M], - matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] = - this.viaMat(Flow.fromGraph(GraphDSL.create( - that, - new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] { - def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { - val zip: FanInShape2[Out, T, Out Pair T] = b.add(ZipLatest.create[Out, T]) - b.from(s).toInlet(zip.in1) - FlowShape(zip.in0, zip.out) - } - })), matF) + def zipLatestMat[T, M, M2](that: Graph[SourceShape[T], M], + matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out Pair T, M2] = + this.viaMat( + Flow.fromGraph( + GraphDSL.create(that, new function.Function2[GraphDSL.Builder[M], SourceShape[T], FlowShape[Out, Out Pair T]] { + def apply(b: GraphDSL.Builder[M], s: SourceShape[T]): FlowShape[Out, Out Pair T] = { + val zip: FanInShape2[Out, T, Out Pair T] = b.add(ZipLatest.create[Out, T]) + b.from(s).toInlet(zip.in1) + FlowShape(zip.in0, zip.out) + } + })), + matF) /** * Put together the elements of current [[Flow]] and the given [[Source]] @@ -2506,9 +2553,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * '''Cancels when''' downstream cancels */ - def zipWith[Out2, Out3]( - that: Graph[SourceShape[Out2], _], - combine: function.Function2[Out, Out2, Out3]): javadsl.Flow[In, Out3, Mat] = + def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], + combine: function.Function2[Out, Out2, Out3]): javadsl.Flow[In, Out3, Mat] = new Flow(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine))) /** @@ -2520,10 +2566,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#zipWith]] */ - def zipWithMat[Out2, Out3, M, M2]( - that: Graph[SourceShape[Out2], M], - combine: 
function.Function2[Out, Out2, Out3], - matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] = + def zipWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M], + combine: function.Function2[Out, Out2, Out3], + matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] = new Flow(delegate.zipWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF))) /** @@ -2543,9 +2588,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * '''Cancels when''' downstream cancels */ - def zipLatestWith[Out2, Out3]( - that: Graph[SourceShape[Out2], _], - combine: function.Function2[Out, Out2, Out3]): javadsl.Flow[In, Out3, Mat] = + def zipLatestWith[Out2, Out3](that: Graph[SourceShape[Out2], _], + combine: function.Function2[Out, Out2, Out3]): javadsl.Flow[In, Out3, Mat] = new Flow(delegate.zipLatestWith[Out2, Out3](that)(combinerToScala(combine))) /** @@ -2557,10 +2601,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * * @see [[#zipLatestWith]] */ - def zipLatestWithMat[Out2, Out3, M, M2]( - that: Graph[SourceShape[Out2], M], - combine: function.Function2[Out, Out2, Out3], - matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] = + def zipLatestWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M], + combine: function.Function2[Out, Out2, Out3], + matF: function.Function2[Mat, M, M2]): javadsl.Flow[In, Out3, M2] = new Flow(delegate.zipLatestWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF))) /** @@ -2823,8 +2866,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def throttle(elements: Int, per: FiniteDuration, maximumBurst: Int, - mode: ThrottleMode): javadsl.Flow[In, Out, Mat] = + def throttle(elements: Int, per: FiniteDuration, maximumBurst: Int, mode: ThrottleMode): 
javadsl.Flow[In, Out, Mat] = new Flow(delegate.throttle(elements, per, maximumBurst, mode)) /** @@ -2863,7 +2905,9 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Cancels when''' downstream cancels * */ - def throttle(elements: Int, per: java.time.Duration, maximumBurst: Int, + def throttle(elements: Int, + per: java.time.Duration, + maximumBurst: Int, mode: ThrottleMode): javadsl.Flow[In, Out, Mat] = new Flow(delegate.throttle(elements, per.asScala, maximumBurst, mode)) @@ -2908,8 +2952,11 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def throttle(cost: Int, per: FiniteDuration, maximumBurst: Int, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.Flow[In, Out, Mat] = + def throttle(cost: Int, + per: FiniteDuration, + maximumBurst: Int, + costCalculation: function.Function[Out, Integer], + mode: ThrottleMode): javadsl.Flow[In, Out, Mat] = new Flow(delegate.throttle(cost, per, maximumBurst, costCalculation.apply, mode)) /** @@ -2944,7 +2991,8 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Cancels when''' downstream cancels * */ - def throttle(cost: Int, per: java.time.Duration, + def throttle(cost: Int, + per: java.time.Duration, costCalculation: function.Function[Out, Integer]): javadsl.Flow[In, Out, Mat] = new Flow(delegate.throttle(cost, per.asScala, costCalculation.apply)) @@ -2987,8 +3035,11 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * '''Cancels when''' downstream cancels * */ - def throttle(cost: Int, per: java.time.Duration, maximumBurst: Int, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.Flow[In, Out, Mat] = + def throttle(cost: Int, + per: java.time.Duration, + maximumBurst: Int, + costCalculation: 
function.Function[Out, Integer], + mode: ThrottleMode): javadsl.Flow[In, Out, Mat] = new Flow(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply, mode)) /** @@ -3033,8 +3084,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr */ @Deprecated @deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12") - def throttleEven(cost: Int, per: FiniteDuration, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.Flow[In, Out, Mat] = + def throttleEven(cost: Int, + per: FiniteDuration, + costCalculation: function.Function[Out, Integer], + mode: ThrottleMode): javadsl.Flow[In, Out, Mat] = new Flow(delegate.throttleEven(cost, per, costCalculation.apply, mode)) /** @@ -3049,8 +3102,10 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr */ @Deprecated @deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12") - def throttleEven(cost: Int, per: java.time.Duration, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.Flow[In, Out, Mat] = + def throttleEven(cost: Int, + per: java.time.Duration, + costCalculation: function.Function[Out, Integer], + mode: ThrottleMode): javadsl.Flow[In, Out, Mat] = throttleEven(cost, per.asScala, costCalculation, mode) /** @@ -3294,12 +3349,15 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr * API MAY CHANGE */ @ApiMayChange - def asFlowWithContext[U, CtxU, CtxOut](collapseContext: function.Function2[U, CtxU, In], extractContext: function.Function[Out, CtxOut]): FlowWithContext[U, CtxU, Out, CtxOut, Mat] = + def asFlowWithContext[U, CtxU, CtxOut]( + collapseContext: function.Function2[U, CtxU, In], + extractContext: function.Function[Out, CtxOut]): FlowWithContext[U, CtxU, Out, CtxOut, Mat] = this.asScala.asFlowWithContext((x: U, c: CtxU) => collapseContext.apply(x, c))(x => extractContext.apply(x)).asJava } object RunnableGraph { + 
/** * A graph with a closed shape is logically a runnable graph, this method makes * it so also in type. @@ -3331,6 +3389,7 @@ object RunnableGraph { override def asScala: scaladsl.RunnableGraph[Mat] = runnable } } + /** * Java API * @@ -3342,6 +3401,7 @@ abstract class RunnableGraph[+Mat] extends Graph[ClosedShape, Mat] { * Run this flow and return the materialized values of the flow. */ def run(materializer: Materializer): Mat + /** * Transform only the materialized value of this RunnableGraph, leaving all other properties as they were. */ diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala b/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala index fa33a4abf2..ff937d4dc9 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/FlowWithContext.scala @@ -5,7 +5,7 @@ package akka.stream.javadsl import akka.annotation.ApiMayChange -import akka.japi.{ Pair, Util, function } +import akka.japi.{ function, Pair, Util } import akka.stream._ import akka.event.LoggingAdapter import akka.util.ConstantFun @@ -29,8 +29,14 @@ object FlowWithContext { /** * Creates a FlowWithContext from a regular flow that operates on `Pair` elements. 
*/ - def fromPairs[In, CtxIn, Out, CtxOut, Mat](under: Flow[Pair[In, CtxIn], Pair[Out, CtxOut], Mat]): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] = { - new FlowWithContext(scaladsl.FlowWithContext.fromTuples(scaladsl.Flow[(In, CtxIn)].map { case (i, c) => Pair(i, c) }.viaMat(under.asScala.map(_.toScala))(scaladsl.Keep.right))) + def fromPairs[In, CtxIn, Out, CtxOut, Mat]( + under: Flow[Pair[In, CtxIn], Pair[Out, CtxOut], Mat]): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] = { + new FlowWithContext( + scaladsl.FlowWithContext.fromTuples( + scaladsl + .Flow[(In, CtxIn)] + .map { case (i, c) => Pair(i, c) } + .viaMat(under.asScala.map(_.toScala))(scaladsl.Keep.right))) } } @@ -45,7 +51,10 @@ object FlowWithContext { * API MAY CHANGE */ @ApiMayChange -final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: scaladsl.FlowWithContext[In, CtxIn, Out, CtxOut, Mat]) extends GraphDelegate(delegate) { +final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat]( + delegate: scaladsl.FlowWithContext[In, CtxIn, Out, CtxOut, Mat]) + extends GraphDelegate(delegate) { + /** * Transform this flow by the regular flow. The given flow must support manual context propagation by * taking and producing tuples of (data, context). 
@@ -55,7 +64,9 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: scaladsl * * @see [[akka.stream.javadsl.Flow.via]] */ - def via[Out2, CtxOut2, Mat2](viaFlow: Graph[FlowShape[Pair[Out @uncheckedVariance, CtxOut @uncheckedVariance], Pair[Out2, CtxOut2]], Mat2]): FlowWithContext[In, CtxIn, Out2, CtxOut2, Mat] = { + def via[Out2, CtxOut2, Mat2]( + viaFlow: Graph[FlowShape[Pair[Out @uncheckedVariance, CtxOut @uncheckedVariance], Pair[Out2, CtxOut2]], Mat2]) + : FlowWithContext[In, CtxIn, Out2, CtxOut2, Mat] = { val under = asFlow().via(viaFlow) FlowWithContext.fromPairs(under) } @@ -72,7 +83,8 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: scaladsl * Creates a regular flow of pairs (data, context). */ def asFlow(): Flow[Pair[In, CtxIn], Pair[Out, CtxOut], Mat] @uncheckedVariance = - scaladsl.Flow[Pair[In, CtxIn]] + scaladsl + .Flow[Pair[In, CtxIn]] .map(_.toScala) .viaMat(delegate.asFlow)(scaladsl.Keep.right) .map { case (o, c) => Pair(o, c) } @@ -117,7 +129,11 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: scaladsl * * @see [[akka.stream.javadsl.Flow.grouped]] */ - def grouped(n: Int): FlowWithContext[In, CtxIn, java.util.List[Out @uncheckedVariance], java.util.List[CtxOut @uncheckedVariance], Mat] = + def grouped(n: Int): FlowWithContext[In, + CtxIn, + java.util.List[Out @uncheckedVariance], + java.util.List[CtxOut @uncheckedVariance], + Mat] = viaScala(_.grouped(n).map(_.asJava).mapContext(_.asJava)) /** @@ -128,7 +144,8 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: scaladsl def map[Out2](f: function.Function[Out, Out2]): FlowWithContext[In, CtxIn, Out2, CtxOut, Mat] = viaScala(_.map(f.apply)) - def mapAsync[Out2](parallelism: Int, f: function.Function[Out, CompletionStage[Out2]]): FlowWithContext[In, CtxIn, Out2, CtxOut, Mat] = + def mapAsync[Out2](parallelism: Int, + f: function.Function[Out, CompletionStage[Out2]]): FlowWithContext[In, CtxIn, Out2, 
CtxOut, Mat] = viaScala(_.mapAsync[Out2](parallelism)(o => f.apply(o).toScala)) /** @@ -159,13 +176,15 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: scaladsl * * @see [[akka.stream.javadsl.Flow.mapConcat]] */ - def mapConcat[Out2](f: function.Function[Out, _ <: java.lang.Iterable[Out2]]): FlowWithContext[In, CtxIn, Out2, CtxOut, Mat] = + def mapConcat[Out2]( + f: function.Function[Out, _ <: java.lang.Iterable[Out2]]): FlowWithContext[In, CtxIn, Out2, CtxOut, Mat] = viaScala(_.mapConcat(elem => Util.immutableSeq(f.apply(elem)))) /** * Apply the given function to each context element (leaving the data elements unchanged). */ - def mapContext[CtxOut2](extractContext: function.Function[CtxOut, CtxOut2]): FlowWithContext[In, CtxIn, Out, CtxOut2, Mat] = { + def mapContext[CtxOut2]( + extractContext: function.Function[CtxOut, CtxOut2]): FlowWithContext[In, CtxIn, Out, CtxOut2, Mat] = { viaScala(_.mapContext(extractContext.apply)) } @@ -176,7 +195,11 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: scaladsl * * @see [[akka.stream.javadsl.Flow.sliding]] */ - def sliding(n: Int, step: Int = 1): FlowWithContext[In, CtxIn, java.util.List[Out @uncheckedVariance], java.util.List[CtxOut @uncheckedVariance], Mat] = + def sliding(n: Int, step: Int = 1): FlowWithContext[In, + CtxIn, + java.util.List[Out @uncheckedVariance], + java.util.List[CtxOut @uncheckedVariance], + Mat] = viaScala(_.sliding(n, step).map(_.asJava).mapContext(_.asJava)) /** @@ -184,7 +207,9 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: scaladsl * * @see [[akka.stream.javadsl.Flow.log]] */ - def log(name: String, extract: function.Function[Out, Any], log: LoggingAdapter): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] = + def log(name: String, + extract: function.Function[Out, Any], + log: LoggingAdapter): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] = viaScala(_.log(name, e => extract.apply(e))(log)) /** @@ -213,6 +238,12 @@ final 
class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: scaladsl def asScala: scaladsl.FlowWithContext[In, CtxIn, Out, CtxOut, Mat] = delegate - private[this] def viaScala[In2, CtxIn2, Out2, CtxOut2, Mat2](f: scaladsl.FlowWithContext[In, CtxIn, Out, CtxOut, Mat] => scaladsl.FlowWithContext[In2, CtxIn2, Out2, CtxOut2, Mat2]): FlowWithContext[In2, CtxIn2, Out2, CtxOut2, Mat2] = + private[this] def viaScala[In2, CtxIn2, Out2, CtxOut2, Mat2]( + f: scaladsl.FlowWithContext[In, CtxIn, Out, CtxOut, Mat] => scaladsl.FlowWithContext[In2, + CtxIn2, + Out2, + CtxOut2, + Mat2]) + : FlowWithContext[In2, CtxIn2, Out2, CtxOut2, Mat2] = new FlowWithContext(f(delegate)) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala index 692584e235..461c44bb28 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Framing.scala @@ -47,7 +47,9 @@ object Framing { * @param maximumFrameLength The maximum length of allowed frames while decoding. If the maximum length is * exceeded this Flow will fail the stream. */ - def delimiter(delimiter: ByteString, maximumFrameLength: Int, allowTruncation: FramingTruncation): Flow[ByteString, ByteString, NotUsed] = { + def delimiter(delimiter: ByteString, + maximumFrameLength: Int, + allowTruncation: FramingTruncation): Flow[ByteString, ByteString, NotUsed] = { val truncationAllowed = allowTruncation == FramingTruncation.ALLOW scaladsl.Framing.delimiter(delimiter, maximumFrameLength, truncationAllowed).asJava } @@ -67,10 +69,7 @@ object Framing { * this Flow will fail the stream. 
This length *includes* the header (i.e the offset and * the length of the size field) */ - def lengthField( - fieldLength: Int, - fieldOffset: Int, - maximumFrameLength: Int): Flow[ByteString, ByteString, NotUsed] = + def lengthField(fieldLength: Int, fieldOffset: Int, maximumFrameLength: Int): Flow[ByteString, ByteString, NotUsed] = scaladsl.Framing.lengthField(fieldLength, fieldOffset, maximumFrameLength).asJava /** @@ -87,11 +86,10 @@ object Framing { * the length of the size field) * @param byteOrder The ''ByteOrder'' to be used when decoding the field */ - def lengthField( - fieldLength: Int, - fieldOffset: Int, - maximumFrameLength: Int, - byteOrder: ByteOrder): Flow[ByteString, ByteString, NotUsed] = + def lengthField(fieldLength: Int, + fieldOffset: Int, + maximumFrameLength: Int, + byteOrder: ByteOrder): Flow[ByteString, ByteString, NotUsed] = scaladsl.Framing.lengthField(fieldLength, fieldOffset, maximumFrameLength, byteOrder).asJava /** @@ -113,19 +111,19 @@ object Framing { * ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the operator fails otherwise. * */ - def lengthField( - fieldLength: Int, - fieldOffset: Int, - maximumFrameLength: Int, - byteOrder: ByteOrder, - computeFrameSize: akka.japi.function.Function2[Array[Byte], Integer, Integer]): Flow[ByteString, ByteString, NotUsed] = - scaladsl.Framing.lengthField( - fieldLength, - fieldOffset, - maximumFrameLength, - byteOrder, - (a: Array[Byte], s: Int) => computeFrameSize.apply(a, s) - ).asJava + def lengthField(fieldLength: Int, + fieldOffset: Int, + maximumFrameLength: Int, + byteOrder: ByteOrder, + computeFrameSize: akka.japi.function.Function2[Array[Byte], Integer, Integer]) + : Flow[ByteString, ByteString, NotUsed] = + scaladsl.Framing + .lengthField(fieldLength, + fieldOffset, + maximumFrameLength, + byteOrder, + (a: Array[Byte], s: Int) => computeFrameSize.apply(a, s)) + .asJava /** * Returns a BidiFlow that implements a simple framing protocol. 
This is a convenience wrapper over [[Framing#lengthField]] @@ -157,7 +155,8 @@ object Framing { * limit this BidiFlow will fail the stream. The header attached by this BidiFlow are not * included in this limit. */ - def simpleFramingProtocol(maximumMessageLength: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = + def simpleFramingProtocol( + maximumMessageLength: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = scaladsl.Framing.simpleFramingProtocol(maximumMessageLength).asJava } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala index 4fc9c7c0be..2bc0dab305 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Graph.scala @@ -8,7 +8,7 @@ import java.util import akka.NotUsed import akka.stream._ -import akka.japi.{ Pair, function } +import akka.japi.{ function, Pair } import akka.util.ConstantFun import scala.annotation.unchecked.uncheckedVariance @@ -75,6 +75,7 @@ object Merge { * '''Cancels when''' downstream cancels */ object MergePreferred { + /** * Create a new `MergePreferred` operator with the specified output type. */ @@ -84,7 +85,8 @@ object MergePreferred { /** * Create a new `MergePreferred` operator with the specified output type. */ - def create[T](clazz: Class[T], secondaryPorts: Int): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] = create(secondaryPorts) + def create[T](clazz: Class[T], secondaryPorts: Int): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] = + create(secondaryPorts) /** * Create a new `MergePreferred` operator with the specified output type. 
@@ -92,7 +94,8 @@ object MergePreferred { * @param eagerComplete set to true in order to make this operator eagerly * finish as soon as one of its inputs completes */ - def create[T](secondaryPorts: Int, eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] = + def create[T](secondaryPorts: Int, + eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] = scaladsl.MergePreferred(secondaryPorts, eagerComplete = eagerComplete) /** @@ -101,7 +104,9 @@ object MergePreferred { * @param eagerComplete set to true in order to make this operator eagerly * finish as soon as one of its inputs completes */ - def create[T](clazz: Class[T], secondaryPorts: Int, eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] = + def create[T](clazz: Class[T], + secondaryPorts: Int, + eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] = create(secondaryPorts, eagerComplete) } @@ -124,6 +129,7 @@ object MergePreferred { * A `Broadcast` has one `in` port and 2 or more `out` ports. */ object MergePrioritized { + /** * Create a new `MergePrioritized` operator with the specified output type. */ @@ -151,7 +157,9 @@ object MergePrioritized { * @param eagerComplete set to true in order to make this operator eagerly * finish as soon as one of its inputs completes */ - def create[T](clazz: Class[T], priorities: Array[Int], eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] = + def create[T](clazz: Class[T], + priorities: Array[Int], + eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] = create(priorities, eagerComplete) } @@ -171,6 +179,7 @@ object MergePrioritized { * If eagerCancel is enabled: when any downstream cancels; otherwise: when all downstreams cancel */ object Broadcast { + /** * Create a new `Broadcast` operator with the specified input type. 
* @@ -208,13 +217,15 @@ object Broadcast { * when any (eagerCancel=true) or all (eagerCancel=false) of the downstreams cancel */ object Partition { + /** * Create a new `Partition` operator with the specified input type, `eagerCancel` is `false`. * * @param outputCount number of output ports * @param partitioner function deciding which output each element will be targeted */ - def create[T](outputCount: Int, partitioner: function.Function[T, Integer]): Graph[UniformFanOutShape[T, T], NotUsed] = + def create[T](outputCount: Int, + partitioner: function.Function[T, Integer]): Graph[UniformFanOutShape[T, T], NotUsed] = new scaladsl.Partition(outputCount, partitioner.apply) /** @@ -224,7 +235,9 @@ object Partition { * @param partitioner function deciding which output each element will be targeted * @param eagerCancel this operator cancels, when any (true) or all (false) of the downstreams cancel */ - def create[T](outputCount: Int, partitioner: function.Function[T, Integer], eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] = + def create[T](outputCount: Int, + partitioner: function.Function[T, Integer], + eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] = new scaladsl.Partition(outputCount, partitioner.apply, eagerCancel) /** @@ -234,7 +247,9 @@ object Partition { * @param outputCount number of output ports * @param partitioner function deciding which output each element will be targeted */ - def create[T](clazz: Class[T], outputCount: Int, partitioner: function.Function[T, Integer]): Graph[UniformFanOutShape[T, T], NotUsed] = + def create[T](clazz: Class[T], + outputCount: Int, + partitioner: function.Function[T, Integer]): Graph[UniformFanOutShape[T, T], NotUsed] = new scaladsl.Partition(outputCount, partitioner.apply) /** @@ -245,7 +260,10 @@ object Partition { * @param partitioner function deciding which output each element will be targeted * @param eagerCancel this operator cancels, when any (true) or all (false) of the downstreams 
cancel */ - def create[T](clazz: Class[T], outputCount: Int, partitioner: function.Function[T, Integer], eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] = + def create[T](clazz: Class[T], + outputCount: Int, + partitioner: function.Function[T, Integer], + eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] = new scaladsl.Partition(outputCount, partitioner.apply, eagerCancel) } @@ -264,6 +282,7 @@ object Partition { * '''Cancels when''' If eagerCancel is enabled: when any downstream cancels; otherwise: when all downstreams cancel */ object Balance { + /** * Create a new `Balance` operator with the specified input type, `eagerCancel` is `false`. * @@ -281,7 +300,9 @@ object Balance { * @param waitForAllDownstreams if `true` it will not start emitting elements to downstream outputs until all of them have requested at least one element * @param eagerCancel if true, balance cancels upstream if any of its downstreams cancel, if false, when all have cancelled. */ - def create[T](outputCount: Int, waitForAllDownstreams: Boolean, eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] = + def create[T](outputCount: Int, + waitForAllDownstreams: Boolean, + eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] = new scaladsl.Balance(outputCount, waitForAllDownstreams, eagerCancel) /** @@ -308,7 +329,9 @@ object Balance { * @param outputCount number of output ports * @param waitForAllDownstreams if `true` it will not start emitting elements to downstream outputs until all of them have requested at least one element */ - def create[T](clazz: Class[T], outputCount: Int, waitForAllDownstreams: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] = + def create[T](clazz: Class[T], + outputCount: Int, + waitForAllDownstreams: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] = create(outputCount, waitForAllDownstreams) /** @@ -319,7 +342,10 @@ object Balance { * @param waitForAllDownstreams if `true` it will not start emitting elements 
to downstream outputs until all of them have requested at least one element * @param eagerCancel if true, balance cancels upstream if any of its downstreams cancel, if false, when all have cancelled. */ - def create[T](clazz: Class[T], outputCount: Int, waitForAllDownstreams: Boolean, eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] = + def create[T](clazz: Class[T], + outputCount: Int, + waitForAllDownstreams: Boolean, + eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] = new scaladsl.Balance(outputCount, waitForAllDownstreams, eagerCancel) } @@ -461,6 +487,7 @@ object Unzip { * '''Cancels when''' downstream cancels */ object Concat { + /** * Create a new anonymous `Concat` operator with the specified input types. */ @@ -492,10 +519,10 @@ object GraphDSL extends GraphCreate { * Creates a new [[Graph]] by importing the given graph list `graphs` and passing their [[Shape]]s * along with the [[GraphDSL.Builder]] to the given create function. */ - def create[IS <: Shape, S <: Shape, M, G <: Graph[IS, M]]( - graphs: java.util.List[G], - buildBlock: function.Function2[GraphDSL.Builder[java.util.List[M]], java.util.List[IS], S]): Graph[S, java.util.List[M]] = { + graphs: java.util.List[G], + buildBlock: function.Function2[GraphDSL.Builder[java.util.List[M]], java.util.List[IS], S]) + : Graph[S, java.util.List[M]] = { require(!graphs.isEmpty, "The input list must have one or more Graph elements") val gbuilder = builder[java.util.List[M]]() val toList = (m1: M) => new util.ArrayList(util.Arrays.asList(m1)) diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala index 07798e2afa..c7c045f20f 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Hub.scala @@ -34,9 +34,7 @@ object MergeHub { * @param perProducerBufferSize Buffer space used per producer. 
*/ def of[T](clazz: Class[T], perProducerBufferSize: Int): Source[T, Sink[T, NotUsed]] = { - akka.stream.scaladsl.MergeHub.source[T](perProducerBufferSize) - .mapMaterializedValue(_.asJava[T]) - .asJava + akka.stream.scaladsl.MergeHub.source[T](perProducerBufferSize).mapMaterializedValue(_.asJava[T]).asJava } /** @@ -86,9 +84,7 @@ object BroadcastHub { * is backpressured. Must be a power of two and less than 4096. */ def of[T](clazz: Class[T], bufferSize: Int): Sink[T, Source[T, NotUsed]] = { - akka.stream.scaladsl.BroadcastHub.sink[T](bufferSize) - .mapMaterializedValue(_.asJava) - .asJava + akka.stream.scaladsl.BroadcastHub.sink[T](bufferSize).mapMaterializedValue(_.asJava).asJava } def of[T](clazz: Class[T]): Sink[T, Source[T, NotUsed]] = of(clazz, 256) @@ -136,18 +132,22 @@ object PartitionHub { * @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer * is backpressured. */ - @ApiMayChange def ofStateful[T](clazz: Class[T], partitioner: Supplier[ToLongBiFunction[ConsumerInfo, T]], - startAfterNrOfConsumers: Int, bufferSize: Int): Sink[T, Source[T, NotUsed]] = { + @ApiMayChange def ofStateful[T](clazz: Class[T], + partitioner: Supplier[ToLongBiFunction[ConsumerInfo, T]], + startAfterNrOfConsumers: Int, + bufferSize: Int): Sink[T, Source[T, NotUsed]] = { val p: () => (akka.stream.scaladsl.PartitionHub.ConsumerInfo, T) => Long = () => { val f = partitioner.get() (info, elem) => f.applyAsLong(info, elem) } - akka.stream.scaladsl.PartitionHub.statefulSink[T](p, startAfterNrOfConsumers, bufferSize) + akka.stream.scaladsl.PartitionHub + .statefulSink[T](p, startAfterNrOfConsumers, bufferSize) .mapMaterializedValue(_.asJava) .asJava } - @ApiMayChange def ofStateful[T](clazz: Class[T], partitioner: Supplier[ToLongBiFunction[ConsumerInfo, T]], + @ApiMayChange def ofStateful[T](clazz: Class[T], + partitioner: Supplier[ToLongBiFunction[ConsumerInfo, T]], startAfterNrOfConsumers: Int): Sink[T, Source[T, NotUsed]] = 
ofStateful(clazz, partitioner, startAfterNrOfConsumers, akka.stream.scaladsl.PartitionHub.defaultBufferSize) @@ -180,15 +180,18 @@ object PartitionHub { * @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer * is backpressured. */ - @ApiMayChange def of[T](clazz: Class[T], partitioner: BiFunction[Integer, T, Integer], startAfterNrOfConsumers: Int, + @ApiMayChange def of[T](clazz: Class[T], + partitioner: BiFunction[Integer, T, Integer], + startAfterNrOfConsumers: Int, bufferSize: Int): Sink[T, Source[T, NotUsed]] = - akka.stream.scaladsl.PartitionHub.sink[T]( - (size, elem) => partitioner.apply(size, elem), - startAfterNrOfConsumers, bufferSize) + akka.stream.scaladsl.PartitionHub + .sink[T]((size, elem) => partitioner.apply(size, elem), startAfterNrOfConsumers, bufferSize) .mapMaterializedValue(_.asJava) .asJava - @ApiMayChange def of[T](clazz: Class[T], partitioner: BiFunction[Integer, T, Integer], startAfterNrOfConsumers: Int): Sink[T, Source[T, NotUsed]] = + @ApiMayChange def of[T](clazz: Class[T], + partitioner: BiFunction[Integer, T, Integer], + startAfterNrOfConsumers: Int): Sink[T, Source[T, NotUsed]] = of(clazz, partitioner, startAfterNrOfConsumers, akka.stream.scaladsl.PartitionHub.defaultBufferSize) @DoNotInherit @ApiMayChange trait ConsumerInfo { diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Keep.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Keep.scala index cbfc239ff5..7878046740 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Keep.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Keep.scala @@ -11,8 +11,12 @@ import akka.japi.Pair object Keep { private val _left = new function.Function2[Any, Any, Any] with ((Any, Any) => Any) { def apply(l: Any, r: Any) = l } private val _right = new function.Function2[Any, Any, Any] with ((Any, Any) => Any) { def apply(l: Any, r: Any) = r } - private val _both = new function.Function2[Any, Any, Any] with ((Any, Any) => Any) { 
def apply(l: Any, r: Any) = new akka.japi.Pair(l, r) } - private val _none = new function.Function2[Any, Any, NotUsed] with ((Any, Any) => NotUsed) { def apply(l: Any, r: Any) = NotUsed } + private val _both = new function.Function2[Any, Any, Any] with ((Any, Any) => Any) { + def apply(l: Any, r: Any) = new akka.japi.Pair(l, r) + } + private val _none = new function.Function2[Any, Any, NotUsed] with ((Any, Any) => NotUsed) { + def apply(l: Any, r: Any) = NotUsed + } def left[L, R]: function.Function2[L, R, L] = _left.asInstanceOf[function.Function2[L, R, L]] def right[L, R]: function.Function2[L, R, R] = _right.asInstanceOf[function.Function2[L, R, R]] diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala b/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala index cc161b229d..71761bdf24 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/MergeLatest.scala @@ -5,7 +5,7 @@ package akka.stream.javadsl import akka.stream.stage.GraphStage -import akka.stream.{ UniformFanInShape, scaladsl } +import akka.stream.{ scaladsl, UniformFanInShape } import scala.collection.JavaConverters._ @@ -23,6 +23,7 @@ import scala.collection.JavaConverters._ * */ object MergeLatest { + /** * Create a new `MergeLatest` with the specified number of input ports. * @@ -39,4 +40,3 @@ object MergeLatest { */ def create[T](inputPorts: Int): GraphStage[UniformFanInShape[T, java.util.List[T]]] = create(inputPorts, false) } - diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Queue.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Queue.scala index 1503976a1a..d00da3cc4b 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Queue.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Queue.scala @@ -42,6 +42,7 @@ trait SourceQueue[T] { * This trait adds completion support to [[SourceQueue]]. 
*/ trait SourceQueueWithComplete[T] extends SourceQueue[T] { + /** * Complete the stream normally. Use `watchCompletion` to be notified of this * operation’s success. @@ -87,9 +88,9 @@ trait SinkQueue[T] { * This trait adds cancel support to [[SinkQueue]]. */ trait SinkQueueWithCancel[T] extends SinkQueue[T] { + /** * Cancel the stream. */ def cancel(): Unit } - diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/RestartFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/RestartFlow.scala index 5e64355514..578cb93c1b 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/RestartFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/RestartFlow.scala @@ -43,11 +43,15 @@ object RestartFlow { */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def withBackoff[In, Out](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, + def withBackoff[In, Out](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = { - akka.stream.scaladsl.RestartFlow.withBackoff(minBackoff, maxBackoff, randomFactor) { () => - flowFactory.create().asScala - }.asJava + akka.stream.scaladsl.RestartFlow + .withBackoff(minBackoff, maxBackoff, randomFactor) { () => + flowFactory.create().asScala + } + .asJava } /** @@ -73,7 +77,9 @@ object RestartFlow { * In order to skip this additional delay pass in `0`. * @param flowFactory A factory for producing the [[Flow]] to wrap. 
*/ - def withBackoff[In, Out](minBackoff: java.time.Duration, maxBackoff: java.time.Duration, randomFactor: Double, + def withBackoff[In, Out](minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = { import akka.util.JavaDurationConverters._ withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, flowFactory) @@ -106,11 +112,16 @@ object RestartFlow { */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def withBackoff[In, Out](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, - maxRestarts: Int, flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = { - akka.stream.scaladsl.RestartFlow.withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () => - flowFactory.create().asScala - }.asJava + def withBackoff[In, Out](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxRestarts: Int, + flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = { + akka.stream.scaladsl.RestartFlow + .withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () => + flowFactory.create().asScala + } + .asJava } /** @@ -138,8 +149,11 @@ object RestartFlow { * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param flowFactory A factory for producing the [[Flow]] to wrap. 
*/ - def withBackoff[In, Out](minBackoff: java.time.Duration, maxBackoff: java.time.Duration, randomFactor: Double, - maxRestarts: Int, flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = { + def withBackoff[In, Out](minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, + maxRestarts: Int, + flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = { import akka.util.JavaDurationConverters._ withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, maxRestarts, flowFactory) } @@ -171,11 +185,16 @@ object RestartFlow { */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def onFailuresWithBackoff[In, Out](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, - maxRestarts: Int, flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = { - akka.stream.scaladsl.RestartFlow.onFailuresWithBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () => - flowFactory.create().asScala - }.asJava + def onFailuresWithBackoff[In, Out](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxRestarts: Int, + flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = { + akka.stream.scaladsl.RestartFlow + .onFailuresWithBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () => + flowFactory.create().asScala + } + .asJava } /** @@ -203,8 +222,11 @@ object RestartFlow { * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param flowFactory A factory for producing the [[Flow]] to wrap. 
*/ - def onFailuresWithBackoff[In, Out](minBackoff: java.time.Duration, maxBackoff: java.time.Duration, randomFactor: Double, - maxRestarts: Int, flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = { + def onFailuresWithBackoff[In, Out](minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, + maxRestarts: Int, + flowFactory: Creator[Flow[In, Out, _]]): Flow[In, Out, NotUsed] = { import akka.util.JavaDurationConverters._ onFailuresWithBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, maxRestarts, flowFactory) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSink.scala b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSink.scala index b198e1e1ed..7ff521ee5d 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSink.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSink.scala @@ -44,11 +44,15 @@ object RestartSink { */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, + def withBackoff[T](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = { - akka.stream.scaladsl.RestartSink.withBackoff(minBackoff, maxBackoff, randomFactor) { () => - sinkFactory.create().asScala - }.asJava + akka.stream.scaladsl.RestartSink + .withBackoff(minBackoff, maxBackoff, randomFactor) { () => + sinkFactory.create().asScala + } + .asJava } /** @@ -75,7 +79,9 @@ object RestartSink { * In order to skip this additional delay pass in `0`. * @param sinkFactory A factory for producing the [[Sink]] to wrap. 
*/ - def withBackoff[T](minBackoff: java.time.Duration, maxBackoff: java.time.Duration, randomFactor: Double, + def withBackoff[T](minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = { import akka.util.JavaDurationConverters._ withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, sinkFactory) @@ -109,11 +115,16 @@ object RestartSink { */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, - maxRestarts: Int, sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = { - akka.stream.scaladsl.RestartSink.withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () => - sinkFactory.create().asScala - }.asJava + def withBackoff[T](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxRestarts: Int, + sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = { + akka.stream.scaladsl.RestartSink + .withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () => + sinkFactory.create().asScala + } + .asJava } /** @@ -142,8 +153,11 @@ object RestartSink { * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param sinkFactory A factory for producing the [[Sink]] to wrap. 
*/ - def withBackoff[T](minBackoff: java.time.Duration, maxBackoff: java.time.Duration, randomFactor: Double, - maxRestarts: Int, sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = { + def withBackoff[T](minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, + maxRestarts: Int, + sinkFactory: Creator[Sink[T, _]]): Sink[T, NotUsed] = { import akka.util.JavaDurationConverters._ withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, maxRestarts, sinkFactory) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala index b61d5aa3b5..e878d6a667 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/RestartSource.scala @@ -40,11 +40,15 @@ object RestartSource { */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, + def withBackoff[T](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { - akka.stream.scaladsl.RestartSource.withBackoff(minBackoff, maxBackoff, randomFactor) { () => - sourceFactory.create().asScala - }.asJava + akka.stream.scaladsl.RestartSource + .withBackoff(minBackoff, maxBackoff, randomFactor) { () => + sourceFactory.create().asScala + } + .asJava } /** @@ -67,7 +71,9 @@ object RestartSource { * In order to skip this additional delay pass in `0`. * @param sourceFactory A factory for producing the [[Source]] to wrap. 
*/ - def withBackoff[T](minBackoff: java.time.Duration, maxBackoff: java.time.Duration, randomFactor: Double, + def withBackoff[T](minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { import akka.util.JavaDurationConverters._ withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, sourceFactory) @@ -98,11 +104,16 @@ object RestartSource { */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, - maxRestarts: Int, sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { - akka.stream.scaladsl.RestartSource.withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () => - sourceFactory.create().asScala - }.asJava + def withBackoff[T](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxRestarts: Int, + sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { + akka.stream.scaladsl.RestartSource + .withBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () => + sourceFactory.create().asScala + } + .asJava } /** @@ -128,8 +139,11 @@ object RestartSource { * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param sourceFactory A factory for producing the [[Source]] to wrap. 
*/ - def withBackoff[T](minBackoff: java.time.Duration, maxBackoff: java.time.Duration, randomFactor: Double, - maxRestarts: Int, sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { + def withBackoff[T](minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, + maxRestarts: Int, + sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { import akka.util.JavaDurationConverters._ withBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, maxRestarts, sourceFactory) } @@ -156,11 +170,15 @@ object RestartSource { */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def onFailuresWithBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, + def onFailuresWithBackoff[T](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { - akka.stream.scaladsl.RestartSource.onFailuresWithBackoff(minBackoff, maxBackoff, randomFactor) { () => - sourceFactory.create().asScala - }.asJava + akka.stream.scaladsl.RestartSource + .onFailuresWithBackoff(minBackoff, maxBackoff, randomFactor) { () => + sourceFactory.create().asScala + } + .asJava } /** @@ -183,7 +201,9 @@ object RestartSource { * @param sourceFactory A factory for producing the [[Source]] to wrap. 
* */ - def onFailuresWithBackoff[T](minBackoff: java.time.Duration, maxBackoff: java.time.Duration, randomFactor: Double, + def onFailuresWithBackoff[T](minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { import akka.util.JavaDurationConverters._ onFailuresWithBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, sourceFactory) @@ -213,11 +233,16 @@ object RestartSource { */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def onFailuresWithBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, - maxRestarts: Int, sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { - akka.stream.scaladsl.RestartSource.onFailuresWithBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () => - sourceFactory.create().asScala - }.asJava + def onFailuresWithBackoff[T](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxRestarts: Int, + sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { + akka.stream.scaladsl.RestartSource + .onFailuresWithBackoff(minBackoff, maxBackoff, randomFactor, maxRestarts) { () => + sourceFactory.create().asScala + } + .asJava } /** @@ -242,8 +267,11 @@ object RestartSource { * @param sourceFactory A factory for producing the [[Source]] to wrap. 
* */ - def onFailuresWithBackoff[T](minBackoff: java.time.Duration, maxBackoff: java.time.Duration, randomFactor: Double, - maxRestarts: Int, sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { + def onFailuresWithBackoff[T](minBackoff: java.time.Duration, + maxBackoff: java.time.Duration, + randomFactor: Double, + maxRestarts: Int, + sourceFactory: Creator[Source[T, _]]): Source[T, NotUsed] = { import akka.util.JavaDurationConverters._ onFailuresWithBackoff(minBackoff.asScala, maxBackoff.asScala, randomFactor, maxRestarts, sourceFactory) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala index d69b597b4e..5550bd1972 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Sink.scala @@ -6,7 +6,7 @@ package akka.stream.javadsl import java.util.Optional -import akka.{ Done, NotUsed, japi } +import akka.{ japi, Done, NotUsed } import akka.actor.{ ActorRef, Props } import akka.dispatch.ExecutionContexts import akka.japi.function @@ -24,6 +24,7 @@ import scala.compat.java8.FutureConverters._ /** Java API */ object Sink { + /** * A `Sink` that will invoke the given function for every received element, giving it its previous * output (or the given `zero` value) and the element as input. @@ -41,7 +42,9 @@ object Sink { * function evaluation when the input stream ends, or completed with `Failure` * if there is a failure is signaled in the stream. 
*/ - def foldAsync[U, In](zero: U, f: function.Function2[U, In, CompletionStage[U]]): javadsl.Sink[In, CompletionStage[U]] = new Sink(scaladsl.Sink.foldAsync[U, In](zero)(f(_, _).toScala).toCompletionStage()) + def foldAsync[U, In](zero: U, + f: function.Function2[U, In, CompletionStage[U]]): javadsl.Sink[In, CompletionStage[U]] = + new Sink(scaladsl.Sink.foldAsync[U, In](zero)(f(_, _).toScala).toCompletionStage()) /** * A `Sink` that will invoke the given function for every received element, giving it its previous @@ -105,8 +108,12 @@ object Sink { * normal end of the stream, or completed with `Failure` if there is a failure signaled in * the stream. */ - def foreachAsync[T](parallelism: Int)(f: function.Function[T, CompletionStage[Void]]): Sink[T, CompletionStage[Done]] = - new Sink(scaladsl.Sink.foreachAsync(parallelism)((x: T) => f(x).toScala.map(_ => ())(ExecutionContexts.sameThreadExecutionContext)).toCompletionStage()) + def foreachAsync[T](parallelism: Int)( + f: function.Function[T, CompletionStage[Void]]): Sink[T, CompletionStage[Done]] = + new Sink( + scaladsl.Sink + .foreachAsync(parallelism)((x: T) => f(x).toScala.map(_ => ())(ExecutionContexts.sameThreadExecutionContext)) + .toCompletionStage()) /** * A `Sink` that will invoke the given procedure for each received element in parallel. The sink is materialized @@ -119,8 +126,11 @@ object Sink { * [[akka.stream.Supervision.Resume]] or [[akka.stream.Supervision.Restart]] the * element is dropped and the stream continues. 
*/ - @deprecated("Use `foreachAsync` instead, it allows you to choose how to run the procedure, by calling some other API returning a CompletionStage or using CompletableFuture.supplyAsync.", since = "2.5.17") - def foreachParallel[T](parallel: Int)(f: function.Procedure[T])(ec: ExecutionContext): Sink[T, CompletionStage[Done]] = + @deprecated( + "Use `foreachAsync` instead, it allows you to choose how to run the procedure, by calling some other API returning a CompletionStage or using CompletableFuture.supplyAsync.", + since = "2.5.17") + def foreachParallel[T](parallel: Int)(f: function.Procedure[T])( + ec: ExecutionContext): Sink[T, CompletionStage[Done]] = new Sink(scaladsl.Sink.foreachParallel(parallel)(f.apply)(ec).toCompletionStage()) /** @@ -149,8 +159,10 @@ object Sink { * See also [[head]]. */ def headOption[In](): Sink[In, CompletionStage[Optional[In]]] = - new Sink(scaladsl.Sink.headOption[In].mapMaterializedValue( - _.map(_.asJava)(ExecutionContexts.sameThreadExecutionContext).toJava)) + new Sink( + scaladsl.Sink + .headOption[In] + .mapMaterializedValue(_.map(_.asJava)(ExecutionContexts.sameThreadExecutionContext).toJava)) /** * A `Sink` that materializes into a `CompletionStage` of the last value received. @@ -170,8 +182,10 @@ object Sink { * See also [[head]], [[takeLast]]. */ def lastOption[In](): Sink[In, CompletionStage[Optional[In]]] = - new Sink(scaladsl.Sink.lastOption[In].mapMaterializedValue( - _.map(_.asJava)(ExecutionContexts.sameThreadExecutionContext).toJava)) + new Sink( + scaladsl.Sink + .lastOption[In] + .mapMaterializedValue(_.map(_.asJava)(ExecutionContexts.sameThreadExecutionContext).toJava)) /** * A `Sink` that materializes into a a `CompletionStage` of `List` containing the last `n` collected elements. 
@@ -182,7 +196,10 @@ object Sink { */ def takeLast[In](n: Int): Sink[In, CompletionStage[java.util.List[In]]] = { import scala.collection.JavaConverters._ - new Sink(scaladsl.Sink.takeLast[In](n).mapMaterializedValue(fut => fut.map(sq => sq.asJava)(ExecutionContexts.sameThreadExecutionContext).toJava)) + new Sink( + scaladsl.Sink + .takeLast[In](n) + .mapMaterializedValue(fut => fut.map(sq => sq.asJava)(ExecutionContexts.sameThreadExecutionContext).toJava)) } /** @@ -197,7 +214,10 @@ object Sink { */ def seq[In]: Sink[In, CompletionStage[java.util.List[In]]] = { import scala.collection.JavaConverters._ - new Sink(scaladsl.Sink.seq[In].mapMaterializedValue(fut => fut.map(sq => sq.asJava)(ExecutionContexts.sameThreadExecutionContext).toJava)) + new Sink( + scaladsl.Sink + .seq[In] + .mapMaterializedValue(fut => fut.map(sq => sq.asJava)(ExecutionContexts.sameThreadExecutionContext).toJava)) } /** @@ -232,9 +252,13 @@ object Sink { * When the stream is completed with failure - result of `onFailureMessage(throwable)` * message will be sent to the destination actor. */ - def actorRefWithAck[In](ref: ActorRef, onInitMessage: Any, ackMessage: Any, onCompleteMessage: Any, + def actorRefWithAck[In](ref: ActorRef, + onInitMessage: Any, + ackMessage: Any, + onCompleteMessage: Any, onFailureMessage: function.Function[Throwable, Any]): Sink[In, NotUsed] = - new Sink(scaladsl.Sink.actorRefWithAck[In](ref, onInitMessage, ackMessage, onCompleteMessage, onFailureMessage.apply _)) + new Sink( + scaladsl.Sink.actorRefWithAck[In](ref, onInitMessage, ackMessage, onCompleteMessage, onFailureMessage.apply _)) /** * Creates a `Sink` that is materialized to an [[akka.actor.ActorRef]] which points to an Actor @@ -243,7 +267,9 @@ object Sink { * * @deprecated Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. 
*/ - @deprecated("Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") + @deprecated( + "Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") def actorSubscriber[T](props: Props): Sink[T, ActorRef] = new Sink(scaladsl.Sink.actorSubscriber(props)) @@ -260,7 +286,11 @@ object Sink { /** * Combine several sinks with fan-out strategy like `Broadcast` or `Balance` and returns `Sink`. */ - def combine[T, U](output1: Sink[U, _], output2: Sink[U, _], rest: java.util.List[Sink[U, _]], strategy: function.Function[java.lang.Integer, Graph[UniformFanOutShape[T, U], NotUsed]]): Sink[T, NotUsed] = { + def combine[T, U]( + output1: Sink[U, _], + output2: Sink[U, _], + rest: java.util.List[Sink[U, _]], + strategy: function.Function[java.lang.Integer, Graph[UniformFanOutShape[T, U], NotUsed]]): Sink[T, NotUsed] = { import scala.collection.JavaConverters._ val seq = if (rest != null) rest.asScala.map(_.asScala).toSeq else immutable.Seq() new Sink(scaladsl.Sink.combine(output1.asScala, output2.asScala, seq: _*)(num => strategy.apply(num))) @@ -296,11 +326,16 @@ object Sink { * Otherwise the `Future` is completed with the materialized value of the internal sink. */ @Deprecated - @deprecated("Use lazyInitAsync instead. 
(lazyInitAsync no more needs a fallback function and the materialized value more clearly indicates if the internal sink was materialized or not.)", "2.5.11") - def lazyInit[T, M](sinkFactory: function.Function[T, CompletionStage[Sink[T, M]]], fallback: function.Creator[M]): Sink[T, CompletionStage[M]] = - new Sink(scaladsl.Sink.lazyInit[T, M]( - t => sinkFactory.apply(t).toScala.map(_.asScala)(ExecutionContexts.sameThreadExecutionContext), - () => fallback.create()).mapMaterializedValue(_.toJava)) + @deprecated( + "Use lazyInitAsync instead. (lazyInitAsync no more needs a fallback function and the materialized value more clearly indicates if the internal sink was materialized or not.)", + "2.5.11") + def lazyInit[T, M](sinkFactory: function.Function[T, CompletionStage[Sink[T, M]]], + fallback: function.Creator[M]): Sink[T, CompletionStage[M]] = + new Sink( + scaladsl.Sink + .lazyInit[T, M](t => sinkFactory.apply(t).toScala.map(_.asScala)(ExecutionContexts.sameThreadExecutionContext), + () => fallback.create()) + .mapMaterializedValue(_.toJava)) /** * Creates a real `Sink` upon receiving the first element. Internal `Sink` will not be created if there are no elements, @@ -311,10 +346,16 @@ object Sink { * sink fails then the `Future` is completed with the exception. * Otherwise the `Future` is completed with the materialized value of the internal sink. 
*/ - def lazyInitAsync[T, M](sinkFactory: function.Creator[CompletionStage[Sink[T, M]]]): Sink[T, CompletionStage[Optional[M]]] = { - val sSink = scaladsl.Sink.lazyInitAsync[T, M]( - () => sinkFactory.create().toScala.map(_.asScala)(ExecutionContexts.sameThreadExecutionContext) - ).mapMaterializedValue(fut => fut.map(_.fold(Optional.empty[M]())(m => Optional.ofNullable(m)))(ExecutionContexts.sameThreadExecutionContext).toJava) + def lazyInitAsync[T, M]( + sinkFactory: function.Creator[CompletionStage[Sink[T, M]]]): Sink[T, CompletionStage[Optional[M]]] = { + val sSink = scaladsl.Sink + .lazyInitAsync[T, M](() => + sinkFactory.create().toScala.map(_.asScala)(ExecutionContexts.sameThreadExecutionContext)) + .mapMaterializedValue( + fut => + fut + .map(_.fold(Optional.empty[M]())(m => Optional.ofNullable(m)))(ExecutionContexts.sameThreadExecutionContext) + .toJava) new Sink(sSink) } } @@ -366,7 +407,8 @@ final class Sink[In, Mat](delegate: scaladsl.Sink[In, Mat]) extends Graph[SinkSh * * Useful for when you need a materialized value of a Sink when handing it out to someone to materialize it for you. 
*/ - def preMaterialize(materializer: Materializer): japi.Pair[Mat @uncheckedVariance, Sink[In @uncheckedVariance, NotUsed]] = { + def preMaterialize( + materializer: Materializer): japi.Pair[Mat @uncheckedVariance, Sink[In @uncheckedVariance, NotUsed]] = { val (mat, sink) = delegate.preMaterialize()(materializer) akka.japi.Pair(mat, sink.asJava) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala index 8c395c29d7..0e47301df6 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Source.scala @@ -10,7 +10,7 @@ import java.util.Optional import akka.actor.{ ActorRef, Cancellable, Props } import akka.annotation.ApiMayChange import akka.event.LoggingAdapter -import akka.japi.{ Pair, Util, function } +import akka.japi.{ function, Pair, Util } import akka.stream._ import akka.stream.impl.{ LinearTraversalBuilder, SourceQueueAdapter } import akka.util.{ ConstantFun, Timeout } @@ -61,8 +61,7 @@ object Source { new Source(scaladsl.Source.maybe[T].mapMaterializedValue { scalaOptionPromise: Promise[Option[T]] => val javaOptionPromise = new CompletableFuture[Optional[T]]() scalaOptionPromise.completeWith( - javaOptionPromise.toScala - .map(_.asScala)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext)) + javaOptionPromise.toScala.map(_.asScala)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext)) javaOptionPromise }) @@ -194,7 +193,8 @@ object Source { * If the [[Future]] fails the stream is failed with the exception from the future. If downstream cancels before the * stream completes the materialized [[Future]] will be failed with a [[StreamDetachedException]]. 
*/ - def fromFutureSource[T, M](future: Future[_ <: Graph[SourceShape[T], M]]): javadsl.Source[T, Future[M]] = new Source(scaladsl.Source.fromFutureSource(future)) + def fromFutureSource[T, M](future: Future[_ <: Graph[SourceShape[T], M]]): javadsl.Source[T, Future[M]] = + new Source(scaladsl.Source.fromFutureSource(future)) /** * Streams the elements of an asynchronous source once its given [[CompletionStage]] completes. @@ -202,7 +202,8 @@ object Source { * If downstream cancels before the stream completes the materialized [[CompletionStage]] will be failed * with a [[StreamDetachedException]] */ - def fromSourceCompletionStage[T, M](completion: CompletionStage[_ <: Graph[SourceShape[T], M]]): javadsl.Source[T, CompletionStage[M]] = + def fromSourceCompletionStage[T, M]( + completion: CompletionStage[_ <: Graph[SourceShape[T], M]]): javadsl.Source[T, CompletionStage[M]] = new Source(scaladsl.Source.fromSourceCompletionStage(completion)) /** @@ -251,9 +252,8 @@ object Source { * Same as [[unfold]], but uses an async function to generate the next state-element tuple. */ def unfoldAsync[S, E](s: S, f: function.Function[S, CompletionStage[Optional[Pair[S, E]]]]): Source[E, NotUsed] = - new Source( - scaladsl.Source.unfoldAsync(s)( - (s: S) => f.apply(s).toScala.map(_.asScala.map(_.toScala))(akka.dispatch.ExecutionContexts.sameThreadExecutionContext))) + new Source(scaladsl.Source.unfoldAsync(s)((s: S) => + f.apply(s).toScala.map(_.asScala.map(_.toScala))(akka.dispatch.ExecutionContexts.sameThreadExecutionContext))) /** * Create a `Source` that immediately ends the stream with the `cause` failure to every connected `Sink`. @@ -292,7 +292,9 @@ object Source { * * @deprecated Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. 
*/ - @deprecated("Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") + @deprecated( + "Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") def actorPublisher[T](props: Props): Source[T, ActorRef] = new Source(scaladsl.Source.actorPublisher(props)) @@ -350,8 +352,11 @@ object Source { /** * Combines several sources with fan-in strategy like `Merge` or `Concat` and returns `Source`. */ - def combine[T, U](first: Source[T, _ <: Any], second: Source[T, _ <: Any], rest: java.util.List[Source[T, _ <: Any]], - strategy: function.Function[java.lang.Integer, _ <: Graph[UniformFanInShape[T, U], NotUsed]]): Source[U, NotUsed] = { + def combine[T, U](first: Source[T, _ <: Any], + second: Source[T, _ <: Any], + rest: java.util.List[Source[T, _ <: Any]], + strategy: function.Function[java.lang.Integer, _ <: Graph[UniformFanInShape[T, U], NotUsed]]) + : Source[U, NotUsed] = { val seq = if (rest != null) Util.immutableSeq(rest).map(_.asScala) else immutable.Seq() new Source(scaladsl.Source.combine(first.asScala, second.asScala, seq: _*)(num => strategy.apply(num))) } @@ -359,10 +364,13 @@ object Source { /** * Combines two sources with fan-in strategy like `Merge` or `Concat` and returns `Source` with a materialized value. 
*/ - def combineMat[T, U, M1, M2, M](first: Source[T, M1], second: Source[T, M2], - strategy: function.Function[java.lang.Integer, _ <: Graph[UniformFanInShape[T, U], NotUsed]], - combine: function.Function2[M1, M2, M]): Source[U, M] = { - new Source(scaladsl.Source.combineMat(first.asScala, second.asScala)(num => strategy.apply(num))(combinerToScala(combine))) + def combineMat[T, U, M1, M2, M]( + first: Source[T, M1], + second: Source[T, M2], + strategy: function.Function[java.lang.Integer, _ <: Graph[UniformFanInShape[T, U], NotUsed]], + combine: function.Function2[M1, M2, M]): Source[U, M] = { + new Source( + scaladsl.Source.combineMat(first.asScala, second.asScala)(num => strategy.apply(num))(combinerToScala(combine))) } /** @@ -376,7 +384,8 @@ object Source { /* * Combine the elements of multiple streams into a stream of lists using a combiner function. */ - def zipWithN[T, O](zipper: function.Function[java.util.List[T], O], sources: java.util.List[Source[T, _ <: Any]]): Source[O, NotUsed] = { + def zipWithN[T, O](zipper: function.Function[java.util.List[T], O], + sources: java.util.List[Source[T, _ <: Any]]): Source[O, NotUsed] = { val seq = if (sources != null) Util.immutableSeq(sources).map(_.asScala) else immutable.Seq() new Source(scaladsl.Source.zipWithN[T, O](seq => zipper.apply(seq.asJava))(seq)) } @@ -442,13 +451,10 @@ object Source { * is received. Stream calls close and completes when `read` returns None. 
* @param close - function that closes resource */ - def unfoldResource[T, S]( - create: function.Creator[S], - read: function.Function[S, Optional[T]], - close: function.Procedure[S]): javadsl.Source[T, NotUsed] = - new Source(scaladsl.Source.unfoldResource[T, S]( - create.create _, - (s: S) => read.apply(s).asScala, close.apply)) + def unfoldResource[T, S](create: function.Creator[S], + read: function.Function[S, Optional[T]], + close: function.Procedure[S]): javadsl.Source[T, NotUsed] = + new Source(scaladsl.Source.unfoldResource[T, S](create.create _, (s: S) => read.apply(s).asScala, close.apply)) /** * Start a new `Source` from some resource which can be opened, read and closed. @@ -470,14 +476,18 @@ object Source { * is received. Stream calls close and completes when `CompletionStage` from read function returns None. * @param close - function that closes resource */ - def unfoldResourceAsync[T, S]( - create: function.Creator[CompletionStage[S]], - read: function.Function[S, CompletionStage[Optional[T]]], - close: function.Function[S, CompletionStage[Done]]): javadsl.Source[T, NotUsed] = - new Source(scaladsl.Source.unfoldResourceAsync[T, S]( - () => create.create().toScala, - (s: S) => read.apply(s).toScala.map(_.asScala)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext), - (s: S) => close.apply(s).toScala)) + def unfoldResourceAsync[T, S](create: function.Creator[CompletionStage[S]], + read: function.Function[S, CompletionStage[Optional[T]]], + close: function.Function[S, CompletionStage[Done]]): javadsl.Source[T, NotUsed] = + new Source( + scaladsl.Source.unfoldResourceAsync[T, S](() => create.create().toScala, + (s: S) => + read + .apply(s) + .toScala + .map(_.asScala)( + akka.dispatch.ExecutionContexts.sameThreadExecutionContext), + (s: S) => close.apply(s).toScala)) /** * Upcast a stream of elements to a stream of supertypes of that element. 
Useful in combination with @@ -533,7 +543,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * Materializes this Source, immediately returning (1) its materialized value, and (2) a new Source * that can be used to consume elements from the newly materialized Source. */ - def preMaterialize(materializer: Materializer): Pair[Mat @uncheckedVariance, Source[Out @uncheckedVariance, NotUsed]] = { + def preMaterialize( + materializer: Materializer): Pair[Mat @uncheckedVariance, Source[Out @uncheckedVariance, NotUsed]] = { val (mat, src) = delegate.preMaterialize()(materializer) Pair(mat, new Source(src)) } @@ -577,7 +588,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. */ - def viaMat[T, M, M2](flow: Graph[FlowShape[Out, T], M], combine: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] = + def viaMat[T, M, M2](flow: Graph[FlowShape[Out, T], M], + combine: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] = new Source(delegate.viaMat(flow)(combinerToScala(combine))) /** @@ -648,7 +660,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * function evaluation when the input stream ends, or completed with `Failure` * if there is a failure is signaled in the stream. */ - def runFoldAsync[U](zero: U, f: function.Function2[U, Out, CompletionStage[U]], materializer: Materializer): CompletionStage[U] = runWith(Sink.foldAsync(zero, f), materializer) + def runFoldAsync[U](zero: U, + f: function.Function2[U, Out, CompletionStage[U]], + materializer: Materializer): CompletionStage[U] = runWith(Sink.foldAsync(zero, f), materializer) /** * Shortcut for running this `Source` with a reduce function. 
@@ -702,9 +716,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#concat]]. */ - def concatMat[M, M2]( - that: Graph[SourceShape[Out], M], - matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] = + def concatMat[M, M2](that: Graph[SourceShape[Out], M], + matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] = new Source(delegate.concatMat(that)(combinerToScala(matF))) /** @@ -743,9 +756,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#prepend]]. */ - def prependMat[M, M2]( - that: Graph[SourceShape[Out], M], - matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] = + def prependMat[M, M2](that: Graph[SourceShape[Out], M], + matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] = new Source(delegate.prependMat(that)(combinerToScala(matF))) /** @@ -783,7 +795,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#orElse]] */ - def orElseMat[M, M2](secondary: Graph[SourceShape[Out], M], matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] = + def orElseMat[M, M2](secondary: Graph[SourceShape[Out], M], + matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] = new Source(delegate.orElseMat(secondary)(combinerToScala(matF))) /** @@ -814,9 +827,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#alsoTo]] */ - def alsoToMat[M2, M3]( - that: Graph[SinkShape[Out], M2], - matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] = + def alsoToMat[M2, M3](that: Graph[SinkShape[Out], M2], + matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] = new Source(delegate.alsoToMat(that)(combinerToScala(matF))) /** @@ -843,7 +855,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead 
of manually writing functions that pass through one of the values. */ - def divertToMat[M2, M3](that: Graph[SinkShape[Out], M2], when: function.Predicate[Out], matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] = + def divertToMat[M2, M3](that: Graph[SinkShape[Out], M2], + when: function.Predicate[Out], + matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] = new Source(delegate.divertToMat(that, when.test)(combinerToScala(matF))) /** @@ -878,9 +892,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#wireTap]] */ - def wireTapMat[M2, M3]( - that: Graph[SinkShape[Out], M2], - matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] = + def wireTapMat[M2, M3](that: Graph[SinkShape[Out], M2], + matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] = new Source(delegate.wireTapMat(that)(combinerToScala(matF))) /** @@ -947,7 +960,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#interleave]]. */ - def interleaveMat[M, M2](that: Graph[SourceShape[Out], M], segmentSize: Int, + def interleaveMat[M, M2](that: Graph[SourceShape[Out], M], + segmentSize: Int, matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] = new Source(delegate.interleaveMat(that, segmentSize)(combinerToScala(matF))) @@ -967,7 +981,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#interleave]] */ - def interleaveMat[M, M2](that: Graph[SourceShape[Out], M], segmentSize: Int, eagerClose: Boolean, + def interleaveMat[M, M2](that: Graph[SourceShape[Out], M], + segmentSize: Int, + eagerClose: Boolean, matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] = new Source(delegate.interleaveMat(that, segmentSize, eagerClose)(combinerToScala(matF))) @@ -1010,9 +1026,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#merge]]. 
*/ - def mergeMat[M, M2]( - that: Graph[SourceShape[Out], M], - matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] = + def mergeMat[M, M2](that: Graph[SourceShape[Out], M], matF: function.Function2[Mat, M, M2]): javadsl.Source[Out, M2] = new Source(delegate.mergeMat(that)(combinerToScala(matF))) /** @@ -1024,10 +1038,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#merge]] */ - def mergeMat[M, M2]( - that: Graph[SourceShape[Out], M], - matF: function.Function2[Mat, M, M2], - eagerComplete: Boolean): javadsl.Source[Out, M2] = + def mergeMat[M, M2](that: Graph[SourceShape[Out], M], + matF: function.Function2[Mat, M, M2], + eagerComplete: Boolean): javadsl.Source[Out, M2] = new Source(delegate.mergeMat(that, eagerComplete)(combinerToScala(matF))) /** @@ -1060,7 +1073,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#mergeSorted]]. */ - def mergeSortedMat[Mat2, Mat3](that: Graph[SourceShape[Out], Mat2], comp: util.Comparator[Out], + def mergeSortedMat[Mat2, Mat3](that: Graph[SourceShape[Out], Mat2], + comp: util.Comparator[Out], matF: function.Function2[Mat, Mat2, Mat3]): javadsl.Source[Out, Mat3] = new Source(delegate.mergeSortedMat(that)(combinerToScala(matF))(Ordering.comparatorToOrdering(comp))) @@ -1086,9 +1100,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#zip]]. */ - def zipMat[T, M, M2]( - that: Graph[SourceShape[T], M], - matF: function.Function2[Mat, M, M2]): javadsl.Source[Out @uncheckedVariance Pair T, M2] = + def zipMat[T, M, M2](that: Graph[SourceShape[T], M], + matF: function.Function2[Mat, M, M2]): javadsl.Source[Out @uncheckedVariance Pair T, M2] = this.viaMat(Flow.create[Out].zipMat(that, Keep.right[NotUsed, M]), matF) /** @@ -1118,9 +1131,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#zipLatest]]. 
*/ - def zipLatestMat[T, M, M2]( - that: Graph[SourceShape[T], M], - matF: function.Function2[Mat, M, M2]): javadsl.Source[Out @uncheckedVariance Pair T, M2] = + def zipLatestMat[T, M, M2](that: Graph[SourceShape[T], M], + matF: function.Function2[Mat, M, M2]): javadsl.Source[Out @uncheckedVariance Pair T, M2] = this.viaMat(Flow.create[Out].zipLatestMat(that, Keep.right[NotUsed, M]), matF) /** @@ -1135,9 +1147,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * '''Cancels when''' downstream cancels */ - def zipWith[Out2, Out3]( - that: Graph[SourceShape[Out2], _], - combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] = + def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], + combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] = new Source(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine))) /** @@ -1149,10 +1160,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#zipWith]]. 
*/ - def zipWithMat[Out2, Out3, M, M2]( - that: Graph[SourceShape[Out2], M], - combine: function.Function2[Out, Out2, Out3], - matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] = + def zipWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M], + combine: function.Function2[Out, Out2, Out3], + matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] = new Source(delegate.zipWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF))) /** @@ -1172,9 +1182,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * '''Cancels when''' downstream cancels */ - def zipLatestWith[Out2, Out3]( - that: Graph[SourceShape[Out2], _], - combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] = + def zipLatestWith[Out2, Out3](that: Graph[SourceShape[Out2], _], + combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] = new Source(delegate.zipLatestWith[Out2, Out3](that)(combinerToScala(combine))) /** @@ -1187,10 +1196,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * @see [[#zipLatestWith]]. 
*/ - def zipLatestWithMat[Out2, Out3, M, M2]( - that: Graph[SourceShape[Out2], M], - combine: function.Function2[Out, Out2, Out3], - matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] = + def zipLatestWithMat[Out2, Out3, M, M2](that: Graph[SourceShape[Out2], M], + combine: function.Function2[Out, Out2, Out3], + matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] = new Source(delegate.zipLatestWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF))) /** @@ -1365,7 +1373,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Cancels when''' downstream cancels * */ - def recoverWith(clazz: Class[_ <: Throwable], supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] = + def recoverWith(clazz: Class[_ <: Throwable], + supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] = recoverWith { case elem if clazz.isInstance(elem) => supplier.get() } @@ -1393,7 +1402,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Cancels when''' downstream cancels * */ - def recoverWithRetries(attempts: Int, pf: PartialFunction[Throwable, _ <: Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] = + def recoverWithRetries(attempts: Int, + pf: PartialFunction[Throwable, _ <: Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] = new Source(delegate.recoverWithRetries(attempts, pf)) /** @@ -1422,7 +1432,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * @param clazz the class object of the failure cause * @param supplier supply the new Source to be materialized */ - def recoverWithRetries(attempts: Int, clazz: Class[_ <: Throwable], supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] = + def recoverWithRetries(attempts: Int, + clazz: Class[_ <: Throwable], + supplier: Supplier[Graph[SourceShape[Out], NotUsed]]): Source[Out, Mat] = recoverWithRetries(attempts, { case elem if 
clazz.isInstance(elem) => supplier.get() }) @@ -1838,7 +1850,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * See also [[FlowOps.scan]] */ def scanAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): javadsl.Source[T, Mat] = - new Source(delegate.scanAsync(zero) { (out, in) => f(out, in).toScala }) + new Source(delegate.scanAsync(zero) { (out, in) => + f(out, in).toScala + }) /** * Similar to `scan` but only emits its result when the upstream completes, @@ -1885,7 +1899,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * '''Cancels when''' downstream cancels */ - def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): javadsl.Source[T, Mat] = new Source(delegate.foldAsync(zero) { (out, in) => f(out, in).toScala }) + def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): javadsl.Source[T, Mat] = + new Source(delegate.foldAsync(zero) { (out, in) => + f(out, in).toScala + }) /** * Similar to `fold` but uses first element as zero element. @@ -2027,7 +2044,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: FiniteDuration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] = + def groupedWeightedWithin(maxWeight: Long, + costFn: function.Function[Out, java.lang.Long], + d: FiniteDuration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] = new Source(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava)) /** @@ -2048,7 +2067,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise * IllegalArgumentException is thrown. 
*/ - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: java.time.Duration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] = + def groupedWeightedWithin(maxWeight: Long, + costFn: function.Function[Out, java.lang.Long], + d: java.time.Duration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] = groupedWeightedWithin(maxWeight, costFn, d.asScala) /** @@ -2175,7 +2196,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * See also [[Source.limit]], [[Source.limitWeighted]] */ - def takeWhile(p: function.Predicate[Out], inclusive: Boolean): javadsl.Source[Out, Mat] = new Source(delegate.takeWhile(p.test, inclusive)) + def takeWhile(p: function.Predicate[Out], inclusive: Boolean): javadsl.Source[Out, Mat] = + new Source(delegate.takeWhile(p.test, inclusive)) /** * Terminate processing (and cancel the upstream publisher) after predicate @@ -2304,7 +2326,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * @param seed Provides the first state for a conflated value using the first unconsumed element as a start * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate */ - def conflateWithSeed[S](seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] = + def conflateWithSeed[S](seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] = new Source(delegate.conflateWithSeed(seed.apply)(aggregate.apply)) /** @@ -2358,7 +2381,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new aggregate */ - def batch[S](max: Long, seed: function.Function[Out, S], aggregate: 
function.Function2[S, Out, S]): javadsl.Source[S, Mat] = + def batch[S](max: Long, + seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] = new Source(delegate.batch(max, seed.apply)(aggregate.apply)) /** @@ -2389,7 +2414,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new batch */ - def batchWeighted[S](max: Long, costFn: function.Function[Out, java.lang.Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] = + def batchWeighted[S](max: Long, + costFn: function.Function[Out, java.lang.Long], + seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] = new Source(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply)) /** @@ -2445,7 +2473,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * on the original, to be emitted in case downstream signals demand. * @see [[#expand]] */ - def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]]): Source[Out, Mat] = + def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]]) + : Source[Out, Mat] = new Source(delegate.extrapolate(in => extrapolator(in).asScala)) /** @@ -2473,7 +2502,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * @param initial the initial element to be emitted, in case upstream is able to stall the entire stream. 
* @see [[#expand]] */ - def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]], initial: Out @uncheckedVariance): Source[Out, Mat] = + def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]], + initial: Out @uncheckedVariance): Source[Out, Mat] = new Source(delegate.extrapolate(in => extrapolator(in).asScala, Some(initial))) /** @@ -2521,7 +2551,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * * '''Cancels when''' downstream cancels or substream cancels */ - def prefixAndTail(n: Int): javadsl.Source[Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], Mat] = + def prefixAndTail(n: Int) + : javadsl.Source[Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], + Mat] = new Source(delegate.prefixAndTail(n).map { case (taken, tail) => Pair(taken.asJava, tail.asJava) }) /** @@ -2577,7 +2609,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * @param allowClosedSubstreamRecreation enables recreation of already closed substreams if elements with their * corresponding keys arrive after completion */ - def groupBy[K](maxSubstreams: Int, f: function.Function[Out, K], allowClosedSubstreamRecreation: Boolean): SubSource[Out, Mat] = + def groupBy[K](maxSubstreams: Int, + f: function.Function[Out, K], + allowClosedSubstreamRecreation: Boolean): SubSource[Out, Mat] = new SubSource(delegate.groupBy(maxSubstreams, f.apply, allowClosedSubstreamRecreation)) /** @@ -3022,8 +3056,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def throttle(elements: Int, per: FiniteDuration, maximumBurst: Int, - mode: ThrottleMode): javadsl.Source[Out, Mat] = + def 
throttle(elements: Int, per: FiniteDuration, maximumBurst: Int, mode: ThrottleMode): javadsl.Source[Out, Mat] = new Source(delegate.throttle(elements, per, maximumBurst, mode)) /** @@ -3062,7 +3095,9 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Cancels when''' downstream cancels * */ - def throttle(elements: Int, per: java.time.Duration, maximumBurst: Int, + def throttle(elements: Int, + per: java.time.Duration, + maximumBurst: Int, mode: ThrottleMode): javadsl.Source[Out, Mat] = new Source(delegate.throttle(elements, per.asScala, maximumBurst, mode)) @@ -3098,7 +3133,8 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Cancels when''' downstream cancels * */ - def throttle(cost: Int, per: java.time.Duration, + def throttle(cost: Int, + per: java.time.Duration, costCalculation: function.Function[Out, Integer]): javadsl.Source[Out, Mat] = new Source(delegate.throttle(cost, per.asScala, costCalculation.apply _)) @@ -3143,8 +3179,11 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def throttle(cost: Int, per: FiniteDuration, maximumBurst: Int, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.Source[Out, Mat] = + def throttle(cost: Int, + per: FiniteDuration, + maximumBurst: Int, + costCalculation: function.Function[Out, Integer], + mode: ThrottleMode): javadsl.Source[Out, Mat] = new Source(delegate.throttle(cost, per, maximumBurst, costCalculation.apply _, mode)) /** @@ -3186,8 +3225,11 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ * '''Cancels when''' downstream cancels * */ - def throttle(cost: Int, per: java.time.Duration, maximumBurst: Int, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.Source[Out, Mat] = + def throttle(cost: Int, + 
per: java.time.Duration, + maximumBurst: Int, + costCalculation: function.Function[Out, Integer], + mode: ThrottleMode): javadsl.Source[Out, Mat] = new Source(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply _, mode)) /** @@ -3232,8 +3274,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ */ @Deprecated @deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12") - def throttleEven(cost: Int, per: FiniteDuration, - costCalculation: (Out) => Int, mode: ThrottleMode): javadsl.Source[Out, Mat] = + def throttleEven(cost: Int, + per: FiniteDuration, + costCalculation: (Out) => Int, + mode: ThrottleMode): javadsl.Source[Out, Mat] = new Source(delegate.throttleEven(cost, per, costCalculation.apply _, mode)) /** @@ -3248,8 +3292,10 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[ */ @Deprecated @deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12") - def throttleEven(cost: Int, per: java.time.Duration, - costCalculation: (Out) => Int, mode: ThrottleMode): javadsl.Source[Out, Mat] = + def throttleEven(cost: Int, + per: java.time.Duration, + costCalculation: (Out) => Int, + mode: ThrottleMode): javadsl.Source[Out, Mat] = throttleEven(cost, per.asScala, costCalculation, mode) /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala index 81efaf0385..ae7e8f3a3d 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SourceWithContext.scala @@ -5,7 +5,7 @@ package akka.stream.javadsl import akka.annotation.ApiMayChange -import akka.japi.{ Pair, Util, function } +import akka.japi.{ function, Pair, Util } import akka.stream._ import akka.event.LoggingAdapter import akka.util.ConstantFun @@ -41,7 +41,9 @@ object SourceWithContext { * API MAY CHANGE */ 
@ApiMayChange -final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithContext[Out, Ctx, Mat]) extends GraphDelegate(delegate) { +final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithContext[Out, Ctx, Mat]) + extends GraphDelegate(delegate) { + /** * Transform this flow by the regular flow. The given flow must support manual context propagation by * taking and producing tuples of (data, context). @@ -51,7 +53,9 @@ final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithCon * * @see [[akka.stream.javadsl.Flow.via]] */ - def via[Out2, Ctx2, Mat2](viaFlow: Graph[FlowShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance], Pair[Out2, Ctx2]], Mat2]): SourceWithContext[Out2, Ctx2, Mat] = + def via[Out2, Ctx2, Mat2]( + viaFlow: Graph[FlowShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance], Pair[Out2, Ctx2]], Mat2]) + : SourceWithContext[Out2, Ctx2, Mat] = viaScala(_.via(akka.stream.scaladsl.Flow[(Out, Ctx)].map { case (o, c) => Pair(o, c) }.via(viaFlow).map(_.toScala))) /** @@ -108,7 +112,8 @@ final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithCon * * @see [[akka.stream.javadsl.Source.grouped]] */ - def grouped(n: Int): SourceWithContext[java.util.List[Out @uncheckedVariance], java.util.List[Ctx @uncheckedVariance], Mat] = + def grouped( + n: Int): SourceWithContext[java.util.List[Out @uncheckedVariance], java.util.List[Ctx @uncheckedVariance], Mat] = viaScala(_.grouped(n).map(_.asJava).mapContext(_.asJava)) /** @@ -119,7 +124,8 @@ final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithCon def map[Out2](f: function.Function[Out, Out2]): SourceWithContext[Out2, Ctx, Mat] = viaScala(_.map(f.apply)) - def mapAsync[Out2](parallelism: Int, f: function.Function[Out, CompletionStage[Out2]]): SourceWithContext[Out2, Ctx, Mat] = + def mapAsync[Out2](parallelism: Int, + f: function.Function[Out, CompletionStage[Out2]]): SourceWithContext[Out2, Ctx, Mat] = 
viaScala(_.mapAsync[Out2](parallelism)(o => f.apply(o).toScala)) /** @@ -166,7 +172,8 @@ final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithCon * * @see [[akka.stream.javadsl.Source.sliding]] */ - def sliding(n: Int, step: Int = 1): SourceWithContext[java.util.List[Out @uncheckedVariance], java.util.List[Ctx @uncheckedVariance], Mat] = + def sliding(n: Int, step: Int = 1) + : SourceWithContext[java.util.List[Out @uncheckedVariance], java.util.List[Ctx @uncheckedVariance], Mat] = viaScala(_.sliding(n, step).map(_.asJava).mapContext(_.asJava)) /** @@ -205,25 +212,30 @@ final class SourceWithContext[+Out, +Ctx, +Mat](delegate: scaladsl.SourceWithCon * Connect this [[akka.stream.javadsl.SourceWithContext]] to a [[akka.stream.javadsl.Sink]], * concatenating the processing steps of both. */ - def to[Mat2](sink: Graph[SinkShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance]], Mat2]): javadsl.RunnableGraph[Mat] = + def to[Mat2]( + sink: Graph[SinkShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance]], Mat2]): javadsl.RunnableGraph[Mat] = RunnableGraph.fromGraph(asScala.asSource.map { case (o, e) => Pair(o, e) }.to(sink)) /** * Connect this [[akka.stream.javadsl.SourceWithContext]] to a [[akka.stream.javadsl.Sink]], * concatenating the processing steps of both. */ - def toMat[Mat2, Mat3](sink: Graph[SinkShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance]], Mat2], combine: function.Function2[Mat, Mat2, Mat3]): javadsl.RunnableGraph[Mat3] = + def toMat[Mat2, Mat3](sink: Graph[SinkShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance]], Mat2], + combine: function.Function2[Mat, Mat2, Mat3]): javadsl.RunnableGraph[Mat3] = RunnableGraph.fromGraph(asScala.asSource.map { case (o, e) => Pair(o, e) }.toMat(sink)(combinerToScala(combine))) /** * Connect this [[akka.stream.javadsl.SourceWithContext]] to a [[akka.stream.javadsl.Sink]] and run it. * The returned value is the materialized value of the `Sink`. 
*/ - def runWith[M](sink: Graph[SinkShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance]], M], materializer: Materializer): M = + def runWith[M](sink: Graph[SinkShape[Pair[Out @uncheckedVariance, Ctx @uncheckedVariance]], M], + materializer: Materializer): M = toMat(sink, Keep.right[Mat, M]).run(materializer) def asScala: scaladsl.SourceWithContext[Out, Ctx, Mat] = delegate - private[this] def viaScala[Out2, Ctx2, Mat2](f: scaladsl.SourceWithContext[Out, Ctx, Mat] => scaladsl.SourceWithContext[Out2, Ctx2, Mat2]): SourceWithContext[Out2, Ctx2, Mat2] = + private[this] def viaScala[Out2, Ctx2, Mat2]( + f: scaladsl.SourceWithContext[Out, Ctx, Mat] => scaladsl.SourceWithContext[Out2, Ctx2, Mat2]) + : SourceWithContext[Out2, Ctx2, Mat2] = new SourceWithContext(f(delegate)) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala b/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala index 8d61089774..6ee203655f 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/StreamConverters.scala @@ -7,7 +7,7 @@ package akka.stream.javadsl import java.io.{ InputStream, OutputStream } import java.util.stream.Collector import akka.japi.function -import akka.stream.{ scaladsl, javadsl } +import akka.stream.{ javadsl, scaladsl } import akka.stream.IOResult import akka.util.ByteString import scala.concurrent.duration.FiniteDuration @@ -18,6 +18,7 @@ import akka.NotUsed * Converters for interacting with the blocking `java.io` streams APIs and Java 8 Streams */ object StreamConverters { + /** * Sink which writes incoming [[ByteString]]s to an [[OutputStream]] created by the given function. 
* @@ -34,7 +35,8 @@ object StreamConverters { * * @param f A Creator which creates an OutputStream to write to */ - def fromOutputStream(f: function.Creator[OutputStream]): javadsl.Sink[ByteString, CompletionStage[IOResult]] = fromOutputStream(f, autoFlush = false) + def fromOutputStream(f: function.Creator[OutputStream]): javadsl.Sink[ByteString, CompletionStage[IOResult]] = + fromOutputStream(f, autoFlush = false) /** * Sink which writes incoming [[ByteString]]s to an [[OutputStream]] created by the given function. @@ -51,7 +53,8 @@ object StreamConverters { * @param f A Creator which creates an OutputStream to write to * @param autoFlush If true the OutputStream will be flushed whenever a byte array is written */ - def fromOutputStream(f: function.Creator[OutputStream], autoFlush: Boolean): javadsl.Sink[ByteString, CompletionStage[IOResult]] = + def fromOutputStream(f: function.Creator[OutputStream], + autoFlush: Boolean): javadsl.Sink[ByteString, CompletionStage[IOResult]] = new Sink(scaladsl.StreamConverters.fromOutputStream(() => f.create(), autoFlush).toCompletionStage()) /** @@ -123,7 +126,8 @@ object StreamConverters { * * The created [[InputStream]] will be closed when the [[Source]] is cancelled. */ - def fromInputStream(in: function.Creator[InputStream], chunkSize: Int): javadsl.Source[ByteString, CompletionStage[IOResult]] = + def fromInputStream(in: function.Creator[InputStream], + chunkSize: Int): javadsl.Source[ByteString, CompletionStage[IOResult]] = new Source(scaladsl.StreamConverters.fromInputStream(() => in.create(), chunkSize).toCompletionStage()) /** @@ -141,7 +145,8 @@ object StreamConverters { * * The created [[InputStream]] will be closed when the [[Source]] is cancelled. 
*/ - def fromInputStream(in: function.Creator[InputStream]): javadsl.Source[ByteString, CompletionStage[IOResult]] = fromInputStream(in, 8192) + def fromInputStream(in: function.Creator[InputStream]): javadsl.Source[ByteString, CompletionStage[IOResult]] = + fromInputStream(in, 8192) /** * Creates a Source which when materialized will return an [[java.io.OutputStream]] which it is possible @@ -221,7 +226,8 @@ object StreamConverters { * You can use [[Source.async]] to create asynchronous boundaries between synchronous java stream * and the rest of flow. */ - def fromJavaStream[O, S <: java.util.stream.BaseStream[O, S]](stream: function.Creator[java.util.stream.BaseStream[O, S]]): javadsl.Source[O, NotUsed] = + def fromJavaStream[O, S <: java.util.stream.BaseStream[O, S]]( + stream: function.Creator[java.util.stream.BaseStream[O, S]]): javadsl.Source[O, NotUsed] = new Source(scaladsl.StreamConverters.fromJavaStream(stream.create _)) /** @@ -247,7 +253,11 @@ object StreamConverters { * Note that a flow can be materialized multiple times, so the function producing the ``Collector`` must be able * to handle multiple invocations. 
*/ - def javaCollectorParallelUnordered[T, R](parallelism: Int)(collector: function.Creator[Collector[T, _ <: Any, R]]): Sink[T, CompletionStage[R]] = - new Sink(scaladsl.StreamConverters.javaCollectorParallelUnordered[T, R](parallelism)(() => collector.create()).toCompletionStage()) + def javaCollectorParallelUnordered[T, R](parallelism: Int)( + collector: function.Creator[Collector[T, _ <: Any, R]]): Sink[T, CompletionStage[R]] = + new Sink( + scaladsl.StreamConverters + .javaCollectorParallelUnordered[T, R](parallelism)(() => collector.create()) + .toCompletionStage()) } diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala index d983a30111..cc3ff2f0f6 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubFlow.scala @@ -23,6 +23,7 @@ import java.util.concurrent.CompletionStage import scala.reflect.ClassTag object SubFlow { + /** * Upcast a stream of elements to a stream of supertypes of that element. Useful in combination with * fan-in operators where you do not want to pay the cost of casting each element in a `map`. @@ -39,10 +40,12 @@ object SubFlow { * SubFlows cannot contribute to the super-flow’s materialized value since they * are materialized later, during the runtime of the flow graph processing. 
*/ -class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[In, Out, Mat]#Repr, scaladsl.Sink[In, Mat]]) { +class SubFlow[In, Out, Mat]( + delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[In, Out, Mat]#Repr, scaladsl.Sink[In, Mat]]) { /** Converts this Flow to its Scala DSL counterpart */ - def asScala: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[In, Out, Mat]#Repr, scaladsl.Sink[In, Mat]] @uncheckedVariance = delegate + def asScala: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[In, Out, Mat]#Repr, scaladsl.Sink[In, Mat]] @uncheckedVariance = + delegate /** * Flatten the sub-flows back into the super-flow by performing a merge @@ -186,7 +189,9 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * '''Cancels when''' downstream cancels */ def mapConcat[T](f: function.Function[Out, java.lang.Iterable[T]]): SubFlow[In, T, Mat] = - new SubFlow(delegate.mapConcat { elem => Util.immutableSeq(f(elem)) }) + new SubFlow(delegate.mapConcat { elem => + Util.immutableSeq(f(elem)) + }) /** * Transform each input element into an `Iterable` of output elements that is @@ -442,7 +447,6 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * * '''Cancels when''' downstream cancels */ - def sliding(n: Int, step: Int = 1): SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] = new SubFlow(delegate.sliding(n, step).map(_.asJava)) // TODO optimize to one step @@ -500,7 +504,9 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * See also [[FlowOps.scan]] */ def scanAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): SubFlow[In, T, Mat] = - new SubFlow(delegate.scanAsync(zero) { (out, in) => f(out, in).toScala }) + new SubFlow(delegate.scanAsync(zero) { (out, in) => + f(out, in).toScala + }) /** * Similar to `scan` but only emits its result when the upstream completes, @@ -547,7 +553,10 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, 
scaladsl.Flow[I * * '''Cancels when''' downstream cancels */ - def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): SubFlow[In, T, Mat] = new SubFlow(delegate.foldAsync(zero) { (out, in) => f(out, in).toScala }) + def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): SubFlow[In, T, Mat] = + new SubFlow(delegate.foldAsync(zero) { (out, in) => + f(out, in).toScala + }) /** * Similar to `fold` but uses first element as zero element. @@ -690,7 +699,9 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: FiniteDuration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] = + def groupedWeightedWithin(maxWeight: Long, + costFn: function.Function[Out, java.lang.Long], + d: FiniteDuration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] = new SubFlow(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava)) /** @@ -711,7 +722,9 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise * IllegalArgumentException is thrown. 
*/ - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: java.time.Duration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] = + def groupedWeightedWithin(maxWeight: Long, + costFn: function.Function[Out, java.lang.Long], + d: java.time.Duration): javadsl.SubFlow[In, java.util.List[Out @uncheckedVariance], Mat] = groupedWeightedWithin(maxWeight, costFn, d.asScala) /** @@ -915,7 +928,8 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * */ @deprecated("Use recoverWithRetries instead.", "2.4.4") - def recoverWith(pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubFlow[In, Out, Mat @uncheckedVariance] = + def recoverWith( + pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubFlow[In, Out, Mat @uncheckedVariance] = new SubFlow(delegate.recoverWith(pf)) /** @@ -941,7 +955,8 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * '''Cancels when''' downstream cancels * */ - def recoverWithRetries(attempts: Int, pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubFlow[In, Out, Mat] = + def recoverWithRetries(attempts: Int, + pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubFlow[In, Out, Mat] = new SubFlow(delegate.recoverWithRetries(attempts, pf)) /** @@ -1055,7 +1070,8 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate * */ - def conflateWithSeed[S](seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] = + def conflateWithSeed[S](seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] = new SubFlow(delegate.conflateWithSeed(seed.apply)(aggregate.apply)) /** @@ -1111,7 +1127,9 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, 
scaladsl.Flow[I * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new aggregate */ - def batch[S](max: Long, seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] = + def batch[S](max: Long, + seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] = new SubFlow(delegate.batch(max, seed.apply)(aggregate.apply)) /** @@ -1142,7 +1160,10 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new batch */ - def batchWeighted[S](max: Long, costFn: function.Function[Out, java.lang.Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] = + def batchWeighted[S](max: Long, + costFn: function.Function[Out, java.lang.Long], + seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): SubFlow[In, S, Mat] = new SubFlow(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply)) /** @@ -1199,7 +1220,8 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * on the original, to be emitted in case downstream signals demand. 
* @see [[#expand]] */ - def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]]): SubFlow[In, Out, Mat] = + def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]]) + : SubFlow[In, Out, Mat] = new SubFlow(delegate.extrapolate(in => extrapolator(in).asScala)) /** @@ -1227,7 +1249,8 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * @param initial the initial element to be emitted, in case upstream is able to stall the entire stream. * @see [[#expand]] */ - def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]], initial: Out @uncheckedVariance): SubFlow[In, Out, Mat] = + def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]], + initial: Out @uncheckedVariance): SubFlow[In, Out, Mat] = new SubFlow(delegate.extrapolate(in => extrapolator(in).asScala, Some(initial))) /** @@ -1275,7 +1298,10 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * * '''Cancels when''' downstream cancels or substream cancels */ - def prefixAndTail(n: Int): SubFlow[In, akka.japi.Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], Mat] = + def prefixAndTail(n: Int) + : SubFlow[In, + akka.japi.Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], + Mat] = new SubFlow(delegate.prefixAndTail(n).map { case (taken, tail) => akka.japi.Pair(taken.asJava, tail.asJava) }) /** @@ -1529,9 +1555,8 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * * '''Cancels when''' downstream cancels */ - def zipWith[Out2, Out3]( - that: Graph[SourceShape[Out2], _], - combine: function.Function2[Out, Out2, Out3]): SubFlow[In, Out3, Mat] = + def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], 
+ combine: function.Function2[Out, Out2, Out3]): SubFlow[In, Out3, Mat] = new SubFlow(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine))) /** @@ -1547,9 +1572,8 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * * '''Cancels when''' downstream cancels */ - def zipLatestWith[Out2, Out3]( - that: Graph[SourceShape[Out2], _], - combine: function.Function2[Out, Out2, Out3]): SubFlow[In, Out3, Mat] = + def zipLatestWith[Out2, Out3](that: Graph[SourceShape[Out2], _], + combine: function.Function2[Out, Out2, Out3]): SubFlow[In, Out3, Mat] = new SubFlow(delegate.zipLatestWith[Out2, Out3](that)(combinerToScala(combine))) /** @@ -1812,7 +1836,9 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def throttle(elements: Int, per: FiniteDuration, maximumBurst: Int, + def throttle(elements: Int, + per: FiniteDuration, + maximumBurst: Int, mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] = new SubFlow(delegate.throttle(elements, per, maximumBurst, mode)) @@ -1852,7 +1878,9 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * '''Cancels when''' downstream cancels * */ - def throttle(elements: Int, per: java.time.Duration, maximumBurst: Int, + def throttle(elements: Int, + per: java.time.Duration, + maximumBurst: Int, mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] = new SubFlow(delegate.throttle(elements, per.asScala, maximumBurst, mode)) @@ -1888,7 +1916,8 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * '''Cancels when''' downstream cancels * */ - def throttle(cost: Int, per: java.time.Duration, + def throttle(cost: Int, + per: java.time.Duration, costCalculation: function.Function[Out, Integer]): javadsl.SubFlow[In, Out, Mat] = new SubFlow(delegate.throttle(cost, per.asScala, costCalculation.apply)) @@ -1933,8 
+1962,11 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def throttle(cost: Int, per: FiniteDuration, maximumBurst: Int, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] = + def throttle(cost: Int, + per: FiniteDuration, + maximumBurst: Int, + costCalculation: function.Function[Out, Integer], + mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] = new SubFlow(delegate.throttle(cost, per, maximumBurst, costCalculation.apply, mode)) /** @@ -1976,8 +2008,11 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I * '''Cancels when''' downstream cancels * */ - def throttle(cost: Int, per: java.time.Duration, maximumBurst: Int, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] = + def throttle(cost: Int, + per: java.time.Duration, + maximumBurst: Int, + costCalculation: function.Function[Out, Integer], + mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] = new SubFlow(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply, mode)) /** @@ -2022,8 +2057,10 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I */ @Deprecated @deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12") - def throttleEven(cost: Int, per: FiniteDuration, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] = + def throttleEven(cost: Int, + per: FiniteDuration, + costCalculation: function.Function[Out, Integer], + mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] = new SubFlow(delegate.throttleEven(cost, per, costCalculation.apply, mode)) /** @@ -2038,8 +2075,10 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I */ @Deprecated @deprecated("Use throttle without 
`maximumBurst` parameter instead.", "2.5.12") - def throttleEven(cost: Int, per: java.time.Duration, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] = + def throttleEven(cost: Int, + per: java.time.Duration, + costCalculation: function.Function[Out, Integer], + mode: ThrottleMode): javadsl.SubFlow[In, Out, Mat] = throttleEven(cost, per.asScala, costCalculation, mode) /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala index 2740c73032..02bda5e334 100755 --- a/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/SubSource.scala @@ -6,7 +6,7 @@ package akka.stream.javadsl import akka.NotUsed import akka.event.LoggingAdapter -import akka.japi.{ Util, function } +import akka.japi.{ function, Util } import akka.stream._ import akka.util.ConstantFun import akka.util.JavaDurationConverters._ @@ -33,10 +33,13 @@ object SubSource { * SubFlows cannot contribute to the super-flow’s materialized value since they * are materialized later, during the runtime of the flow graph processing. 
*/ -class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[Out, Mat]#Repr, scaladsl.RunnableGraph[Mat]]) { +class SubSource[Out, Mat]( + delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[Out, Mat]#Repr, scaladsl.RunnableGraph[Mat]]) { /** Converts this Flow to its Scala DSL counterpart */ - def asScala: scaladsl.SubFlow[Out, Mat, scaladsl.Source[Out, Mat]#Repr, scaladsl.RunnableGraph[Mat]] @uncheckedVariance = delegate + def asScala + : scaladsl.SubFlow[Out, Mat, scaladsl.Source[Out, Mat]#Repr, scaladsl.RunnableGraph[Mat]] @uncheckedVariance = + delegate /** * Flatten the sub-flows back into the super-source by performing a merge @@ -175,7 +178,9 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * '''Cancels when''' downstream cancels */ def mapConcat[T](f: function.Function[Out, java.lang.Iterable[T]]): SubSource[T, Mat] = - new SubSource(delegate.mapConcat { elem => Util.immutableSeq(f(elem)) }) + new SubSource(delegate.mapConcat { elem => + Util.immutableSeq(f(elem)) + }) /** * Transform each input element into an `Iterable` of output elements that is @@ -378,7 +383,6 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * * '''Cancels when''' downstream cancels */ - /** * Ensure stream boundedness by limiting the number of elements from upstream. 
* If the number of incoming elements exceeds max, it will signal @@ -489,7 +493,9 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * See also [[FlowOps.scan]] */ def scanAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): SubSource[T, Mat] = - new SubSource(delegate.scanAsync(zero) { (out, in) => f(out, in).toScala }) + new SubSource(delegate.scanAsync(zero) { (out, in) => + f(out, in).toScala + }) /** * Similar to `scan` but only emits its result when the upstream completes, @@ -534,7 +540,10 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * * '''Cancels when''' downstream cancels */ - def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): SubSource[T, Mat] = new SubSource(delegate.foldAsync(zero) { (out, in) => f(out, in).toScala }) + def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): SubSource[T, Mat] = + new SubSource(delegate.foldAsync(zero) { (out, in) => + f(out, in).toScala + }) /** * Similar to `fold` but uses first element as zero element. @@ -677,7 +686,9 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: FiniteDuration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] = + def groupedWeightedWithin(maxWeight: Long, + costFn: function.Function[Out, java.lang.Long], + d: FiniteDuration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] = new SubSource(delegate.groupedWeightedWithin(maxWeight, d)(costFn.apply).map(_.asJava)) /** @@ -698,7 +709,9 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * `maxWeight` must be positive, and `d` must be greater than 0 seconds, otherwise * IllegalArgumentException is thrown. 
*/ - def groupedWeightedWithin(maxWeight: Long, costFn: function.Function[Out, java.lang.Long], d: java.time.Duration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] = + def groupedWeightedWithin(maxWeight: Long, + costFn: function.Function[Out, java.lang.Long], + d: java.time.Duration): javadsl.SubSource[java.util.List[Out @uncheckedVariance], Mat] = groupedWeightedWithin(maxWeight, costFn, d.asScala) /** @@ -922,7 +935,8 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * '''Cancels when''' downstream cancels * */ - def recoverWithRetries(attempts: Int, pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubSource[Out, Mat] = + def recoverWithRetries(attempts: Int, + pf: PartialFunction[Throwable, Graph[SourceShape[Out], NotUsed]]): SubSource[Out, Mat] = new SubSource(delegate.recoverWithRetries(attempts, pf)) /** @@ -1036,7 +1050,8 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate * */ - def conflateWithSeed[S](seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] = + def conflateWithSeed[S](seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] = new SubSource(delegate.conflateWithSeed(seed.apply)(aggregate.apply)) /** @@ -1092,7 +1107,9 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new aggregate */ - def batch[S](max: Long, seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] = + def batch[S](max: Long, + seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): 
SubSource[S, Mat] = new SubSource(delegate.batch(max, seed.apply)(aggregate.apply)) /** @@ -1123,7 +1140,10 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * @param seed Provides the first state for a batched value using the first unconsumed element as a start * @param aggregate Takes the currently batched value and the current pending element to produce a new batch */ - def batchWeighted[S](max: Long, costFn: function.Function[Out, java.lang.Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] = + def batchWeighted[S](max: Long, + costFn: function.Function[Out, java.lang.Long], + seed: function.Function[Out, S], + aggregate: function.Function2[S, Out, S]): SubSource[S, Mat] = new SubSource(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply)) /** @@ -1179,7 +1199,8 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * on the original, to be emitted in case downstream signals demand. * @see [[#expand]] */ - def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]]): SubSource[Out, Mat] = + def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]]) + : SubSource[Out, Mat] = new SubSource(delegate.extrapolate(in => extrapolator(in).asScala)) /** @@ -1207,7 +1228,8 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * @param initial the initial element to be emitted, in case upstream is able to stall the entire stream. 
* @see [[#expand]] */ - def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]], initial: Out @uncheckedVariance): SubSource[Out, Mat] = + def extrapolate(extrapolator: function.Function[Out @uncheckedVariance, java.util.Iterator[Out @uncheckedVariance]], + initial: Out @uncheckedVariance): SubSource[Out, Mat] = new SubSource(delegate.extrapolate(in => extrapolator(in).asScala, Some(initial))) /** @@ -1255,7 +1277,9 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * * '''Cancels when''' downstream cancels or substream cancels */ - def prefixAndTail(n: Int): SubSource[akka.japi.Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], Mat] = + def prefixAndTail(n: Int): SubSource[ + akka.japi.Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], + Mat] = new SubSource(delegate.prefixAndTail(n).map { case (taken, tail) => akka.japi.Pair(taken.asJava, tail.asJava) }) /** @@ -1510,9 +1534,8 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * * '''Cancels when''' downstream cancels */ - def zipWith[Out2, Out3]( - that: Graph[SourceShape[Out2], _], - combine: function.Function2[Out, Out2, Out3]): SubSource[Out3, Mat] = + def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _], + combine: function.Function2[Out, Out2, Out3]): SubSource[Out3, Mat] = new SubSource(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine))) /** @@ -1528,9 +1551,8 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * * '''Cancels when''' downstream cancels */ - def zipLatestWith[Out2, Out3]( - that: Graph[SourceShape[Out2], _], - combine: function.Function2[Out, Out2, Out3]): SubSource[Out3, Mat] = + def zipLatestWith[Out2, Out3](that: Graph[SourceShape[Out2], _], + combine: function.Function2[Out, Out2, Out3]): SubSource[Out3, Mat] = new 
SubSource(delegate.zipLatestWith[Out2, Out3](that)(combinerToScala(combine))) /** @@ -1793,8 +1815,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def throttle(elements: Int, per: FiniteDuration, maximumBurst: Int, - mode: ThrottleMode): javadsl.SubSource[Out, Mat] = + def throttle(elements: Int, per: FiniteDuration, maximumBurst: Int, mode: ThrottleMode): javadsl.SubSource[Out, Mat] = new SubSource(delegate.throttle(elements, per, maximumBurst, mode)) /** @@ -1833,7 +1854,9 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * '''Cancels when''' downstream cancels * */ - def throttle(elements: Int, per: java.time.Duration, maximumBurst: Int, + def throttle(elements: Int, + per: java.time.Duration, + maximumBurst: Int, mode: ThrottleMode): javadsl.SubSource[Out, Mat] = new SubSource(delegate.throttle(elements, per.asScala, maximumBurst, mode)) @@ -1869,7 +1892,8 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * '''Cancels when''' downstream cancels * */ - def throttle(cost: Int, per: java.time.Duration, + def throttle(cost: Int, + per: java.time.Duration, costCalculation: function.Function[Out, Integer]): javadsl.SubSource[Out, Mat] = new SubSource(delegate.throttle(cost, per.asScala, costCalculation.apply _)) @@ -1914,8 +1938,11 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O */ @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.12") - def throttle(cost: Int, per: FiniteDuration, maximumBurst: Int, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.SubSource[Out, Mat] = + def throttle(cost: Int, + per: FiniteDuration, + maximumBurst: Int, + costCalculation: function.Function[Out, Integer], + mode: ThrottleMode): 
javadsl.SubSource[Out, Mat] = new SubSource(delegate.throttle(cost, per, maximumBurst, costCalculation.apply _, mode)) /** @@ -1957,8 +1984,11 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O * '''Cancels when''' downstream cancels * */ - def throttle(cost: Int, per: java.time.Duration, maximumBurst: Int, - costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.SubSource[Out, Mat] = + def throttle(cost: Int, + per: java.time.Duration, + maximumBurst: Int, + costCalculation: function.Function[Out, Integer], + mode: ThrottleMode): javadsl.SubSource[Out, Mat] = new SubSource(delegate.throttle(cost, per.asScala, maximumBurst, costCalculation.apply _, mode)) /** @@ -2003,8 +2033,10 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O */ @Deprecated @deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12") - def throttleEven(cost: Int, per: FiniteDuration, - costCalculation: (Out) => Int, mode: ThrottleMode): javadsl.SubSource[Out, Mat] = + def throttleEven(cost: Int, + per: FiniteDuration, + costCalculation: (Out) => Int, + mode: ThrottleMode): javadsl.SubSource[Out, Mat] = new SubSource(delegate.throttleEven(cost, per, costCalculation.apply _, mode)) /** @@ -2019,8 +2051,10 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O */ @Deprecated @deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12") - def throttleEven(cost: Int, per: java.time.Duration, - costCalculation: (Out) => Int, mode: ThrottleMode): javadsl.SubSource[Out, Mat] = + def throttleEven(cost: Int, + per: java.time.Duration, + costCalculation: (Out) => Int, + mode: ThrottleMode): javadsl.SubSource[Out, Mat] = throttleEven(cost, per.asScala, costCalculation, mode) /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala b/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala index 08ed7af63e..4a469e884c 100644 --- 
a/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/TLS.scala @@ -8,7 +8,7 @@ import java.util.Optional import java.util.function.{ Consumer, Supplier } import javax.net.ssl.{ SSLContext, SSLEngine, SSLSession } -import akka.{ NotUsed, japi } +import akka.{ japi, NotUsed } import akka.stream._ import akka.stream.TLSProtocol._ import akka.util.ByteString @@ -66,7 +66,10 @@ object TLS { * * This method uses the default closing behavior or [[IgnoreComplete]]. */ - def create(sslContext: SSLContext, sslConfig: Optional[AkkaSSLConfig], firstSession: NegotiateNewSession, role: TLSRole): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = + def create(sslContext: SSLContext, + sslConfig: Optional[AkkaSSLConfig], + firstSession: NegotiateNewSession, + role: TLSRole): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = new javadsl.BidiFlow(scaladsl.TLS.apply(sslContext, OptionConverters.toScala(sslConfig), firstSession, role)) /** @@ -80,7 +83,9 @@ object TLS { * * This method uses the default closing behavior or [[IgnoreComplete]]. */ - def create(sslContext: SSLContext, firstSession: NegotiateNewSession, role: TLSRole): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = + def create(sslContext: SSLContext, + firstSession: NegotiateNewSession, + role: TLSRole): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = new javadsl.BidiFlow(scaladsl.TLS.apply(sslContext, None, firstSession, role)) /** @@ -99,8 +104,19 @@ object TLS { * The SSLEngine may use this information e.g. when an endpoint identification algorithm was * configured using [[javax.net.ssl.SSLParameters.setEndpointIdentificationAlgorithm]]. 
*/ - def create(sslContext: SSLContext, sslConfig: Optional[AkkaSSLConfig], firstSession: NegotiateNewSession, role: TLSRole, hostInfo: Optional[japi.Pair[String, java.lang.Integer]], closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = - new javadsl.BidiFlow(scaladsl.TLS.apply(sslContext, OptionConverters.toScala(sslConfig), firstSession, role, closing, OptionConverters.toScala(hostInfo).map(e => (e.first, e.second)))) + def create(sslContext: SSLContext, + sslConfig: Optional[AkkaSSLConfig], + firstSession: NegotiateNewSession, + role: TLSRole, + hostInfo: Optional[japi.Pair[String, java.lang.Integer]], + closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = + new javadsl.BidiFlow( + scaladsl.TLS.apply(sslContext, + OptionConverters.toScala(sslConfig), + firstSession, + role, + closing, + OptionConverters.toScala(hostInfo).map(e => (e.first, e.second)))) /** * Create a StreamTls [[akka.stream.javadsl.BidiFlow]] in client mode. The @@ -118,8 +134,18 @@ object TLS { * The SSLEngine may use this information e.g. when an endpoint identification algorithm was * configured using [[javax.net.ssl.SSLParameters.setEndpointIdentificationAlgorithm]]. 
*/ - def create(sslContext: SSLContext, firstSession: NegotiateNewSession, role: TLSRole, hostInfo: Optional[japi.Pair[String, java.lang.Integer]], closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = - new javadsl.BidiFlow(scaladsl.TLS.apply(sslContext, None, firstSession, role, closing, OptionConverters.toScala(hostInfo).map(e => (e.first, e.second)))) + def create(sslContext: SSLContext, + firstSession: NegotiateNewSession, + role: TLSRole, + hostInfo: Optional[japi.Pair[String, java.lang.Integer]], + closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = + new javadsl.BidiFlow( + scaladsl.TLS.apply(sslContext, + None, + firstSession, + role, + closing, + OptionConverters.toScala(hostInfo).map(e => (e.first, e.second)))) /** * Create a StreamTls [[akka.stream.javadsl.BidiFlow]]. This is a low-level interface. @@ -132,11 +158,11 @@ object TLS { * * For a description of the `closing` parameter please refer to [[TLSClosing]]. */ - def create(sslEngineCreator: Supplier[SSLEngine], sessionVerifier: Consumer[SSLSession], closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = - new javadsl.BidiFlow(scaladsl.TLS.apply( - () => sslEngineCreator.get(), - session => Try(sessionVerifier.accept(session)), - closing)) + def create(sslEngineCreator: Supplier[SSLEngine], + sessionVerifier: Consumer[SSLSession], + closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = + new javadsl.BidiFlow( + scaladsl.TLS.apply(() => sslEngineCreator.get(), session => Try(sessionVerifier.accept(session)), closing)) /** * Create a StreamTls [[akka.stream.javadsl.BidiFlow]]. This is a low-level interface. @@ -146,10 +172,9 @@ object TLS { * * For a description of the `closing` parameter please refer to [[TLSClosing]]. 
*/ - def create(sslEngineCreator: Supplier[SSLEngine], closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = - new javadsl.BidiFlow(scaladsl.TLS.apply( - () => sslEngineCreator.get(), - closing)) + def create(sslEngineCreator: Supplier[SSLEngine], + closing: TLSClosing): BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = + new javadsl.BidiFlow(scaladsl.TLS.apply(() => sslEngineCreator.get(), closing)) } /** diff --git a/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala b/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala index b088732105..2c85bf9d13 100644 --- a/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala +++ b/akka-stream/src/main/scala/akka/stream/javadsl/Tcp.scala @@ -38,6 +38,7 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { * Not indented for user construction */ final class ServerBinding @InternalApi private[akka] (delegate: scaladsl.Tcp.ServerBinding) { + /** * The local address of the endpoint bound by the materialization of the `connections` [[Source]]. */ @@ -61,6 +62,7 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { * Represents an accepted incoming TCP connection. */ class IncomingConnection private[akka] (delegate: scaladsl.Tcp.IncomingConnection) { + /** * The local address this connection is bound to. */ @@ -91,6 +93,7 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { * Represents a prospective outgoing TCP connection. */ class OutgoingConnection private[akka] (delegate: scaladsl.Tcp.OutgoingConnection) { + /** * The remote address this connection is or will be bound to. */ @@ -136,16 +139,17 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * independently whether the client is still attempting to write. This setting is recommended * for servers, and therefore it is the default setting. 
*/ - def bind( - interface: String, - port: Int, - backlog: Int, - options: JIterable[SocketOption], - halfClose: Boolean, - idleTimeout: Duration): Source[IncomingConnection, CompletionStage[ServerBinding]] = - Source.fromGraph(delegate.bind(interface, port, backlog, immutableSeq(options), halfClose, idleTimeout) - .map(new IncomingConnection(_)) - .mapMaterializedValue(_.map(new ServerBinding(_))(ec).toJava)) + def bind(interface: String, + port: Int, + backlog: Int, + options: JIterable[SocketOption], + halfClose: Boolean, + idleTimeout: Duration): Source[IncomingConnection, CompletionStage[ServerBinding]] = + Source.fromGraph( + delegate + .bind(interface, port, backlog, immutableSeq(options), halfClose, idleTimeout) + .map(new IncomingConnection(_)) + .mapMaterializedValue(_.map(new ServerBinding(_))(ec).toJava)) /** * Creates a [[Tcp.ServerBinding]] without specifying options. @@ -156,9 +160,11 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * completes is the server ready to accept client connections. */ def bind(interface: String, port: Int): Source[IncomingConnection, CompletionStage[ServerBinding]] = - Source.fromGraph(delegate.bind(interface, port) - .map(new IncomingConnection(_)) - .mapMaterializedValue(_.map(new ServerBinding(_))(ec).toJava)) + Source.fromGraph( + delegate + .bind(interface, port) + .map(new IncomingConnection(_)) + .mapMaterializedValue(_.map(new ServerBinding(_))(ec).toJava)) /** * Creates an [[Tcp.OutgoingConnection]] instance representing a prospective TCP client connection to the given endpoint. @@ -180,15 +186,21 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * If set to false, the connection will immediately closed once the client closes its write side, * independently whether the server is still attempting to write. 
*/ - def outgoingConnection( - remoteAddress: InetSocketAddress, - localAddress: Optional[InetSocketAddress], - options: JIterable[SocketOption], - halfClose: Boolean, - connectTimeout: Duration, - idleTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] = - Flow.fromGraph(delegate.outgoingConnection(remoteAddress, localAddress.asScala, immutableSeq(options), halfClose, connectTimeout, idleTimeout) - .mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava)) + def outgoingConnection(remoteAddress: InetSocketAddress, + localAddress: Optional[InetSocketAddress], + options: JIterable[SocketOption], + halfClose: Boolean, + connectTimeout: Duration, + idleTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] = + Flow.fromGraph( + delegate + .outgoingConnection(remoteAddress, + localAddress.asScala, + immutableSeq(options), + halfClose, + connectTimeout, + idleTimeout) + .mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava)) /** * Creates an [[Tcp.OutgoingConnection]] without specifying options. @@ -199,8 +211,10 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * for example using the [[Framing]] operators. */ def outgoingConnection(host: String, port: Int): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] = - Flow.fromGraph(delegate.outgoingConnection(new InetSocketAddress(host, port)) - .mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava)) + Flow.fromGraph( + delegate + .outgoingConnection(new InetSocketAddress(host, port)) + .mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava)) /** * Creates an [[Tcp.OutgoingConnection]] with TLS. 
@@ -209,38 +223,43 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * * @see [[Tcp.outgoingConnection()]] */ - def outgoingTlsConnection(host: String, port: Int, sslContext: SSLContext, negotiateNewSession: NegotiateNewSession): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] = - Flow.fromGraph(delegate.outgoingTlsConnection(host, port, sslContext, negotiateNewSession) - .mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava)) - - /** - * Creates an [[Tcp.OutgoingConnection]] with TLS. - * The returned flow represents a TCP client connection to the given endpoint where all bytes in and - * out go through TLS. - * - * @see [[Tcp.outgoingConnection()]] - * - * Marked API-may-change to leave room for an improvement around the very long parameter list. - */ - @ApiMayChange def outgoingTlsConnection( - remoteAddress: InetSocketAddress, - sslContext: SSLContext, - negotiateNewSession: NegotiateNewSession, - localAddress: Optional[InetSocketAddress], - options: JIterable[SocketOption], - connectTimeout: Duration, - idleTimeout: Duration - ): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] = - Flow.fromGraph(delegate.outgoingTlsConnection( - remoteAddress, - sslContext, - negotiateNewSession, - localAddress.asScala, - immutableSeq(options), - connectTimeout, - idleTimeout) - .mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava)) + host: String, + port: Int, + sslContext: SSLContext, + negotiateNewSession: NegotiateNewSession): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] = + Flow.fromGraph( + delegate + .outgoingTlsConnection(host, port, sslContext, negotiateNewSession) + .mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava)) + + /** + * Creates an [[Tcp.OutgoingConnection]] with TLS. + * The returned flow represents a TCP client connection to the given endpoint where all bytes in and + * out go through TLS. 
+ * + * @see [[Tcp.outgoingConnection()]] + * + * Marked API-may-change to leave room for an improvement around the very long parameter list. + */ + @ApiMayChange + def outgoingTlsConnection(remoteAddress: InetSocketAddress, + sslContext: SSLContext, + negotiateNewSession: NegotiateNewSession, + localAddress: Optional[InetSocketAddress], + options: JIterable[SocketOption], + connectTimeout: Duration, + idleTimeout: Duration): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] = + Flow.fromGraph( + delegate + .outgoingTlsConnection(remoteAddress, + sslContext, + negotiateNewSession, + localAddress.asScala, + immutableSeq(options), + connectTimeout, + idleTimeout) + .mapMaterializedValue(_.map(new OutgoingConnection(_))(ec).toJava)) /** * Creates a [[Tcp.ServerBinding]] instance which represents a prospective TCP server binding on the given `endpoint` @@ -250,19 +269,19 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * Marked API-may-change to leave room for an improvement around the very long parameter list. 
*/ @ApiMayChange - def bindTls( - interface: String, - port: Int, - sslContext: SSLContext, - negotiateNewSession: NegotiateNewSession, - backlog: Int, - options: JIterable[SocketOption], - halfClose: Boolean, - idleTimeout: Duration - ): Source[IncomingConnection, CompletionStage[ServerBinding]] = - Source.fromGraph(delegate.bindTls(interface, port, sslContext, negotiateNewSession, backlog, immutableSeq(options), idleTimeout) - .map(new IncomingConnection(_)) - .mapMaterializedValue(_.map(new ServerBinding(_))(ec).toJava)) + def bindTls(interface: String, + port: Int, + sslContext: SSLContext, + negotiateNewSession: NegotiateNewSession, + backlog: Int, + options: JIterable[SocketOption], + halfClose: Boolean, + idleTimeout: Duration): Source[IncomingConnection, CompletionStage[ServerBinding]] = + Source.fromGraph( + delegate + .bindTls(interface, port, sslContext, negotiateNewSession, backlog, immutableSeq(options), idleTimeout) + .map(new IncomingConnection(_)) + .mapMaterializedValue(_.map(new ServerBinding(_))(ec).toJava)) /** * Creates a [[Tcp.ServerBinding]] instance which represents a prospective TCP server binding on the given `endpoint` @@ -270,14 +289,14 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * * @see [[Tcp.bind()]] */ - def bindTls( - interface: String, - port: Int, - sslContext: SSLContext, - negotiateNewSession: NegotiateNewSession - ): Source[IncomingConnection, CompletionStage[ServerBinding]] = - Source.fromGraph(delegate.bindTls(interface, port, sslContext, negotiateNewSession) - .map(new IncomingConnection(_)) - .mapMaterializedValue(_.map(new ServerBinding(_))(ec).toJava)) + def bindTls(interface: String, + port: Int, + sslContext: SSLContext, + negotiateNewSession: NegotiateNewSession): Source[IncomingConnection, CompletionStage[ServerBinding]] = + Source.fromGraph( + delegate + .bindTls(interface, port, sslContext, negotiateNewSession) + .map(new IncomingConnection(_)) + .mapMaterializedValue(_.map(new 
ServerBinding(_))(ec).toJava)) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala index e819c969a5..b1f854b844 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/BidiFlow.scala @@ -10,10 +10,9 @@ import akka.stream.impl.{ LinearTraversalBuilder, Timers, TraversalBuilder } import scala.concurrent.duration.FiniteDuration -final class BidiFlow[-I1, +O1, -I2, +O2, +Mat]( - override val traversalBuilder: TraversalBuilder, - override val shape: BidiShape[I1, O1, I2, O2] -) extends Graph[BidiShape[I1, O1, I2, O2], Mat] { +final class BidiFlow[-I1, +O1, -I2, +O2, +Mat](override val traversalBuilder: TraversalBuilder, + override val shape: BidiShape[I1, O1, I2, O2]) + extends Graph[BidiShape[I1, O1, I2, O2], Mat] { def asJava[JI1 <: I1, JO1 >: O1, JI2 <: I2, JO2 >: O2, JMat >: Mat]: javadsl.BidiFlow[JI1, JO1, JI2, JO2, JMat] = new javadsl.BidiFlow(this) @@ -37,7 +36,8 @@ final class BidiFlow[-I1, +O1, -I2, +O2, +Mat]( * value of the current flow (ignoring the other BidiFlow’s value), use * [[BidiFlow#atopMat atopMat]] if a different strategy is needed. */ - def atop[OO1, II2, Mat2](bidi: Graph[BidiShape[O1, OO1, II2, I2], Mat2]): BidiFlow[I1, OO1, II2, O2, Mat] = atopMat(bidi)(Keep.left) + def atop[OO1, II2, Mat2](bidi: Graph[BidiShape[O1, OO1, II2, I2], Mat2]): BidiFlow[I1, OO1, II2, O2, Mat] = + atopMat(bidi)(Keep.left) /** * Add the given BidiFlow as the next step in a bidirectional transformation @@ -57,23 +57,23 @@ final class BidiFlow[-I1, +O1, -I2, +O2, +Mat]( * The `combine` function is used to compose the materialized values of this flow and that * flow into the materialized value of the resulting BidiFlow. 
*/ - def atopMat[OO1, II2, Mat2, M](bidi: Graph[BidiShape[O1, OO1, II2, I2], Mat2])(combine: (Mat, Mat2) => M): BidiFlow[I1, OO1, II2, O2, M] = { + def atopMat[OO1, II2, Mat2, M](bidi: Graph[BidiShape[O1, OO1, II2, I2], Mat2])( + combine: (Mat, Mat2) => M): BidiFlow[I1, OO1, II2, O2, M] = { val newBidi1Shape = shape.deepCopy() val newBidi2Shape = bidi.shape.deepCopy() // We MUST add the current module as an explicit submodule. The composite builder otherwise *grows* the // existing module, which is not good if there are islands present (the new module will "join" the island). val newTraversalBuilder = - TraversalBuilder.empty() + TraversalBuilder + .empty() .add(traversalBuilder, newBidi1Shape, Keep.right) .add(bidi.traversalBuilder, newBidi2Shape, combine) .wire(newBidi1Shape.out1, newBidi2Shape.in1) .wire(newBidi2Shape.out2, newBidi1Shape.in2) - new BidiFlow( - newTraversalBuilder, - BidiShape(newBidi1Shape.in1, newBidi2Shape.out1, newBidi2Shape.in2, newBidi1Shape.out2) - ) + new BidiFlow(newTraversalBuilder, + BidiShape(newBidi1Shape.in1, newBidi2Shape.out1, newBidi2Shape.in2, newBidi1Shape.out2)) } /** @@ -121,7 +121,8 @@ final class BidiFlow[-I1, +O1, -I2, +O2, +Mat]( // We MUST add the current module as an explicit submodule. The composite builder otherwise *grows* the // existing module, which is not good if there are islands present (the new module will "join" the island). 
- val resultBuilder = TraversalBuilder.empty() + val resultBuilder = TraversalBuilder + .empty() .add(traversalBuilder, newBidiShape, Keep.right) .add(flow.traversalBuilder, newFlowShape, combine) .wire(newBidiShape.out1, newFlowShape.in) @@ -129,29 +130,20 @@ final class BidiFlow[-I1, +O1, -I2, +O2, +Mat]( val newShape = FlowShape(newBidiShape.in1, newBidiShape.out2) - new Flow( - LinearTraversalBuilder.fromBuilder(resultBuilder, newShape, Keep.right), - newShape - ) + new Flow(LinearTraversalBuilder.fromBuilder(resultBuilder, newShape, Keep.right), newShape) } /** * Turn this BidiFlow around by 180 degrees, logically flipping it upside down in a protocol stack. */ def reversed: BidiFlow[I2, O2, I1, O1, Mat] = - new BidiFlow( - traversalBuilder, - BidiShape(shape.in2, shape.out2, shape.in1, shape.out1) - ) + new BidiFlow(traversalBuilder, BidiShape(shape.in2, shape.out2, shape.in1, shape.out1)) /** * Transform only the materialized value of this BidiFlow, leaving all other properties as they were. */ def mapMaterializedValue[Mat2](f: Mat => Mat2): BidiFlow[I1, O1, I2, O2, Mat2] = - new BidiFlow( - traversalBuilder.transformMat(f.asInstanceOf[Any => Any]), - shape - ) + new BidiFlow(traversalBuilder.transformMat(f.asInstanceOf[Any => Any]), shape) /** * Change the attributes of this [[Source]] to the given ones and seal the list @@ -161,10 +153,7 @@ final class BidiFlow[-I1, +O1, -I2, +O2, +Mat]( * only to the contained processing operators). */ override def withAttributes(attr: Attributes): BidiFlow[I1, O1, I2, O2, Mat] = - new BidiFlow( - traversalBuilder.setAttributes(attr), - shape - ) + new BidiFlow(traversalBuilder.setAttributes(attr), shape) /** * Add the given attributes to this Source. 
Further calls to `withAttributes` @@ -221,10 +210,7 @@ object BidiFlow { case bidi: BidiFlow[I1, O1, I2, O2, Mat] => bidi case bidi: javadsl.BidiFlow[I1, O1, I2, O2, Mat] => bidi.asScala case other => - new BidiFlow( - other.traversalBuilder, - other.shape - ) + new BidiFlow(other.traversalBuilder, other.shape) } /** @@ -246,18 +232,16 @@ object BidiFlow { * }}} * */ - def fromFlowsMat[I1, O1, I2, O2, M1, M2, M]( - flow1: Graph[FlowShape[I1, O1], M1], - flow2: Graph[FlowShape[I2, O2], M2])(combine: (M1, M2) => M): BidiFlow[I1, O1, I2, O2, M] = { + def fromFlowsMat[I1, O1, I2, O2, M1, M2, M](flow1: Graph[FlowShape[I1, O1], M1], flow2: Graph[FlowShape[I2, O2], M2])( + combine: (M1, M2) => M): BidiFlow[I1, O1, I2, O2, M] = { val newFlow1Shape = flow1.shape.deepCopy() val newFlow2Shape = flow2.shape.deepCopy() - new BidiFlow( - TraversalBuilder.empty() - .add(flow1.traversalBuilder, newFlow1Shape, Keep.right) - .add(flow2.traversalBuilder, newFlow2Shape, combine), - BidiShape(newFlow1Shape.in, newFlow1Shape.out, newFlow2Shape.in, newFlow2Shape.out) - ) + new BidiFlow(TraversalBuilder + .empty() + .add(flow1.traversalBuilder, newFlow1Shape, Keep.right) + .add(flow2.traversalBuilder, newFlow2Shape, combine), + BidiShape(newFlow1Shape.in, newFlow1Shape.out, newFlow2Shape.in, newFlow2Shape.out)) } /** @@ -278,9 +262,8 @@ object BidiFlow { * }}} * */ - def fromFlows[I1, O1, I2, O2, M1, M2]( - flow1: Graph[FlowShape[I1, O1], M1], - flow2: Graph[FlowShape[I2, O2], M2]): BidiFlow[I1, O1, I2, O2, NotUsed] = + def fromFlows[I1, O1, I2, O2, M1, M2](flow1: Graph[FlowShape[I1, O1], M1], + flow2: Graph[FlowShape[I2, O2], M2]): BidiFlow[I1, O1, I2, O2, NotUsed] = fromFlowsMat(flow1, flow2)(Keep.none) /** diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala index b3b02d6463..3a14400ca3 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala +++ 
b/akka-stream/src/main/scala/akka/stream/scaladsl/Compression.scala @@ -37,8 +37,7 @@ object Compression { * @param maxBytesPerChunk Maximum length of an output [[ByteString]] chunk. */ def gunzip(maxBytesPerChunk: Int = MaxBytesPerChunkDefault): Flow[ByteString, ByteString, NotUsed] = - Flow[ByteString].via(new GzipDecompressor(maxBytesPerChunk)) - .named("gunzip") + Flow[ByteString].via(new GzipDecompressor(maxBytesPerChunk)).named("gunzip") /** * Creates a flow that deflate-compresses a stream of ByteString. Note that the compressor @@ -75,6 +74,5 @@ object Compression { * @param nowrap if true then use GZIP compatible decompression */ def inflate(maxBytesPerChunk: Int, nowrap: Boolean): Flow[ByteString, ByteString, NotUsed] = - Flow[ByteString].via(new DeflateDecompressor(maxBytesPerChunk, nowrap)) - .named("inflate") + Flow[ByteString].via(new DeflateDecompressor(maxBytesPerChunk, nowrap)).named("inflate") } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala index 46a7e65d86..1c4a667f05 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/FileIO.scala @@ -89,7 +89,8 @@ object FileIO { * @param options File open options, see [[java.nio.file.StandardOpenOption]], defaults to Set(WRITE, TRUNCATE_EXISTING, CREATE) */ @deprecated("Use `toPath` instead", "2.4.5") - def toFile(f: File, options: Set[OpenOption] = Set(WRITE, TRUNCATE_EXISTING, CREATE)): Sink[ByteString, Future[IOResult]] = + def toFile(f: File, + options: Set[OpenOption] = Set(WRITE, TRUNCATE_EXISTING, CREATE)): Sink[ByteString, Future[IOResult]] = toPath(f.toPath, options) /** @@ -111,7 +112,8 @@ object FileIO { * @param f the file path to write to * @param options File open options, see [[java.nio.file.StandardOpenOption]], defaults to Set(WRITE, TRUNCATE_EXISTING, CREATE) */ - def toPath(f: Path, options: Set[OpenOption] = Set(WRITE, 
TRUNCATE_EXISTING, CREATE)): Sink[ByteString, Future[IOResult]] = + def toPath(f: Path, + options: Set[OpenOption] = Set(WRITE, TRUNCATE_EXISTING, CREATE)): Sink[ByteString, Future[IOResult]] = toPath(f, options, startPosition = 0) /** diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala index c0db5cdd27..44b84f4d4a 100755 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Flow.scala @@ -7,7 +7,15 @@ package akka.stream.scaladsl import akka.event.LoggingAdapter import akka.stream._ import akka.Done -import akka.stream.impl.{ LinearTraversalBuilder, ProcessorModule, Timers, SubFlowImpl, TraversalBuilder, Throttle, fusing } +import akka.stream.impl.{ + fusing, + LinearTraversalBuilder, + ProcessorModule, + SubFlowImpl, + Throttle, + Timers, + TraversalBuilder +} import akka.stream.impl.fusing._ import akka.stream.stage._ import akka.util.{ ConstantFun, Timeout } @@ -29,10 +37,10 @@ import scala.reflect.ClassTag /** * A `Flow` is a set of stream processing steps that has one open input and one open output. 
*/ -final class Flow[-In, +Out, +Mat]( - override val traversalBuilder: LinearTraversalBuilder, - override val shape: FlowShape[In, Out]) - extends FlowOpsMat[Out, Mat] with Graph[FlowShape[In, Out], Mat] { +final class Flow[-In, +Out, +Mat](override val traversalBuilder: LinearTraversalBuilder, + override val shape: FlowShape[In, Out]) + extends FlowOpsMat[Out, Mat] + with Graph[FlowShape[In, Out], Mat] { // TODO: debug string override def toString: String = s"Flow($shape)" @@ -47,7 +55,8 @@ final class Flow[-In, +Out, +Mat]( override def via[T, Mat2](flow: Graph[FlowShape[Out, T], Mat2]): Repr[T] = viaMat(flow)(Keep.left) - override def viaMat[T, Mat2, Mat3](flow: Graph[FlowShape[Out, T], Mat2])(combine: (Mat, Mat2) => Mat3): Flow[In, T, Mat3] = { + override def viaMat[T, Mat2, Mat3](flow: Graph[FlowShape[Out, T], Mat2])( + combine: (Mat, Mat2) => Mat3): Flow[In, T, Mat3] = { if (this.isIdentity) { // optimization by returning flow if possible since we know Mat2 == Mat3 from flow if (combine == Keep.right) Flow.fromGraph(flow).asInstanceOf[Flow[In, T, Mat3]] @@ -56,9 +65,8 @@ final class Flow[-In, +Out, +Mat]( val useCombine = if (combine == Keep.left) Keep.none else combine - new Flow( - LinearTraversalBuilder.empty().append(flow.traversalBuilder, flow.shape, useCombine), - flow.shape).asInstanceOf[Flow[In, T, Mat3]] + new Flow(LinearTraversalBuilder.empty().append(flow.traversalBuilder, flow.shape, useCombine), flow.shape) + .asInstanceOf[Flow[In, T, Mat3]] } } else if (flow.traversalBuilder eq Flow.identityTraversalBuilder) { // optimization by returning this if possible since we know Mat2 == Mat from this @@ -68,14 +76,12 @@ final class Flow[-In, +Out, +Mat]( val useCombine = if (combine == Keep.right) Keep.none else combine - new Flow( - traversalBuilder.append(LinearTraversalBuilder.empty(), shape, useCombine), - FlowShape[In, T](shape.in, flow.shape.out)) + new Flow(traversalBuilder.append(LinearTraversalBuilder.empty(), shape, useCombine), + FlowShape[In, 
T](shape.in, flow.shape.out)) } } else { - new Flow( - traversalBuilder.append(flow.traversalBuilder, flow.shape, combine), - FlowShape[In, T](shape.in, flow.shape.out)) + new Flow(traversalBuilder.append(flow.traversalBuilder, flow.shape, combine), + FlowShape[In, T](shape.in, flow.shape.out)) } } @@ -121,13 +127,10 @@ final class Flow[-In, +Out, +Mat]( */ def toMat[Mat2, Mat3](sink: Graph[SinkShape[Out], Mat2])(combine: (Mat, Mat2) => Mat3): Sink[In, Mat3] = { if (isIdentity) { - new Sink( - LinearTraversalBuilder.fromBuilder(sink.traversalBuilder, sink.shape, combine), - SinkShape(sink.shape.in)).asInstanceOf[Sink[In, Mat3]] + new Sink(LinearTraversalBuilder.fromBuilder(sink.traversalBuilder, sink.shape, combine), SinkShape(sink.shape.in)) + .asInstanceOf[Sink[In, Mat3]] } else { - new Sink( - traversalBuilder.append(sink.traversalBuilder, sink.shape, combine), - SinkShape(shape.in)) + new Sink(traversalBuilder.append(sink.traversalBuilder, sink.shape, combine), SinkShape(shape.in)) } } @@ -135,9 +138,7 @@ final class Flow[-In, +Out, +Mat]( * Transform the materialized value of this Flow, leaving all other properties as they were. */ override def mapMaterializedValue[Mat2](f: Mat => Mat2): ReprMat[Out, Mat2] = - new Flow( - traversalBuilder.transformMat(f), - shape) + new Flow(traversalBuilder.transformMat(f), shape) /** * Join this [[Flow]] to another [[Flow]], by cross connecting the inputs and outputs, creating a [[RunnableGraph]]. @@ -170,9 +171,8 @@ final class Flow[-In, +Out, +Mat]( * where appropriate instead of manually writing functions that pass through one of the values. 
*/ def joinMat[Mat2, Mat3](flow: Graph[FlowShape[Out, In], Mat2])(combine: (Mat, Mat2) => Mat3): RunnableGraph[Mat3] = { - val resultBuilder = traversalBuilder - .append(flow.traversalBuilder, flow.shape, combine) - .wire(flow.shape.out, shape.in) + val resultBuilder = + traversalBuilder.append(flow.traversalBuilder, flow.shape, combine).wire(flow.shape.out, shape.in) RunnableGraph(resultBuilder) } @@ -215,12 +215,14 @@ final class Flow[-In, +Out, +Mat]( * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. */ - def joinMat[I2, O2, Mat2, M](bidi: Graph[BidiShape[Out, O2, I2, In], Mat2])(combine: (Mat, Mat2) => M): Flow[I2, O2, M] = { + def joinMat[I2, O2, Mat2, M](bidi: Graph[BidiShape[Out, O2, I2, In], Mat2])( + combine: (Mat, Mat2) => M): Flow[I2, O2, M] = { val newBidiShape = bidi.shape.deepCopy() val newFlowShape = shape.deepCopy() val resultBuilder = - TraversalBuilder.empty() + TraversalBuilder + .empty() .add(traversalBuilder, newFlowShape, Keep.right) .add(bidi.traversalBuilder, newBidiShape, combine) .wire(newFlowShape.out, newBidiShape.in1) @@ -228,9 +230,7 @@ final class Flow[-In, +Out, +Mat]( val newShape = FlowShape(newBidiShape.in2, newBidiShape.out1) - new Flow( - LinearTraversalBuilder.fromBuilder(resultBuilder, newShape, Keep.right), - newShape) + new Flow(LinearTraversalBuilder.fromBuilder(resultBuilder, newShape, Keep.right), newShape) } /** @@ -242,9 +242,7 @@ final class Flow[-In, +Out, +Mat]( * only to the contained processing operators). */ override def withAttributes(attr: Attributes): Repr[Out] = - new Flow( - traversalBuilder.setAttributes(attr), - shape) + new Flow(traversalBuilder.setAttributes(attr), shape) /** * Add the given attributes to this [[Flow]]. 
If the specific attribute was already present @@ -286,7 +284,8 @@ final class Flow[-In, +Out, +Mat]( * the materialized values of the `Source` and `Sink`, e.g. the `Subscriber` of a of a [[Source#subscriber]] and * and `Publisher` of a [[Sink#publisher]]. */ - def runWith[Mat1, Mat2](source: Graph[SourceShape[In], Mat1], sink: Graph[SinkShape[Out], Mat2])(implicit materializer: Materializer): (Mat1, Mat2) = + def runWith[Mat1, Mat2](source: Graph[SourceShape[In], Mat1], sink: Graph[SinkShape[Out], Mat2])( + implicit materializer: Materializer): (Mat1, Mat2) = Source.fromGraph(source).via(this).toMat(sink)(Keep.both).run() /** @@ -297,15 +296,19 @@ final class Flow[-In, +Out, +Mat]( * @return A [[RunnableGraph]] that materializes to a Processor when run() is called on it. */ def toProcessor: RunnableGraph[Processor[In @uncheckedVariance, Out @uncheckedVariance]] = - Source.asSubscriber[In].via(this).toMat(Sink.asPublisher[Out](false))(Keep.both[Subscriber[In], Publisher[Out]]) + Source + .asSubscriber[In] + .via(this) + .toMat(Sink.asPublisher[Out](false))(Keep.both[Subscriber[In], Publisher[Out]]) .mapMaterializedValue { - case (sub, pub) => new Processor[In, Out] { - override def onError(t: Throwable): Unit = sub.onError(t) - override def onSubscribe(s: Subscription): Unit = sub.onSubscribe(s) - override def onComplete(): Unit = sub.onComplete() - override def onNext(t: In): Unit = sub.onNext(t) - override def subscribe(s: Subscriber[_ >: Out]): Unit = pub.subscribe(s) - } + case (sub, pub) => + new Processor[In, Out] { + override def onError(t: Throwable): Unit = sub.onError(t) + override def onSubscribe(s: Subscription): Unit = sub.onSubscribe(s) + override def onComplete(): Unit = sub.onComplete() + override def onNext(t: In): Unit = sub.onNext(t) + override def subscribe(s: Subscriber[_ >: Out]): Unit = pub.subscribe(s) + } } /** @@ -317,11 +320,16 @@ final class Flow[-In, +Out, +Mat]( * API MAY CHANGE */ @ApiMayChange - def asFlowWithContext[U, CtxU, 
CtxOut](collapseContext: (U, CtxU) => In)(extractContext: Out => CtxOut): FlowWithContext[U, CtxU, Out, CtxOut, Mat] = - new FlowWithContext(Flow[(U, CtxU)].map { - case (e, ctx) => - collapseContext(e, ctx) - }.viaMat(this)(Keep.right).map(e => (e, extractContext(e)))) + def asFlowWithContext[U, CtxU, CtxOut](collapseContext: (U, CtxU) => In)( + extractContext: Out => CtxOut): FlowWithContext[U, CtxU, Out, CtxOut, Mat] = + new FlowWithContext( + Flow[(U, CtxU)] + .map { + case (e, ctx) => + collapseContext(e, ctx) + } + .viaMat(this)(Keep.right) + .map(e => (e, extractContext(e)))) /** Converts this Scala DSL element to it's Java DSL counterpart. */ def asJava[JIn <: In]: javadsl.Flow[JIn, Out @uncheckedVariance, Mat @uncheckedVariance] = @@ -332,9 +340,8 @@ object Flow { private[stream] val identityTraversalBuilder = LinearTraversalBuilder.fromBuilder(GraphStages.identity.traversalBuilder, GraphStages.identity.shape, Keep.right) - private[this] val identity: Flow[Any, Any, NotUsed] = new Flow[Any, Any, NotUsed]( - identityTraversalBuilder, - GraphStages.identity.shape) + private[this] val identity: Flow[Any, Any, NotUsed] = + new Flow[Any, Any, NotUsed](identityTraversalBuilder, GraphStages.identity.shape) /** * Creates a Flow from a Reactive Streams [[org.reactivestreams.Processor]] @@ -366,21 +373,17 @@ object Flow { */ def fromGraph[I, O, M](g: Graph[FlowShape[I, O], M]): Flow[I, O, M] = g match { - case f: Flow[I, O, M] => f - case f: javadsl.Flow[I, O, M] => f.asScala + case f: Flow[I, O, M] => f + case f: javadsl.Flow[I, O, M] => f.asScala case g: GraphStageWithMaterializedValue[FlowShape[I, O], M] => // move these from the operator itself to make the returned source // behave as it is the operator with regards to attributes val attrs = g.traversalBuilder.attributes val noAttrStage = g.withAttributes(Attributes.none) - new Flow( - LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right), - noAttrStage.shape - 
).withAttributes(attrs) + new Flow(LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right), + noAttrStage.shape).withAttributes(attrs) - case other => new Flow( - LinearTraversalBuilder.fromBuilder(g.traversalBuilder, g.shape, Keep.right), - g.shape) + case other => new Flow(LinearTraversalBuilder.fromBuilder(g.traversalBuilder, g.shape, Keep.right), g.shape) } /** @@ -435,8 +438,11 @@ object Flow { * The `combine` function is used to compose the materialized values of the `sink` and `source` * into the materialized value of the resulting [[Flow]]. */ - def fromSinkAndSourceMat[I, O, M1, M2, M](sink: Graph[SinkShape[I], M1], source: Graph[SourceShape[O], M2])(combine: (M1, M2) => M): Flow[I, O, M] = - fromGraph(GraphDSL.create(sink, source)(combine) { implicit b => (in, out) => FlowShape(in.in, out.out) }) + def fromSinkAndSourceMat[I, O, M1, M2, M](sink: Graph[SinkShape[I], M1], source: Graph[SourceShape[O], M2])( + combine: (M1, M2) => M): Flow[I, O, M] = + fromGraph(GraphDSL.create(sink, source)(combine) { implicit b => (in, out) => + FlowShape(in.in, out.out) + }) /** * Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them. @@ -498,7 +504,8 @@ object Flow { * * See also [[fromSinkAndSourceCoupledMat]] when access to materialized values of the parameters is needed. */ - def fromSinkAndSourceCoupled[I, O](sink: Graph[SinkShape[I], _], source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] = + def fromSinkAndSourceCoupled[I, O](sink: Graph[SinkShape[I], _], + source: Graph[SourceShape[O], _]): Flow[I, O, NotUsed] = fromSinkAndSourceCoupledMat(sink, source)(Keep.none) /** @@ -525,8 +532,9 @@ object Flow { * The `combine` function is used to compose the materialized values of the `sink` and `source` * into the materialized value of the resulting [[Flow]]. 
*/ - def fromSinkAndSourceCoupledMat[I, O, M1, M2, M](sink: Graph[SinkShape[I], M1], source: Graph[SourceShape[O], M2])(combine: (M1, M2) => M): Flow[I, O, M] = - // format: OFF + def fromSinkAndSourceCoupledMat[I, O, M1, M2, M](sink: Graph[SinkShape[I], M1], source: Graph[SourceShape[O], M2])( + combine: (M1, M2) => M): Flow[I, O, M] = + // format: OFF Flow.fromGraph(GraphDSL.create(sink, source)(combine) { implicit b => (i, o) => import GraphDSL.Implicits._ val bidi = b.add(new CoupledTerminationBidi[I, O]) @@ -550,7 +558,8 @@ object Flow { * '''Cancels when''' downstream cancels */ @Deprecated - @deprecated("Use lazyInitAsync instead. (lazyInitAsync returns a flow with a more useful materialized value.)", "2.5.12") + @deprecated("Use lazyInitAsync instead. (lazyInitAsync returns a flow with a more useful materialized value.)", + "2.5.12") def lazyInit[I, O, M](flowFactory: I => Future[Flow[I, O, M]], fallback: () => M): Flow[I, O, M] = Flow.fromGraph(new LazyFlow[I, O, M](flowFactory)).mapMaterializedValue(_ => fallback()) @@ -575,6 +584,7 @@ object Flow { } object RunnableGraph { + /** * A graph with a closed shape is logically a runnable graph, this method makes * it so also in type. @@ -585,6 +595,7 @@ object RunnableGraph { case other => RunnableGraph(other.traversalBuilder) } } + /** * Flow with attached input and output, can be executed. 
*/ @@ -743,7 +754,8 @@ trait FlowOps[+Out, +Mat] { * @param pf Receives the failure cause and returns the new Source to be materialized if any * */ - def recoverWithRetries[T >: Out](attempts: Int, pf: PartialFunction[Throwable, Graph[SourceShape[T], NotUsed]]): Repr[T] = + def recoverWithRetries[T >: Out](attempts: Int, + pf: PartialFunction[Throwable, Graph[SourceShape[T], NotUsed]]): Repr[T] = via(new RecoverWith(attempts, pf)) /** @@ -1619,7 +1631,8 @@ trait FlowOps[+Out, +Mat] { * * See also [[FlowOps.conflate]], [[FlowOps.limit]], [[FlowOps.limitWeighted]] [[FlowOps.batch]] [[FlowOps.batchWeighted]] */ - def conflate[O2 >: Out](aggregate: (O2, O2) => O2): Repr[O2] = conflateWithSeed[O2](ConstantFun.scalaIdentityFunction)(aggregate) + def conflate[O2 >: Out](aggregate: (O2, O2) => O2): Repr[O2] = + conflateWithSeed[O2](ConstantFun.scalaIdentityFunction)(aggregate) /** * Allows a faster upstream to progress independently of a slower subscriber by aggregating elements into batches @@ -1839,7 +1852,9 @@ trait FlowOps[+Out, +Mat] { * @param allowClosedSubstreamRecreation enables recreation of already closed substreams if elements with their * corresponding keys arrive after completion */ - def groupBy[K](maxSubstreams: Int, f: Out => K, allowClosedSubstreamRecreation: Boolean): SubFlow[Out, Mat, Repr, Closed] = { + def groupBy[K](maxSubstreams: Int, + f: Out => K, + allowClosedSubstreamRecreation: Boolean): SubFlow[Out, Mat, Repr, Closed] = { val merge = new SubFlowImpl.MergeBack[Out, Repr] { override def apply[T](flow: Flow[Out, T, NotUsed], breadth: Int): Repr[T] = via(new GroupBy(maxSubstreams, f, allowClosedSubstreamRecreation)) @@ -1921,12 +1936,11 @@ trait FlowOps[+Out, +Mat] { * * See also [[FlowOps.splitAfter]]. 
*/ - def splitWhen(substreamCancelStrategy: SubstreamCancelStrategy)(p: Out => Boolean): SubFlow[Out, Mat, Repr, Closed] = { + def splitWhen(substreamCancelStrategy: SubstreamCancelStrategy)( + p: Out => Boolean): SubFlow[Out, Mat, Repr, Closed] = { val merge = new SubFlowImpl.MergeBack[Out, Repr] { override def apply[T](flow: Flow[Out, T, NotUsed], breadth: Int): Repr[T] = - via(Split.when(p, substreamCancelStrategy)) - .map(_.via(flow)) - .via(new FlattenMerge(breadth)) + via(Split.when(p, substreamCancelStrategy)).map(_.via(flow)).via(new FlattenMerge(breadth)) } val finish: (Sink[Out, NotUsed]) => Closed = s => @@ -1991,12 +2005,11 @@ trait FlowOps[+Out, +Mat] { * * See also [[FlowOps.splitWhen]]. */ - def splitAfter(substreamCancelStrategy: SubstreamCancelStrategy)(p: Out => Boolean): SubFlow[Out, Mat, Repr, Closed] = { + def splitAfter(substreamCancelStrategy: SubstreamCancelStrategy)( + p: Out => Boolean): SubFlow[Out, Mat, Repr, Closed] = { val merge = new SubFlowImpl.MergeBack[Out, Repr] { override def apply[T](flow: Flow[Out, T, NotUsed], breadth: Int): Repr[T] = - via(Split.after(p, substreamCancelStrategy)) - .map(_.via(flow)) - .via(new FlattenMerge(breadth)) + via(Split.after(p, substreamCancelStrategy)).map(_.via(flow)).via(new FlattenMerge(breadth)) } val finish: (Sink[Out, NotUsed]) => Closed = s => via(Split.after(p, substreamCancelStrategy)) @@ -2042,7 +2055,8 @@ trait FlowOps[+Out, +Mat] { * * '''Cancels when''' downstream cancels */ - def flatMapMerge[T, M](breadth: Int, f: Out => Graph[SourceShape[T], M]): Repr[T] = map(f).via(new FlattenMerge[T, M](breadth)) + def flatMapMerge[T, M](breadth: Int, f: Out => Graph[SourceShape[T], M]): Repr[T] = + map(f).via(new FlattenMerge[T, M](breadth)) /** * If the first element has not passed through this operator before the provided timeout, the stream is failed @@ -2268,8 +2282,11 @@ trait FlowOps[+Out, +Mat] { * '''Cancels when''' downstream cancels * */ - def throttle(cost: Int, per: FiniteDuration, 
maximumBurst: Int, - costCalculation: (Out) => Int, mode: ThrottleMode): Repr[Out] = + def throttle(cost: Int, + per: FiniteDuration, + maximumBurst: Int, + costCalculation: (Out) => Int, + mode: ThrottleMode): Repr[Out] = via(new Throttle(cost, per, maximumBurst, costCalculation, mode)) /** @@ -2300,8 +2317,7 @@ trait FlowOps[+Out, +Mat] { */ @Deprecated @deprecated("Use throttle without `maximumBurst` parameter instead.", "2.5.12") - def throttleEven(cost: Int, per: FiniteDuration, - costCalculation: (Out) => Int, mode: ThrottleMode): Repr[Out] = + def throttleEven(cost: Int, per: FiniteDuration, costCalculation: (Out) => Int, mode: ThrottleMode): Repr[Out] = throttle(cost, per, Throttle.AutomaticMaximumBurst, costCalculation, mode) /** @@ -2350,7 +2366,8 @@ trait FlowOps[+Out, +Mat] { * * '''Cancels when''' downstream cancels */ - def log(name: String, extract: Out => Any = ConstantFun.scalaIdentityFunction)(implicit log: LoggingAdapter = null): Repr[Out] = + def log(name: String, extract: Out => Any = ConstantFun.scalaIdentityFunction)( + implicit log: LoggingAdapter = null): Repr[Out] = via(Log(name, extract.asInstanceOf[Any => Any], Option(log))) /** @@ -2391,7 +2408,8 @@ trait FlowOps[+Out, +Mat] { */ def zipLatest[U](that: Graph[SourceShape[U], _]): Repr[(Out, U)] = via(zipLatestGraph(that)) - protected def zipLatestGraph[U, M](that: Graph[SourceShape[U], M]): Graph[FlowShape[Out @uncheckedVariance, (Out, U)], M] = + protected def zipLatestGraph[U, M]( + that: Graph[SourceShape[U], M]): Graph[FlowShape[Out @uncheckedVariance, (Out, U)], M] = GraphDSL.create(that) { implicit b => r => val zip = b.add(ZipLatest[Out, U]()) r ~> zip.in1 @@ -2413,7 +2431,8 @@ trait FlowOps[+Out, +Mat] { def zipWith[Out2, Out3](that: Graph[SourceShape[Out2], _])(combine: (Out, Out2) => Out3): Repr[Out3] = via(zipWithGraph(that)(combine)) - protected def zipWithGraph[Out2, Out3, M](that: Graph[SourceShape[Out2], M])(combine: (Out, Out2) => Out3): Graph[FlowShape[Out 
@uncheckedVariance, Out3], M] = + protected def zipWithGraph[Out2, Out3, M](that: Graph[SourceShape[Out2], M])( + combine: (Out, Out2) => Out3): Graph[FlowShape[Out @uncheckedVariance, Out3], M] = GraphDSL.create(that) { implicit b => r => val zip = b.add(ZipWith[Out, Out2, Out3](combine)) r ~> zip.in1 @@ -2440,7 +2459,8 @@ trait FlowOps[+Out, +Mat] { def zipLatestWith[Out2, Out3](that: Graph[SourceShape[Out2], _])(combine: (Out, Out2) => Out3): Repr[Out3] = via(zipLatestWithGraph(that)(combine)) - protected def zipLatestWithGraph[Out2, Out3, M](that: Graph[SourceShape[Out2], M])(combine: (Out, Out2) => Out3): Graph[FlowShape[Out @uncheckedVariance, Out3], M] = + protected def zipLatestWithGraph[Out2, Out3, M](that: Graph[SourceShape[Out2], M])( + combine: (Out, Out2) => Out3): Graph[FlowShape[Out @uncheckedVariance, Out3], M] = GraphDSL.create(that) { implicit b => r => val zip = b.add(ZipLatestWith[Out, Out2, Out3](combine)) r ~> zip.in1 @@ -2520,9 +2540,9 @@ trait FlowOps[+Out, +Mat] { via(interleaveGraph(that, segmentSize, eagerClose)) protected def interleaveGraph[U >: Out, M]( - that: Graph[SourceShape[U], M], - segmentSize: Int, - eagerClose: Boolean = false): Graph[FlowShape[Out @uncheckedVariance, U], M] = + that: Graph[SourceShape[U], M], + segmentSize: Int, + eagerClose: Boolean = false): Graph[FlowShape[Out @uncheckedVariance, U], M] = GraphDSL.create(that) { implicit b => r => val interleave = b.add(Interleave[U](2, segmentSize, eagerClose)) r ~> interleave.in(1) @@ -2544,7 +2564,8 @@ trait FlowOps[+Out, +Mat] { def merge[U >: Out, M](that: Graph[SourceShape[U], M], eagerComplete: Boolean = false): Repr[U] = via(mergeGraph(that, eagerComplete)) - protected def mergeGraph[U >: Out, M](that: Graph[SourceShape[U], M], eagerComplete: Boolean): Graph[FlowShape[Out @uncheckedVariance, U], M] = + protected def mergeGraph[U >: Out, M](that: Graph[SourceShape[U], M], + eagerComplete: Boolean): Graph[FlowShape[Out @uncheckedVariance, U], M] = 
GraphDSL.create(that) { implicit b => r => val merge = b.add(Merge[U](2, eagerComplete)) r ~> merge.in(1) @@ -2569,7 +2590,8 @@ trait FlowOps[+Out, +Mat] { def mergeSorted[U >: Out, M](that: Graph[SourceShape[U], M])(implicit ord: Ordering[U]): Repr[U] = via(mergeSortedGraph(that)) - protected def mergeSortedGraph[U >: Out, M](that: Graph[SourceShape[U], M])(implicit ord: Ordering[U]): Graph[FlowShape[Out @uncheckedVariance, U], M] = + protected def mergeSortedGraph[U >: Out, M](that: Graph[SourceShape[U], M])( + implicit ord: Ordering[U]): Graph[FlowShape[Out @uncheckedVariance, U], M] = GraphDSL.create(that) { implicit b => r => val merge = b.add(new MergeSorted[U]) r ~> merge.in1 @@ -2597,7 +2619,8 @@ trait FlowOps[+Out, +Mat] { def concat[U >: Out, Mat2](that: Graph[SourceShape[U], Mat2]): Repr[U] = via(concatGraph(that)) - protected def concatGraph[U >: Out, Mat2](that: Graph[SourceShape[U], Mat2]): Graph[FlowShape[Out @uncheckedVariance, U], Mat2] = + protected def concatGraph[U >: Out, Mat2]( + that: Graph[SourceShape[U], Mat2]): Graph[FlowShape[Out @uncheckedVariance, U], Mat2] = GraphDSL.create(that) { implicit b => r => val merge = b.add(Concat[U]()) r ~> merge.in(1) @@ -2625,7 +2648,8 @@ trait FlowOps[+Out, +Mat] { def prepend[U >: Out, Mat2](that: Graph[SourceShape[U], Mat2]): Repr[U] = via(prependGraph(that)) - protected def prependGraph[U >: Out, Mat2](that: Graph[SourceShape[U], Mat2]): Graph[FlowShape[Out @uncheckedVariance, U], Mat2] = + protected def prependGraph[U >: Out, Mat2]( + that: Graph[SourceShape[U], Mat2]): Graph[FlowShape[Out @uncheckedVariance, U], Mat2] = GraphDSL.create(that) { implicit b => r => val merge = b.add(Concat[U]()) r ~> merge.in(0) @@ -2657,7 +2681,8 @@ trait FlowOps[+Out, +Mat] { def orElse[U >: Out, Mat2](secondary: Graph[SourceShape[U], Mat2]): Repr[U] = via(orElseGraph(secondary)) - protected def orElseGraph[U >: Out, Mat2](secondary: Graph[SourceShape[U], Mat2]): Graph[FlowShape[Out @uncheckedVariance, U], Mat2] = + 
protected def orElseGraph[U >: Out, Mat2]( + secondary: Graph[SourceShape[U], Mat2]): Graph[FlowShape[Out @uncheckedVariance, U], Mat2] = GraphDSL.create(secondary) { implicit b => secondary => val orElse = b.add(OrElse[U]()) @@ -2735,7 +2760,8 @@ trait FlowOps[+Out, +Mat] { */ def divertTo(that: Graph[SinkShape[Out], _], when: Out => Boolean): Repr[Out] = via(divertToGraph(that, when)) - protected def divertToGraph[M](that: Graph[SinkShape[Out], M], when: Out => Boolean): Graph[FlowShape[Out @uncheckedVariance, Out], M] = + protected def divertToGraph[M](that: Graph[SinkShape[Out], M], + when: Out => Boolean): Graph[FlowShape[Out @uncheckedVariance, Out], M] = GraphDSL.create(that) { implicit b => r => import GraphDSL.Implicits._ val partition = b.add(new Partition[Out](2, out => if (when(out)) 1 else 0, true)) @@ -2870,7 +2896,8 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. */ - def zipWithMat[Out2, Out3, Mat2, Mat3](that: Graph[SourceShape[Out2], Mat2])(combine: (Out, Out2) => Out3)(matF: (Mat, Mat2) => Mat3): ReprMat[Out3, Mat3] = + def zipWithMat[Out2, Out3, Mat2, Mat3](that: Graph[SourceShape[Out2], Mat2])(combine: (Out, Out2) => Out3)( + matF: (Mat, Mat2) => Mat3): ReprMat[Out3, Mat3] = viaMat(zipWithGraph(that)(combine))(matF) /** @@ -2882,7 +2909,8 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. 
*/ - def zipLatestMat[U, Mat2, Mat3](that: Graph[SourceShape[U], Mat2])(matF: (Mat, Mat2) => Mat3): ReprMat[(Out, U), Mat3] = + def zipLatestMat[U, Mat2, Mat3](that: Graph[SourceShape[U], Mat2])( + matF: (Mat, Mat2) => Mat3): ReprMat[(Out, U), Mat3] = viaMat(zipLatestGraph(that))(matF) /** @@ -2894,7 +2922,8 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. */ - def zipLatestWithMat[Out2, Out3, Mat2, Mat3](that: Graph[SourceShape[Out2], Mat2])(combine: (Out, Out2) => Out3)(matF: (Mat, Mat2) => Mat3): ReprMat[Out3, Mat3] = + def zipLatestWithMat[Out2, Out3, Mat2, Mat3](that: Graph[SourceShape[Out2], Mat2])(combine: (Out, Out2) => Out3)( + matF: (Mat, Mat2) => Mat3): ReprMat[Out3, Mat3] = viaMat(zipLatestWithGraph(that)(combine))(matF) /** @@ -2906,7 +2935,8 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. */ - def mergeMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2], eagerComplete: Boolean = false)(matF: (Mat, Mat2) => Mat3): ReprMat[U, Mat3] = + def mergeMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2], eagerComplete: Boolean = false)( + matF: (Mat, Mat2) => Mat3): ReprMat[U, Mat3] = viaMat(mergeGraph(that, eagerComplete))(matF) /** @@ -2923,7 +2953,8 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. 
*/ - def interleaveMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2], request: Int)(matF: (Mat, Mat2) => Mat3): ReprMat[U, Mat3] = + def interleaveMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2], request: Int)( + matF: (Mat, Mat2) => Mat3): ReprMat[U, Mat3] = interleaveMat(that, request, eagerClose = false)(matF) /** @@ -2942,7 +2973,8 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. */ - def interleaveMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2], request: Int, eagerClose: Boolean)(matF: (Mat, Mat2) => Mat3): ReprMat[U, Mat3] = + def interleaveMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2], request: Int, eagerClose: Boolean)( + matF: (Mat, Mat2) => Mat3): ReprMat[U, Mat3] = viaMat(interleaveGraph(that, request, eagerClose))(matF) /** @@ -2957,7 +2989,8 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. */ - def mergeSortedMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2])(matF: (Mat, Mat2) => Mat3)(implicit ord: Ordering[U]): ReprMat[U, Mat3] = + def mergeSortedMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2])(matF: (Mat, Mat2) => Mat3)( + implicit ord: Ordering[U]): ReprMat[U, Mat3] = viaMat(mergeSortedGraph(that))(matF) /** @@ -3018,7 +3051,8 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { * '''Cancels when''' downstream cancels and additionally the alternative is cancelled as soon as an element passes * by from this stream. 
*/ - def orElseMat[U >: Out, Mat2, Mat3](secondary: Graph[SourceShape[U], Mat2])(matF: (Mat, Mat2) => Mat3): ReprMat[U, Mat3] = + def orElseMat[U >: Out, Mat2, Mat3](secondary: Graph[SourceShape[U], Mat2])( + matF: (Mat, Mat2) => Mat3): ReprMat[U, Mat3] = viaMat(orElseGraph(secondary))(matF) /** @@ -3042,7 +3076,8 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] { * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners * where appropriate instead of manually writing functions that pass through one of the values. */ - def divertToMat[Mat2, Mat3](that: Graph[SinkShape[Out], Mat2], when: Out => Boolean)(matF: (Mat, Mat2) => Mat3): ReprMat[Out, Mat3] = + def divertToMat[Mat2, Mat3](that: Graph[SinkShape[Out], Mat2], when: Out => Boolean)( + matF: (Mat, Mat2) => Mat3): ReprMat[Out, Mat3] = viaMat(divertToGraph(that, when))(matF) /** diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala index c34a6b4fab..075bb4a526 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContext.scala @@ -25,7 +25,8 @@ object FlowWithContext { /** * Creates a FlowWithContext from a regular flow that operates on a tuple of `(data, context)` elements. 
*/ - def fromTuples[In, CtxIn, Out, CtxOut, Mat](flow: Flow[(In, CtxIn), (Out, CtxOut), Mat]): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] = + def fromTuples[In, CtxIn, Out, CtxOut, Mat]( + flow: Flow[(In, CtxIn), (Out, CtxOut), Mat]): FlowWithContext[In, CtxIn, Out, CtxOut, Mat] = new FlowWithContext(flow) } @@ -40,15 +41,17 @@ object FlowWithContext { * API MAY CHANGE */ @ApiMayChange -final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat]( - delegate: Flow[(In, CtxIn), (Out, CtxOut), Mat] -) extends GraphDelegate(delegate) with FlowWithContextOps[Out, CtxOut, Mat] { - override type ReprMat[+O, +C, +M] = FlowWithContext[In @uncheckedVariance, CtxIn @uncheckedVariance, O, C, M @uncheckedVariance] +final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat](delegate: Flow[(In, CtxIn), (Out, CtxOut), Mat]) + extends GraphDelegate(delegate) + with FlowWithContextOps[Out, CtxOut, Mat] { + override type ReprMat[+O, +C, +M] = + FlowWithContext[In @uncheckedVariance, CtxIn @uncheckedVariance, O, C, M @uncheckedVariance] override def via[Out2, Ctx2, Mat2](viaFlow: Graph[FlowShape[(Out, CtxOut), (Out2, Ctx2)], Mat2]): Repr[Out2, Ctx2] = new FlowWithContext(delegate.via(viaFlow)) - override def viaMat[Out2, Ctx2, Mat2, Mat3](flow: Graph[FlowShape[(Out, CtxOut), (Out2, Ctx2)], Mat2])(combine: (Mat, Mat2) => Mat3): FlowWithContext[In, CtxIn, Out2, Ctx2, Mat3] = + override def viaMat[Out2, Ctx2, Mat2, Mat3](flow: Graph[FlowShape[(Out, CtxOut), (Out2, Ctx2)], Mat2])( + combine: (Mat, Mat2) => Mat3): FlowWithContext[In, CtxIn, Out2, Ctx2, Mat3] = new FlowWithContext(delegate.viaMat(flow)(combine)) /** @@ -61,6 +64,7 @@ final class FlowWithContext[-In, -CtxIn, +Out, +CtxOut, +Mat]( def asFlow: Flow[(In, CtxIn), (Out, CtxOut), Mat] = delegate - def asJava[JIn <: In, JCtxIn <: CtxIn, JOut >: Out, JCtxOut >: CtxOut, JMat >: Mat]: javadsl.FlowWithContext[JIn, JCtxIn, JOut, JCtxOut, JMat] = + def asJava[JIn <: In, JCtxIn <: CtxIn, JOut >: Out, JCtxOut >: CtxOut, JMat >: Mat] 
+ : javadsl.FlowWithContext[JIn, JCtxIn, JOut, JCtxOut, JMat] = new javadsl.FlowWithContext(this) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala index dd08135158..0dfe0a52d2 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/FlowWithContextOps.scala @@ -51,7 +51,8 @@ trait FlowWithContextOps[+Out, +Ctx, +Mat] { * * @see [[akka.stream.scaladsl.FlowOps.viaMat]] */ - def viaMat[Out2, Ctx2, Mat2, Mat3](flow: Graph[FlowShape[(Out, Ctx), (Out2, Ctx2)], Mat2])(combine: (Mat, Mat2) => Mat3): ReprMat[Out2, Ctx2, Mat3] + def viaMat[Out2, Ctx2, Mat2, Mat3](flow: Graph[FlowShape[(Out, Ctx), (Out2, Ctx2)], Mat2])( + combine: (Mat, Mat2) => Mat3): ReprMat[Out2, Ctx2, Mat3] /** * Context-preserving variant of [[akka.stream.scaladsl.FlowOps.map]]. @@ -67,7 +68,9 @@ trait FlowWithContextOps[+Out, +Ctx, +Mat] { * @see [[akka.stream.scaladsl.FlowOps.mapAsync]] */ def mapAsync[Out2](parallelism: Int)(f: Out => Future[Out2]): Repr[Out2, Ctx] = - via(flow.mapAsync(parallelism) { case (e, ctx) => f(e).map(o => (o, ctx))(ExecutionContexts.sameThreadExecutionContext) }) + via(flow.mapAsync(parallelism) { + case (e, ctx) => f(e).map(o => (o, ctx))(ExecutionContexts.sameThreadExecutionContext) + }) /** * Context-preserving variant of [[akka.stream.scaladsl.FlowOps.collect]]. 
@@ -171,7 +174,8 @@ trait FlowWithContextOps[+Out, +Ctx, +Mat] { * * @see [[akka.stream.scaladsl.FlowOps.log]] */ - def log(name: String, extract: Out => Any = ConstantFun.scalaIdentityFunction)(implicit log: LoggingAdapter = null): Repr[Out, Ctx] = { + def log(name: String, extract: Out => Any = ConstantFun.scalaIdentityFunction)( + implicit log: LoggingAdapter = null): Repr[Out, Ctx] = { val extractWithContext: ((Out, Ctx)) => Any = { case (e, _) => extract(e) } via(flow.log(name, extractWithContext)(log)) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala index de5e9a2434..1cbcc63b6f 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Framing.scala @@ -33,8 +33,11 @@ object Framing { * @param maximumFrameLength The maximum length of allowed frames while decoding. If the maximum length is * exceeded this Flow will fail the stream. 
*/ - def delimiter(delimiter: ByteString, maximumFrameLength: Int, allowTruncation: Boolean = false): Flow[ByteString, ByteString, NotUsed] = - Flow[ByteString].via(new DelimiterFramingStage(delimiter, maximumFrameLength, allowTruncation)) + def delimiter(delimiter: ByteString, + maximumFrameLength: Int, + allowTruncation: Boolean = false): Flow[ByteString, ByteString, NotUsed] = + Flow[ByteString] + .via(new DelimiterFramingStage(delimiter, maximumFrameLength, allowTruncation)) .named("delimiterFraming") /** @@ -51,13 +54,13 @@ object Framing { * the length of the size field) * @param byteOrder The ''ByteOrder'' to be used when decoding the field */ - def lengthField( - fieldLength: Int, - fieldOffset: Int = 0, - maximumFrameLength: Int, - byteOrder: ByteOrder = ByteOrder.LITTLE_ENDIAN): Flow[ByteString, ByteString, NotUsed] = { + def lengthField(fieldLength: Int, + fieldOffset: Int = 0, + maximumFrameLength: Int, + byteOrder: ByteOrder = ByteOrder.LITTLE_ENDIAN): Flow[ByteString, ByteString, NotUsed] = { require(fieldLength >= 1 && fieldLength <= 4, "Length field length must be 1, 2, 3 or 4.") - Flow[ByteString].via(new LengthFieldFramingStage(fieldLength, fieldOffset, maximumFrameLength, byteOrder)) + Flow[ByteString] + .via(new LengthFieldFramingStage(fieldLength, fieldOffset, maximumFrameLength, byteOrder)) .named("lengthFieldFraming") } @@ -80,14 +83,14 @@ object Framing { * ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the operator fails otherwise. 
* */ - def lengthField( - fieldLength: Int, - fieldOffset: Int, - maximumFrameLength: Int, - byteOrder: ByteOrder, - computeFrameSize: (Array[Byte], Int) => Int): Flow[ByteString, ByteString, NotUsed] = { + def lengthField(fieldLength: Int, + fieldOffset: Int, + maximumFrameLength: Int, + byteOrder: ByteOrder, + computeFrameSize: (Array[Byte], Int) => Int): Flow[ByteString, ByteString, NotUsed] = { require(fieldLength >= 1 && fieldLength <= 4, "Length field length must be 1, 2, 3 or 4.") - Flow[ByteString].via(new LengthFieldFramingStage(fieldLength, fieldOffset, maximumFrameLength, byteOrder, Some(computeFrameSize))) + Flow[ByteString] + .via(new LengthFieldFramingStage(fieldLength, fieldOffset, maximumFrameLength, byteOrder, Some(computeFrameSize))) .named("lengthFieldFraming") } @@ -121,8 +124,10 @@ object Framing { * limit this BidiFlow will fail the stream. The header attached by this BidiFlow are not * included in this limit. */ - def simpleFramingProtocol(maximumMessageLength: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = { - BidiFlow.fromFlowsMat(simpleFramingProtocolEncoder(maximumMessageLength), simpleFramingProtocolDecoder(maximumMessageLength))(Keep.left) + def simpleFramingProtocol( + maximumMessageLength: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] = { + BidiFlow.fromFlowsMat(simpleFramingProtocolEncoder(maximumMessageLength), + simpleFramingProtocolDecoder(maximumMessageLength))(Keep.left) } /** @@ -164,27 +169,33 @@ object Framing { } private class SimpleFramingProtocolEncoder(maximumMessageLength: Long) extends SimpleLinearGraphStage[ByteString] { - override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { - setHandlers(in, out, this) + override def createLogic(inheritedAttributes: Attributes) = + new GraphStageLogic(shape) with InHandler with OutHandler { + setHandlers(in, out, this) - override def onPush(): Unit = { - val message 
= grab(in) - val msgSize = message.size + override def onPush(): Unit = { + val message = grab(in) + val msgSize = message.size - if (msgSize > maximumMessageLength) - failStage(new FramingException(s"Maximum allowed message size is $maximumMessageLength but tried to send $msgSize bytes")) - else { - val header = ByteString((msgSize >> 24) & 0xFF, (msgSize >> 16) & 0xFF, (msgSize >> 8) & 0xFF, msgSize & 0xFF) - push(out, header ++ message) + if (msgSize > maximumMessageLength) + failStage( + new FramingException( + s"Maximum allowed message size is $maximumMessageLength but tried to send $msgSize bytes")) + else { + val header = + ByteString((msgSize >> 24) & 0xFF, (msgSize >> 16) & 0xFF, (msgSize >> 8) & 0xFF, msgSize & 0xFF) + push(out, header ++ message) + } } - } - override def onPull(): Unit = pull(in) - } + override def onPull(): Unit = pull(in) + } } - private class DelimiterFramingStage(val separatorBytes: ByteString, val maximumLineBytes: Int, val allowTruncation: Boolean) - extends GraphStage[FlowShape[ByteString, ByteString]] { + private class DelimiterFramingStage(val separatorBytes: ByteString, + val maximumLineBytes: Int, + val allowTruncation: Boolean) + extends GraphStage[FlowShape[ByteString, ByteString]] { val in = Inlet[ByteString]("DelimiterFramingStage.in") val out = Outlet[ByteString]("DelimiterFramingStage.out") @@ -193,168 +204,168 @@ object Framing { override def initialAttributes: Attributes = DefaultAttributes.delimiterFraming override def toString: String = "DelimiterFraming" - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - private val firstSeparatorByte = separatorBytes.head - private var buffer = ByteString.empty - private var nextPossibleMatch = 0 + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with OutHandler { + private val firstSeparatorByte = separatorBytes.head + 
private var buffer = ByteString.empty + private var nextPossibleMatch = 0 - // We use an efficient unsafe array implementation and must be use with caution. - // It contains all indices computed during search phase. - // The capacity is fixed at 256 to preserve fairness and prevent uneccessary allocation during parsing phase. - // This array provide a way to check remaining capacity and must be use to prevent out of bounds exception. - // In this use case, we compute all possibles indices up to 256 and then parse everything. - private val indices = new LightArray[(Int, Int)](256) + // We use an efficient unsafe array implementation and must be use with caution. + // It contains all indices computed during search phase. + // The capacity is fixed at 256 to preserve fairness and prevent uneccessary allocation during parsing phase. + // This array provide a way to check remaining capacity and must be use to prevent out of bounds exception. + // In this use case, we compute all possibles indices up to 256 and then parse everything. 
+ private val indices = new LightArray[(Int, Int)](256) - override def onPush(): Unit = { - buffer ++= grab(in) - searchIndices() - } - - override def onPull(): Unit = searchIndices() - - override def onUpstreamFinish(): Unit = { - if (buffer.isEmpty) { - completeStage() - } else if (isAvailable(out)) { + override def onPush(): Unit = { + buffer ++= grab(in) searchIndices() - } // else swallow the termination and wait for pull - } - - private def tryPull(): Unit = { - if (isClosed(in)) { - if (allowTruncation) { - push(out, buffer) - completeStage() - } else - failStage(new FramingException( - "Stream finished but there was a truncated final frame in the buffer")) - } else pull(in) - } - - @tailrec - private def searchIndices(): Unit = { - // Next possible position for the delimiter - val possibleMatchPos = buffer.indexOf(firstSeparatorByte, from = nextPossibleMatch) - - // Retrive previous position - val previous = indices.lastOption match { - case OptionVal.Some((_, i)) => i + separatorBytes.size - case OptionVal.None => 0 } - if (possibleMatchPos - previous > maximumLineBytes) { - failStage(new FramingException(s"Read ${possibleMatchPos - previous} bytes " + - s"which is more than $maximumLineBytes without seeing a line terminator")) - } else if (possibleMatchPos == -1) { - if (buffer.size - previous > maximumLineBytes) - failStage(new FramingException(s"Read ${buffer.size - previous} bytes " + - s"which is more than $maximumLineBytes without seeing a line terminator")) - else { - // No matching character, we need to accumulate more bytes into the buffer - nextPossibleMatch = buffer.size - doParse() + override def onPull(): Unit = searchIndices() + + override def onUpstreamFinish(): Unit = { + if (buffer.isEmpty) { + completeStage() + } else if (isAvailable(out)) { + searchIndices() + } // else swallow the termination and wait for pull + } + + private def tryPull(): Unit = { + if (isClosed(in)) { + if (allowTruncation) { + push(out, buffer) + completeStage() + } 
else + failStage(new FramingException("Stream finished but there was a truncated final frame in the buffer")) + } else pull(in) + } + + @tailrec + private def searchIndices(): Unit = { + // Next possible position for the delimiter + val possibleMatchPos = buffer.indexOf(firstSeparatorByte, from = nextPossibleMatch) + + // Retrive previous position + val previous = indices.lastOption match { + case OptionVal.Some((_, i)) => i + separatorBytes.size + case OptionVal.None => 0 } - } else if (possibleMatchPos + separatorBytes.size > buffer.size) { - // We have found a possible match (we found the first character of the terminator - // sequence) but we don't have yet enough bytes. We remember the position to - // retry from next time. - nextPossibleMatch = possibleMatchPos - doParse() - } else if (buffer.slice(possibleMatchPos, possibleMatchPos + separatorBytes.size) == separatorBytes) { - // Found a match, mark start and end position and iterate if possible - indices += (previous, possibleMatchPos) - nextPossibleMatch = possibleMatchPos + separatorBytes.size - if (nextPossibleMatch == buffer.size || indices.isFull) { + + if (possibleMatchPos - previous > maximumLineBytes) { + failStage( + new FramingException( + s"Read ${possibleMatchPos - previous} bytes " + + s"which is more than $maximumLineBytes without seeing a line terminator")) + } else if (possibleMatchPos == -1) { + if (buffer.size - previous > maximumLineBytes) + failStage( + new FramingException( + s"Read ${buffer.size - previous} bytes " + + s"which is more than $maximumLineBytes without seeing a line terminator")) + else { + // No matching character, we need to accumulate more bytes into the buffer + nextPossibleMatch = buffer.size + doParse() + } + } else if (possibleMatchPos + separatorBytes.size > buffer.size) { + // We have found a possible match (we found the first character of the terminator + // sequence) but we don't have yet enough bytes. We remember the position to + // retry from next time. 
+ nextPossibleMatch = possibleMatchPos doParse() + } else if (buffer.slice(possibleMatchPos, possibleMatchPos + separatorBytes.size) == separatorBytes) { + // Found a match, mark start and end position and iterate if possible + indices += (previous, possibleMatchPos) + nextPossibleMatch = possibleMatchPos + separatorBytes.size + if (nextPossibleMatch == buffer.size || indices.isFull) { + doParse() + } else { + searchIndices() + } } else { + // possibleMatchPos was not actually a match + nextPossibleMatch += 1 searchIndices() } - } else { - // possibleMatchPos was not actually a match - nextPossibleMatch += 1 - searchIndices() } - } - private def doParse(): Unit = - if (indices.isEmpty) tryPull() - else if (indices.length == 1) { - // Emit result and compact buffer - val indice = indices(0) - push(out, buffer.slice(indice._1, indice._2).compact) - reset() - if (isClosed(in) && buffer.isEmpty) completeStage() - } else { - // Emit results and compact buffer - emitMultiple(out, new FrameIterator(), () => { + private def doParse(): Unit = + if (indices.isEmpty) tryPull() + else if (indices.length == 1) { + // Emit result and compact buffer + val indice = indices(0) + push(out, buffer.slice(indice._1, indice._2).compact) reset() if (isClosed(in) && buffer.isEmpty) completeStage() - }) + } else { + // Emit results and compact buffer + emitMultiple(out, new FrameIterator(), () => { + reset() + if (isClosed(in) && buffer.isEmpty) completeStage() + }) + } + + private def reset(): Unit = { + val previous = indices.lastOption match { + case OptionVal.Some((_, i)) => i + separatorBytes.size + case OptionVal.None => 0 + } + + buffer = buffer.drop(previous).compact + indices.setLength(0) + nextPossibleMatch = 0 } - private def reset(): Unit = { - val previous = indices.lastOption match { - case OptionVal.Some((_, i)) => i + separatorBytes.size - case OptionVal.None => 0 + // Iterator able to iterate over precompute frame based on start and end position + private class 
FrameIterator(private var index: Int = 0) extends Iterator[ByteString] { + def hasNext: Boolean = index != indices.length + + def next(): ByteString = { + val indice = indices(index) + index += 1 + buffer.slice(indice._1, indice._2).compact + } } - buffer = buffer.drop(previous).compact - indices.setLength(0) - nextPossibleMatch = 0 + // Basic array implementation that allow unsafe resize. + private class LightArray[T: ClassTag](private val capacity: Int, private var index: Int = 0) { + + private val underlying = Array.ofDim[T](capacity) + + def apply(i: Int) = underlying(i) + + def +=(el: T): Unit = { + underlying(index) = el + index += 1 + } + + def isEmpty: Boolean = length == 0 + + def isFull: Boolean = capacity == length + + def setLength(length: Int): Unit = index = length + + def length: Int = index + + def lastOption: OptionVal[T] = + if (index > 0) OptionVal.Some(underlying(index - 1)) + else OptionVal.none + } + setHandlers(in, out, this) } - - // Iterator able to iterate over precompute frame based on start and end position - private class FrameIterator(private var index: Int = 0) extends Iterator[ByteString] { - def hasNext: Boolean = index != indices.length - - def next(): ByteString = { - val indice = indices(index) - index += 1 - buffer.slice(indice._1, indice._2).compact - } - } - - // Basic array implementation that allow unsafe resize. 
- private class LightArray[T: ClassTag](private val capacity: Int, private var index: Int = 0) { - - private val underlying = Array.ofDim[T](capacity) - - def apply(i: Int) = underlying(i) - - def +=(el: T): Unit = { - underlying(index) = el - index += 1 - } - - def isEmpty: Boolean = length == 0 - - def isFull: Boolean = capacity == length - - def setLength(length: Int): Unit = index = length - - def length: Int = index - - def lastOption: OptionVal[T] = - if (index > 0) OptionVal.Some(underlying(index - 1)) - else OptionVal.none - } - setHandlers(in, out, this) - } } - private final class LengthFieldFramingStage( - val lengthFieldLength: Int, - val lengthFieldOffset: Int, - val maximumFrameLength: Int, - val byteOrder: ByteOrder, - computeFrameSize: Option[(Array[Byte], Int) => Int]) extends GraphStage[FlowShape[ByteString, ByteString]] { + private final class LengthFieldFramingStage(val lengthFieldLength: Int, + val lengthFieldOffset: Int, + val maximumFrameLength: Int, + val byteOrder: ByteOrder, + computeFrameSize: Option[(Array[Byte], Int) => Int]) + extends GraphStage[FlowShape[ByteString, ByteString]] { //for the sake of binary compatibility - def this( - lengthFieldLength: Int, - lengthFieldOffset: Int, - maximumFrameLength: Int, - byteOrder: ByteOrder) { + def this(lengthFieldLength: Int, lengthFieldOffset: Int, maximumFrameLength: Int, byteOrder: ByteOrder) { this(lengthFieldLength, lengthFieldOffset, maximumFrameLength, byteOrder, None) } @@ -368,73 +379,77 @@ object Framing { val out = Outlet[ByteString]("LengthFieldFramingStage.out") override val shape: FlowShape[ByteString, ByteString] = FlowShape(in, out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler { - private var buffer = ByteString.empty - private var frameSize = Int.MaxValue + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler with 
OutHandler { + private var buffer = ByteString.empty + private var frameSize = Int.MaxValue - /** - * push, and reset frameSize and buffer - * - */ - private def pushFrame() = { - val emit = buffer.take(frameSize).compact - buffer = buffer.drop(frameSize) - frameSize = Int.MaxValue - push(out, emit) - if (buffer.isEmpty && isClosed(in)) { - completeStage() - } - } - - /** - * try to push downstream, if failed then try to pull upstream - * - */ - private def tryPushFrame() = { - val buffSize = buffer.size - if (buffSize >= frameSize) { - pushFrame() - } else if (buffSize >= minimumChunkSize) { - val parsedLength = intDecoder(buffer.iterator.drop(lengthFieldOffset), lengthFieldLength) - frameSize = computeFrameSize match { - case Some(f) => f(buffer.take(lengthFieldOffset).toArray, parsedLength) - case None => parsedLength + minimumChunkSize + /** + * push, and reset frameSize and buffer + * + */ + private def pushFrame() = { + val emit = buffer.take(frameSize).compact + buffer = buffer.drop(frameSize) + frameSize = Int.MaxValue + push(out, emit) + if (buffer.isEmpty && isClosed(in)) { + completeStage() } - if (frameSize > maximumFrameLength) { - failStage(new FramingException(s"Maximum allowed frame size is $maximumFrameLength but decoded frame header reported size $frameSize")) - } else if (parsedLength < 0) { - failStage(new FramingException(s"Decoded frame header reported negative size $parsedLength")) - } else if (frameSize < minimumChunkSize) { - failStage(new FramingException(s"Computed frame size $frameSize is less than minimum chunk size $minimumChunkSize")) - } else if (buffSize >= frameSize) { + } + + /** + * try to push downstream, if failed then try to pull upstream + * + */ + private def tryPushFrame() = { + val buffSize = buffer.size + if (buffSize >= frameSize) { pushFrame() + } else if (buffSize >= minimumChunkSize) { + val parsedLength = intDecoder(buffer.iterator.drop(lengthFieldOffset), lengthFieldLength) + frameSize = computeFrameSize match { + 
case Some(f) => f(buffer.take(lengthFieldOffset).toArray, parsedLength) + case None => parsedLength + minimumChunkSize + } + if (frameSize > maximumFrameLength) { + failStage(new FramingException( + s"Maximum allowed frame size is $maximumFrameLength but decoded frame header reported size $frameSize")) + } else if (parsedLength < 0) { + failStage(new FramingException(s"Decoded frame header reported negative size $parsedLength")) + } else if (frameSize < minimumChunkSize) { + failStage( + new FramingException( + s"Computed frame size $frameSize is less than minimum chunk size $minimumChunkSize")) + } else if (buffSize >= frameSize) { + pushFrame() + } else tryPull() } else tryPull() - } else tryPull() - } + } - private def tryPull() = { - if (isClosed(in)) { - failStage(new FramingException("Stream finished but there was a truncated final frame in the buffer")) - } else pull(in) - } + private def tryPull() = { + if (isClosed(in)) { + failStage(new FramingException("Stream finished but there was a truncated final frame in the buffer")) + } else pull(in) + } - override def onPush(): Unit = { - buffer ++= grab(in) - tryPushFrame() - } - - override def onPull() = tryPushFrame() - - override def onUpstreamFinish(): Unit = { - if (buffer.isEmpty) { - completeStage() - } else if (isAvailable(out)) { + override def onPush(): Unit = { + buffer ++= grab(in) tryPushFrame() - } // else swallow the termination and wait for pull - } + } - setHandlers(in, out, this) - } + override def onPull() = tryPushFrame() + + override def onUpstreamFinish(): Unit = { + if (buffer.isEmpty) { + completeStage() + } else if (isAvailable(out)) { + tryPushFrame() + } // else swallow the termination and wait for pull + } + + setHandlers(in, out, this) + } } } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala index 416569356c..4c17c2c914 100755 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala +++ 
b/akka-stream/src/main/scala/akka/stream/scaladsl/Graph.scala @@ -27,8 +27,9 @@ import scala.util.control.{ NoStackTrace, NonFatal } * * The implementation of a graph with an arbitrary shape. */ -private[stream] final class GenericGraph[S <: Shape, Mat](override val shape: S, override val traversalBuilder: TraversalBuilder) - extends Graph[S, Mat] { outer => +private[stream] final class GenericGraph[S <: Shape, Mat](override val shape: S, + override val traversalBuilder: TraversalBuilder) + extends Graph[S, Mat] { outer => override def toString: String = s"GenericGraph($shape)" @@ -42,8 +43,11 @@ private[stream] final class GenericGraph[S <: Shape, Mat](override val shape: S, * The implementation of a graph with an arbitrary shape with changed attributes. Changing attributes again * prevents building up a chain of changes. */ -private[stream] final class GenericGraphWithChangedAttributes[S <: Shape, Mat](override val shape: S, originalTraversalBuilder: TraversalBuilder, newAttributes: Attributes) - extends Graph[S, Mat] { outer => +private[stream] final class GenericGraphWithChangedAttributes[S <: Shape, Mat]( + override val shape: S, + originalTraversalBuilder: TraversalBuilder, + newAttributes: Attributes) + extends Graph[S, Mat] { outer => private[stream] def traversalBuilder: TraversalBuilder = originalTraversalBuilder.setAttributes(newAttributes) @@ -54,6 +58,7 @@ private[stream] final class GenericGraphWithChangedAttributes[S <: Shape, Mat](o } object Merge { + /** * Create a new `Merge` with the specified number of input ports. 
* @@ -85,84 +90,87 @@ final class Merge[T](val inputPorts: Int, val eagerComplete: Boolean) extends Gr override def initialAttributes = DefaultAttributes.merge override val shape: UniformFanInShape[T, T] = UniformFanInShape(out, in: _*) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with OutHandler { - private val pendingQueue = FixedSizeBuffer[Inlet[T]](inputPorts) - private def pending: Boolean = pendingQueue.nonEmpty + private val pendingQueue = FixedSizeBuffer[Inlet[T]](inputPorts) + private def pending: Boolean = pendingQueue.nonEmpty - private var runningUpstreams = inputPorts - private def upstreamsClosed = runningUpstreams == 0 + private var runningUpstreams = inputPorts + private def upstreamsClosed = runningUpstreams == 0 + + override def preStart(): Unit = { + var ix = 0 + while (ix < in.size) { + tryPull(in(ix)) + ix += 1 + } + } + + @tailrec + private def dequeueAndDispatch(): Unit = { + val in = pendingQueue.dequeue() + if (in == null) { + // in is null if we reached the end of the queue + if (upstreamsClosed) completeStage() + } else if (isAvailable(in)) { + push(out, grab(in)) + if (upstreamsClosed && !pending) completeStage() + else tryPull(in) + } else { + // in was closed after being enqueued + // try next in queue + dequeueAndDispatch() + } + } - override def preStart(): Unit = { var ix = 0 while (ix < in.size) { - tryPull(in(ix)) + val i = in(ix) ix += 1 + + setHandler(i, + new InHandler { + override def onPush(): Unit = { + if (isAvailable(out)) { + // isAvailable(out) implies !pending + // -> grab and push immediately + push(out, grab(i)) + tryPull(i) + } else pendingQueue.enqueue(i) + } + + override def onUpstreamFinish() = + if (eagerComplete) { + var ix2 = 0 + while (ix2 < in.size) { + cancel(in(ix2)) + ix2 += 1 + } + runningUpstreams = 0 + if (!pending) 
completeStage() + } else { + runningUpstreams -= 1 + if (upstreamsClosed && !pending) completeStage() + } + }) } - } - @tailrec - private def dequeueAndDispatch(): Unit = { - val in = pendingQueue.dequeue() - if (in == null) { - // in is null if we reached the end of the queue - if (upstreamsClosed) completeStage() - } else if (isAvailable(in)) { - push(out, grab(in)) - if (upstreamsClosed && !pending) completeStage() - else tryPull(in) - } else { - // in was closed after being enqueued - // try next in queue - dequeueAndDispatch() + override def onPull(): Unit = { + if (pending) + dequeueAndDispatch() } + + setHandler(out, this) } - var ix = 0 - while (ix < in.size) { - val i = in(ix) - ix += 1 - - setHandler(i, new InHandler { - override def onPush(): Unit = { - if (isAvailable(out)) { - // isAvailable(out) implies !pending - // -> grab and push immediately - push(out, grab(i)) - tryPull(i) - } else pendingQueue.enqueue(i) - } - - override def onUpstreamFinish() = - if (eagerComplete) { - var ix2 = 0 - while (ix2 < in.size) { - cancel(in(ix2)) - ix2 += 1 - } - runningUpstreams = 0 - if (!pending) completeStage() - } else { - runningUpstreams -= 1 - if (upstreamsClosed && !pending) completeStage() - } - }) - } - - override def onPull(): Unit = { - if (pending) - dequeueAndDispatch() - } - - setHandler(out, this) - } - override def toString = "Merge" } object MergePreferred { import FanInShape._ - final class MergePreferredShape[T](val secondaryPorts: Int, _init: Init[T]) extends UniformFanInShape[T, T](secondaryPorts, _init) { + final class MergePreferredShape[T](val secondaryPorts: Int, _init: Init[T]) + extends UniformFanInShape[T, T](secondaryPorts, _init) { def this(secondaryPorts: Int, name: String) = this(secondaryPorts, Name[T](name)) override protected def construct(init: Init[T]): FanInShape[T] = new MergePreferredShape(secondaryPorts, init) override def deepCopy(): MergePreferredShape[T] = super.deepCopy().asInstanceOf[MergePreferredShape[T]] @@ -176,7 
+184,8 @@ object MergePreferred { * @param secondaryPorts number of secondary input ports * @param eagerComplete if true, the merge will complete as soon as one of its inputs completes. */ - def apply[T](secondaryPorts: Int, eagerComplete: Boolean = false): MergePreferred[T] = new MergePreferred(secondaryPorts, eagerComplete) + def apply[T](secondaryPorts: Int, eagerComplete: Boolean = false): MergePreferred[T] = + new MergePreferred(secondaryPorts, eagerComplete) } /** @@ -194,7 +203,8 @@ object MergePreferred { * * '''Cancels when''' downstream cancels */ -final class MergePreferred[T](val secondaryPorts: Int, val eagerComplete: Boolean) extends GraphStage[MergePreferred.MergePreferredShape[T]] { +final class MergePreferred[T](val secondaryPorts: Int, val eagerComplete: Boolean) + extends GraphStage[MergePreferred.MergePreferredShape[T]] { require(secondaryPorts >= 1, "A MergePreferred must have 1 or more secondary input ports") override def initialAttributes = DefaultAttributes.mergePreferred @@ -233,47 +243,49 @@ final class MergePreferred[T](val secondaryPorts: Int, val eagerComplete: Boolea val maxEmitting = 2 var preferredEmitting = 0 - setHandler(preferred, new InHandler { - override def onUpstreamFinish(): Unit = onComplete() - override def onPush(): Unit = - if (preferredEmitting == maxEmitting) () // blocked - else emitPreferred() + setHandler(preferred, + new InHandler { + override def onUpstreamFinish(): Unit = onComplete() + override def onPush(): Unit = + if (preferredEmitting == maxEmitting) () // blocked + else emitPreferred() - def emitPreferred(): Unit = { - preferredEmitting += 1 - emit(out, grab(preferred), emitted) - tryPull(preferred) - } + def emitPreferred(): Unit = { + preferredEmitting += 1 + emit(out, grab(preferred), emitted) + tryPull(preferred) + } - val emitted = () => { - preferredEmitting -= 1 - if (isAvailable(preferred)) emitPreferred() - else if (preferredEmitting == 0) emitSecondary() - } + val emitted = () => { + 
preferredEmitting -= 1 + if (isAvailable(preferred)) emitPreferred() + else if (preferredEmitting == 0) emitSecondary() + } - def emitSecondary(): Unit = { - var i = 0 - while (i < secondaryPorts) { - val port = in(i) - if (isAvailable(port)) emit(out, grab(port), pullMe(i)) - i += 1 - } - } - }) + def emitSecondary(): Unit = { + var i = 0 + while (i < secondaryPorts) { + val port = in(i) + if (isAvailable(port)) emit(out, grab(port), pullMe(i)) + i += 1 + } + } + }) var i = 0 while (i < secondaryPorts) { val port = in(i) val pullPort = pullMe(i) - setHandler(port, new InHandler { - override def onPush(): Unit = { - if (preferredEmitting > 0) () // blocked - else { - emit(out, grab(port), pullPort) - } - } - override def onUpstreamFinish(): Unit = onComplete() - }) + setHandler(port, + new InHandler { + override def onPush(): Unit = { + if (preferredEmitting > 0) () // blocked + else { + emit(out, grab(port), pullPort) + } + } + override def onUpstreamFinish(): Unit = onComplete() + }) i += 1 } @@ -281,13 +293,15 @@ final class MergePreferred[T](val secondaryPorts: Int, val eagerComplete: Boolea } object MergePrioritized { + /** * Create a new `MergePrioritized` with specified number of input ports. * * @param priorities priorities of the input ports * @param eagerComplete if true, the merge will complete as soon as one of its inputs completes. 
*/ - def apply[T](priorities: Seq[Int], eagerComplete: Boolean = false): GraphStage[UniformFanInShape[T, T]] = new MergePrioritized(priorities, eagerComplete) + def apply[T](priorities: Seq[Int], eagerComplete: Boolean = false): GraphStage[UniformFanInShape[T, T]] = + new MergePrioritized(priorities, eagerComplete) } /** @@ -305,7 +319,8 @@ object MergePrioritized { * * '''Cancels when''' downstream cancels */ -final class MergePrioritized[T] private (val priorities: Seq[Int], val eagerComplete: Boolean) extends GraphStage[UniformFanInShape[T, T]] { +final class MergePrioritized[T] private (val priorities: Seq[Int], val eagerComplete: Boolean) + extends GraphStage[UniformFanInShape[T, T]] { private val inputPorts = priorities.size require(inputPorts > 0, "A Merge must have one or more input ports") require(priorities.forall(_ > 0), "Priorities should be positive integers") @@ -315,85 +330,88 @@ final class MergePrioritized[T] private (val priorities: Seq[Int], val eagerComp override def initialAttributes: Attributes = DefaultAttributes.mergePrioritized override val shape: UniformFanInShape[T, T] = UniformFanInShape(out, in: _*) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { - private val allBuffers = Vector.tabulate(priorities.size)(i => FixedSizeBuffer[Inlet[T]](priorities(i))) - private var runningUpstreams = inputPorts - private val randomGen = new SplittableRandom + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with OutHandler { + private val allBuffers = Vector.tabulate(priorities.size)(i => FixedSizeBuffer[Inlet[T]](priorities(i))) + private var runningUpstreams = inputPorts + private val randomGen = new SplittableRandom - override def preStart(): Unit = in.foreach(tryPull) + override def preStart(): Unit = in.foreach(tryPull) - (in zip allBuffers).foreach { - case (inlet, buffer) => - setHandler(inlet, new InHandler { - 
override def onPush(): Unit = { - if (isAvailable(out) && !hasPending) { - push(out, grab(inlet)) - tryPull(inlet) - } else { - buffer.enqueue(inlet) - } - } + in.zip(allBuffers).foreach { + case (inlet, buffer) => + setHandler(inlet, + new InHandler { + override def onPush(): Unit = { + if (isAvailable(out) && !hasPending) { + push(out, grab(inlet)) + tryPull(inlet) + } else { + buffer.enqueue(inlet) + } + } - override def onUpstreamFinish(): Unit = { - if (eagerComplete) { - in.foreach(cancel) - runningUpstreams = 0 - if (!hasPending) completeStage() - } else { - runningUpstreams -= 1 - if (upstreamsClosed && !hasPending) completeStage() - } - } - }) - } - - override def onPull(): Unit = { - if (hasPending) dequeueAndDispatch() - } - - setHandler(out, this) - - private def hasPending: Boolean = allBuffers.exists(_.nonEmpty) - - private def upstreamsClosed = runningUpstreams == 0 - - private def dequeueAndDispatch(): Unit = { - val in = selectNextElement() - push(out, grab(in)) - if (upstreamsClosed && !hasPending) completeStage() else tryPull(in) - } - - private def selectNextElement() = { - var tp = 0 - var ix = 0 - - while (ix < in.size) { - if (allBuffers(ix).nonEmpty) { - tp += priorities(ix) - } - ix += 1 + override def onUpstreamFinish(): Unit = { + if (eagerComplete) { + in.foreach(cancel) + runningUpstreams = 0 + if (!hasPending) completeStage() + } else { + runningUpstreams -= 1 + if (upstreamsClosed && !hasPending) completeStage() + } + } + }) } - var r = randomGen.nextInt(tp) - var next: Inlet[T] = null - ix = 0 - - while (ix < in.size && next == null) { - if (allBuffers(ix).nonEmpty) { - r -= priorities(ix) - if (r < 0) next = allBuffers(ix).dequeue() - } - ix += 1 + override def onPull(): Unit = { + if (hasPending) dequeueAndDispatch() } - next + setHandler(out, this) + + private def hasPending: Boolean = allBuffers.exists(_.nonEmpty) + + private def upstreamsClosed = runningUpstreams == 0 + + private def dequeueAndDispatch(): Unit = { + val in = 
selectNextElement() + push(out, grab(in)) + if (upstreamsClosed && !hasPending) completeStage() else tryPull(in) + } + + private def selectNextElement() = { + var tp = 0 + var ix = 0 + + while (ix < in.size) { + if (allBuffers(ix).nonEmpty) { + tp += priorities(ix) + } + ix += 1 + } + + var r = randomGen.nextInt(tp) + var next: Inlet[T] = null + ix = 0 + + while (ix < in.size && next == null) { + if (allBuffers(ix).nonEmpty) { + r -= priorities(ix) + if (r < 0) next = allBuffers(ix).dequeue() + } + ix += 1 + } + + next + } } - } override def toString = "MergePrioritized" } object Interleave { + /** * Create a new `Interleave` with the specified number of input ports and given size of elements * to take from each input. @@ -402,7 +420,9 @@ object Interleave { * @param segmentSize number of elements to send downstream before switching to next input port * @param eagerClose if true, interleave completes upstream if any of its upstream completes. */ - def apply[T](inputPorts: Int, segmentSize: Int, eagerClose: Boolean = false): Graph[UniformFanInShape[T, T], NotUsed] = + def apply[T](inputPorts: Int, + segmentSize: Int, + eagerClose: Boolean = false): Graph[UniformFanInShape[T, T], NotUsed] = GraphStages.withDetachedInputs(new Interleave[T](inputPorts, segmentSize, eagerClose)) } @@ -419,7 +439,8 @@ object Interleave { * '''Cancels when''' downstream cancels * */ -final class Interleave[T](val inputPorts: Int, val segmentSize: Int, val eagerClose: Boolean) extends GraphStage[UniformFanInShape[T, T]] { +final class Interleave[T](val inputPorts: Int, val segmentSize: Int, val eagerClose: Boolean) + extends GraphStage[UniformFanInShape[T, T]] { require(inputPorts > 1, "input ports must be > 1") require(segmentSize > 0, "segmentSize must be > 0") @@ -427,61 +448,63 @@ final class Interleave[T](val inputPorts: Int, val segmentSize: Int, val eagerCl val out: Outlet[T] = Outlet[T]("Interleave.out") override val shape: UniformFanInShape[T, T] = UniformFanInShape(out, in: _*) - 
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { - private var counter = 0 - private var currentUpstreamIndex = 0 - private var runningUpstreams = inputPorts - private def upstreamsClosed = runningUpstreams == 0 - private def currentUpstream = in(currentUpstreamIndex) + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with OutHandler { + private var counter = 0 + private var currentUpstreamIndex = 0 + private var runningUpstreams = inputPorts + private def upstreamsClosed = runningUpstreams == 0 + private def currentUpstream = in(currentUpstreamIndex) - private def switchToNextInput(): Unit = { - @tailrec - def nextInletIndex(index: Int): Int = { - val successor = index + 1 match { - case `inputPorts` => 0 - case x => x - } - if (!isClosed(in(successor))) successor - else { - if (successor != currentUpstreamIndex) nextInletIndex(successor) + private def switchToNextInput(): Unit = { + @tailrec + def nextInletIndex(index: Int): Int = { + val successor = index + 1 match { + case `inputPorts` => 0 + case x => x + } + if (!isClosed(in(successor))) successor else { - completeStage() - 0 // return dummy/min value to exit stage logic gracefully + if (successor != currentUpstreamIndex) nextInletIndex(successor) + else { + completeStage() + 0 // return dummy/min value to exit stage logic gracefully + } } } + counter = 0 + currentUpstreamIndex = nextInletIndex(currentUpstreamIndex) } - counter = 0 - currentUpstreamIndex = nextInletIndex(currentUpstreamIndex) + + in.foreach { i => + setHandler(i, + new InHandler { + override def onPush(): Unit = { + push(out, grab(i)) + counter += 1 + if (counter == segmentSize) switchToNextInput() + } + + override def onUpstreamFinish(): Unit = { + if (!eagerClose) { + runningUpstreams -= 1 + if (!upstreamsClosed) { + if (i == currentUpstream) { + switchToNextInput() + if (isAvailable(out)) pull(currentUpstream) + 
} + } else completeStage() + } else completeStage() + } + }) + } + + def onPull(): Unit = + if (!hasBeenPulled(currentUpstream)) tryPull(currentUpstream) + + setHandler(out, this) } - in.foreach { i => - setHandler(i, new InHandler { - override def onPush(): Unit = { - push(out, grab(i)) - counter += 1 - if (counter == segmentSize) switchToNextInput() - } - - override def onUpstreamFinish(): Unit = { - if (!eagerClose) { - runningUpstreams -= 1 - if (!upstreamsClosed) { - if (i == currentUpstream) { - switchToNextInput() - if (isAvailable(out)) pull(currentUpstream) - } - } else completeStage() - } else completeStage() - } - }) - } - - def onPull(): Unit = - if (!hasBeenPulled(currentUpstream)) tryPull(currentUpstream) - - setHandler(out, this) - } - override def toString = "Interleave" } @@ -513,8 +536,11 @@ final class MergeSorted[T: Ordering] extends GraphStage[FanInShape2[T, T, T]] { def nullOut(): Unit = other = null.asInstanceOf[T] def dispatch(l: T, r: T): Unit = - if (l < r) { other = r; emit(out, l, readL) } - else { other = l; emit(out, r, readR) } + if (l < r) { + other = r; emit(out, l, readL) + } else { + other = l; emit(out, r, readR) + } val dispatchR = dispatch(other, _: T) val dispatchL = dispatch(_: T, other) @@ -535,6 +561,7 @@ final class MergeSorted[T: Ordering] extends GraphStage[FanInShape2[T, T, T]] { } object Broadcast { + /** * Create a new `Broadcast` with the specified number of output ports. 
* @@ -567,63 +594,65 @@ final class Broadcast[T](val outputPorts: Int, val eagerCancel: Boolean) extends override def initialAttributes = DefaultAttributes.broadcast override val shape: UniformFanOutShape[T, T] = UniformFanOutShape(in, out: _*) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler { - private var pendingCount = outputPorts - private val pending = Array.fill[Boolean](outputPorts)(true) - private var downstreamsRunning = outputPorts + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler { + private var pendingCount = outputPorts + private val pending = Array.fill[Boolean](outputPorts)(true) + private var downstreamsRunning = outputPorts - def onPush(): Unit = { - pendingCount = downstreamsRunning - val elem = grab(in) + def onPush(): Unit = { + pendingCount = downstreamsRunning + val elem = grab(in) - val size = out.size - var idx = 0 - while (idx < size) { - val o = out(idx) - if (!isClosed(o)) { - push(o, elem) - pending(idx) = true + val size = out.size + var idx = 0 + while (idx < size) { + val o = out(idx) + if (!isClosed(o)) { + push(o, elem) + pending(idx) = true + } + idx += 1 } - idx += 1 } - } - setHandler(in, this) + setHandler(in, this) - private def tryPull(): Unit = - if (pendingCount == 0 && !hasBeenPulled(in)) pull(in) + private def tryPull(): Unit = + if (pendingCount == 0 && !hasBeenPulled(in)) pull(in) - { - val size = out.size - var idx = 0 - while (idx < size) { - val o = out(idx) - val i = idx // close over val - setHandler(o, new OutHandler { - override def onPull(): Unit = { - pending(i) = false - pendingCount -= 1 - tryPull() - } + { + val size = out.size + var idx = 0 + while (idx < size) { + val o = out(idx) + val i = idx // close over val + setHandler(o, + new OutHandler { + override def onPull(): Unit = { + pending(i) = false + pendingCount -= 1 + tryPull() + } - override def 
onDownstreamFinish() = { - if (eagerCancel) completeStage() - else { - downstreamsRunning -= 1 - if (downstreamsRunning == 0) completeStage() - else if (pending(i)) { - pending(i) = false - pendingCount -= 1 - tryPull() - } - } - } - }) - idx += 1 + override def onDownstreamFinish() = { + if (eagerCancel) completeStage() + else { + downstreamsRunning -= 1 + if (downstreamsRunning == 0) completeStage() + else if (pending(i)) { + pending(i) = false + pendingCount -= 1 + tryPull() + } + } + } + }) + idx += 1 + } } - } - } + } override def toString = "Broadcast" @@ -687,26 +716,27 @@ private[stream] final class WireTap[T] extends GraphStage[FanOutShape2[T, T, T]] }) // The 'tap' output can neither backpressure, nor cancel, the stage. - setHandler(outTap, new OutHandler { - override def onPull() = { - pendingTap match { - case Some(elem) => - push(outTap, elem) - pendingTap = None - case None => // no pending element to emit - } - } + setHandler(outTap, + new OutHandler { + override def onPull() = { + pendingTap match { + case Some(elem) => + push(outTap, elem) + pendingTap = None + case None => // no pending element to emit + } + } - override def onDownstreamFinish(): Unit = { - setHandler(in, new InHandler { - override def onPush() = { - push(outMain, grab(in)) - } - }) - // Allow any outstanding element to be garbage-collected - pendingTap = None - } - }) + override def onDownstreamFinish(): Unit = { + setHandler(in, new InHandler { + override def onPush() = { + push(outMain, grab(in)) + } + }) + // Allow any outstanding element to be garbage-collected + pendingTap = None + } + }) } override def toString = "WireTap" } @@ -739,8 +769,8 @@ object Partition { * * '''Cancels when''' all downstreams have cancelled (eagerCancel=false) or one downstream cancels (eagerCancel=true) */ - -final class Partition[T](val outputPorts: Int, val partitioner: T => Int, val eagerCancel: Boolean) extends GraphStage[UniformFanOutShape[T, T]] { +final class Partition[T](val outputPorts: 
Int, val partitioner: T => Int, val eagerCancel: Boolean) + extends GraphStage[UniformFanOutShape[T, T]] { /** * Sets `eagerCancel` to `false`. @@ -752,78 +782,82 @@ final class Partition[T](val outputPorts: Int, val partitioner: T => Int, val ea val out: Seq[Outlet[T]] = Seq.tabulate(outputPorts)(i => Outlet[T]("Partition.out" + i)) // FIXME BC make this immutable.IndexedSeq as type + Vector as concrete impl override val shape: UniformFanOutShape[T, T] = UniformFanOutShape[T, T](in, out: _*) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler { - private var outPendingElem: Any = null - private var outPendingIdx: Int = _ - private var downstreamRunning = outputPorts + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler { + private var outPendingElem: Any = null + private var outPendingIdx: Int = _ + private var downstreamRunning = outputPorts - def onPush() = { - val elem = grab(in) - val idx = partitioner(elem) - if (idx < 0 || idx >= outputPorts) { - failStage(PartitionOutOfBoundsException(s"partitioner must return an index in the range [0,${outputPorts - 1}]. 
returned: [$idx] for input [${elem.getClass.getName}].")) - } else if (!isClosed(out(idx))) { - if (isAvailable(out(idx))) { - push(out(idx), elem) - if (out.exists(isAvailable(_))) - pull(in) - } else { - outPendingElem = elem - outPendingIdx = idx - } - - } else if (out.exists(isAvailable(_))) - pull(in) - } - - override def onUpstreamFinish(): Unit = { - if (outPendingElem == null) completeStage() - } - - setHandler(in, this) - - out.iterator.zipWithIndex.foreach { - case (o, idx) => - setHandler(o, new OutHandler { - override def onPull() = { - if (outPendingElem != null) { - val elem = outPendingElem.asInstanceOf[T] - if (idx == outPendingIdx) { - push(o, elem) - outPendingElem = null - if (!isClosed(in)) { - if (!hasBeenPulled(in)) { - pull(in) - } - } else - completeStage() - } - } else if (!hasBeenPulled(in)) + def onPush() = { + val elem = grab(in) + val idx = partitioner(elem) + if (idx < 0 || idx >= outputPorts) { + failStage(PartitionOutOfBoundsException( + s"partitioner must return an index in the range [0,${outputPorts - 1}]. 
returned: [$idx] for input [${elem.getClass.getName}].")) + } else if (!isClosed(out(idx))) { + if (isAvailable(out(idx))) { + push(out(idx), elem) + if (out.exists(isAvailable(_))) pull(in) + } else { + outPendingElem = elem + outPendingIdx = idx } - override def onDownstreamFinish(): Unit = - if (eagerCancel) completeStage() - else { - downstreamRunning -= 1 - if (downstreamRunning == 0) - completeStage() - else if (outPendingElem != null) { - if (idx == outPendingIdx) { - outPendingElem = null - if (!hasBeenPulled(in)) - pull(in) - } - } - } - }) + } else if (out.exists(isAvailable(_))) + pull(in) + } + + override def onUpstreamFinish(): Unit = { + if (outPendingElem == null) completeStage() + } + + setHandler(in, this) + + out.iterator.zipWithIndex.foreach { + case (o, idx) => + setHandler(o, + new OutHandler { + override def onPull() = { + if (outPendingElem != null) { + val elem = outPendingElem.asInstanceOf[T] + if (idx == outPendingIdx) { + push(o, elem) + outPendingElem = null + if (!isClosed(in)) { + if (!hasBeenPulled(in)) { + pull(in) + } + } else + completeStage() + } + } else if (!hasBeenPulled(in)) + pull(in) + } + + override def onDownstreamFinish(): Unit = + if (eagerCancel) completeStage() + else { + downstreamRunning -= 1 + if (downstreamRunning == 0) + completeStage() + else if (outPendingElem != null) { + if (idx == outPendingIdx) { + outPendingElem = null + if (!hasBeenPulled(in)) + pull(in) + } + } + } + }) + } } - } override def toString = s"Partition($outputPorts)" } object Balance { + /** * Create a new `Balance` with the specified number of output ports. This method sets `eagerCancel` to `false`. * To specify a different value for the `eagerCancel` parameter, then instantiate Balance using the constructor. 
@@ -854,7 +888,8 @@ object Balance { * * '''Cancels when''' If eagerCancel is enabled: when any downstream cancels; otherwise: when all downstreams cancel */ -final class Balance[T](val outputPorts: Int, val waitForAllDownstreams: Boolean, val eagerCancel: Boolean) extends GraphStage[UniformFanOutShape[T, T]] { +final class Balance[T](val outputPorts: Int, val waitForAllDownstreams: Boolean, val eagerCancel: Boolean) + extends GraphStage[UniformFanOutShape[T, T]] { // one output might seem counter intuitive but saves us from special handling in other places require(outputPorts >= 1, "A Balance must have one or more output ports") @@ -867,73 +902,76 @@ final class Balance[T](val outputPorts: Int, val waitForAllDownstreams: Boolean, override def initialAttributes = DefaultAttributes.balance override val shape: UniformFanOutShape[T, T] = UniformFanOutShape[T, T](in, out: _*) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler { - private val pendingQueue = FixedSizeBuffer[Outlet[T]](outputPorts) - private def noPending: Boolean = pendingQueue.isEmpty + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler { + private val pendingQueue = FixedSizeBuffer[Outlet[T]](outputPorts) + private def noPending: Boolean = pendingQueue.isEmpty - private var needDownstreamPulls: Int = if (waitForAllDownstreams) outputPorts else 0 - private var downstreamsRunning: Int = outputPorts + private var needDownstreamPulls: Int = if (waitForAllDownstreams) outputPorts else 0 + private var downstreamsRunning: Int = outputPorts - @tailrec - private def dequeueAndDispatch(): Unit = { - val out = pendingQueue.dequeue() - // out is null if depleted pendingQueue without reaching - // an out that is not closed, in which case we just return - if (out ne null) { - if (!isClosed(out)) { - push(out, grab(in)) - if (!noPending) pull(in) - } else if (!noPending) { - // 
if they are pending outlets, try to find one output that isn't closed - dequeueAndDispatch() + @tailrec + private def dequeueAndDispatch(): Unit = { + val out = pendingQueue.dequeue() + // out is null if depleted pendingQueue without reaching + // an out that is not closed, in which case we just return + if (out ne null) { + if (!isClosed(out)) { + push(out, grab(in)) + if (!noPending) pull(in) + } else if (!noPending) { + // if they are pending outlets, try to find one output that isn't closed + dequeueAndDispatch() + } } } + + def onPush(): Unit = dequeueAndDispatch() + setHandler(in, this) + + out.foreach { o => + setHandler(o, + new OutHandler { + private var hasPulled = false + + override def onPull(): Unit = { + if (!hasPulled) { + hasPulled = true + if (needDownstreamPulls > 0) needDownstreamPulls -= 1 + } + + if (needDownstreamPulls == 0) { + if (isAvailable(in)) { + if (noPending) { + push(o, grab(in)) + } + } else { + if (!hasBeenPulled(in)) pull(in) + pendingQueue.enqueue(o) + } + } else pendingQueue.enqueue(o) + } + + override def onDownstreamFinish() = { + if (eagerCancel) completeStage() + else { + downstreamsRunning -= 1 + if (downstreamsRunning == 0) completeStage() + else if (!hasPulled && needDownstreamPulls > 0) { + needDownstreamPulls -= 1 + if (needDownstreamPulls == 0 && !hasBeenPulled(in)) pull(in) + } + } + } + }) + } } - def onPush(): Unit = dequeueAndDispatch() - setHandler(in, this) - - out.foreach { o => - setHandler(o, new OutHandler { - private var hasPulled = false - - override def onPull(): Unit = { - if (!hasPulled) { - hasPulled = true - if (needDownstreamPulls > 0) needDownstreamPulls -= 1 - } - - if (needDownstreamPulls == 0) { - if (isAvailable(in)) { - if (noPending) { - push(o, grab(in)) - } - } else { - if (!hasBeenPulled(in)) pull(in) - pendingQueue.enqueue(o) - } - } else pendingQueue.enqueue(o) - } - - override def onDownstreamFinish() = { - if (eagerCancel) completeStage() - else { - downstreamsRunning -= 1 - if 
(downstreamsRunning == 0) completeStage() - else if (!hasPulled && needDownstreamPulls > 0) { - needDownstreamPulls -= 1 - if (needDownstreamPulls == 0 && !hasBeenPulled(in)) pull(in) - } - } - } - }) - } - } - override def toString = "Balance" } object Zip { + /** * Create a new `Zip`. */ @@ -958,6 +996,7 @@ final class Zip[A, B] extends ZipWith2[A, B, (A, B)](Tuple2.apply) { } object ZipLatest { + /** * Create a new `ZipLatest`. */ @@ -1030,6 +1069,7 @@ object ZipLatestWith extends ZipLatestWithApply * '''Cancels when''' any downstream cancels */ object Unzip { + /** * Create a new `Unzip`. */ @@ -1067,6 +1107,7 @@ final class Unzip[A, B]() extends UnzipWith2[(A, B), A, B](ConstantFun.scalaIden object UnzipWith extends UnzipWithApply object ZipN { + /** * Create a new `ZipN`. */ @@ -1092,6 +1133,7 @@ final class ZipN[A](n: Int) extends ZipWithN[A, immutable.Seq[A]](ConstantFun.sc } object ZipWithN { + /** * Create a new `ZipWithN`. */ @@ -1119,51 +1161,53 @@ class ZipWithN[A, O](zipper: immutable.Seq[A] => O)(n: Int) extends GraphStage[U @deprecated("use `shape.inlets` or `shape.in(id)` instead", "2.5.5") def inSeq: immutable.IndexedSeq[Inlet[A]] = shape.inlets.asInstanceOf[immutable.IndexedSeq[Inlet[A]]] - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { - var pending = 0 - // Without this field the completion signalling would take one extra pull - var willShutDown = false + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with OutHandler { + var pending = 0 + // Without this field the completion signalling would take one extra pull + var willShutDown = false - val grabInlet = grab[A] _ - val pullInlet = pull[A] _ + val grabInlet = grab[A] _ + val pullInlet = pull[A] _ - private def pushAll(): Unit = { - push(out, zipper(shape.inlets.map(grabInlet))) - if (willShutDown) completeStage() - else shape.inlets.foreach(pullInlet) - } + 
private def pushAll(): Unit = { + push(out, zipper(shape.inlets.map(grabInlet))) + if (willShutDown) completeStage() + else shape.inlets.foreach(pullInlet) + } - override def preStart(): Unit = { - shape.inlets.foreach(pullInlet) - } + override def preStart(): Unit = { + shape.inlets.foreach(pullInlet) + } - shape.inlets.foreach(in => { - setHandler(in, new InHandler { - override def onPush(): Unit = { - pending -= 1 - if (pending == 0) pushAll() - } + shape.inlets.foreach(in => { + setHandler(in, new InHandler { + override def onPush(): Unit = { + pending -= 1 + if (pending == 0) pushAll() + } - override def onUpstreamFinish(): Unit = { - if (!isAvailable(in)) completeStage() - willShutDown = true - } + override def onUpstreamFinish(): Unit = { + if (!isAvailable(in)) completeStage() + willShutDown = true + } + }) }) - }) - def onPull(): Unit = { - pending += n - if (pending == 0) pushAll() + def onPull(): Unit = { + pending += n + if (pending == 0) pushAll() + } + + setHandler(out, this) } - setHandler(out, this) - } - override def toString = "ZipWithN" } object Concat { + /** * Create a new `Concat`. 
*/ @@ -1202,21 +1246,22 @@ final class Concat[T](val inputPorts: Int) extends GraphStage[UniformFanInShape[ while (idxx < size) { val i = in(idxx) val idx = idxx // close over val - setHandler(i, new InHandler { - override def onPush() = { - push(out, grab(i)) - } + setHandler(i, + new InHandler { + override def onPush() = { + push(out, grab(i)) + } - override def onUpstreamFinish() = { - if (idx == activeStream) { - activeStream += 1 - // Skip closed inputs - while (activeStream < inputPorts && isClosed(in(activeStream))) activeStream += 1 - if (activeStream == inputPorts) completeStage() - else if (isAvailable(out)) pull(in(activeStream)) - } - } - }) + override def onUpstreamFinish() = { + if (idx == activeStream) { + activeStream += 1 + // Skip closed inputs + while (activeStream < inputPorts && isClosed(in(activeStream))) activeStream += 1 + if (activeStream == inputPorts) completeStage() + else if (isAvailable(out)) pull(in(activeStream)) + } + } + }) idxx += 1 } } @@ -1266,47 +1311,48 @@ private[stream] final class OrElse[T] extends GraphStage[UniformFanInShape[T, T] override protected def initialAttributes: Attributes = DefaultAttributes.orElse - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler with InHandler { + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with OutHandler with InHandler { - private[this] var currentIn = primary - private[this] var primaryPushed = false + private[this] var currentIn = primary + private[this] var primaryPushed = false - override def onPull(): Unit = { - pull(currentIn) - } - - // for the primary inHandler - override def onPush(): Unit = { - if (!primaryPushed) { - primaryPushed = true - cancel(secondary) + override def onPull(): Unit = { + pull(currentIn) } - val elem = grab(primary) - push(out, elem) - } - // for the primary inHandler - override def onUpstreamFinish(): Unit = { - if 
(!primaryPushed && !isClosed(secondary)) { - currentIn = secondary - if (isAvailable(out)) pull(secondary) - } else { - completeStage() - } - } - - setHandler(secondary, new InHandler { + // for the primary inHandler override def onPush(): Unit = { - push(out, grab(secondary)) + if (!primaryPushed) { + primaryPushed = true + cancel(secondary) + } + val elem = grab(primary) + push(out, elem) } + // for the primary inHandler override def onUpstreamFinish(): Unit = { - if (isClosed(primary)) completeStage() + if (!primaryPushed && !isClosed(secondary)) { + currentIn = secondary + if (isAvailable(out)) pull(secondary) + } else { + completeStage() + } } - }) - setHandlers(primary, out, this) - } + setHandler(secondary, new InHandler { + override def onPush(): Unit = { + push(out, grab(secondary)) + } + + override def onUpstreamFinish(): Unit = { + if (isClosed(primary)) completeStage() + } + }) + + setHandlers(primary, out, this) + } override def toString: String = s"OrElse" @@ -1318,7 +1364,8 @@ object GraphDSL extends GraphApply { * Creates a new [[Graph]] by importing the given graph list `graphs` and passing their [[Shape]]s * along with the [[GraphDSL.Builder]] to the given create function. 
*/ - def create[S <: Shape, IS <: Shape, Mat](graphs: immutable.Seq[Graph[IS, Mat]])(buildBlock: GraphDSL.Builder[immutable.Seq[Mat]] => immutable.Seq[IS] => S): Graph[S, immutable.Seq[Mat]] = { + def create[S <: Shape, IS <: Shape, Mat](graphs: immutable.Seq[Graph[IS, Mat]])( + buildBlock: GraphDSL.Builder[immutable.Seq[Mat]] => immutable.Seq[IS] => S): Graph[S, immutable.Seq[Mat]] = { require(graphs.nonEmpty, "The input list must have one or more Graph elements") val builder = new GraphDSL.Builder val toList = (m1: Mat) => Seq(m1) @@ -1376,10 +1423,8 @@ object GraphDSL extends GraphApply { */ private[stream] def add[S <: Shape, A](graph: Graph[S, _], transform: (A) => Any): S = { val newShape = graph.shape.deepCopy() - traversalBuilderInProgress = traversalBuilderInProgress.add( - graph.traversalBuilder.transformMat(transform), - newShape, - Keep.right) + traversalBuilderInProgress = + traversalBuilderInProgress.add(graph.traversalBuilder.transformMat(transform), newShape, Keep.right) unwiredIns ++= newShape.inlets unwiredOuts ++= newShape.outlets @@ -1418,7 +1463,9 @@ object GraphDSL extends GraphApply { * @return The outlet that will emit the materialized value. */ def materializedValue: Outlet[M @uncheckedVariance] = - add(Source.maybe[M], { (prev: M, prom: Promise[Option[M]]) => prom.success(Some(prev)); prev }).out + add(Source.maybe[M], { (prev: M, prom: Promise[Option[M]]) => + prom.success(Some(prev)); prev + }).out private[GraphDSL] def traversalBuilder: TraversalBuilder = traversalBuilderInProgress @@ -1430,11 +1477,13 @@ object GraphDSL extends GraphApply { val forwardMessage = if (diff1.isEmpty) "" - else s" $tag [${diff1.map(format).mkString(", ")}] were returned in the resulting shape but were already connected." + else + s" $tag [${diff1.map(format).mkString(", ")}] were returned in the resulting shape but were already connected." 
val backwardMessage = if (diff2.isEmpty) "" - else s" $tag [${diff2.map(format).mkString(", ")}] were not returned in the resulting shape and not connected." + else + s" $tag [${diff2.map(format).mkString(", ")}] were not returned in the resulting shape and not connected." forwardMessage + backwardMessage } else "" @@ -1565,7 +1614,7 @@ object GraphDSL extends GraphApply { } private class PortOpsImpl[+Out](override val outlet: Outlet[Out @uncheckedVariance], b: Builder[_]) - extends PortOps[Out] { + extends PortOps[Out] { override def withAttributes(attr: Attributes): Repr[Out] = throw settingAttrNotSupported override def addAttributes(attr: Attributes): Repr[Out] = throw settingAttrNotSupported @@ -1600,12 +1649,17 @@ object GraphDSL extends GraphApply { override def importAndGetPortReverse(b: Builder[_]): Inlet[In] = throw new IllegalArgumentException(msg) } - implicit final class FanInOps[In, Out](val j: UniformFanInShape[In, Out]) extends AnyVal with CombinerBase[Out] with ReverseCombinerBase[In] { + implicit final class FanInOps[In, Out](val j: UniformFanInShape[In, Out]) + extends AnyVal + with CombinerBase[Out] + with ReverseCombinerBase[In] { override def importAndGetPort(b: Builder[_]): Outlet[Out] = j.out override def importAndGetPortReverse(b: Builder[_]): Inlet[In] = findIn(b, j, 0) } - implicit final class FanOutOps[In, Out](val j: UniformFanOutShape[In, Out]) extends AnyVal with ReverseCombinerBase[In] { + implicit final class FanOutOps[In, Out](val j: UniformFanOutShape[In, Out]) + extends AnyVal + with ReverseCombinerBase[In] { override def importAndGetPortReverse(b: Builder[_]): Inlet[In] = j.in } @@ -1620,7 +1674,8 @@ object GraphDSL extends GraphApply { implicit final class FlowShapeArrow[I, O](val f: FlowShape[I, O]) extends AnyVal with ReverseCombinerBase[I] { override def importAndGetPortReverse(b: Builder[_]): Inlet[I] = f.in - def <~>[I2, O2, Mat](bidi: Graph[BidiShape[O, O2, I2, I], Mat])(implicit b: Builder[_]): BidiShape[O, O2, I2, I] = { 
+ def <~>[I2, O2, Mat](bidi: Graph[BidiShape[O, O2, I2, I], Mat])( + implicit b: Builder[_]): BidiShape[O, O2, I2, I] = { val shape = b.add(bidi) b.addEdge(f.out, shape.in1) b.addEdge(shape.out2, f.in) @@ -1641,7 +1696,8 @@ object GraphDSL extends GraphApply { } implicit final class FlowArrow[I, O, M](val f: Graph[FlowShape[I, O], M]) extends AnyVal { - def <~>[I2, O2, Mat](bidi: Graph[BidiShape[O, O2, I2, I], Mat])(implicit b: Builder[_]): BidiShape[O, O2, I2, I] = { + def <~>[I2, O2, Mat](bidi: Graph[BidiShape[O, O2, I2, I], Mat])( + implicit b: Builder[_]): BidiShape[O, O2, I2, I] = { val shape = b.add(bidi) val flow = b.add(f) b.addEdge(flow.out, shape.in1) @@ -1671,7 +1727,8 @@ object GraphDSL extends GraphApply { other } - def <~>[I3, O3, M](otherFlow: Graph[BidiShape[O1, O3, I3, I2], M])(implicit b: Builder[_]): BidiShape[O1, O3, I3, I2] = { + def <~>[I3, O3, M](otherFlow: Graph[BidiShape[O1, O3, I3, I2], M])( + implicit b: Builder[_]): BidiShape[O1, O3, I3, I2] = { val other = b.add(otherFlow) b.addEdge(bidi.out1, other.in1) b.addEdge(other.out2, bidi.in2) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala index 4b99d5b33e..70dec972a1 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Hub.scala @@ -72,7 +72,8 @@ object MergeHub { /** * INTERNAL API */ -private[akka] class MergeHub[T](perProducerBufferSize: Int) extends GraphStageWithMaterializedValue[SourceShape[T], Sink[T, NotUsed]] { +private[akka] class MergeHub[T](perProducerBufferSize: Int) + extends GraphStageWithMaterializedValue[SourceShape[T], Sink[T, NotUsed]] { require(perProducerBufferSize > 0, "Buffer size must be positive") val out: Outlet[T] = Outlet("MergeHub.out") @@ -104,7 +105,9 @@ private[akka] class MergeHub[T](perProducerBufferSize: Int) extends GraphStageWi } - final class MergedSourceLogic(_shape: Shape, producerCount: AtomicLong) 
extends GraphStageLogic(_shape) with OutHandler { + final class MergedSourceLogic(_shape: Shape, producerCount: AtomicLong) + extends GraphStageLogic(_shape) + with OutHandler { /* * Basically all merged messages are shared in this queue. Individual buffer sizes are enforced by tracking * demand per producer in the 'demands' Map. One twist here is that the same queue contains control messages, @@ -116,9 +119,10 @@ private[akka] class MergeHub[T](perProducerBufferSize: Int) extends GraphStageWi @volatile private[this] var shuttingDown = false private[this] val demands = scala.collection.mutable.LongMap.empty[InputState] - private[this] val wakeupCallback = getAsyncCallback[NotUsed]((_) => - // We are only allowed to dequeue if we are not backpressured. See comment in tryProcessNext() for details. - if (isAvailable(out)) tryProcessNext(firstAttempt = true)) + private[this] val wakeupCallback = getAsyncCallback[NotUsed]( + (_) => + // We are only allowed to dequeue if we are not backpressured. See comment in tryProcessNext() for details. + if (isAvailable(out)) tryProcessNext(firstAttempt = true)) setHandler(out, this) @@ -228,59 +232,61 @@ private[akka] class MergeHub[T](perProducerBufferSize: Int) extends GraphStageWi val in: Inlet[T] = Inlet("MergeHub.in") override val shape: SinkShape[T] = SinkShape(in) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler { - // Start from non-zero demand to avoid initial delays. - // The HUB will expect this behavior. - private[this] var demand: Long = perProducerBufferSize - private[this] val id = idCounter.getAndIncrement() + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with InHandler { + // Start from non-zero demand to avoid initial delays. + // The HUB will expect this behavior. 
+ private[this] var demand: Long = perProducerBufferSize + private[this] val id = idCounter.getAndIncrement() - override def preStart(): Unit = { - if (!logic.isShuttingDown) { - logic.enqueue(Register(id, getAsyncCallback(onDemand))) + override def preStart(): Unit = { + if (!logic.isShuttingDown) { + logic.enqueue(Register(id, getAsyncCallback(onDemand))) - // At this point, we could be in the unfortunate situation that: - // - we missed the shutdown announcement and entered this arm of the if statement - // - *before* we enqueued our Register event, the Hub already finished looking at the queue - // and is now dead, so we are never notified again. - // To safeguard against this, we MUST check the announcement again. This is enough: - // if the Hub is no longer looking at the queue, then it must be that isShuttingDown must be already true. - if (!logic.isShuttingDown) pullWithDemand() - else completeStage() - } else { - completeStage() + // At this point, we could be in the unfortunate situation that: + // - we missed the shutdown announcement and entered this arm of the if statement + // - *before* we enqueued our Register event, the Hub already finished looking at the queue + // and is now dead, so we are never notified again. + // To safeguard against this, we MUST check the announcement again. This is enough: + // if the Hub is no longer looking at the queue, then it must be that isShuttingDown must be already true. + if (!logic.isShuttingDown) pullWithDemand() + else completeStage() + } else { + completeStage() + } } - } - override def postStop(): Unit = { - // Unlike in the case of preStart, we don't care about the Hub no longer looking at the queue. 
- if (!logic.isShuttingDown) logic.enqueue(Deregister(id)) - } - - override def onPush(): Unit = { - logic.enqueue(Element(id, grab(in))) - if (demand > 0) pullWithDemand() - } - - private def pullWithDemand(): Unit = { - demand -= 1 - pull(in) - } - - // Make some noise - override def onUpstreamFailure(ex: Throwable): Unit = { - throw new MergeHub.ProducerFailed("Upstream producer failed with exception, " + - "removing from MergeHub now", ex) - } - - private def onDemand(moreDemand: Long): Unit = { - if (moreDemand == MergeHub.Cancel) completeStage() - else { - demand += moreDemand - if (!hasBeenPulled(in)) pullWithDemand() + override def postStop(): Unit = { + // Unlike in the case of preStart, we don't care about the Hub no longer looking at the queue. + if (!logic.isShuttingDown) logic.enqueue(Deregister(id)) } - } - setHandler(in, this) - } + override def onPush(): Unit = { + logic.enqueue(Element(id, grab(in))) + if (demand > 0) pullWithDemand() + } + + private def pullWithDemand(): Unit = { + demand -= 1 + pull(in) + } + + // Make some noise + override def onUpstreamFailure(ex: Throwable): Unit = { + throw new MergeHub.ProducerFailed("Upstream producer failed with exception, " + + "removing from MergeHub now", + ex) + } + + private def onDemand(moreDemand: Long): Unit = { + if (moreDemand == MergeHub.Cancel) completeStage() + else { + demand += moreDemand + if (!hasBeenPulled(in)) pullWithDemand() + } + } + + setHandler(in, this) + } } @@ -347,7 +353,8 @@ object BroadcastHub { /** * INTERNAL API */ -private[akka] class BroadcastHub[T](bufferSize: Int) extends GraphStageWithMaterializedValue[SinkShape[T], Source[T, NotUsed]] { +private[akka] class BroadcastHub[T](bufferSize: Int) + extends GraphStageWithMaterializedValue[SinkShape[T], Source[T, NotUsed]] { require(bufferSize > 0, "Buffer size must be positive") require(bufferSize < 4096, "Buffer size larger then 4095 is not allowed") require((bufferSize & bufferSize - 1) == 0, "Buffer size must be a power of 
two") @@ -373,11 +380,11 @@ private[akka] class BroadcastHub[T](bufferSize: Int) extends GraphStageWithMater private object Completed private sealed trait HubState - private case class Open(callbackFuture: Future[AsyncCallback[HubEvent]], registrations: List[Consumer]) extends HubState + private case class Open(callbackFuture: Future[AsyncCallback[HubEvent]], registrations: List[Consumer]) + extends HubState private case class Closed(failure: Option[Throwable]) extends HubState - private class BroadcastSinkLogic(_shape: Shape) - extends GraphStageLogic(_shape) with InHandler { + private class BroadcastSinkLogic(_shape: Shape) extends GraphStageLogic(_shape) with InHandler { private[this] val callbackPromise: Promise[AsyncCallback[HubEvent]] = Promise() private[this] val noRegistrationsState = Open(callbackPromise.future, Nil) @@ -425,7 +432,7 @@ private[akka] class BroadcastHub[T](bufferSize: Int) extends GraphStageWithMater private def onEvent(ev: HubEvent): Unit = { ev match { case RegistrationPending => - state.getAndSet(noRegistrationsState).asInstanceOf[Open].registrations foreach { consumer => + state.getAndSet(noRegistrationsState).asInstanceOf[Open].registrations.foreach { consumer => val startFrom = head activeConsumers += 1 addConsumer(consumer, startFrom) @@ -434,8 +441,7 @@ private[akka] class BroadcastHub[T](bufferSize: Int) extends GraphStageWithMater consumer.callback.invokeWithFeedback(Initialize(startFrom)).failed.foreach { case _: StreamDetachedException => callbackPromise.future.foreach(callback => - callback.invoke(UnRegister(consumer.id, startFrom, startFrom)) - ) + callback.invoke(UnRegister(consumer.id, startFrom, startFrom))) case _ => () } } @@ -483,12 +489,12 @@ private[akka] class BroadcastHub[T](bufferSize: Int) extends GraphStageWithMater val failMessage = HubCompleted(Some(ex)) // Notify pending consumers and set tombstone - state.getAndSet(Closed(Some(ex))).asInstanceOf[Open].registrations foreach { consumer => + 
state.getAndSet(Closed(Some(ex))).asInstanceOf[Open].registrations.foreach { consumer => consumer.callback.invoke(failMessage) } // Notify registered consumers - consumerWheel.iterator.flatMap(_.iterator) foreach { consumer => + consumerWheel.iterator.flatMap(_.iterator).foreach { consumer => consumer.callback.invoke(failMessage) } failStage(ex) @@ -577,7 +583,7 @@ private[akka] class BroadcastHub[T](bufferSize: Int) extends GraphStageWithMater case open: Open => if (state.compareAndSet(open, Closed(None))) { val completedMessage = HubCompleted(None) - open.registrations foreach { consumer => + open.registrations.foreach { consumer => consumer.callback.invoke(completedMessage) } } else tryClose() @@ -610,7 +616,8 @@ private[akka] class BroadcastHub[T](bufferSize: Int) extends GraphStageWithMater private final case class HubCompleted(failure: Option[Throwable]) extends ConsumerEvent private final case class Initialize(offset: Int) extends ConsumerEvent - override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Source[T, NotUsed]) = { + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (GraphStageLogic, Source[T, NotUsed]) = { val idCounter = new AtomicLong() val logic = new BroadcastSinkLogic(shape) @@ -619,100 +626,101 @@ private[akka] class BroadcastHub[T](bufferSize: Int) extends GraphStageWithMater val out: Outlet[T] = Outlet("BroadcastHub.out") override val shape: SourceShape[T] = SourceShape(out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { - private[this] var untilNextAdvanceSignal = DemandThreshold - private[this] val id = idCounter.getAndIncrement() - private[this] var offsetInitialized = false - private[this] var hubCallback: AsyncCallback[HubEvent] = _ - - /* - * We need to track our last offset that we published to the Hub. 
The reason is, that for efficiency reasons, - * the Hub can only look up and move/remove Consumers with known wheel slots. This means that no extra hash-map - * is needed, but it also means that we need to keep track of both our current offset, and the last one that - * we published. - */ - private[this] var previousPublishedOffset = 0 - private[this] var offset = 0 - - override def preStart(): Unit = { - val callback = getAsyncCallback(onCommand) - - val onHubReady: Try[AsyncCallback[HubEvent]] => Unit = { - case Success(callback) => - hubCallback = callback - if (isAvailable(out) && offsetInitialized) onPull() - callback.invoke(RegistrationPending) - case Failure(ex) => - failStage(ex) - } - - @tailrec def register(): Unit = { - logic.state.get() match { - case Closed(Some(ex)) => failStage(ex) - case Closed(None) => completeStage() - case previousState @ Open(callbackFuture, registrations) => - val newRegistrations = Consumer(id, callback) :: registrations - if (logic.state.compareAndSet(previousState, Open(callbackFuture, newRegistrations))) { - callbackFuture.onComplete(getAsyncCallback(onHubReady).invoke)(materializer.executionContext) - } else register() - } - } + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with OutHandler { + private[this] var untilNextAdvanceSignal = DemandThreshold + private[this] val id = idCounter.getAndIncrement() + private[this] var offsetInitialized = false + private[this] var hubCallback: AsyncCallback[HubEvent] = _ /* - * Note that there is a potential race here. First we add ourselves to the pending registrations, then - * we send RegistrationPending. However, another downstream might have triggered our registration by its - * own RegistrationPending message, since we are in the list already. 
- * This means we might receive an onCommand(Initialize(offset)) *before* onHubReady fires so it is important - * to only serve elements after both offsetInitialized = true and hubCallback is not null. + * We need to track our last offset that we published to the Hub. The reason is, that for efficiency reasons, + * the Hub can only look up and move/remove Consumers with known wheel slots. This means that no extra hash-map + * is needed, but it also means that we need to keep track of both our current offset, and the last one that + * we published. */ - register() + private[this] var previousPublishedOffset = 0 + private[this] var offset = 0 - } + override def preStart(): Unit = { + val callback = getAsyncCallback(onCommand) - override def onPull(): Unit = { - if (offsetInitialized && (hubCallback ne null)) { - val elem = logic.poll(offset) + val onHubReady: Try[AsyncCallback[HubEvent]] => Unit = { + case Success(callback) => + hubCallback = callback + if (isAvailable(out) && offsetInitialized) onPull() + callback.invoke(RegistrationPending) + case Failure(ex) => + failStage(ex) + } - elem match { - case null => - hubCallback.invoke(NeedWakeup(id, previousPublishedOffset, offset)) - previousPublishedOffset = offset - untilNextAdvanceSignal = DemandThreshold - case Completed => - completeStage() - case _ => - push(out, elem.asInstanceOf[T]) - offset += 1 - untilNextAdvanceSignal -= 1 - if (untilNextAdvanceSignal == 0) { + @tailrec def register(): Unit = { + logic.state.get() match { + case Closed(Some(ex)) => failStage(ex) + case Closed(None) => completeStage() + case previousState @ Open(callbackFuture, registrations) => + val newRegistrations = Consumer(id, callback) :: registrations + if (logic.state.compareAndSet(previousState, Open(callbackFuture, newRegistrations))) { + callbackFuture.onComplete(getAsyncCallback(onHubReady).invoke)(materializer.executionContext) + } else register() + } + } + + /* + * Note that there is a potential race here. 
First we add ourselves to the pending registrations, then + * we send RegistrationPending. However, another downstream might have triggered our registration by its + * own RegistrationPending message, since we are in the list already. + * This means we might receive an onCommand(Initialize(offset)) *before* onHubReady fires so it is important + * to only serve elements after both offsetInitialized = true and hubCallback is not null. + */ + register() + + } + + override def onPull(): Unit = { + if (offsetInitialized && (hubCallback ne null)) { + val elem = logic.poll(offset) + + elem match { + case null => + hubCallback.invoke(NeedWakeup(id, previousPublishedOffset, offset)) + previousPublishedOffset = offset untilNextAdvanceSignal = DemandThreshold - val previousOffset = previousPublishedOffset - previousPublishedOffset += DemandThreshold - hubCallback.invoke(Advance(id, previousOffset)) - } + case Completed => + completeStage() + case _ => + push(out, elem.asInstanceOf[T]) + offset += 1 + untilNextAdvanceSignal -= 1 + if (untilNextAdvanceSignal == 0) { + untilNextAdvanceSignal = DemandThreshold + val previousOffset = previousPublishedOffset + previousPublishedOffset += DemandThreshold + hubCallback.invoke(Advance(id, previousOffset)) + } + } } } - } - override def postStop(): Unit = { - if (hubCallback ne null) - hubCallback.invoke(UnRegister(id, previousPublishedOffset, offset)) - } + override def postStop(): Unit = { + if (hubCallback ne null) + hubCallback.invoke(UnRegister(id, previousPublishedOffset, offset)) + } - private def onCommand(cmd: ConsumerEvent): Unit = cmd match { - case HubCompleted(Some(ex)) => failStage(ex) - case HubCompleted(None) => completeStage() - case Wakeup => - if (isAvailable(out)) onPull() - case Initialize(initialOffset) => - offsetInitialized = true - previousPublishedOffset = initialOffset - offset = initialOffset - if (isAvailable(out) && (hubCallback ne null)) onPull() - } + private def onCommand(cmd: ConsumerEvent): Unit = cmd 
match { + case HubCompleted(Some(ex)) => failStage(ex) + case HubCompleted(None) => completeStage() + case Wakeup => + if (isAvailable(out)) onPull() + case Initialize(initialOffset) => + offsetInitialized = true + previousPublishedOffset = initialOffset + offset = initialOffset + if (isAvailable(out) && (hubCallback ne null)) onPull() + } - setHandler(out, this) - } + setHandler(out, this) + } } (logic, Source.fromGraph(source)) @@ -765,7 +773,8 @@ object PartitionHub { * @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer * is backpressured. */ - @ApiMayChange def statefulSink[T](partitioner: () => (ConsumerInfo, T) => Long, startAfterNrOfConsumers: Int, + @ApiMayChange def statefulSink[T](partitioner: () => (ConsumerInfo, T) => Long, + startAfterNrOfConsumers: Int, bufferSize: Int = defaultBufferSize): Sink[T, Source[T, NotUsed]] = Sink.fromGraph(new PartitionHub[T](partitioner, startAfterNrOfConsumers, bufferSize)) @@ -799,7 +808,8 @@ object PartitionHub { * is backpressured. 
*/ @ApiMayChange - def sink[T](partitioner: (Int, T) => Int, startAfterNrOfConsumers: Int, + def sink[T](partitioner: (Int, T) => Int, + startAfterNrOfConsumers: Int, bufferSize: Int = defaultBufferSize): Sink[T, Source[T, NotUsed]] = { val fun: (ConsumerInfo, T) => Long = { (info, elem) => val idx = partitioner(info.size, elem) @@ -859,7 +869,8 @@ object PartitionHub { case object Completed sealed trait HubState - final case class Open(callbackFuture: Future[AsyncCallback[HubEvent]], registrations: List[Consumer]) extends HubState + final case class Open(callbackFuture: Future[AsyncCallback[HubEvent]], registrations: List[Consumer]) + extends HubState final case class Closed(failure: Option[Throwable]) extends HubState // The reason for the two implementations here is that the common case (as I see it) is to have a few (< 100) @@ -982,7 +993,7 @@ object PartitionHub { override def remove(id: Long): Unit = { (if (id < FixedQueues) queues1.getAndSet(id.toInt, null) - else queues2.remove(id)) match { + else queues2.remove(id)) match { case null => case queue => _totalSize.addAndGet(-queue.size) } @@ -995,10 +1006,10 @@ object PartitionHub { /** * INTERNAL API */ -@InternalApi private[akka] class PartitionHub[T]( - partitioner: () => (PartitionHub.ConsumerInfo, T) => Long, - startAfterNrOfConsumers: Int, bufferSize: Int) - extends GraphStageWithMaterializedValue[SinkShape[T], Source[T, NotUsed]] { +@InternalApi private[akka] class PartitionHub[T](partitioner: () => (PartitionHub.ConsumerInfo, T) => Long, + startAfterNrOfConsumers: Int, + bufferSize: Int) + extends GraphStageWithMaterializedValue[SinkShape[T], Source[T, NotUsed]] { import PartitionHub.Internal._ import PartitionHub.ConsumerInfo @@ -1009,8 +1020,7 @@ object PartitionHub { // queue in Artery def createQueue(): PartitionQueue = new PartitionQueueImpl - private class PartitionSinkLogic(_shape: Shape) - extends GraphStageLogic(_shape) with InHandler { + private class PartitionSinkLogic(_shape: Shape) 
extends GraphStageLogic(_shape) with InHandler { // Half of buffer size, rounded up private val DemandThreshold = (bufferSize / 2) + (bufferSize % 2) @@ -1029,8 +1039,7 @@ object PartitionHub { private var callbackCount = 0L - private final class ConsumerInfoImpl(val consumers: Vector[Consumer]) - extends ConsumerInfo { info => + private final class ConsumerInfoImpl(val consumers: Vector[Consumer]) extends ConsumerInfo { info => override def queueSize(consumerId: Long): Int = queue.size(consumerId) @@ -1122,7 +1131,7 @@ object PartitionHub { tryPull() case RegistrationPending => - state.getAndSet(noRegistrationsState).asInstanceOf[Open].registrations foreach { consumer => + state.getAndSet(noRegistrationsState).asInstanceOf[Open].registrations.foreach { consumer => val newConsumers = (consumerInfo.consumers :+ consumer).sortBy(_.id) consumerInfo = new ConsumerInfoImpl(newConsumers) queue.init(consumer.id) @@ -1155,7 +1164,7 @@ object PartitionHub { val failMessage = HubCompleted(Some(ex)) // Notify pending consumers and set tombstone - state.getAndSet(Closed(Some(ex))).asInstanceOf[Open].registrations foreach { consumer => + state.getAndSet(Closed(Some(ex))).asInstanceOf[Open].registrations.foreach { consumer => consumer.callback.invoke(failMessage) } @@ -1174,7 +1183,7 @@ object PartitionHub { case open: Open => if (state.compareAndSet(open, Closed(None))) { val completedMessage = HubCompleted(None) - open.registrations foreach { consumer => + open.registrations.foreach { consumer => consumer.callback.invoke(completedMessage) } } else tryClose() @@ -1196,7 +1205,8 @@ object PartitionHub { setHandler(in, this) } - override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Source[T, NotUsed]) = { + override def createLogicAndMaterializedValue( + inheritedAttributes: Attributes): (GraphStageLogic, Source[T, NotUsed]) = { val idCounter = new AtomicLong val logic = new PartitionSinkLogic(shape) @@ -1205,74 +1215,75 @@ object 
PartitionHub { val out: Outlet[T] = Outlet("PartitionHub.out") override val shape: SourceShape[T] = SourceShape(out) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { - private val id = idCounter.getAndIncrement() - private var hubCallback: AsyncCallback[HubEvent] = _ - private val callback = getAsyncCallback(onCommand) - private val consumer = Consumer(id, callback) + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with OutHandler { + private val id = idCounter.getAndIncrement() + private var hubCallback: AsyncCallback[HubEvent] = _ + private val callback = getAsyncCallback(onCommand) + private val consumer = Consumer(id, callback) - private var callbackCount = 0L + private var callbackCount = 0L + + override def preStart(): Unit = { + val onHubReady: Try[AsyncCallback[HubEvent]] => Unit = { + case Success(callback) => + hubCallback = callback + callback.invoke(RegistrationPending) + if (isAvailable(out)) onPull() + case Failure(ex) => + failStage(ex) + } + + @tailrec def register(): Unit = { + logic.state.get() match { + case Closed(Some(ex)) => failStage(ex) + case Closed(None) => completeStage() + case previousState @ Open(callbackFuture, registrations) => + val newRegistrations = consumer :: registrations + if (logic.state.compareAndSet(previousState, Open(callbackFuture, newRegistrations))) { + callbackFuture.onComplete(getAsyncCallback(onHubReady).invoke)(materializer.executionContext) + } else register() + } + } + + register() - override def preStart(): Unit = { - val onHubReady: Try[AsyncCallback[HubEvent]] => Unit = { - case Success(callback) => - hubCallback = callback - callback.invoke(RegistrationPending) - if (isAvailable(out)) onPull() - case Failure(ex) => - failStage(ex) } - @tailrec def register(): Unit = { - logic.state.get() match { - case Closed(Some(ex)) => failStage(ex) - case Closed(None) => completeStage() - 
case previousState @ Open(callbackFuture, registrations) => - val newRegistrations = consumer :: registrations - if (logic.state.compareAndSet(previousState, Open(callbackFuture, newRegistrations))) { - callbackFuture.onComplete(getAsyncCallback(onHubReady).invoke)(materializer.executionContext) - } else register() + override def onPull(): Unit = { + if (hubCallback ne null) { + val elem = logic.poll(id, hubCallback) + + elem match { + case null => + hubCallback.invoke(NeedWakeup(consumer)) + case Completed => + completeStage() + case _ => + push(out, elem.asInstanceOf[T]) + } } } - register() + override def postStop(): Unit = { + if (hubCallback ne null) + hubCallback.invoke(UnRegister(id)) + } - } - - override def onPull(): Unit = { - if (hubCallback ne null) { - val elem = logic.poll(id, hubCallback) - - elem match { - case null => - hubCallback.invoke(NeedWakeup(consumer)) - case Completed => - completeStage() - case _ => - push(out, elem.asInstanceOf[T]) + private def onCommand(cmd: ConsumerEvent): Unit = { + callbackCount += 1 + cmd match { + case HubCompleted(Some(ex)) => failStage(ex) + case HubCompleted(None) => completeStage() + case Wakeup => + if (isAvailable(out)) onPull() + case Initialize => + if (isAvailable(out) && (hubCallback ne null)) onPull() } } - } - override def postStop(): Unit = { - if (hubCallback ne null) - hubCallback.invoke(UnRegister(id)) + setHandler(out, this) } - - private def onCommand(cmd: ConsumerEvent): Unit = { - callbackCount += 1 - cmd match { - case HubCompleted(Some(ex)) => failStage(ex) - case HubCompleted(None) => completeStage() - case Wakeup => - if (isAvailable(out)) onPull() - case Initialize => - if (isAvailable(out) && (hubCallback ne null)) onPull() - } - } - - setHandler(out, this) - } } (logic, Source.fromGraph(source)) diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala index 2befc0084f..a27208d96f 100644 --- 
a/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/JsonFraming.scala @@ -8,7 +8,7 @@ import akka.NotUsed import akka.stream.Attributes import akka.stream.impl.JsonObjectParser import akka.stream.impl.fusing.GraphStages.SimpleLinearGraphStage -import akka.stream.stage.{ InHandler, OutHandler, GraphStageLogic } +import akka.stream.stage.{ GraphStageLogic, InHandler, OutHandler } import akka.util.ByteString import scala.util.control.NonFatal @@ -45,35 +45,36 @@ object JsonFraming { override protected def initialAttributes: Attributes = Attributes.name("JsonFraming.objectScanner") - override def createLogic(inheritedAttributes: Attributes) = new GraphStageLogic(shape) with InHandler with OutHandler { - private val buffer = new JsonObjectParser(maximumObjectLength) + override def createLogic(inheritedAttributes: Attributes) = + new GraphStageLogic(shape) with InHandler with OutHandler { + private val buffer = new JsonObjectParser(maximumObjectLength) - setHandlers(in, out, this) + setHandlers(in, out, this) - override def onPush(): Unit = { - buffer.offer(grab(in)) - tryPopBuffer() - } + override def onPush(): Unit = { + buffer.offer(grab(in)) + tryPopBuffer() + } - override def onPull(): Unit = - tryPopBuffer() + override def onPull(): Unit = + tryPopBuffer() - override def onUpstreamFinish(): Unit = { - buffer.poll() match { - case Some(json) => emit(out, json) - case _ => completeStage() + override def onUpstreamFinish(): Unit = { + buffer.poll() match { + case Some(json) => emit(out, json) + case _ => completeStage() + } + } + + def tryPopBuffer() = { + try buffer.poll() match { + case Some(json) => push(out, json) + case _ => if (isClosed(in)) completeStage() else pull(in) + } catch { + case NonFatal(ex) => failStage(ex) + } } } - - def tryPopBuffer() = { - try buffer.poll() match { - case Some(json) => push(out, json) - case _ => if (isClosed(in)) completeStage() else pull(in) - } catch { - 
case NonFatal(ex) => failStage(ex) - } - } - } }) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala index 27513ea565..12566f092a 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/MergeLatest.scala @@ -23,6 +23,7 @@ import scala.collection.immutable * */ object MergeLatest { + /** * Create a new `MergeLatest` with the specified number of input ports. * @@ -34,51 +35,54 @@ object MergeLatest { } -final class MergeLatest[T, M](val inputPorts: Int, val eagerClose: Boolean)(buildElem: Array[T] => M) extends GraphStage[UniformFanInShape[T, M]] { +final class MergeLatest[T, M](val inputPorts: Int, val eagerClose: Boolean)(buildElem: Array[T] => M) + extends GraphStage[UniformFanInShape[T, M]] { require(inputPorts >= 1, "input ports must be >= 1") val in: immutable.IndexedSeq[Inlet[T]] = Vector.tabulate(inputPorts)(i => Inlet[T]("MergeLatest.in" + i)) val out: Outlet[M] = Outlet[M]("MergeLatest.out") override val shape: UniformFanInShape[T, M] = UniformFanInShape(out, in: _*) - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with OutHandler { - private val activeStreams: java.util.HashSet[Int] = new java.util.HashSet[Int]() - private var runningUpstreams: Int = inputPorts - private def upstreamsClosed: Boolean = runningUpstreams == 0 - private def allMessagesReady: Boolean = activeStreams.size == inputPorts - private val messages: Array[Any] = new Array[Any](inputPorts) + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new GraphStageLogic(shape) with OutHandler { + private val activeStreams: java.util.HashSet[Int] = new java.util.HashSet[Int]() + private var runningUpstreams: Int = inputPorts + private def upstreamsClosed: Boolean = runningUpstreams == 0 + private def allMessagesReady: Boolean = 
activeStreams.size == inputPorts + private val messages: Array[Any] = new Array[Any](inputPorts) - override def preStart(): Unit = in.foreach(tryPull) + override def preStart(): Unit = in.foreach(tryPull) - in.zipWithIndex.foreach { - case (input, index) => - setHandler(input, new InHandler { - override def onPush(): Unit = { - messages.update(index, grab(input)) - activeStreams.add(index) - if (allMessagesReady) emit(out, buildElem(messages.asInstanceOf[Array[T]])) - tryPull(input) - } + in.zipWithIndex.foreach { + case (input, index) => + setHandler(input, + new InHandler { + override def onPush(): Unit = { + messages.update(index, grab(input)) + activeStreams.add(index) + if (allMessagesReady) emit(out, buildElem(messages.asInstanceOf[Array[T]])) + tryPull(input) + } - override def onUpstreamFinish(): Unit = { - if (!eagerClose) { - runningUpstreams -= 1 - if (upstreamsClosed) completeStage() - } else completeStage() - } - }) - } - - override def onPull(): Unit = { - var i = 0 - while (i < inputPorts) { - if (!hasBeenPulled(in(i))) tryPull(in(i)) - i += 1 + override def onUpstreamFinish(): Unit = { + if (!eagerClose) { + runningUpstreams -= 1 + if (upstreamsClosed) completeStage() + } else completeStage() + } + }) } - } - setHandler(out, this) - } + override def onPull(): Unit = { + var i = 0 + while (i < inputPorts) { + if (!hasBeenPulled(in(i))) tryPull(in(i)) + i += 1 + } + } + + setHandler(out, this) + } override def toString = "MergeLatest" } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Queue.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Queue.scala index 43d078d706..2bb5008b4e 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Queue.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Queue.scala @@ -43,6 +43,7 @@ trait SourceQueue[T] { * This trait adds completion support to [[SourceQueue]]. */ trait SourceQueueWithComplete[T] extends SourceQueue[T] { + /** * Complete the stream normally. 
Use `watchCompletion` to be notified of this * operation’s success. @@ -88,6 +89,7 @@ trait SinkQueue[T] { * This trait adds cancel support to [[SinkQueue]]. */ trait SinkQueueWithCancel[T] extends SinkQueue[T] { + /** * Cancel the stream. This method returns right away without waiting for actual finalizing stream. */ diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala index 77c38ef3d1..21387abda5 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartFlow.scala @@ -48,8 +48,15 @@ object RestartFlow { * In order to skip this additional delay pass in `0`. * @param flowFactory A factory for producing the [[Flow]] to wrap. */ - def withBackoff[In, Out](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)(flowFactory: () => Flow[In, Out, _]): Flow[In, Out, NotUsed] = { - Flow.fromGraph(new RestartWithBackoffFlow(flowFactory, minBackoff, maxBackoff, randomFactor, onlyOnFailures = false, Int.MaxValue)) + def withBackoff[In, Out](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)( + flowFactory: () => Flow[In, Out, _]): Flow[In, Out, NotUsed] = { + Flow.fromGraph( + new RestartWithBackoffFlow(flowFactory, + minBackoff, + maxBackoff, + randomFactor, + onlyOnFailures = false, + Int.MaxValue)) } /** @@ -77,8 +84,17 @@ object RestartFlow { * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param flowFactory A factory for producing the [[Flow]] to wrap. 
*/ - def withBackoff[In, Out](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, maxRestarts: Int)(flowFactory: () => Flow[In, Out, _]): Flow[In, Out, NotUsed] = { - Flow.fromGraph(new RestartWithBackoffFlow(flowFactory, minBackoff, maxBackoff, randomFactor, onlyOnFailures = false, maxRestarts)) + def withBackoff[In, Out](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxRestarts: Int)(flowFactory: () => Flow[In, Out, _]): Flow[In, Out, NotUsed] = { + Flow.fromGraph( + new RestartWithBackoffFlow(flowFactory, + minBackoff, + maxBackoff, + randomFactor, + onlyOnFailures = false, + maxRestarts)) } /** @@ -107,86 +123,91 @@ object RestartFlow { * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param flowFactory A factory for producing the [[Flow]] to wrap. */ - def onFailuresWithBackoff[In, Out](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, maxRestarts: Int)(flowFactory: () => Flow[In, Out, _]): Flow[In, Out, NotUsed] = { - Flow.fromGraph(new RestartWithBackoffFlow(flowFactory, minBackoff, maxBackoff, randomFactor, onlyOnFailures = true, maxRestarts)) + def onFailuresWithBackoff[In, Out](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxRestarts: Int)(flowFactory: () => Flow[In, Out, _]): Flow[In, Out, NotUsed] = { + Flow.fromGraph( + new RestartWithBackoffFlow(flowFactory, minBackoff, maxBackoff, randomFactor, onlyOnFailures = true, maxRestarts)) } } -private final class RestartWithBackoffFlow[In, Out]( - flowFactory: () => Flow[In, Out, _], - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - onlyOnFailures: Boolean, - maxRestarts: Int) extends GraphStage[FlowShape[In, Out]] { self => +private final class RestartWithBackoffFlow[In, Out](flowFactory: () => Flow[In, Out, _], + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, 
+ onlyOnFailures: Boolean, + maxRestarts: Int) + extends GraphStage[FlowShape[In, Out]] { self => val in = Inlet[In]("RestartWithBackoffFlow.in") val out = Outlet[Out]("RestartWithBackoffFlow.out") override def shape = FlowShape(in, out) - override def createLogic(inheritedAttributes: Attributes) = new RestartWithBackoffLogic( - "Flow", shape, minBackoff, maxBackoff, randomFactor, onlyOnFailures, maxRestarts) { - val delay = inheritedAttributes.get[Delay](Delay(50.millis)).duration + override def createLogic(inheritedAttributes: Attributes) = + new RestartWithBackoffLogic("Flow", shape, minBackoff, maxBackoff, randomFactor, onlyOnFailures, maxRestarts) { + val delay = inheritedAttributes.get[Delay](Delay(50.millis)).duration - var activeOutIn: Option[(SubSourceOutlet[In], SubSinkInlet[Out])] = None + var activeOutIn: Option[(SubSourceOutlet[In], SubSinkInlet[Out])] = None - override protected def logSource = self.getClass + override protected def logSource = self.getClass - override protected def startGraph() = { - val sourceOut: SubSourceOutlet[In] = createSubOutlet(in) - val sinkIn: SubSinkInlet[Out] = createSubInlet(out) + override protected def startGraph() = { + val sourceOut: SubSourceOutlet[In] = createSubOutlet(in) + val sinkIn: SubSinkInlet[Out] = createSubInlet(out) - Source.fromGraph(sourceOut.source) - // Temp fix while waiting cause of cancellation. See #23909 - .via(RestartWithBackoffFlow.delayCancellation[In](delay)) - .via(flowFactory()) - .runWith(sinkIn.sink)(subFusingMaterializer) + Source + .fromGraph(sourceOut.source) + // Temp fix while waiting cause of cancellation. 
See #23909 + .via(RestartWithBackoffFlow.delayCancellation[In](delay)) + .via(flowFactory()) + .runWith(sinkIn.sink)(subFusingMaterializer) - if (isAvailable(out)) { - sinkIn.pull() + if (isAvailable(out)) { + sinkIn.pull() + } + activeOutIn = Some((sourceOut, sinkIn)) } - activeOutIn = Some((sourceOut, sinkIn)) - } - override protected def backoff() = { - setHandler(in, new InHandler { - override def onPush() = () - }) - setHandler(out, new OutHandler { - override def onPull() = () - }) + override protected def backoff() = { + setHandler(in, new InHandler { + override def onPush() = () + }) + setHandler(out, new OutHandler { + override def onPull() = () + }) - // We need to ensure that the other end of the sub flow is also completed, so that we don't - // receive any callbacks from it. - activeOutIn.foreach { - case (sourceOut, sinkIn) => - if (!sourceOut.isClosed) { - sourceOut.complete() - } - if (!sinkIn.isClosed) { - sinkIn.cancel() - } - activeOutIn = None + // We need to ensure that the other end of the sub flow is also completed, so that we don't + // receive any callbacks from it. + activeOutIn.foreach { + case (sourceOut, sinkIn) => + if (!sourceOut.isClosed) { + sourceOut.complete() + } + if (!sinkIn.isClosed) { + sinkIn.cancel() + } + activeOutIn = None + } } - } - backoff() - } + backoff() + } } /** * Shared logic for all restart with backoff logics. 
*/ -private abstract class RestartWithBackoffLogic[S <: Shape]( - name: String, - shape: S, - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - onlyOnFailures: Boolean, - maxRestarts: Int) extends TimerGraphStageLogicWithLogging(shape) { +private abstract class RestartWithBackoffLogic[S <: Shape](name: String, + shape: S, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + onlyOnFailures: Boolean, + maxRestarts: Int) + extends TimerGraphStageLogicWithLogging(shape) { var restartCount = 0 var resetDeadline = minBackoff.fromNow @@ -246,13 +267,14 @@ private abstract class RestartWithBackoffLogic[S <: Shape]( val sourceOut = new SubSourceOutlet[T](s"RestartWithBackoff$name.subOut") sourceOut.setHandler(new OutHandler { - override def onPull() = if (isAvailable(in)) { - sourceOut.push(grab(in)) - } else { - if (!hasBeenPulled(in)) { - pull(in) + override def onPull() = + if (isAvailable(in)) { + sourceOut.push(grab(in)) + } else { + if (!hasBeenPulled(in)) { + pull(in) + } } - } /* * Downstream in this context is the wrapped stage. 
@@ -269,19 +291,20 @@ private abstract class RestartWithBackoffLogic[S <: Shape]( } }) - setHandler(in, new InHandler { - override def onPush() = if (sourceOut.isAvailable) { - sourceOut.push(grab(in)) - } - override def onUpstreamFinish() = { - finishing = true - sourceOut.complete() - } - override def onUpstreamFailure(ex: Throwable) = { - finishing = true - sourceOut.fail(ex) - } - }) + setHandler(in, + new InHandler { + override def onPush() = if (sourceOut.isAvailable) { + sourceOut.push(grab(in)) + } + override def onUpstreamFinish() = { + finishing = true + sourceOut.complete() + } + override def onUpstreamFailure(ex: Throwable) = { + finishing = true + sourceOut.fail(ex) + } + }) sourceOut } @@ -338,27 +361,25 @@ object RestartWithBackoffFlow { private final class DelayCancellationStage[T](delay: FiniteDuration) extends SimpleLinearGraphStage[T] { - override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new TimerGraphStageLogic(shape) with InHandler with OutHandler with StageLogging { + override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = + new TimerGraphStageLogic(shape) with InHandler with OutHandler with StageLogging { - setHandlers(in, out, this) + setHandlers(in, out, this) - def onPush(): Unit = push(out, grab(in)) - def onPull(): Unit = pull(in) + def onPush(): Unit = push(out, grab(in)) + def onPull(): Unit = pull(in) - override def onDownstreamFinish(): Unit = { - scheduleOnce("CompleteState", delay) - setHandler( - in, - new InHandler { + override def onDownstreamFinish(): Unit = { + scheduleOnce("CompleteState", delay) + setHandler(in, new InHandler { def onPush(): Unit = {} - } - ) - } + }) + } - override protected def onTimer(timerKey: Any): Unit = { - log.debug(s"Stage was canceled after delay of $delay") - completeStage() + override protected def onTimer(timerKey: Any): Unit = { + log.debug(s"Stage was canceled after delay of $delay") + completeStage() + } } - } } } diff --git 
a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSink.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSink.scala index 8ddb7b6d12..0912355707 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSink.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSink.scala @@ -43,7 +43,8 @@ object RestartSink { * In order to skip this additional delay pass in `0`. * @param sinkFactory A factory for producing the [[Sink]] to wrap. */ - def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)(sinkFactory: () => Sink[T, _]): Sink[T, NotUsed] = { + def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)( + sinkFactory: () => Sink[T, _]): Sink[T, NotUsed] = { Sink.fromGraph(new RestartWithBackoffSink(sinkFactory, minBackoff, maxBackoff, randomFactor, Int.MaxValue)) } @@ -73,36 +74,43 @@ object RestartSink { * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param sinkFactory A factory for producing the [[Sink]] to wrap. 
*/ - def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, maxRestarts: Int)(sinkFactory: () => Sink[T, _]): Sink[T, NotUsed] = { + def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, maxRestarts: Int)( + sinkFactory: () => Sink[T, _]): Sink[T, NotUsed] = { Sink.fromGraph(new RestartWithBackoffSink(sinkFactory, minBackoff, maxBackoff, randomFactor, maxRestarts)) } } -private final class RestartWithBackoffSink[T]( - sinkFactory: () => Sink[T, _], - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - maxRestarts: Int) extends GraphStage[SinkShape[T]] { self => +private final class RestartWithBackoffSink[T](sinkFactory: () => Sink[T, _], + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxRestarts: Int) + extends GraphStage[SinkShape[T]] { self => val in = Inlet[T]("RestartWithBackoffSink.in") override def shape = SinkShape(in) - override def createLogic(inheritedAttributes: Attributes) = new RestartWithBackoffLogic( - "Sink", shape, minBackoff, maxBackoff, randomFactor, onlyOnFailures = false, maxRestarts) { - override protected def logSource = self.getClass + override def createLogic(inheritedAttributes: Attributes) = + new RestartWithBackoffLogic("Sink", + shape, + minBackoff, + maxBackoff, + randomFactor, + onlyOnFailures = false, + maxRestarts) { + override protected def logSource = self.getClass - override protected def startGraph() = { - val sourceOut = createSubOutlet(in) - Source.fromGraph(sourceOut.source).runWith(sinkFactory())(subFusingMaterializer) + override protected def startGraph() = { + val sourceOut = createSubOutlet(in) + Source.fromGraph(sourceOut.source).runWith(sinkFactory())(subFusingMaterializer) + } + + override protected def backoff() = { + setHandler(in, new InHandler { + override def onPush() = () + }) + } + + backoff() } - - override protected def backoff() = { - setHandler(in, new 
InHandler { - override def onPush() = () - }) - } - - backoff() - } } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala index 0a68ca9f24..81adf95a77 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/RestartSource.scala @@ -39,8 +39,15 @@ object RestartSource { * In order to skip this additional delay pass in `0`. * @param sourceFactory A factory for producing the [[Source]] to wrap. */ - def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)(sourceFactory: () => Source[T, _]): Source[T, NotUsed] = { - Source.fromGraph(new RestartWithBackoffSource(sourceFactory, minBackoff, maxBackoff, randomFactor, onlyOnFailures = false, Int.MaxValue)) + def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)( + sourceFactory: () => Source[T, _]): Source[T, NotUsed] = { + Source.fromGraph( + new RestartWithBackoffSource(sourceFactory, + minBackoff, + maxBackoff, + randomFactor, + onlyOnFailures = false, + Int.MaxValue)) } /** @@ -66,8 +73,15 @@ object RestartSource { * Passing `0` will cause no restarts and a negative number will not cap the amount of restarts. * @param sourceFactory A factory for producing the [[Source]] to wrap. 
*/ - def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, maxRestarts: Int)(sourceFactory: () => Source[T, _]): Source[T, NotUsed] = { - Source.fromGraph(new RestartWithBackoffSource(sourceFactory, minBackoff, maxBackoff, randomFactor, onlyOnFailures = false, maxRestarts)) + def withBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, maxRestarts: Int)( + sourceFactory: () => Source[T, _]): Source[T, NotUsed] = { + Source.fromGraph( + new RestartWithBackoffSource(sourceFactory, + minBackoff, + maxBackoff, + randomFactor, + onlyOnFailures = false, + maxRestarts)) } /** @@ -90,8 +104,15 @@ object RestartSource { * @param sourceFactory A factory for producing the [[Source]] to wrap. * */ - def onFailuresWithBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)(sourceFactory: () => Source[T, _]): Source[T, NotUsed] = { - Source.fromGraph(new RestartWithBackoffSource(sourceFactory, minBackoff, maxBackoff, randomFactor, onlyOnFailures = true, Int.MaxValue)) + def onFailuresWithBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double)( + sourceFactory: () => Source[T, _]): Source[T, NotUsed] = { + Source.fromGraph( + new RestartWithBackoffSource(sourceFactory, + minBackoff, + maxBackoff, + randomFactor, + onlyOnFailures = true, + Int.MaxValue)) } /** @@ -117,41 +138,50 @@ object RestartSource { * @param sourceFactory A factory for producing the [[Source]] to wrap. 
* */ - def onFailuresWithBackoff[T](minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, maxRestarts: Int)(sourceFactory: () => Source[T, _]): Source[T, NotUsed] = { - Source.fromGraph(new RestartWithBackoffSource(sourceFactory, minBackoff, maxBackoff, randomFactor, onlyOnFailures = true, maxRestarts)) + def onFailuresWithBackoff[T](minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + maxRestarts: Int)(sourceFactory: () => Source[T, _]): Source[T, NotUsed] = { + Source.fromGraph( + new RestartWithBackoffSource(sourceFactory, + minBackoff, + maxBackoff, + randomFactor, + onlyOnFailures = true, + maxRestarts)) } } -private final class RestartWithBackoffSource[T]( - sourceFactory: () => Source[T, _], - minBackoff: FiniteDuration, - maxBackoff: FiniteDuration, - randomFactor: Double, - onlyOnFailures: Boolean, - maxRestarts: Int) extends GraphStage[SourceShape[T]] { self => +private final class RestartWithBackoffSource[T](sourceFactory: () => Source[T, _], + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double, + onlyOnFailures: Boolean, + maxRestarts: Int) + extends GraphStage[SourceShape[T]] { self => val out = Outlet[T]("RestartWithBackoffSource.out") override def shape = SourceShape(out) - override def createLogic(inheritedAttributes: Attributes) = new RestartWithBackoffLogic( - "Source", shape, minBackoff, maxBackoff, randomFactor, onlyOnFailures, maxRestarts) { + override def createLogic(inheritedAttributes: Attributes) = + new RestartWithBackoffLogic("Source", shape, minBackoff, maxBackoff, randomFactor, onlyOnFailures, maxRestarts) { - override protected def logSource = self.getClass + override protected def logSource = self.getClass - override protected def startGraph() = { - val sinkIn = createSubInlet(out) - sourceFactory().runWith(sinkIn.sink)(subFusingMaterializer) - if (isAvailable(out)) { - sinkIn.pull() + override protected def startGraph() = { + val sinkIn = 
createSubInlet(out) + sourceFactory().runWith(sinkIn.sink)(subFusingMaterializer) + if (isAvailable(out)) { + sinkIn.pull() + } } - } - override protected def backoff() = { - setHandler(out, new OutHandler { - override def onPull() = () - }) - } + override protected def backoff() = { + setHandler(out, new OutHandler { + override def onPull() = () + }) + } - backoff() - } + backoff() + } } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala index ecfff583c0..70e9b355ca 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Sink.scala @@ -29,10 +29,8 @@ import scala.annotation.unchecked.uncheckedVariance * A `Sink` is a set of stream processing steps that has one open input. * Can be used as a `Subscriber` */ -final class Sink[-In, +Mat]( - override val traversalBuilder: LinearTraversalBuilder, - override val shape: SinkShape[In]) - extends Graph[SinkShape[In], Mat] { +final class Sink[-In, +Mat](override val traversalBuilder: LinearTraversalBuilder, override val shape: SinkShape[In]) + extends Graph[SinkShape[In], Mat] { // TODO: Debug string override def toString: String = s"Sink($shape)" @@ -58,9 +56,7 @@ final class Sink[-In, +Mat]( * Transform only the materialized value of this Sink, leaving all other properties as they were. */ def mapMaterializedValue[Mat2](f: Mat => Mat2): Sink[In, Mat2] = - new Sink( - traversalBuilder.transformMat(f.asInstanceOf[Any => Any]), - shape) + new Sink(traversalBuilder.transformMat(f.asInstanceOf[Any => Any]), shape) /** * Materializes this Sink, immediately returning (1) its materialized value, and (2) a new Sink @@ -79,9 +75,7 @@ final class Sink[-In, +Mat]( * set directly on the individual graphs of the composite. 
*/ override def withAttributes(attr: Attributes): Sink[In, Mat] = - new Sink( - traversalBuilder.setAttributes(attr), - shape) + new Sink(traversalBuilder.setAttributes(attr), shape) /** * Add the given attributes to this [[Sink]]. If the specific attribute was already present @@ -136,21 +130,18 @@ object Sink { */ def fromGraph[T, M](g: Graph[SinkShape[T], M]): Sink[T, M] = g match { - case s: Sink[T, M] => s - case s: javadsl.Sink[T, M] => s.asScala + case s: Sink[T, M] => s + case s: javadsl.Sink[T, M] => s.asScala case g: GraphStageWithMaterializedValue[SinkShape[T], M] => // move these from the stage itself to make the returned source // behave as it is the stage with regards to attributes val attrs = g.traversalBuilder.attributes val noAttrStage = g.withAttributes(Attributes.none) - new Sink( - LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right), - noAttrStage.shape - ).withAttributes(attrs) + new Sink(LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right), + noAttrStage.shape).withAttributes(attrs) - case other => new Sink( - LinearTraversalBuilder.fromBuilder(other.traversalBuilder, other.shape, Keep.right), - other.shape) + case other => + new Sink(LinearTraversalBuilder.fromBuilder(other.traversalBuilder, other.shape, Keep.right), other.shape) } /** @@ -173,8 +164,12 @@ object Sink { * See also [[headOption]]. 
*/ def head[T]: Sink[T, Future[T]] = - Sink.fromGraph(new HeadOptionStage[T]).withAttributes(DefaultAttributes.headSink) - .mapMaterializedValue(e => e.map(_.getOrElse(throw new NoSuchElementException("head of empty stream")))(ExecutionContexts.sameThreadExecutionContext)) + Sink + .fromGraph(new HeadOptionStage[T]) + .withAttributes(DefaultAttributes.headSink) + .mapMaterializedValue(e => + e.map(_.getOrElse(throw new NoSuchElementException("head of empty stream")))( + ExecutionContexts.sameThreadExecutionContext)) /** * A `Sink` that materializes into a `Future` of the optional first value received. @@ -194,10 +189,10 @@ object Sink { * See also [[lastOption]], [[takeLast]]. */ def last[T]: Sink[T, Future[T]] = { - Sink.fromGraph(new TakeLastStage[T](1)).withAttributes(DefaultAttributes.lastSink) - .mapMaterializedValue { e => - e.map(_.headOption.getOrElse(throw new NoSuchElementException("last of empty stream")))(ExecutionContexts.sameThreadExecutionContext) - } + Sink.fromGraph(new TakeLastStage[T](1)).withAttributes(DefaultAttributes.lastSink).mapMaterializedValue { e => + e.map(_.headOption.getOrElse(throw new NoSuchElementException("last of empty stream")))( + ExecutionContexts.sameThreadExecutionContext) + } } /** @@ -208,10 +203,9 @@ object Sink { * See also [[last]], [[takeLast]]. */ def lastOption[T]: Sink[T, Future[Option[T]]] = { - Sink.fromGraph(new TakeLastStage[T](1)).withAttributes(DefaultAttributes.lastOptionSink) - .mapMaterializedValue { e => - e.map(_.headOption)(ExecutionContexts.sameThreadExecutionContext) - } + Sink.fromGraph(new TakeLastStage[T](1)).withAttributes(DefaultAttributes.lastOptionSink).mapMaterializedValue { e => + e.map(_.headOption)(ExecutionContexts.sameThreadExecutionContext) + } } /** @@ -292,8 +286,8 @@ object Sink { /** * Combine several sinks with fan-out strategy like `Broadcast` or `Balance` and returns `Sink`. 
*/ - def combine[T, U](first: Sink[U, _], second: Sink[U, _], rest: Sink[U, _]*)(strategy: Int => Graph[UniformFanOutShape[T, U], NotUsed]): Sink[T, NotUsed] = - + def combine[T, U](first: Sink[U, _], second: Sink[U, _], rest: Sink[U, _]*)( + strategy: Int => Graph[UniformFanOutShape[T, U], NotUsed]): Sink[T, NotUsed] = Sink.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ val d = b.add(strategy(rest.size + 2)) @@ -322,7 +316,9 @@ object Sink { * * See also [[Flow.mapAsyncUnordered]] */ - @deprecated("Use `foreachAsync` instead, it allows you to choose how to run the procedure, by calling some other API returning a Future or spawning a new Future.", since = "2.5.17") + @deprecated( + "Use `foreachAsync` instead, it allows you to choose how to run the procedure, by calling some other API returning a Future or spawning a new Future.", + since = "2.5.17") def foreachParallel[T](parallelism: Int)(f: T => Unit)(implicit ec: ExecutionContext): Sink[T, Future[Done]] = Flow[T].mapAsyncUnordered(parallelism)(t => Future(f(t))).toMat(Sink.ignore)(Keep.right) @@ -347,7 +343,8 @@ object Sink { * * @see [[#fold]] */ - def foldAsync[U, T](zero: U)(f: (U, T) => Future[U]): Sink[T, Future[U]] = Flow[T].foldAsync(zero)(f).toMat(Sink.head)(Keep.right).named("foldAsyncSink") + def foldAsync[U, T](zero: U)(f: (U, T) => Future[U]): Sink[T, Future[U]] = + Flow[T].foldAsync(zero)(f).toMat(Sink.head)(Keep.right).named("foldAsyncSink") /** * A `Sink` that will invoke the given function for every received element, giving it its previous @@ -430,9 +427,11 @@ object Sink { * to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate * limiting operator in front of this `Sink`. 
*/ - @InternalApi private[akka] def actorRef[T](ref: ActorRef, onCompleteMessage: Any, onFailureMessage: Throwable => Any): Sink[T, NotUsed] = - fromGraph(new ActorRefSink(ref, onCompleteMessage, onFailureMessage, - DefaultAttributes.actorRefSink, shape("ActorRefSink"))) + @InternalApi private[akka] def actorRef[T](ref: ActorRef, + onCompleteMessage: Any, + onFailureMessage: Throwable => Any): Sink[T, NotUsed] = + fromGraph( + new ActorRefSink(ref, onCompleteMessage, onFailureMessage, DefaultAttributes.actorRefSink, shape("ActorRefSink"))) /** * Sends the elements of the stream to the given `ActorRef`. @@ -450,8 +449,12 @@ object Sink { * limiting operator in front of this `Sink`. */ def actorRef[T](ref: ActorRef, onCompleteMessage: Any): Sink[T, NotUsed] = - fromGraph(new ActorRefSink(ref, onCompleteMessage, t => Status.Failure(t), - DefaultAttributes.actorRefSink, shape("ActorRefSink"))) + fromGraph( + new ActorRefSink(ref, + onCompleteMessage, + t => Status.Failure(t), + DefaultAttributes.actorRefSink, + shape("ActorRefSink"))) /** * INTERNAL API @@ -473,10 +476,19 @@ object Sink { * When the stream is completed with failure - result of `onFailureMessage(throwable)` * function will be sent to the destination actor. 
*/ - @InternalApi private[akka] def actorRefWithAck[T](ref: ActorRef, messageAdapter: ActorRef => T => Any, - onInitMessage: ActorRef => Any, ackMessage: Any, onCompleteMessage: Any, + @InternalApi private[akka] def actorRefWithAck[T](ref: ActorRef, + messageAdapter: ActorRef => T => Any, + onInitMessage: ActorRef => Any, + ackMessage: Any, + onCompleteMessage: Any, onFailureMessage: (Throwable) => Any): Sink[T, NotUsed] = - Sink.fromGraph(new ActorRefBackpressureSinkStage(ref, messageAdapter, onInitMessage, ackMessage, onCompleteMessage, onFailureMessage)) + Sink.fromGraph( + new ActorRefBackpressureSinkStage(ref, + messageAdapter, + onInitMessage, + ackMessage, + onCompleteMessage, + onFailureMessage)) /** * Sends the elements of the stream to the given `ActorRef` that sends back back-pressure signal. @@ -492,7 +504,10 @@ object Sink { * function will be sent to the destination actor. * */ - def actorRefWithAck[T](ref: ActorRef, onInitMessage: Any, ackMessage: Any, onCompleteMessage: Any, + def actorRefWithAck[T](ref: ActorRef, + onInitMessage: Any, + ackMessage: Any, + onCompleteMessage: Any, onFailureMessage: (Throwable) => Any = Status.Failure): Sink[T, NotUsed] = actorRefWithAck(ref, _ => identity, _ => onInitMessage, ackMessage, onCompleteMessage, onFailureMessage) @@ -503,7 +518,9 @@ object Sink { * * @deprecated Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. 
*/ - @deprecated("Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") + @deprecated( + "Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") def actorSubscriber[T](props: Props): Sink[T, ActorRef] = { require(classOf[ActorSubscriber].isAssignableFrom(props.actorClass()), "Actor must be ActorSubscriber") fromGraph(new ActorSubscriberSink(props, DefaultAttributes.actorSubscriberSink, shape("ActorSubscriberSink"))) @@ -539,9 +556,13 @@ object Sink { * Otherwise the `Future` is completed with the materialized value of the internal sink. */ @Deprecated - @deprecated("Use lazyInitAsync instead. (lazyInitAsync no more needs a fallback function and the materialized value more clearly indicates if the internal sink was materialized or not.)", "2.5.11") + @deprecated( + "Use lazyInitAsync instead. (lazyInitAsync no more needs a fallback function and the materialized value more clearly indicates if the internal sink was materialized or not.)", + "2.5.11") def lazyInit[T, M](sinkFactory: T => Future[Sink[T, M]], fallback: () => M): Sink[T, Future[M]] = - Sink.fromGraph(new LazySink[T, M](sinkFactory)).mapMaterializedValue(_.map(_.getOrElse(fallback()))(ExecutionContexts.sameThreadExecutionContext)) + Sink + .fromGraph(new LazySink[T, M](sinkFactory)) + .mapMaterializedValue(_.map(_.getOrElse(fallback()))(ExecutionContexts.sameThreadExecutionContext)) /** * Creates a real `Sink` upon receiving the first element. 
Internal `Sink` will not be created if there are no elements, diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala index e90880db35..9003a7b524 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Source.scala @@ -32,10 +32,10 @@ import scala.compat.java8.FutureConverters._ * an “atomic” source, e.g. from a collection or a file. Materialization turns a Source into * a Reactive Streams `Publisher` (at least conceptually). */ -final class Source[+Out, +Mat]( - override val traversalBuilder: LinearTraversalBuilder, - override val shape: SourceShape[Out]) - extends FlowOpsMat[Out, Mat] with Graph[SourceShape[Out], Mat] { +final class Source[+Out, +Mat](override val traversalBuilder: LinearTraversalBuilder, + override val shape: SourceShape[Out]) + extends FlowOpsMat[Out, Mat] + with Graph[SourceShape[Out], Mat] { override type Repr[+O] = Source[O, Mat @uncheckedVariance] override type ReprMat[+O, +M] = Source[O, M] @@ -47,24 +47,22 @@ final class Source[+Out, +Mat]( override def via[T, Mat2](flow: Graph[FlowShape[Out, T], Mat2]): Repr[T] = viaMat(flow)(Keep.left) - override def viaMat[T, Mat2, Mat3](flow: Graph[FlowShape[Out, T], Mat2])(combine: (Mat, Mat2) => Mat3): Source[T, Mat3] = { + override def viaMat[T, Mat2, Mat3](flow: Graph[FlowShape[Out, T], Mat2])( + combine: (Mat, Mat2) => Mat3): Source[T, Mat3] = { if (flow.traversalBuilder eq Flow.identityTraversalBuilder) if (combine == Keep.left) //optimization by returning this this.asInstanceOf[Source[T, Mat3]] //Mat == Mat3, due to Keep.left else if (combine == Keep.right || combine == Keep.none) // Mat3 = NotUsed //optimization with LinearTraversalBuilder.empty() - new Source[T, Mat3]( - traversalBuilder.append(LinearTraversalBuilder.empty(), flow.shape, combine), - SourceShape(shape.out).asInstanceOf[SourceShape[T]]) + new Source[T, 
Mat3](traversalBuilder.append(LinearTraversalBuilder.empty(), flow.shape, combine), + SourceShape(shape.out).asInstanceOf[SourceShape[T]]) else - new Source[T, Mat3]( - traversalBuilder.append(flow.traversalBuilder, flow.shape, combine), - SourceShape(flow.shape.out)) + new Source[T, Mat3](traversalBuilder.append(flow.traversalBuilder, flow.shape, combine), + SourceShape(flow.shape.out)) else - new Source[T, Mat3]( - traversalBuilder.append(flow.traversalBuilder, flow.shape, combine), - SourceShape(flow.shape.out)) + new Source[T, Mat3](traversalBuilder.append(flow.traversalBuilder, flow.shape, combine), + SourceShape(flow.shape.out)) } /** @@ -100,7 +98,8 @@ final class Source[+Out, +Mat]( * Connect this `Source` to a `Sink` and run it. The returned value is the materialized value * of the `Sink`, e.g. the `Publisher` of a [[akka.stream.scaladsl.Sink#publisher]]. */ - def runWith[Mat2](sink: Graph[SinkShape[Out], Mat2])(implicit materializer: Materializer): Mat2 = toMat(sink)(Keep.right).run() + def runWith[Mat2](sink: Graph[SinkShape[Out], Mat2])(implicit materializer: Materializer): Mat2 = + toMat(sink)(Keep.right).run() /** * Shortcut for running this `Source` with a fold function. @@ -110,7 +109,8 @@ final class Source[+Out, +Mat]( * function evaluation when the input stream ends, or completed with `Failure` * if there is a failure signaled in the stream. */ - def runFold[U](zero: U)(f: (U, Out) => U)(implicit materializer: Materializer): Future[U] = runWith(Sink.fold(zero)(f)) + def runFold[U](zero: U)(f: (U, Out) => U)(implicit materializer: Materializer): Future[U] = + runWith(Sink.fold(zero)(f)) /** * Shortcut for running this `Source` with a foldAsync function. @@ -120,7 +120,8 @@ final class Source[+Out, +Mat]( * function evaluation when the input stream ends, or completed with `Failure` * if there is a failure signaled in the stream. 
*/ - def runFoldAsync[U](zero: U)(f: (U, Out) => Future[U])(implicit materializer: Materializer): Future[U] = runWith(Sink.foldAsync(zero)(f)) + def runFoldAsync[U](zero: U)(f: (U, Out) => Future[U])(implicit materializer: Materializer): Future[U] = + runWith(Sink.foldAsync(zero)(f)) /** * Shortcut for running this `Source` with a reduce function. @@ -200,7 +201,8 @@ final class Source[+Out, +Mat]( * Combines several sources with fan-in strategy like `Merge` or `Concat` and returns `Source`. */ @deprecated("Use `Source.combine` on companion object instead", "2.5.5") - def combine[T, U](first: Source[T, _], second: Source[T, _], rest: Source[T, _]*)(strategy: Int => Graph[UniformFanInShape[T, U], NotUsed]): Source[U, NotUsed] = + def combine[T, U](first: Source[T, _], second: Source[T, _], rest: Source[T, _]*)( + strategy: Int => Graph[UniformFanInShape[T, U], NotUsed]): Source[U, NotUsed] = Source.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ val c = b.add(strategy(rest.size + 2)) @@ -220,10 +222,12 @@ final class Source[+Out, +Mat]( * API MAY CHANGE */ @ApiMayChange - def asSourceWithContext[Ctx](f: Out => Ctx): SourceWithContext[Out, Ctx, Mat] = new SourceWithContext(this.map(e => (e, f(e)))) + def asSourceWithContext[Ctx](f: Out => Ctx): SourceWithContext[Out, Ctx, Mat] = + new SourceWithContext(this.map(e => (e, f(e)))) } object Source { + /** INTERNAL API */ def shape[T](name: String): SourceShape[T] = SourceShape(Outlet(name + ".out")) @@ -261,7 +265,9 @@ object Source { * will continue infinitely by repeating the sequence of elements provided by function parameter. 
*/ def cycle[T](f: () => Iterator[T]): Source[T, NotUsed] = { - val iterator = Iterator.continually { val i = f(); if (i.isEmpty) throw new IllegalArgumentException("empty iterator") else i }.flatten + val iterator = Iterator.continually { + val i = f(); if (i.isEmpty) throw new IllegalArgumentException("empty iterator") else i + }.flatten fromIterator(() => iterator).withAttributes(DefaultAttributes.cycledSource) } @@ -270,22 +276,18 @@ object Source { * it so also in type. */ def fromGraph[T, M](g: Graph[SourceShape[T], M]): Source[T, M] = g match { - case s: Source[T, M] => s - case s: javadsl.Source[T, M] => s.asScala + case s: Source[T, M] => s + case s: javadsl.Source[T, M] => s.asScala case g: GraphStageWithMaterializedValue[SourceShape[T], M] => // move these from the stage itself to make the returned source // behave as it is the stage with regards to attributes val attrs = g.traversalBuilder.attributes val noAttrStage = g.withAttributes(Attributes.none) - new Source( - LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right), - noAttrStage.shape - ).withAttributes(attrs) + new Source(LinearTraversalBuilder.fromBuilder(noAttrStage.traversalBuilder, noAttrStage.shape, Keep.right), + noAttrStage.shape).withAttributes(attrs) case other => // composite source shaped graph - new Source( - LinearTraversalBuilder.fromBuilder(other.traversalBuilder, other.shape, Keep.right), - other.shape) + new Source(LinearTraversalBuilder.fromBuilder(other.traversalBuilder, other.shape, Keep.right), other.shape) } /** @@ -323,7 +325,8 @@ object Source { * If the [[Future]] fails the stream is failed with the exception from the future. 
If downstream cancels before the * stream completes the materialized `Future` will be failed with a [[StreamDetachedException]] */ - def fromFutureSource[T, M](future: Future[Graph[SourceShape[T], M]]): Source[T, Future[M]] = fromGraph(new FutureFlattenSource(future)) + def fromFutureSource[T, M](future: Future[Graph[SourceShape[T], M]]): Source[T, Future[M]] = + fromGraph(new FutureFlattenSource(future)) /** * Streams the elements of an asynchronous source once its given `completion` operator completes. @@ -331,7 +334,9 @@ object Source { * If downstream cancels before the stream completes the materialized `Future` will be failed * with a [[StreamDetachedException]] */ - def fromSourceCompletionStage[T, M](completion: CompletionStage[_ <: Graph[SourceShape[T], M]]): Source[T, CompletionStage[M]] = fromFutureSource(completion.toScala).mapMaterializedValue(_.toJava) + def fromSourceCompletionStage[T, M]( + completion: CompletionStage[_ <: Graph[SourceShape[T], M]]): Source[T, CompletionStage[M]] = + fromFutureSource(completion.toScala).mapMaterializedValue(_.toJava) /** * Elements are emitted periodically with the specified interval. @@ -450,7 +455,9 @@ object Source { * * @deprecated Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant. 
*/ - @deprecated("Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0") + @deprecated( + "Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", + since = "2.5.0") def actorPublisher[T](props: Props): Source[T, ActorRef] = { require(classOf[ActorPublisher[_]].isAssignableFrom(props.actorClass()), "Actor must be ActorPublisher") fromGraph(new ActorPublisherSource(props, DefaultAttributes.actorPublisherSource, shape("ActorPublisherSource"))) @@ -495,13 +502,19 @@ object Source { * @param bufferSize The size of the buffer in element count * @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer */ - @InternalApi private[akka] def actorRef[T]( - completionMatcher: PartialFunction[Any, Unit], - failureMatcher: PartialFunction[Any, Throwable], - bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, ActorRef] = { + @InternalApi private[akka] def actorRef[T](completionMatcher: PartialFunction[Any, Unit], + failureMatcher: PartialFunction[Any, Throwable], + bufferSize: Int, + overflowStrategy: OverflowStrategy): Source[T, ActorRef] = { require(bufferSize >= 0, "bufferSize must be greater than or equal to 0") require(!overflowStrategy.isBackpressure, "Backpressure overflowStrategy not supported") - fromGraph(new ActorRefSource(completionMatcher, failureMatcher, bufferSize, overflowStrategy, DefaultAttributes.actorRefSource, shape("ActorRefSource"))) + fromGraph( + new ActorRefSource(completionMatcher, + failureMatcher, + bufferSize, + overflowStrategy, + DefaultAttributes.actorRefSource, + shape("ActorRefSource"))) } /** @@ -538,18 +551,16 @@ object Source { * @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the 
buffer */ def actorRef[T](bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, ActorRef] = - actorRef( - { - case akka.actor.Status.Success => - case akka.actor.Status.Success(_) => - }, - { case akka.actor.Status.Failure(cause) => cause }, - bufferSize, overflowStrategy) + actorRef({ + case akka.actor.Status.Success => + case akka.actor.Status.Success(_) => + }, { case akka.actor.Status.Failure(cause) => cause }, bufferSize, overflowStrategy) /** * Combines several sources with fan-in strategy like `Merge` or `Concat` and returns `Source`. */ - def combine[T, U](first: Source[T, _], second: Source[T, _], rest: Source[T, _]*)(strategy: Int => Graph[UniformFanInShape[T, U], NotUsed]): Source[U, NotUsed] = + def combine[T, U](first: Source[T, _], second: Source[T, _], rest: Source[T, _]*)( + strategy: Int => Graph[UniformFanInShape[T, U], NotUsed]): Source[U, NotUsed] = Source.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ val c = b.add(strategy(rest.size + 2)) @@ -568,7 +579,8 @@ object Source { /** * Combines two sources with fan-in strategy like `Merge` or `Concat` and returns `Source` with a materialized value. */ - def combineMat[T, U, M1, M2, M](first: Source[T, M1], second: Source[T, M2])(strategy: Int => Graph[UniformFanInShape[T, U], NotUsed])(matF: (M1, M2) => M): Source[U, M] = { + def combineMat[T, U, M1, M2, M](first: Source[T, M1], second: Source[T, M2])( + strategy: Int => Graph[UniformFanInShape[T, U], NotUsed])(matF: (M1, M2) => M): Source[U, M] = { val secondPartiallyCombined = GraphDSL.create(second) { implicit b => secondShape => import GraphDSL.Implicits._ val c = b.add(strategy(2)) @@ -581,7 +593,8 @@ object Source { /** * Combine the elements of multiple streams into a stream of sequences. 
*/ - def zipN[T](sources: immutable.Seq[Source[T, _]]): Source[immutable.Seq[T], NotUsed] = zipWithN(ConstantFun.scalaIdentityFunction[immutable.Seq[T]])(sources).addAttributes(DefaultAttributes.zipN) + def zipN[T](sources: immutable.Seq[Source[T, _]]): Source[immutable.Seq[T], NotUsed] = + zipWithN(ConstantFun.scalaIdentityFunction[immutable.Seq[T]])(sources).addAttributes(DefaultAttributes.zipN) /* * Combine the elements of multiple streams into a stream of sequences using a combiner function. @@ -678,7 +691,9 @@ object Source { * is received. Stream calls close and completes when `Future` from read function returns None. * @param close - function that closes resource */ - def unfoldResourceAsync[T, S](create: () => Future[S], read: (S) => Future[Option[T]], close: (S) => Future[Done]): Source[T, NotUsed] = + def unfoldResourceAsync[T, S](create: () => Future[S], + read: (S) => Future[Option[T]], + close: (S) => Future[Done]): Source[T, NotUsed] = Source.fromGraph(new UnfoldResourceSourceAsync(create, read, close)) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala index f19300a106..4adbae9ea5 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/SourceWithContext.scala @@ -33,15 +33,16 @@ object SourceWithContext { * API MAY CHANGE */ @ApiMayChange -final class SourceWithContext[+Out, +Ctx, +Mat] private[stream] ( - delegate: Source[(Out, Ctx), Mat] -) extends GraphDelegate(delegate) with FlowWithContextOps[Out, Ctx, Mat] { +final class SourceWithContext[+Out, +Ctx, +Mat] private[stream] (delegate: Source[(Out, Ctx), Mat]) + extends GraphDelegate(delegate) + with FlowWithContextOps[Out, Ctx, Mat] { override type ReprMat[+O, +C, +M] = SourceWithContext[O, C, M @uncheckedVariance] override def via[Out2, Ctx2, Mat2](viaFlow: Graph[FlowShape[(Out, Ctx), (Out2, Ctx2)], 
Mat2]): Repr[Out2, Ctx2] = new SourceWithContext(delegate.via(viaFlow)) - override def viaMat[Out2, Ctx2, Mat2, Mat3](flow: Graph[FlowShape[(Out, Ctx), (Out2, Ctx2)], Mat2])(combine: (Mat, Mat2) => Mat3): SourceWithContext[Out2, Ctx2, Mat3] = + override def viaMat[Out2, Ctx2, Mat2, Mat3](flow: Graph[FlowShape[(Out, Ctx), (Out2, Ctx2)], Mat2])( + combine: (Mat, Mat2) => Mat3): SourceWithContext[Out2, Ctx2, Mat3] = new SourceWithContext(delegate.viaMat(flow)(combine)) /** @@ -82,4 +83,3 @@ final class SourceWithContext[+Out, +Ctx, +Mat] private[stream] ( def asJava[JOut >: Out, JCtx >: Ctx, JMat >: Mat]: javadsl.SourceWithContext[JOut, JCtx, JMat] = new javadsl.SourceWithContext(this) } - diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala index 3e72848998..2be2c185e7 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/StreamConverters.scala @@ -4,14 +4,14 @@ package akka.stream.scaladsl -import java.io.{ OutputStream, InputStream } +import java.io.{ InputStream, OutputStream } import java.util.Spliterators import java.util.stream.{ Collector, StreamSupport } -import akka.stream.{ Attributes, SinkShape, IOResult } +import akka.stream.{ Attributes, IOResult, SinkShape } import akka.stream.impl._ import akka.stream.impl.Stages.DefaultAttributes -import akka.stream.impl.io.{ InputStreamSinkStage, OutputStreamSink, OutputStreamSourceStage, InputStreamSource } +import akka.stream.impl.io.{ InputStreamSinkStage, InputStreamSource, OutputStreamSink, OutputStreamSourceStage } import akka.util.ByteString import scala.concurrent.duration.Duration._ @@ -46,7 +46,8 @@ object StreamConverters { * @param chunkSize the size of each read operation, defaults to 8192 */ def fromInputStream(in: () => InputStream, chunkSize: Int = 8192): Source[ByteString, Future[IOResult]] = - Source.fromGraph(new 
InputStreamSource(in, chunkSize, DefaultAttributes.inputStreamSource, sourceShape("InputStreamSource"))) + Source.fromGraph( + new InputStreamSource(in, chunkSize, DefaultAttributes.inputStreamSource, sourceShape("InputStreamSource"))) /** * Creates a Source which when materialized will return an [[OutputStream]] which it is possible @@ -79,7 +80,8 @@ object StreamConverters { * will cancel the stream when the [[OutputStream]] is no longer writable. */ def fromOutputStream(out: () => OutputStream, autoFlush: Boolean = false): Sink[ByteString, Future[IOResult]] = - Sink.fromGraph(new OutputStreamSink(out, DefaultAttributes.outputStreamSink, sinkShape("OutputStreamSink"), autoFlush)) + Sink.fromGraph( + new OutputStreamSink(out, DefaultAttributes.outputStreamSink, sinkShape("OutputStreamSink"), autoFlush)) /** * Creates a Sink which when materialized will return an [[InputStream]] which it is possible @@ -109,10 +111,14 @@ object StreamConverters { * to handle multiple invocations. */ def javaCollector[T, R](collectorFactory: () => java.util.stream.Collector[T, _ <: Any, R]): Sink[T, Future[R]] = - Flow[T].fold(() => - new CollectorState[T, R](collectorFactory().asInstanceOf[Collector[T, Any, R]])) { (state, elem) => () => state().update(elem) } + Flow[T] + .fold(() => new CollectorState[T, R](collectorFactory().asInstanceOf[Collector[T, Any, R]])) { + (state, elem) => () => + state().update(elem) + } .map(state => state().finish()) - .toMat(Sink.head)(Keep.right).withAttributes(DefaultAttributes.javaCollector) + .toMat(Sink.head)(Keep.right) + .withAttributes(DefaultAttributes.javaCollector) /** * Creates a sink which materializes into a ``Future`` which will be completed with result of the Java 8 ``Collector`` transformation @@ -124,29 +130,36 @@ object StreamConverters { * Note that a flow can be materialized multiple times, so the function producing the ``Collector`` must be able * to handle multiple invocations. 
*/ - def javaCollectorParallelUnordered[T, R](parallelism: Int)(collectorFactory: () => java.util.stream.Collector[T, _ <: Any, R]): Sink[T, Future[R]] = { + def javaCollectorParallelUnordered[T, R](parallelism: Int)( + collectorFactory: () => java.util.stream.Collector[T, _ <: Any, R]): Sink[T, Future[R]] = { if (parallelism == 1) javaCollector[T, R](collectorFactory) else { - Sink.fromGraph(GraphDSL.create(Sink.head[R]) { implicit b => sink => - import GraphDSL.Implicits._ - val collector = collectorFactory().asInstanceOf[Collector[T, Any, R]] - val balance = b.add(Balance[T](parallelism)) - val merge = b.add(Merge[() => CollectorState[T, R]](parallelism)) + Sink + .fromGraph(GraphDSL.create(Sink.head[R]) { implicit b => sink => + import GraphDSL.Implicits._ + val collector = collectorFactory().asInstanceOf[Collector[T, Any, R]] + val balance = b.add(Balance[T](parallelism)) + val merge = b.add(Merge[() => CollectorState[T, R]](parallelism)) - for (i <- 0 until parallelism) { - val worker = Flow[T] - .fold(() => new CollectorState(collector)) { (state, elem) => () => state().update(elem) } - .async + for (i <- 0 until parallelism) { + val worker = Flow[T] + .fold(() => new CollectorState(collector)) { (state, elem) => () => + state().update(elem) + } + .async - balance.out(i) ~> worker ~> merge.in(i) - } + balance.out(i) ~> worker ~> merge.in(i) + } - merge.out - .fold(() => new ReducerState(collector)) { (state, elem) => () => state().update(elem().accumulated) } - .map(state => state().finish()) ~> sink.in + merge.out + .fold(() => new ReducerState(collector)) { (state, elem) => () => + state().update(elem().accumulated) + } + .map(state => state().finish()) ~> sink.in - SinkShape(balance.in) - }).withAttributes(DefaultAttributes.javaCollectorParallelUnordered) + SinkShape(balance.in) + }) + .withAttributes(DefaultAttributes.javaCollectorParallelUnordered) } } @@ -165,23 +178,31 @@ object StreamConverters { */ def asJavaStream[T](): Sink[T, 
java.util.stream.Stream[T]] = { // TODO removing the QueueSink name, see issue #22523 - Sink.fromGraph(new QueueSink[T]().withAttributes(Attributes.none)) - .mapMaterializedValue(queue => StreamSupport.stream( - Spliterators.spliteratorUnknownSize(new java.util.Iterator[T] { - var nextElementFuture: Future[Option[T]] = queue.pull() - var nextElement: Option[T] = _ + Sink + .fromGraph(new QueueSink[T]().withAttributes(Attributes.none)) + .mapMaterializedValue( + queue => + StreamSupport + .stream( + Spliterators.spliteratorUnknownSize( + new java.util.Iterator[T] { + var nextElementFuture: Future[Option[T]] = queue.pull() + var nextElement: Option[T] = _ - override def hasNext: Boolean = { - nextElement = Await.result(nextElementFuture, Inf) - nextElement.isDefined - } + override def hasNext: Boolean = { + nextElement = Await.result(nextElementFuture, Inf) + nextElement.isDefined + } - override def next(): T = { - val next = nextElement.get - nextElementFuture = queue.pull() - next - } - }, 0), false).onClose(new Runnable { def run = queue.cancel() })) + override def next(): T = { + val next = nextElement.get + nextElementFuture = queue.pull() + next + } + }, + 0), + false) + .onClose(new Runnable { def run = queue.cancel() })) .withAttributes(DefaultAttributes.asJavaStream) } @@ -194,6 +215,7 @@ object StreamConverters { * You can use [[Source.async]] to create asynchronous boundaries between synchronous Java ``Stream`` * and the rest of flow. 
*/ - def fromJavaStream[T, S <: java.util.stream.BaseStream[T, S]](stream: () => java.util.stream.BaseStream[T, S]): Source[T, NotUsed] = + def fromJavaStream[T, S <: java.util.stream.BaseStream[T, S]]( + stream: () => java.util.stream.BaseStream[T, S]): Source[T, NotUsed] = Source.fromGraph(new JavaStreamSource[T, S](stream)).withAttributes(DefaultAttributes.fromJavaStream) } diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/SubFlow.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/SubFlow.scala index c4d64aeb4e..4e269db0f9 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/SubFlow.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/SubFlow.scala @@ -13,7 +13,7 @@ import scala.annotation.unchecked.uncheckedVariance * SubFlows cannot contribute to the super-flow’s materialized value since they * are materialized later, during the runtime of the flow graph processing. */ -trait SubFlow[+Out, +Mat, +F[+_], C] extends FlowOps[Out, Mat] { +trait SubFlow[+Out, +Mat, +F[+ _], C] extends FlowOps[Out, Mat] { override type Repr[+T] = SubFlow[T, Mat @uncheckedVariance, F @uncheckedVariance, C @uncheckedVariance] override type Closed = C diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala index 89c4334c6f..22fea0e9b9 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/TLS.scala @@ -71,11 +71,13 @@ object TLS { * The SSLEngine may use this information e.g. when an endpoint identification algorithm was * configured using [[javax.net.ssl.SSLParameters.setEndpointIdentificationAlgorithm]]. 
*/ - def apply( - sslContext: SSLContext, - sslConfig: Option[AkkaSSLConfig], - firstSession: NegotiateNewSession, role: TLSRole, - closing: TLSClosing = IgnoreComplete, hostInfo: Option[(String, Int)] = None): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = { + def apply(sslContext: SSLContext, + sslConfig: Option[AkkaSSLConfig], + firstSession: NegotiateNewSession, + role: TLSRole, + closing: TLSClosing = IgnoreComplete, + hostInfo: Option[(String, Int)] = None) + : scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = { def theSslConfig(system: ActorSystem): AkkaSSLConfig = sslConfig.getOrElse(AkkaSSLConfig(system)) @@ -137,10 +139,12 @@ object TLS { * The SSLEngine may use this information e.g. when an endpoint identification algorithm was * configured using [[javax.net.ssl.SSLParameters.setEndpointIdentificationAlgorithm]]. */ - def apply( - sslContext: SSLContext, - firstSession: NegotiateNewSession, role: TLSRole, - closing: TLSClosing, hostInfo: Option[(String, Int)]): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = + def apply(sslContext: SSLContext, + firstSession: NegotiateNewSession, + role: TLSRole, + closing: TLSClosing, + hostInfo: Option[(String, Int)]) + : scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = apply(sslContext, None, firstSession, role, closing, hostInfo) /** @@ -152,9 +156,9 @@ object TLS { * that is not a requirement and depends entirely on the application * protocol. 
*/ - def apply( - sslContext: SSLContext, - firstSession: NegotiateNewSession, role: TLSRole): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = + def apply(sslContext: SSLContext, + firstSession: NegotiateNewSession, + role: TLSRole): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = apply(sslContext, None, firstSession, role, IgnoreComplete, None) /** @@ -169,11 +173,11 @@ object TLS { * For a description of the `closing` parameter please refer to [[TLSClosing]]. */ def apply( - createSSLEngine: () => SSLEngine, // we don't offer the internal `ActorSystem => SSLEngine` API here, see #21753 - verifySession: SSLSession => Try[Unit], // we don't offer the internal API that provides `ActorSystem` here, see #21753 - closing: TLSClosing - ): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = - scaladsl.BidiFlow.fromGraph(TlsModule(Attributes.none, _ => createSSLEngine(), (_, session) => verifySession(session), closing)) + createSSLEngine: () => SSLEngine, // we don't offer the internal `ActorSystem => SSLEngine` API here, see #21753 + verifySession: SSLSession => Try[Unit], // we don't offer the internal API that provides `ActorSystem` here, see #21753 + closing: TLSClosing): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = + scaladsl.BidiFlow.fromGraph( + TlsModule(Attributes.none, _ => createSSLEngine(), (_, session) => verifySession(session), closing)) /** * Create a StreamTls [[akka.stream.scaladsl.BidiFlow]]. This is a low-level interface. @@ -184,9 +188,8 @@ object TLS { * For a description of the `closing` parameter please refer to [[TLSClosing]]. 
*/ def apply( - createSSLEngine: () => SSLEngine, // we don't offer the internal `ActorSystem => SSLEngine` API here, see #21753 - closing: TLSClosing - ): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = + createSSLEngine: () => SSLEngine, // we don't offer the internal `ActorSystem => SSLEngine` API here, see #21753 + closing: TLSClosing): scaladsl.BidiFlow[SslTlsOutbound, ByteString, ByteString, SslTlsInbound, NotUsed] = apply(createSSLEngine, _ => Success(()), closing) } @@ -228,11 +231,13 @@ trait ScalaSessionAPI { * certificates were used. */ def localCertificates: List[Certificate] = Option(session.getLocalCertificates).map(_.toList).getOrElse(Nil) + /** * Scala API: Extract the Principal that was actually used by this engine * during this session’s negotiation. */ def localPrincipal: Option[Principal] = Option(session.getLocalPrincipal) + /** * Scala API: Extract the certificates that were used by the peer engine * during this session’s negotiation. The list is empty if no certificates @@ -241,6 +246,7 @@ trait ScalaSessionAPI { def peerCertificates: List[Certificate] = try Option(session.getPeerCertificates).map(_.toList).getOrElse(Nil) catch { case e: SSLPeerUnverifiedException => Nil } + /** * Scala API: Extract the Principal that the peer engine presented during * this session’s negotiation. 
@@ -251,6 +257,7 @@ trait ScalaSessionAPI { } object ScalaSessionAPI { + /** Constructs a ScalaSessionAPI instance from an SSLSession */ def apply(_session: SSLSession): ScalaSessionAPI = new ScalaSessionAPI { diff --git a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala index e90ccbcd4b..5c4ec0d10c 100644 --- a/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala +++ b/akka-stream/src/main/scala/akka/stream/scaladsl/Tcp.scala @@ -39,19 +39,17 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { * @param whenUnbound A future that is completed when the server is unbound, or failed if the server binding fails */ final case class ServerBinding @InternalApi private[akka] (localAddress: InetSocketAddress)( - private val unbindAction: () => Future[Unit], - val whenUnbound: Future[Done] - ) { + private val unbindAction: () => Future[Unit], + val whenUnbound: Future[Done]) { def unbind(): Future[Unit] = unbindAction() } /** * Represents an accepted incoming TCP connection. 
*/ - final case class IncomingConnection( - localAddress: InetSocketAddress, - remoteAddress: InetSocketAddress, - flow: Flow[ByteString, ByteString, NotUsed]) { + final case class IncomingConnection(localAddress: InetSocketAddress, + remoteAddress: InetSocketAddress, + flow: Flow[ByteString, ByteString, NotUsed]) { /** * Handles the connection using the given flow, which is materialized exactly once and the respective @@ -78,13 +76,11 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider { def createExtension(system: ExtendedActorSystem): Tcp = new Tcp(system) // just wraps/unwraps the TLS byte events to provide ByteString, ByteString flows - private val tlsWrapping: BidiFlow[ByteString, TLSProtocol.SendBytes, TLSProtocol.SslTlsInbound, ByteString, NotUsed] = BidiFlow.fromFlows( - Flow[ByteString].map(TLSProtocol.SendBytes), - Flow[TLSProtocol.SslTlsInbound].collect { + private val tlsWrapping: BidiFlow[ByteString, TLSProtocol.SendBytes, TLSProtocol.SslTlsInbound, ByteString, NotUsed] = + BidiFlow.fromFlows(Flow[ByteString].map(TLSProtocol.SendBytes), Flow[TLSProtocol.SslTlsInbound].collect { case sb: TLSProtocol.SessionBytes => sb.bytes // ignore other kinds of inbounds (currently only Truncated) - } - ) + }) } final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { @@ -116,22 +112,21 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * independently whether the client is still attempting to write. This setting is recommended * for servers, and therefore it is the default setting. 
*/ - def bind( - interface: String, - port: Int, - backlog: Int = 100, - options: immutable.Traversable[SocketOption] = Nil, - halfClose: Boolean = false, - idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] = - Source.fromGraph(new ConnectionSourceStage( - IO(IoTcp)(system), - new InetSocketAddress(interface, port), - backlog, - options, - halfClose, - idleTimeout, - bindShutdownTimeout, - settings.ioSettings)) + def bind(interface: String, + port: Int, + backlog: Int = 100, + options: immutable.Traversable[SocketOption] = Nil, + halfClose: Boolean = false, + idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] = + Source.fromGraph( + new ConnectionSourceStage(IO(IoTcp)(system), + new InetSocketAddress(interface, port), + backlog, + options, + halfClose, + idleTimeout, + bindShutdownTimeout, + settings.ioSettings)) /** * Creates a [[Tcp.ServerBinding]] instance which represents a prospective TCP server binding on the given `endpoint` @@ -156,17 +151,18 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * independently whether the client is still attempting to write. This setting is recommended * for servers, and therefore it is the default setting. 
*/ - def bindAndHandle( - handler: Flow[ByteString, ByteString, _], - interface: String, - port: Int, - backlog: Int = 100, - options: immutable.Traversable[SocketOption] = Nil, - halfClose: Boolean = false, - idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = { - bind(interface, port, backlog, options, halfClose, idleTimeout).to(Sink.foreach { conn: IncomingConnection => - conn.flow.join(handler).run() - }).run() + def bindAndHandle(handler: Flow[ByteString, ByteString, _], + interface: String, + port: Int, + backlog: Int = 100, + options: immutable.Traversable[SocketOption] = Nil, + halfClose: Boolean = false, + idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = { + bind(interface, port, backlog, options, halfClose, idleTimeout) + .to(Sink.foreach { conn: IncomingConnection => + conn.flow.join(handler).run() + }) + .run() } /** @@ -190,21 +186,23 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * independently whether the server is still attempting to write. 
*/ def outgoingConnection( - remoteAddress: InetSocketAddress, - localAddress: Option[InetSocketAddress] = None, - options: immutable.Traversable[SocketOption] = Nil, - halfClose: Boolean = true, - connectTimeout: Duration = Duration.Inf, - idleTimeout: Duration = Duration.Inf): Flow[ByteString, ByteString, Future[OutgoingConnection]] = { + remoteAddress: InetSocketAddress, + localAddress: Option[InetSocketAddress] = None, + options: immutable.Traversable[SocketOption] = Nil, + halfClose: Boolean = true, + connectTimeout: Duration = Duration.Inf, + idleTimeout: Duration = Duration.Inf): Flow[ByteString, ByteString, Future[OutgoingConnection]] = { - val tcpFlow = Flow.fromGraph(new OutgoingConnectionStage( - IO(IoTcp)(system), - remoteAddress, - localAddress, - options, - halfClose, - connectTimeout, - settings.ioSettings)).via(detacher[ByteString]) // must read ahead for proper completions + val tcpFlow = Flow + .fromGraph( + new OutgoingConnectionStage(IO(IoTcp)(system), + remoteAddress, + localAddress, + options, + halfClose, + connectTimeout, + settings.ioSettings)) + .via(detacher[ByteString]) // must read ahead for proper completions idleTimeout match { case d: FiniteDuration => tcpFlow.join(TcpIdleTimeout(d, Some(remoteAddress))) @@ -237,10 +235,10 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * @see [[Tcp.outgoingConnection()]] */ def outgoingTlsConnection( - host: String, - port: Int, - sslContext: SSLContext, - negotiateNewSession: NegotiateNewSession): Flow[ByteString, ByteString, Future[OutgoingConnection]] = + host: String, + port: Int, + sslContext: SSLContext, + negotiateNewSession: NegotiateNewSession): Flow[ByteString, ByteString, Future[OutgoingConnection]] = outgoingTlsConnection(InetSocketAddress.createUnresolved(host, port), sslContext, negotiateNewSession) /** @@ -256,13 +254,13 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { */ @ApiMayChange def outgoingTlsConnection( - 
remoteAddress: InetSocketAddress, - sslContext: SSLContext, - negotiateNewSession: NegotiateNewSession, - localAddress: Option[InetSocketAddress] = None, - options: immutable.Traversable[SocketOption] = Nil, - connectTimeout: Duration = Duration.Inf, - idleTimeout: Duration = Duration.Inf): Flow[ByteString, ByteString, Future[OutgoingConnection]] = { + remoteAddress: InetSocketAddress, + sslContext: SSLContext, + negotiateNewSession: NegotiateNewSession, + localAddress: Option[InetSocketAddress] = None, + options: immutable.Traversable[SocketOption] = Nil, + connectTimeout: Duration = Duration.Inf, + idleTimeout: Duration = Duration.Inf): Flow[ByteString, ByteString, Future[OutgoingConnection]] = { val connection = outgoingConnection(remoteAddress, localAddress, options, true, connectTimeout, idleTimeout) val tls = TLS(sslContext, negotiateNewSession, TLSRole.client) @@ -273,14 +271,14 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * INTERNAL API: for raw SSLEngine */ @InternalApi private[akka] def outgoingTlsConnectionWithSSLEngine( - remoteAddress: InetSocketAddress, - createSSLEngine: () => SSLEngine, - localAddress: Option[InetSocketAddress] = None, - options: immutable.Traversable[SocketOption] = Nil, - connectTimeout: Duration = Duration.Inf, - idleTimeout: Duration = Duration.Inf, - verifySession: SSLSession => Try[Unit], - closing: TLSClosing = IgnoreComplete): Flow[ByteString, ByteString, Future[OutgoingConnection]] = { + remoteAddress: InetSocketAddress, + createSSLEngine: () => SSLEngine, + localAddress: Option[InetSocketAddress] = None, + options: immutable.Traversable[SocketOption] = Nil, + connectTimeout: Duration = Duration.Inf, + idleTimeout: Duration = Duration.Inf, + verifySession: SSLSession => Try[Unit], + closing: TLSClosing = IgnoreComplete): Flow[ByteString, ByteString, Future[OutgoingConnection]] = { val connection = outgoingConnection(remoteAddress, localAddress, options, true, connectTimeout, idleTimeout) 
val tls = TLS(createSSLEngine, verifySession, closing) @@ -298,21 +296,18 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * Marked API-may-change to leave room for an improvement around the very long parameter list. */ @ApiMayChange - def bindTls( - interface: String, - port: Int, - sslContext: SSLContext, - negotiateNewSession: NegotiateNewSession, - backlog: Int = 100, - options: immutable.Traversable[SocketOption] = Nil, - idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] = { + def bindTls(interface: String, + port: Int, + sslContext: SSLContext, + negotiateNewSession: NegotiateNewSession, + backlog: Int = 100, + options: immutable.Traversable[SocketOption] = Nil, + idleTimeout: Duration = Duration.Inf): Source[IncomingConnection, Future[ServerBinding]] = { val tls = tlsWrapping.atop(TLS(sslContext, negotiateNewSession, TLSRole.server)).reversed bind(interface, port, backlog, options, true, idleTimeout).map { incomingConnection => - incomingConnection.copy( - flow = incomingConnection.flow.join(tls) - ) + incomingConnection.copy(flow = incomingConnection.flow.join(tls)) } } @@ -320,21 +315,19 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * INTERNAL API */ @InternalApi private[akka] def bindTlsWithSSLEngine( - interface: String, - port: Int, - createSSLEngine: () => SSLEngine, - backlog: Int = 100, - options: immutable.Traversable[SocketOption] = Nil, - idleTimeout: Duration = Duration.Inf, - verifySession: SSLSession => Try[Unit], - closing: TLSClosing = IgnoreComplete): Source[IncomingConnection, Future[ServerBinding]] = { + interface: String, + port: Int, + createSSLEngine: () => SSLEngine, + backlog: Int = 100, + options: immutable.Traversable[SocketOption] = Nil, + idleTimeout: Duration = Duration.Inf, + verifySession: SSLSession => Try[Unit], + closing: TLSClosing = IgnoreComplete): Source[IncomingConnection, Future[ServerBinding]] = { val tls = 
tlsWrapping.atop(TLS(createSSLEngine, verifySession, closing)).reversed bind(interface, port, backlog, options, true, idleTimeout).map { incomingConnection => - incomingConnection.copy( - flow = incomingConnection.flow.join(tls) - ) + incomingConnection.copy(flow = incomingConnection.flow.join(tls)) } } @@ -349,23 +342,23 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension { * Marked API-may-change to leave room for an improvement around the very long parameter list. */ @ApiMayChange - def bindAndHandleTls( - handler: Flow[ByteString, ByteString, _], - interface: String, - port: Int, - sslContext: SSLContext, - negotiateNewSession: NegotiateNewSession, - backlog: Int = 100, - options: immutable.Traversable[SocketOption] = Nil, - idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = { + def bindAndHandleTls(handler: Flow[ByteString, ByteString, _], + interface: String, + port: Int, + sslContext: SSLContext, + negotiateNewSession: NegotiateNewSession, + backlog: Int = 100, + options: immutable.Traversable[SocketOption] = Nil, + idleTimeout: Duration = Duration.Inf)(implicit m: Materializer): Future[ServerBinding] = { bindTls(interface, port, sslContext, negotiateNewSession, backlog, options, idleTimeout) .to(Sink.foreach { conn: IncomingConnection => conn.handleWith(handler) - }).run() + }) + .run() } } final class TcpIdleTimeoutException(msg: String, timeout: Duration) - extends TimeoutException(msg: String) - with NoStackTrace // only used from a single stage + extends TimeoutException(msg: String) + with NoStackTrace // only used from a single stage diff --git a/akka-stream/src/main/scala/akka/stream/serialization/StreamRefSerializer.scala b/akka-stream/src/main/scala/akka/stream/serialization/StreamRefSerializer.scala index d41f99a12e..99fb79a165 100644 --- a/akka-stream/src/main/scala/akka/stream/serialization/StreamRefSerializer.scala +++ 
b/akka-stream/src/main/scala/akka/stream/serialization/StreamRefSerializer.scala @@ -13,8 +13,9 @@ import akka.stream.impl.streamref._ /** INTERNAL API */ @InternalApi -private[akka] final class StreamRefSerializer(val system: ExtendedActorSystem) extends SerializerWithStringManifest - with BaseSerializer { +private[akka] final class StreamRefSerializer(val system: ExtendedActorSystem) + extends SerializerWithStringManifest + with BaseSerializer { private[this] lazy val serialization = SerializationExtension(system) @@ -28,33 +29,33 @@ private[akka] final class StreamRefSerializer(val system: ExtendedActorSystem) e override def manifest(o: AnyRef): String = o match { // protocol - case _: StreamRefsProtocol.SequencedOnNext[_] => SequencedOnNextManifest - case _: StreamRefsProtocol.CumulativeDemand => CumulativeDemandManifest + case _: StreamRefsProtocol.SequencedOnNext[_] => SequencedOnNextManifest + case _: StreamRefsProtocol.CumulativeDemand => CumulativeDemandManifest // handshake - case _: StreamRefsProtocol.OnSubscribeHandshake => OnSubscribeHandshakeManifest + case _: StreamRefsProtocol.OnSubscribeHandshake => OnSubscribeHandshakeManifest // completion case _: StreamRefsProtocol.RemoteStreamFailure => RemoteSinkFailureManifest case _: StreamRefsProtocol.RemoteStreamCompleted => RemoteSinkCompletedManifest // refs - case _: SourceRefImpl[_] => SourceRefManifest + case _: SourceRefImpl[_] => SourceRefManifest // case _: MaterializedSourceRef[_] => SourceRefManifest - case _: SinkRefImpl[_] => SinkRefManifest + case _: SinkRefImpl[_] => SinkRefManifest // case _: MaterializedSinkRef[_] => SinkRefManifest } override def toBinary(o: AnyRef): Array[Byte] = o match { // protocol - case o: StreamRefsProtocol.SequencedOnNext[_] => serializeSequencedOnNext(o).toByteArray - case d: StreamRefsProtocol.CumulativeDemand => serializeCumulativeDemand(d).toByteArray + case o: StreamRefsProtocol.SequencedOnNext[_] => serializeSequencedOnNext(o).toByteArray + case d: 
StreamRefsProtocol.CumulativeDemand => serializeCumulativeDemand(d).toByteArray // handshake - case h: StreamRefsProtocol.OnSubscribeHandshake => serializeOnSubscribeHandshake(h).toByteArray + case h: StreamRefsProtocol.OnSubscribeHandshake => serializeOnSubscribeHandshake(h).toByteArray // termination case d: StreamRefsProtocol.RemoteStreamFailure => serializeRemoteSinkFailure(d).toByteArray case d: StreamRefsProtocol.RemoteStreamCompleted => serializeRemoteSinkCompleted(d).toByteArray // refs - case ref: SinkRefImpl[_] => serializeSinkRef(ref).toByteArray + case ref: SinkRefImpl[_] => serializeSinkRef(ref).toByteArray // case ref: MaterializedSinkRef[_] => ??? // serializeSinkRef(ref).toByteArray - case ref: SourceRefImpl[_] => serializeSourceRef(ref).toByteArray + case ref: SourceRefImpl[_] => serializeSourceRef(ref).toByteArray // case ref: MaterializedSourceRef[_] => serializeSourceRef(ref.).toByteArray } @@ -66,34 +67,31 @@ private[akka] final class StreamRefSerializer(val system: ExtendedActorSystem) e case RemoteSinkCompletedManifest => deserializeRemoteStreamCompleted(bytes) case RemoteSinkFailureManifest => deserializeRemoteStreamFailure(bytes) // refs - case SinkRefManifest => deserializeSinkRef(bytes) - case SourceRefManifest => deserializeSourceRef(bytes) + case SinkRefManifest => deserializeSinkRef(bytes) + case SourceRefManifest => deserializeSourceRef(bytes) } // ----- private def serializeCumulativeDemand(d: StreamRefsProtocol.CumulativeDemand): StreamRefMessages.CumulativeDemand = { - StreamRefMessages.CumulativeDemand.newBuilder() - .setSeqNr(d.seqNr) - .build() + StreamRefMessages.CumulativeDemand.newBuilder().setSeqNr(d.seqNr).build() } - private def serializeRemoteSinkFailure(d: StreamRefsProtocol.RemoteStreamFailure): StreamRefMessages.RemoteStreamFailure = { - StreamRefMessages.RemoteStreamFailure.newBuilder() - .setCause(ByteString.copyFrom(d.msg.getBytes)) - .build() + private def serializeRemoteSinkFailure( + d: 
StreamRefsProtocol.RemoteStreamFailure): StreamRefMessages.RemoteStreamFailure = { + StreamRefMessages.RemoteStreamFailure.newBuilder().setCause(ByteString.copyFrom(d.msg.getBytes)).build() } - private def serializeRemoteSinkCompleted(d: StreamRefsProtocol.RemoteStreamCompleted): StreamRefMessages.RemoteStreamCompleted = { - StreamRefMessages.RemoteStreamCompleted.newBuilder() - .setSeqNr(d.seqNr) - .build() + private def serializeRemoteSinkCompleted( + d: StreamRefsProtocol.RemoteStreamCompleted): StreamRefMessages.RemoteStreamCompleted = { + StreamRefMessages.RemoteStreamCompleted.newBuilder().setSeqNr(d.seqNr).build() } - private def serializeOnSubscribeHandshake(o: StreamRefsProtocol.OnSubscribeHandshake): StreamRefMessages.OnSubscribeHandshake = { - StreamRefMessages.OnSubscribeHandshake.newBuilder() - .setTargetRef(StreamRefMessages.ActorRef.newBuilder() - .setPath(Serialization.serializedActorPath(o.targetRef))) + private def serializeOnSubscribeHandshake( + o: StreamRefsProtocol.OnSubscribeHandshake): StreamRefMessages.OnSubscribeHandshake = { + StreamRefMessages.OnSubscribeHandshake + .newBuilder() + .setTargetRef(StreamRefMessages.ActorRef.newBuilder().setPath(Serialization.serializedActorPath(o.targetRef))) .build() } @@ -101,31 +99,30 @@ private[akka] final class StreamRefSerializer(val system: ExtendedActorSystem) e val p = o.payload.asInstanceOf[AnyRef] val msgSerializer = serialization.findSerializerFor(p) - val payloadBuilder = StreamRefMessages.Payload.newBuilder() + val payloadBuilder = StreamRefMessages.Payload + .newBuilder() .setEnclosedMessage(ByteString.copyFrom(msgSerializer.toBinary(p))) .setSerializerId(msgSerializer.identifier) val ms = Serializers.manifestFor(msgSerializer, p) if (ms.nonEmpty) payloadBuilder.setMessageManifest(ByteString.copyFromUtf8(ms)) - StreamRefMessages.SequencedOnNext.newBuilder() - .setSeqNr(o.seqNr) - .setPayload(payloadBuilder.build()) - .build() + 
StreamRefMessages.SequencedOnNext.newBuilder().setSeqNr(o.seqNr).setPayload(payloadBuilder.build()).build() } private def serializeSinkRef(sink: SinkRefImpl[_]): StreamRefMessages.SinkRef = { - StreamRefMessages.SinkRef.newBuilder() - .setTargetRef(StreamRefMessages.ActorRef.newBuilder() - .setPath(Serialization.serializedActorPath(sink.initialPartnerRef))) + StreamRefMessages.SinkRef + .newBuilder() + .setTargetRef( + StreamRefMessages.ActorRef.newBuilder().setPath(Serialization.serializedActorPath(sink.initialPartnerRef))) .build() } private def serializeSourceRef(source: SourceRefImpl[_]): StreamRefMessages.SourceRef = { - StreamRefMessages.SourceRef.newBuilder() + StreamRefMessages.SourceRef + .newBuilder() .setOriginRef( - StreamRefMessages.ActorRef.newBuilder() - .setPath(Serialization.serializedActorPath(source.initialPartnerRef))) + StreamRefMessages.ActorRef.newBuilder().setPath(Serialization.serializedActorPath(source.initialPartnerRef))) .build() } @@ -154,11 +151,8 @@ private[akka] final class StreamRefSerializer(val system: ExtendedActorSystem) e private def deserializeSequencedOnNext(bytes: Array[Byte]): StreamRefsProtocol.SequencedOnNext[AnyRef] = { val o = StreamRefMessages.SequencedOnNext.parseFrom(bytes) val p = o.getPayload - val payload = serialization.deserialize( - p.getEnclosedMessage.toByteArray, - p.getSerializerId, - p.getMessageManifest.toStringUtf8 - ) + val payload = + serialization.deserialize(p.getEnclosedMessage.toByteArray, p.getSerializerId, p.getMessageManifest.toStringUtf8) StreamRefsProtocol.SequencedOnNext(o.getSeqNr, payload.get) } diff --git a/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala b/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala index 40fbd5d26a..138e067740 100644 --- a/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala +++ b/akka-stream/src/main/scala/akka/stream/snapshot/MaterializerState.scala @@ -40,14 +40,13 @@ object MaterializerState { /** 
INTERNAL API */ @InternalApi - private[akka] def requestFromSupervisor(supervisor: ActorRef)(implicit ec: ExecutionContext): Future[immutable.Seq[StreamSnapshot]] = { + private[akka] def requestFromSupervisor(supervisor: ActorRef)( + implicit ec: ExecutionContext): Future[immutable.Seq[StreamSnapshot]] = { // FIXME arbitrary timeout implicit val timeout: Timeout = 10.seconds (supervisor ? StreamSupervisor.GetChildren) .mapTo[StreamSupervisor.Children] - .flatMap(msg => - Future.sequence(msg.children.toVector.map(requestFromChild)) - ) + .flatMap(msg => Future.sequence(msg.children.toVector.map(requestFromChild))) } /** INTERNAL API */ @@ -67,6 +66,7 @@ object MaterializerState { */ @DoNotInherit @ApiMayChange sealed trait StreamSnapshot { + /** * Running interpreters */ @@ -102,10 +102,12 @@ sealed trait UninitializedInterpreter extends InterpreterSnapshot */ @DoNotInherit @ApiMayChange sealed trait RunningInterpreter extends InterpreterSnapshot { + /** * Each of the materialized graph stage logics running inside the interpreter */ def logics: immutable.Seq[LogicSnapshot] + /** * Each connection between logics in the interpreter */ @@ -154,43 +156,49 @@ sealed trait ConnectionSnapshot { * INTERNAL API */ @InternalApi -final private[akka] case class StreamSnapshotImpl( - self: ActorPath, - activeInterpreters: Seq[RunningInterpreter], - newShells: Seq[UninitializedInterpreter]) extends StreamSnapshot with HideImpl +final private[akka] case class StreamSnapshotImpl(self: ActorPath, + activeInterpreters: Seq[RunningInterpreter], + newShells: Seq[UninitializedInterpreter]) + extends StreamSnapshot + with HideImpl /** * INTERNAL API */ @InternalApi -private[akka] final case class UninitializedInterpreterImpl(logics: immutable.Seq[LogicSnapshot]) extends UninitializedInterpreter +private[akka] final case class UninitializedInterpreterImpl(logics: immutable.Seq[LogicSnapshot]) + extends UninitializedInterpreter /** * INTERNAL API */ @InternalApi -private[akka] final case 
class RunningInterpreterImpl( - logics: immutable.Seq[LogicSnapshot], - connections: immutable.Seq[ConnectionSnapshot], - queueStatus: String, - runningLogicsCount: Int, - stoppedLogics: immutable.Seq[LogicSnapshot]) extends RunningInterpreter with HideImpl +private[akka] final case class RunningInterpreterImpl(logics: immutable.Seq[LogicSnapshot], + connections: immutable.Seq[ConnectionSnapshot], + queueStatus: String, + runningLogicsCount: Int, + stoppedLogics: immutable.Seq[LogicSnapshot]) + extends RunningInterpreter + with HideImpl /** * INTERNAL API */ @InternalApi -private[akka] final case class LogicSnapshotImpl(index: Int, label: String, attributes: Attributes) extends LogicSnapshot with HideImpl +private[akka] final case class LogicSnapshotImpl(index: Int, label: String, attributes: Attributes) + extends LogicSnapshot + with HideImpl /** * INTERNAL API */ @InternalApi -private[akka] final case class ConnectionSnapshotImpl( - id: Int, - in: LogicSnapshot, - out: LogicSnapshot, - state: ConnectionSnapshot.ConnectionState) extends ConnectionSnapshot with HideImpl +private[akka] final case class ConnectionSnapshotImpl(id: Int, + in: LogicSnapshot, + out: LogicSnapshot, + state: ConnectionSnapshot.ConnectionState) + extends ConnectionSnapshot + with HideImpl /** * INTERNAL API diff --git a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala index c954a20806..8d58ddf0f1 100644 --- a/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala +++ b/akka-stream/src/main/scala/akka/stream/stage/GraphStage.scala @@ -103,7 +103,7 @@ private object TimerMessages { object GraphStageLogic { final case class StageActorRefNotInitializedException() - extends RuntimeException("You must first call getStageActor, to initialize the Actors behavior") + extends RuntimeException("You must first call getStageActor, to initialize the Actors behavior") /** * Input handler that terminates the operator upon 
receiving completion. @@ -180,18 +180,15 @@ object GraphStageLogic { * * @param name leave empty to use plain auto generated names */ - final class StageActor( - materializer: ActorMaterializer, - getAsyncCallback: StageActorRef.Receive => AsyncCallback[(ActorRef, Any)], - initialReceive: StageActorRef.Receive, - name: String) { + final class StageActor(materializer: ActorMaterializer, + getAsyncCallback: StageActorRef.Receive => AsyncCallback[(ActorRef, Any)], + initialReceive: StageActorRef.Receive, + name: String) { // not really needed, but let's keep MiMa happy - def this( - materializer: akka.stream.ActorMaterializer, - getAsyncCallback: StageActorRef.Receive => AsyncCallback[(ActorRef, Any)], - initialReceive: StageActorRef.Receive - ) { + def this(materializer: akka.stream.ActorMaterializer, + getAsyncCallback: StageActorRef.Receive => AsyncCallback[(ActorRef, Any)], + initialReceive: StageActorRef.Receive) { this(materializer, getAsyncCallback, initialReceive, "") } @@ -204,12 +201,17 @@ object GraphStageLogic { } private val functionRef: FunctionRef = - cell.addFunctionRef({ - case (_, m @ (PoisonPill | Kill)) => - materializer.logger.warning("{} message sent to StageActor({}) will be ignored, since it is not a real Actor." + - "Use a custom message type to communicate with it instead.", m, functionRef.path) - case pair => callback.invoke(pair) - }, name) + cell.addFunctionRef( + { + case (_, m @ (PoisonPill | Kill)) => + materializer.logger.warning( + "{} message sent to StageActor({}) will be ignored, since it is not a real Actor." + + "Use a custom message type to communicate with it instead.", + m, + functionRef.path) + case pair => callback.invoke(pair) + }, + name) /** * The ActorRef by which this StageActor can be contacted from the outside. 
@@ -316,13 +318,15 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: */ private[stream] def inHandler(id: Int): InHandler = { if (id > inCount) throw new IllegalArgumentException(s"$id not in inHandler range $inCount in $this") - if (inCount < 1) throw new IllegalArgumentException(s"Tried to access inHandler $id but there are no in ports in $this") + if (inCount < 1) + throw new IllegalArgumentException(s"Tried to access inHandler $id but there are no in ports in $this") handlers(id).asInstanceOf[InHandler] } private[stream] def outHandler(id: Int): OutHandler = { if (id > outCount) throw new IllegalArgumentException(s"$id not in outHandler range $outCount in $this") - if (outCount < 1) throw new IllegalArgumentException(s"Tried to access outHandler $id but there are no out ports $this") + if (outCount < 1) + throw new IllegalArgumentException(s"Tried to access outHandler $id but there are no out ports $this") handlers(inCount + id).asInstanceOf[OutHandler] } @@ -368,16 +372,20 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: * The operator fails upon receiving a failure. */ final protected def eagerTerminateInput: InHandler = EagerTerminateInput + /** * Input handler that does not terminate the operator upon receiving completion. * The operator fails upon receiving a failure. */ final protected def ignoreTerminateInput: InHandler = IgnoreTerminateInput + /** * Input handler that terminates the state upon receiving completion if the * given condition holds at that time. The operator fails upon receiving a failure. */ - final protected def conditionalTerminateInput(predicate: () => Boolean): InHandler = new ConditionalTerminateInput(predicate) + final protected def conditionalTerminateInput(predicate: () => Boolean): InHandler = + new ConditionalTerminateInput(predicate) + /** * Input handler that does not terminate the operator upon receiving completion * nor failure. 
@@ -398,7 +406,8 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: * Output handler that terminates the state upon receiving completion if the * given condition holds at that time. The operator fails upon receiving a failure. */ - final protected def conditionalTerminateOutput(predicate: () => Boolean): OutHandler = new ConditionalTerminateOutput(predicate) + final protected def conditionalTerminateOutput(predicate: () => Boolean): OutHandler = + new ConditionalTerminateOutput(predicate) /** * Assigns callbacks for the events for an [[Inlet]] @@ -501,7 +510,8 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: elem.asInstanceOf[T] } else { // Slow path - if (!isAvailable(in)) throw new IllegalArgumentException(s"Cannot get element from already empty input port ($in)") + if (!isAvailable(in)) + throw new IllegalArgumentException(s"Cannot get element from already empty input port ($in)") val failed = connection.slot.asInstanceOf[Failed] val elem = failed.previousElem.asInstanceOf[T] connection.slot = Failed(failed.ex, Empty) @@ -566,7 +576,8 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: // Detailed error information should not add overhead to the hot path ReactiveStreamsCompliance.requireNonNullElement(elem) if (isClosed(out)) throw new IllegalArgumentException(s"Cannot push closed port ($out)") - if (!isAvailable(out)) throw new IllegalArgumentException(s"Cannot push port ($out) twice, or before it being pulled") + if (!isAvailable(out)) + throw new IllegalArgumentException(s"Cannot push port ($out) twice, or before it being pulled") // No error, just InClosed caused the actual pull to be ignored, but the status flag still needs to be flipped connection.portState = portState ^ PushStartFlip @@ -606,10 +617,11 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: while (i < portToConn.length) { if (i < inCount) 
interpreter.cancel(portToConn(i)) - else handlers(i) match { - case e: Emitting[_] => e.addFollowUp(new EmittingCompletion(e.out, e.previous)) - case _ => interpreter.complete(portToConn(i)) - } + else + handlers(i) match { + case e: Emitting[_] => e.addFollowUp(new EmittingCompletion(e.out, e.previous)) + case _ => interpreter.complete(portToConn(i)) + } i += 1 } setKeepGoing(false) @@ -667,13 +679,11 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: if (n != pos) { // If we aren't already done requireNotReading(in) if (!hasBeenPulled(in)) pull(in) - setHandler(in, new Reading(in, n - pos, getHandler(in))( - (elem: T) => { - result(pos) = elem - pos += 1 - if (pos == n) andThen(result) - }, - () => onClose(result.take(pos)))) + setHandler(in, new Reading(in, n - pos, getHandler(in))((elem: T) => { + result(pos) = elem + pos += 1 + if (pos == n) andThen(result) + }, () => onClose(result.take(pos)))) } else andThen(result) } @@ -683,7 +693,10 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: * for the given inlet if suspension is needed and reinstalls the current * handler upon receiving the last `onPush()` signal (before invoking the `andThen` function). */ - final protected def readN[T](in: Inlet[T], n: Int, andThen: Procedure[java.util.List[T]], onClose: Procedure[java.util.List[T]]): Unit = { + final protected def readN[T](in: Inlet[T], + n: Int, + andThen: Procedure[java.util.List[T]], + onClose: Procedure[java.util.List[T]]): Unit = { //FIXME `onClose` is a poor name for `onComplete` rename this at the earliest possible opportunity import collection.JavaConverters._ readN(in, n)(seq => andThen(seq.asJava), seq => onClose(seq.asJava)) @@ -738,7 +751,9 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: * Caution: for n == 1 andThen is called after resetting the handler, for * other values it is called without resetting the handler. n MUST be positive. 
*/ - private final class Reading[T](in: Inlet[T], private var n: Int, val previous: InHandler)(andThen: T => Unit, onComplete: () => Unit) extends InHandler { + private final class Reading[T](in: Inlet[T], private var n: Int, val previous: InHandler)(andThen: T => Unit, + onComplete: () => Unit) + extends InHandler { require(n > 0, "number of elements to read must be positive!") override def onPush(): Unit = { @@ -779,7 +794,8 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: * is needed and reinstalls the current handler upon receiving an `onPull()` * signal. */ - final protected def emitMultiple[T](out: Outlet[T], elems: immutable.Iterable[T]): Unit = emitMultiple(out, elems, DoNothing) + final protected def emitMultiple[T](out: Outlet[T], elems: immutable.Iterable[T]): Unit = + emitMultiple(out, elems, DoNothing) /** * Java API @@ -878,7 +894,8 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: case _ => setHandler(out, next) } - private abstract class Emitting[T](val out: Outlet[T], val previous: OutHandler, andThen: () => Unit) extends OutHandler { + private abstract class Emitting[T](val out: Outlet[T], val previous: OutHandler, andThen: () => Unit) + extends OutHandler { private var followUps: Emitting[T] = _ private var followUpsTail: Emitting[T] = _ private def as[U] = this.asInstanceOf[Emitting[U]] @@ -887,6 +904,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: setHandler(out, previous) andThen() if (followUps != null) { + /** * If (while executing andThen() callback) handler was changed to new emitting, * we should add it to the end of emission queue @@ -897,6 +915,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: val next = dequeue() if (next.isInstanceOf[EmittingCompletion[_]]) { + /** * If next element is emitting completion and there are some elements after it, * we to need pass them before completion @@ -953,7 
+972,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: } private class EmittingSingle[T](_out: Outlet[T], elem: T, _previous: OutHandler, _andThen: () => Unit) - extends Emitting(_out, _previous, _andThen) { + extends Emitting(_out, _previous, _andThen) { override def onPull(): Unit = { push(out, elem) @@ -962,7 +981,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: } private class EmittingIterator[T](_out: Outlet[T], elems: Iterator[T], _previous: OutHandler, _andThen: () => Unit) - extends Emitting(_out, _previous, _andThen) { + extends Emitting(_out, _previous, _andThen) { override def onPull(): Unit = { push(out, elems.next()) @@ -972,7 +991,8 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: } } - private class EmittingCompletion[T](_out: Outlet[T], _previous: OutHandler) extends Emitting(_out, _previous, DoNothing) { + private class EmittingCompletion[T](_out: Outlet[T], _previous: OutHandler) + extends Emitting(_out, _previous, DoNothing) { override def onPull(): Unit = complete(out) } @@ -982,8 +1002,10 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: * completion or failure of the given inlet shall lead to operator termination or not. * `doPull` instructs to perform one initial pull on the `from` port. 
*/ - final protected def passAlong[Out, In <: Out](from: Inlet[In], to: Outlet[Out], - doFinish: Boolean = true, doFail: Boolean = true, + final protected def passAlong[Out, In <: Out](from: Inlet[In], + to: Outlet[Out], + doFinish: Boolean = true, + doFail: Boolean = true, doPull: Boolean = false): Unit = { class PassAlongHandler extends InHandler with (() => Unit) { override def apply(): Unit = tryPull(from) @@ -1297,7 +1319,8 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: def hasBeenPulled: Boolean = pulled && !isClosed def grab(): T = { - if (elem == null) throw new IllegalArgumentException(s"cannot grab element from port ($this) when data have not yet arrived") + if (elem == null) + throw new IllegalArgumentException(s"cannot grab element from port ($this) when data have not yet arrived") val ret = elem elem = null.asInstanceOf[T] ret @@ -1425,6 +1448,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: * event to a stream */ trait AsyncCallback[T] { + /** * Dispatch an asynchronous notification. This method is thread-safe and * may be invoked from external execution contexts. @@ -1433,6 +1457,7 @@ trait AsyncCallback[T] { * see [AsyncCallback#invokeWithFeedback]] */ def invoke(t: T): Unit + /** * Dispatch an asynchronous notification. This method is thread-safe and * may be invoked from external execution contexts. 
@@ -1452,7 +1477,7 @@ abstract class TimerGraphStageLogic(_shape: Shape) extends GraphStageLogic(_shap import TimerMessages._ private val keyToTimers = mutable.Map[Any, Timer]() - private val timerIdGen = Iterator from 1 + private val timerIdGen = Iterator.from(1) private var _timerAsyncCallback: AsyncCallback[Scheduled] = _ private def getTimerAsyncCallback: AsyncCallback[Scheduled] = { @@ -1496,10 +1521,9 @@ abstract class TimerGraphStageLogic(_shape: Shape) extends GraphStageLogic(_shap * Any existing timer with the same key will automatically be canceled before * adding the new timer. */ - final protected def schedulePeriodicallyWithInitialDelay( - timerKey: Any, - initialDelay: FiniteDuration, - interval: FiniteDuration): Unit = { + final protected def schedulePeriodicallyWithInitialDelay(timerKey: Any, + initialDelay: FiniteDuration, + interval: FiniteDuration): Unit = { cancelTimer(timerKey) val id = timerIdGen.next() val task = interpreter.materializer.schedulePeriodically(initialDelay, interval, new Runnable { @@ -1514,10 +1538,9 @@ abstract class TimerGraphStageLogic(_shape: Shape) extends GraphStageLogic(_shap * Any existing timer with the same key will automatically be canceled before * adding the new timer. */ - final protected def schedulePeriodicallyWithInitialDelay( - timerKey: Any, - initialDelay: java.time.Duration, - interval: java.time.Duration): Unit = { + final protected def schedulePeriodicallyWithInitialDelay(timerKey: Any, + initialDelay: java.time.Duration, + interval: java.time.Duration): Unit = { import akka.util.JavaDurationConverters._ schedulePeriodicallyWithInitialDelay(timerKey, initialDelay.asScala, interval.asScala) } @@ -1593,6 +1616,7 @@ abstract class TimerGraphStageLogicWithLogging(_shape: Shape) extends TimerGraph * Collection of callbacks for an input port of a [[GraphStage]] */ trait InHandler { + /** * Called when the input port has a new element available. 
The actual element can be retrieved via the * [[GraphStageLogic.grab()]] method. @@ -1617,6 +1641,7 @@ trait InHandler { * Collection of callbacks for an output port of a [[GraphStage]] */ trait OutHandler { + /** * Called when the output port has received a pull, and therefore ready to emit an element, i.e. [[GraphStageLogic.push()]] * is now allowed to be called on this port. @@ -1630,10 +1655,7 @@ trait OutHandler { */ @throws(classOf[Exception]) def onDownstreamFinish(): Unit = { - GraphInterpreter - .currentInterpreter - .activeStage - .completeStage() + GraphInterpreter.currentInterpreter.activeStage.completeStage() } } diff --git a/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/AkkaSSLConfig.scala b/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/AkkaSSLConfig.scala index b3367bd18a..1e3af59d04 100644 --- a/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/AkkaSSLConfig.scala +++ b/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/AkkaSSLConfig.scala @@ -31,7 +31,7 @@ object AkkaSSLConfig extends ExtensionId[AkkaSSLConfig] with ExtensionIdProvider def defaultSSLConfigSettings(system: ActorSystem): SSLConfigSettings = { val akkaOverrides = system.settings.config.getConfig("akka.ssl-config") val defaults = system.settings.config.getConfig("ssl-config") - SSLConfigFactory.parse(akkaOverrides withFallback defaults) + SSLConfigFactory.parse(akkaOverrides.withFallback(defaults)) } } @@ -110,10 +110,12 @@ final class AkkaSSLConfig(system: ExtendedActorSystem, val config: SSLConfigSett def buildHostnameVerifier(conf: SSLConfigSettings): HostnameVerifier = { val clazz: Class[HostnameVerifier] = - if (config.loose.disableHostnameVerification) classOf[DisabledComplainingHostnameVerifier].asInstanceOf[Class[HostnameVerifier]] + if (config.loose.disableHostnameVerification) + classOf[DisabledComplainingHostnameVerifier].asInstanceOf[Class[HostnameVerifier]] else config.hostnameVerifierClass.asInstanceOf[Class[HostnameVerifier]] - val v = 
system.dynamicAccess.createInstanceFor[HostnameVerifier](clazz, Nil) + val v = system.dynamicAccess + .createInstanceFor[HostnameVerifier](clazz, Nil) .orElse(system.dynamicAccess.createInstanceFor[HostnameVerifier](clazz, List(classOf[LoggerFactory] -> mkLogger))) .getOrElse(throw new Exception("Unable to obtain hostname verifier for class: " + clazz)) @@ -140,14 +142,17 @@ final class AkkaSSLConfig(system: ExtendedActorSystem, val config: SSLConfigSett // val disabledKeyAlgorithms = sslConfig.disabledKeyAlgorithms.getOrElse(Algorithms.disabledKeyAlgorithms) // was Option val disabledKeyAlgorithms = sslConfig.disabledKeyAlgorithms.mkString(",") // TODO Sub optimal, we got a Seq... - val constraints = AlgorithmConstraintsParser.parseAll(AlgorithmConstraintsParser.line, disabledKeyAlgorithms).get.toSet + val constraints = + AlgorithmConstraintsParser.parseAll(AlgorithmConstraintsParser.line, disabledKeyAlgorithms).get.toSet val algorithmChecker = new AlgorithmChecker(mkLogger, keyConstraints = constraints, signatureConstraints = Set()) for (cert <- trustManager.getAcceptedIssuers) { try { algorithmChecker.checkKeyAlgorithms(cert) } catch { case e: CertPathValidatorException => - log.warning("You are using ssl-config.default=true and have a weak certificate in your default trust store! (You can modify akka.ssl-config.disabledKeyAlgorithms to remove this message.)", e) + log.warning( + "You are using ssl-config.default=true and have a weak certificate in your default trust store! (You can modify akka.ssl-config.disabledKeyAlgorithms to remove this message.)", + e) } } } @@ -202,7 +207,8 @@ final class AkkaSSLConfig(system: ExtendedActorSystem, val config: SSLConfigSett private def looseDisableSNI(defaultParams: SSLParameters): Unit = if (config.loose.disableSNI) { // this will be logged once for each AkkaSSLConfig - log.warning("You are using ssl-config.loose.disableSNI=true! " + + log.warning( + "You are using ssl-config.loose.disableSNI=true! 
" + "It is strongly discouraged to disable Server Name Indication, as it is crucial to preventing man-in-the-middle attacks.") defaultParams.setServerNames(Collections.emptyList()) diff --git a/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/SSLEngineConfigurator.scala b/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/SSLEngineConfigurator.scala index 3560a7cc28..35e9dd3383 100644 --- a/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/SSLEngineConfigurator.scala +++ b/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/SSLEngineConfigurator.scala @@ -16,8 +16,10 @@ trait SSLEngineConfigurator { def configure(engine: SSLEngine, sslContext: SSLContext): SSLEngine } -final class DefaultSSLEngineConfigurator(config: SSLConfigSettings, enabledProtocols: Array[String], enabledCipherSuites: Array[String]) - extends SSLEngineConfigurator { +final class DefaultSSLEngineConfigurator(config: SSLConfigSettings, + enabledProtocols: Array[String], + enabledCipherSuites: Array[String]) + extends SSLEngineConfigurator { def configure(engine: SSLEngine, sslContext: SSLContext): SSLEngine = { engine.setSSLParameters(sslContext.getDefaultSSLParameters) engine.setEnabledProtocols(enabledProtocols) diff --git a/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/util/AkkaLoggerBridge.scala b/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/util/AkkaLoggerBridge.scala index df302c3e3f..ef21316378 100644 --- a/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/util/AkkaLoggerBridge.scala +++ b/akka-stream/src/main/scala/com/typesafe/sslconfig/akka/util/AkkaLoggerBridge.scala @@ -12,7 +12,8 @@ import com.typesafe.sslconfig.util.{ LoggerFactory, NoDepsLogger } final class AkkaLoggerFactory(system: ActorSystem) extends LoggerFactory { override def apply(clazz: Class[_]): NoDepsLogger = new AkkaLoggerBridge(system.eventStream, clazz) - override def apply(name: String): NoDepsLogger = new AkkaLoggerBridge(system.eventStream, name, 
classOf[DummyClassForStringSources]) + override def apply(name: String): NoDepsLogger = + new AkkaLoggerBridge(system.eventStream, name, classOf[DummyClassForStringSources]) } class AkkaLoggerBridge(bus: EventStream, logSource: String, logClass: Class[_]) extends NoDepsLogger { diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala index 7fd6f82343..21660a7838 100644 --- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala +++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala @@ -10,9 +10,27 @@ import java.lang.ref.WeakReference import java.util.concurrent.locks.ReentrantLock import scala.annotation.tailrec import com.typesafe.config.Config -import akka.actor.{ ActorInitializationException, ExtensionIdProvider, ExtensionId, Extension, ExtendedActorSystem, ActorRef, ActorCell } -import akka.dispatch.{ MessageQueue, MailboxType, TaskInvocation, MessageDispatcherConfigurator, MessageDispatcher, Mailbox, Envelope, DispatcherPrerequisites, DefaultSystemMessageQueue } -import akka.dispatch.sysmsg.{ SystemMessage, Suspend, Resume } +import akka.actor.{ + ActorCell, + ActorInitializationException, + ActorRef, + ExtendedActorSystem, + Extension, + ExtensionId, + ExtensionIdProvider +} +import akka.dispatch.{ + DefaultSystemMessageQueue, + DispatcherPrerequisites, + Envelope, + Mailbox, + MailboxType, + MessageDispatcher, + MessageDispatcherConfigurator, + MessageQueue, + TaskInvocation +} +import akka.dispatch.sysmsg.{ Resume, Suspend, SystemMessage } import scala.concurrent.duration._ import akka.util.Switch import scala.concurrent.duration.Duration @@ -36,9 +54,12 @@ import java.util.concurrent.TimeUnit * System messages always go directly to the actors SystemMessageQueue which isn't thread local. 
*/ -private[testkit] object CallingThreadDispatcherQueues extends ExtensionId[CallingThreadDispatcherQueues] with ExtensionIdProvider { +private[testkit] object CallingThreadDispatcherQueues + extends ExtensionId[CallingThreadDispatcherQueues] + with ExtensionIdProvider { override def lookup = CallingThreadDispatcherQueues - override def createExtension(system: ExtendedActorSystem): CallingThreadDispatcherQueues = new CallingThreadDispatcherQueues + override def createExtension(system: ExtendedActorSystem): CallingThreadDispatcherQueues = + new CallingThreadDispatcherQueues } private[testkit] class CallingThreadDispatcherQueues extends Extension { @@ -46,15 +67,17 @@ private[testkit] class CallingThreadDispatcherQueues extends Extension { // PRIVATE DATA private var queues = Map[CallingThreadMailbox, Set[WeakReference[MessageQueue]]]() - private var lastGC = 0l + private var lastGC = 0L // we have to forget about long-gone threads sometime private def gc(): Unit = { - queues = queues.foldLeft(Map.newBuilder[CallingThreadMailbox, Set[WeakReference[MessageQueue]]]) { - case (m, (k, v)) => - val nv = v filter (_.get ne null) - if (nv.isEmpty) m else m += (k -> nv) - }.result + queues = queues + .foldLeft(Map.newBuilder[CallingThreadMailbox, Set[WeakReference[MessageQueue]]]) { + case (m, (k, v)) => + val nv = v.filter(_.get ne null) + if (nv.isEmpty) m else m += (k -> nv) + } + .result } protected[akka] def registerQueue(mbox: CallingThreadMailbox, q: MessageQueue): Unit = synchronized { @@ -65,7 +88,7 @@ private[testkit] class CallingThreadDispatcherQueues extends Extension { queues += mbox -> Set(new WeakReference(q)) } val now = System.nanoTime - if (now - lastGC > 1000000000l) { + if (now - lastGC > 1000000000L) { lastGC = now gc() } @@ -141,7 +164,9 @@ class CallingThreadDispatcher(_configurator: MessageDispatcherConfigurator) exte protected[akka] override def throughput = 0 protected[akka] override def throughputDeadlineTime = Duration.Zero - protected[akka] 
override def registerForExecution(mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean = false + protected[akka] override def registerForExecution(mbox: Mailbox, + hasMessageHint: Boolean, + hasSystemMessageHint: Boolean): Boolean = false protected[akka] override def shutdownTimeout = 1 second @@ -161,7 +186,7 @@ class CallingThreadDispatcher(_configurator: MessageDispatcherConfigurator) exte case _ => None } super.unregister(actor) - mbox foreach CallingThreadDispatcherQueues(actor.system).unregisterQueues + mbox.foreach(CallingThreadDispatcherQueues(actor.system).unregisterQueues) } protected[akka] override def suspend(actor: ActorCell): Unit = { @@ -220,7 +245,9 @@ class CallingThreadDispatcher(_configurator: MessageDispatcherConfigurator) exte * it is suspendSwitch and resumed. */ @tailrec - private def runQueue(mbox: CallingThreadMailbox, queue: MessageQueue, interruptedEx: InterruptedException = null): Unit = { + private def runQueue(mbox: CallingThreadMailbox, + queue: MessageQueue, + interruptedEx: InterruptedException = null): Unit = { def checkThreadInterruption(intEx: InterruptedException): InterruptedException = { if (Thread.interrupted()) { // clear interrupted flag before we continue, exception will be thrown later val ie = new InterruptedException("Interrupted during message processing") @@ -299,7 +326,7 @@ class CallingThreadDispatcher(_configurator: MessageDispatcherConfigurator) exte } class CallingThreadDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) - extends MessageDispatcherConfigurator(config, prerequisites) { + extends MessageDispatcherConfigurator(config, prerequisites) { private val instance = new CallingThreadDispatcher(this) @@ -307,7 +334,8 @@ class CallingThreadDispatcherConfigurator(config: Config, prerequisites: Dispatc } class CallingThreadMailbox(_receiver: akka.actor.Cell, val mailboxType: MailboxType) - extends Mailbox(null) with DefaultSystemMessageQueue { + extends 
Mailbox(null) + with DefaultSystemMessageQueue { val system = _receiver.system val self = _receiver.self @@ -327,7 +355,8 @@ class CallingThreadMailbox(_receiver: akka.actor.Cell, val mailboxType: MailboxT override val messageQueue: MessageQueue = q.get override def enqueue(receiver: ActorRef, msg: Envelope): Unit = q.get.enqueue(receiver, msg) - override def dequeue(): Envelope = throw new UnsupportedOperationException("CallingThreadMailbox cannot dequeue normally") + override def dequeue(): Envelope = + throw new UnsupportedOperationException("CallingThreadMailbox cannot dequeue normally") override def hasMessages: Boolean = q.get.hasMessages override def numberOfMessages: Int = 0 diff --git a/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala b/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala index 97f2f7a95a..98beee3e82 100644 --- a/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala +++ b/akka-testkit/src/main/scala/akka/testkit/ExplicitlyTriggeredScheduler.scala @@ -30,17 +30,20 @@ import com.typesafe.config.Config * easier, but these tests might fail to catch race conditions that only * happen when tasks are scheduled in parallel in 'real time'. 
*/ -class ExplicitlyTriggeredScheduler(@unused config: Config, log: LoggingAdapter, @unused tf: ThreadFactory) extends Scheduler { +class ExplicitlyTriggeredScheduler(@unused config: Config, log: LoggingAdapter, @unused tf: ThreadFactory) + extends Scheduler { private case class Item(time: Long, interval: Option[FiniteDuration], runnable: Runnable) private val currentTime = new AtomicLong() private val scheduled = new ConcurrentHashMap[Item, Unit]() - override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = + override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)( + implicit executor: ExecutionContext): Cancellable = schedule(initialDelay, Some(interval), runnable) - override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = + override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)( + implicit executor: ExecutionContext): Cancellable = schedule(delay, None, runnable) /** @@ -58,19 +61,16 @@ class ExplicitlyTriggeredScheduler(@unused config: Config, log: LoggingAdapter, val newTime = currentTime.get + amount.toMillis if (log.isDebugEnabled) - log.debug(s"Time proceeds from ${currentTime.get} to $newTime, currently scheduled for this period:" + scheduledTasks(newTime).map(item => s"\n- $item")) + log.debug( + s"Time proceeds from ${currentTime.get} to $newTime, currently scheduled for this period:" + scheduledTasks( + newTime).map(item => s"\n- $item")) executeTasks(newTime) currentTime.set(newTime) } private def scheduledTasks(runTo: Long): Seq[Item] = - scheduled - .keySet() - .asScala - .filter(_.time <= runTo) - .toList - .sortBy(_.time) + scheduled.keySet().asScala.filter(_.time <= runTo).toList.sortBy(_.time) @tailrec private[testkit] final def executeTasks(runTo: Long): Unit = { @@ -89,7 +89,9 @@ class ExplicitlyTriggeredScheduler(@unused config: 
Config, log: LoggingAdapter, } } - private def schedule(initialDelay: FiniteDuration, interval: Option[FiniteDuration], runnable: Runnable): Cancellable = { + private def schedule(initialDelay: FiniteDuration, + interval: Option[FiniteDuration], + runnable: Runnable): Cancellable = { val firstTime = currentTime.get + initialDelay.toMillis val item = Item(firstTime, interval, runnable) log.debug("Scheduled item for {}: {}", firstTime, item) diff --git a/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala b/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala index 0521067e04..173409f1a8 100644 --- a/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala +++ b/akka-testkit/src/main/scala/akka/testkit/SocketUtil.scala @@ -73,28 +73,32 @@ object SocketUtil { def temporaryServerAddress(address: String = RANDOM_LOOPBACK_ADDRESS, udp: Boolean = false): InetSocketAddress = temporaryServerAddresses(1, address, udp).head - def temporaryServerAddresses(numberOfAddresses: Int, hostname: String = RANDOM_LOOPBACK_ADDRESS, udp: Boolean = false): immutable.IndexedSeq[InetSocketAddress] = { - Vector.fill(numberOfAddresses) { + def temporaryServerAddresses(numberOfAddresses: Int, + hostname: String = RANDOM_LOOPBACK_ADDRESS, + udp: Boolean = false): immutable.IndexedSeq[InetSocketAddress] = { + Vector + .fill(numberOfAddresses) { + + val address = hostname match { + case RANDOM_LOOPBACK_ADDRESS => + if (canBindOnAlternativeLoopbackAddresses) s"127.20.${Random.nextInt(256)}.${Random.nextInt(256)}" + else "127.0.0.1" + case other => + other + } + + if (udp) { + val ds = DatagramChannel.open().socket() + ds.bind(new InetSocketAddress(address, 0)) + (ds, new InetSocketAddress(address, ds.getLocalPort)) + } else { + val ss = ServerSocketChannel.open().socket() + ss.bind(new InetSocketAddress(address, 0)) + (ss, new InetSocketAddress(address, ss.getLocalPort)) + } - val address = hostname match { - case RANDOM_LOOPBACK_ADDRESS => - if (canBindOnAlternativeLoopbackAddresses) 
s"127.20.${Random.nextInt(256)}.${Random.nextInt(256)}" - else "127.0.0.1" - case other => - other } - - if (udp) { - val ds = DatagramChannel.open().socket() - ds.bind(new InetSocketAddress(address, 0)) - (ds, new InetSocketAddress(address, ds.getLocalPort)) - } else { - val ss = ServerSocketChannel.open().socket() - ss.bind(new InetSocketAddress(address, 0)) - (ss, new InetSocketAddress(address, ss.getLocalPort)) - } - - } collect { case (socket, address) => socket.close(); address } + .collect { case (socket, address) => socket.close(); address } } def temporaryServerHostnameAndPort(interface: String = RANDOM_LOOPBACK_ADDRESS): (String, Int) = { diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index 7bd908be68..d8ce9b1710 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -18,41 +18,48 @@ import akka.pattern.ask * * @since 1.1 */ -class TestActorRef[T <: Actor]( - _system: ActorSystem, - _props: Props, - _supervisor: ActorRef, - name: String) - extends { - val props = - _props.withDispatcher( - if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id - else _props.dispatcher) - val dispatcher = _system.dispatchers.lookup(props.dispatcher) - private val disregard = _supervisor match { - case l: LocalActorRef => l.underlying.reserveChild(name) - case r: RepointableActorRef => r.underlying match { - case _: UnstartedCell => throw new IllegalStateException("cannot attach a TestActor to an unstarted top-level actor, ensure that it is started by sending a message and observing the reply") - case c: ActorCell => c.reserveChild(name) - case o => _system.log.error("trying to attach child {} to unknown type of supervisor cell {}, this is not going to end well", name, o.getClass) +class TestActorRef[T <: Actor](_system: ActorSystem, _props: Props, _supervisor: ActorRef, 
name: String) extends { + val props = + _props.withDispatcher( + if (_props.deploy.dispatcher == Deploy.NoDispatcherGiven) CallingThreadDispatcher.Id + else _props.dispatcher) + val dispatcher = _system.dispatchers.lookup(props.dispatcher) + private val disregard = _supervisor match { + case l: LocalActorRef => l.underlying.reserveChild(name) + case r: RepointableActorRef => + r.underlying match { + case _: UnstartedCell => + throw new IllegalStateException( + "cannot attach a TestActor to an unstarted top-level actor, ensure that it is started by sending a message and observing the reply") + case c: ActorCell => c.reserveChild(name) + case o => + _system.log.error( + "trying to attach child {} to unknown type of supervisor cell {}, this is not going to end well", + name, + o.getClass) } - case s => _system.log.error("trying to attach child {} to unknown type of supervisor {}, this is not going to end well", name, s.getClass) - } - } with LocalActorRef( - _system.asInstanceOf[ActorSystemImpl], - props, - dispatcher, - _system.mailboxes.getMailboxType(props, dispatcher.configurator.config), - _supervisor.asInstanceOf[InternalActorRef], - _supervisor.path / name) { + case s => + _system.log.error("trying to attach child {} to unknown type of supervisor {}, this is not going to end well", + name, + s.getClass) + } +} with LocalActorRef(_system.asInstanceOf[ActorSystemImpl], + props, + dispatcher, + _system.mailboxes.getMailboxType(props, dispatcher.configurator.config), + _supervisor.asInstanceOf[InternalActorRef], + _supervisor.path / name) { // we need to start ourselves since the creation of an actor has been split into initialization and starting underlying.start() import TestActorRef.InternalGetActor - protected override def newActorCell(system: ActorSystemImpl, ref: InternalActorRef, props: Props, - dispatcher: MessageDispatcher, supervisor: InternalActorRef): ActorCell = + protected override def newActorCell(system: ActorSystemImpl, + ref: InternalActorRef, + 
props: Props, + dispatcher: MessageDispatcher, + supervisor: InternalActorRef): ActorCell = new ActorCell(system, ref, props, dispatcher, supervisor) { override def autoReceiveMessage(msg: Envelope): Unit = { msg.message match { @@ -74,10 +81,12 @@ class TestActorRef[T <: Actor]( * thrown will be available to you, while still being able to use * become/unbecome. */ - def receive(o: Any, sender: ActorRef): Unit = try { - underlying.currentMessage = Envelope(o, if (sender eq null) underlying.system.deadLetters else sender, underlying.system) - underlying.receiveMessage(o) - } finally underlying.currentMessage = null + def receive(o: Any, sender: ActorRef): Unit = + try { + underlying.currentMessage = + Envelope(o, if (sender eq null) underlying.system.deadLetters else sender, underlying.system) + underlying.receiveMessage(o) + } finally underlying.currentMessage = null /** * Retrieve reference to the underlying actor, where the static type matches the factory used inside the @@ -127,9 +136,11 @@ object TestActorRef { "$" + akka.util.Helpers.base64(l) } - def apply[T <: Actor: ClassTag](factory: => T)(implicit system: ActorSystem): TestActorRef[T] = apply[T](Props(factory), randomName) + def apply[T <: Actor: ClassTag](factory: => T)(implicit system: ActorSystem): TestActorRef[T] = + apply[T](Props(factory), randomName) - def apply[T <: Actor: ClassTag](factory: => T, name: String)(implicit system: ActorSystem): TestActorRef[T] = apply[T](Props(factory), name) + def apply[T <: Actor: ClassTag](factory: => T, name: String)(implicit system: ActorSystem): TestActorRef[T] = + apply[T](Props(factory), name) def apply[T <: Actor](props: Props)(implicit system: ActorSystem): TestActorRef[T] = apply[T](props, randomName) @@ -141,7 +152,8 @@ object TestActorRef { new TestActorRef(sysImpl, props, supervisor.asInstanceOf[InternalActorRef], randomName) } - def apply[T <: Actor](props: Props, supervisor: ActorRef, name: String)(implicit system: ActorSystem): TestActorRef[T] = { + 
def apply[T <: Actor](props: Props, supervisor: ActorRef, name: String)( + implicit system: ActorSystem): TestActorRef[T] = { val sysImpl = system.asInstanceOf[ActorSystemImpl] new TestActorRef(sysImpl, props, supervisor.asInstanceOf[InternalActorRef], name) } @@ -149,40 +161,62 @@ object TestActorRef { def apply[T <: Actor](implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = apply[T](randomName) private def dynamicCreateRecover[U]: PartialFunction[Throwable, U] = { - case exception => throw ActorInitializationException( - null, - "Could not instantiate Actor" + - "\nMake sure Actor is NOT defined inside a class/trait," + - "\nif so put it outside the class/trait, f.e. in a companion object," + - "\nOR try to change: 'actorOf(Props[MyActor]' to 'actorOf(Props(new MyActor)'.", exception) + case exception => + throw ActorInitializationException(null, + "Could not instantiate Actor" + + "\nMake sure Actor is NOT defined inside a class/trait," + + "\nif so put it outside the class/trait, f.e. 
in a companion object," + + "\nOR try to change: 'actorOf(Props[MyActor]' to 'actorOf(Props(new MyActor)'.", + exception) } - def apply[T <: Actor](name: String)(implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = apply[T](Props({ - system.asInstanceOf[ExtendedActorSystem].dynamicAccess - .createInstanceFor[T](t.runtimeClass, Nil).recover(dynamicCreateRecover).get - }), name) + def apply[T <: Actor](name: String)(implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = + apply[T](Props({ + system + .asInstanceOf[ExtendedActorSystem] + .dynamicAccess + .createInstanceFor[T](t.runtimeClass, Nil) + .recover(dynamicCreateRecover) + .get + }), name) - def apply[T <: Actor](supervisor: ActorRef)(implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = apply[T](Props({ - system.asInstanceOf[ExtendedActorSystem].dynamicAccess - .createInstanceFor[T](t.runtimeClass, Nil).recover(dynamicCreateRecover).get - }), supervisor) + def apply[T <: Actor](supervisor: ActorRef)(implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = + apply[T](Props({ + system + .asInstanceOf[ExtendedActorSystem] + .dynamicAccess + .createInstanceFor[T](t.runtimeClass, Nil) + .recover(dynamicCreateRecover) + .get + }), supervisor) - def apply[T <: Actor](supervisor: ActorRef, name: String)(implicit t: ClassTag[T], system: ActorSystem): TestActorRef[T] = apply[T](Props({ - system.asInstanceOf[ExtendedActorSystem] - .dynamicAccess.createInstanceFor[T](t.runtimeClass, Nil).recover(dynamicCreateRecover).get - }), supervisor, name) + def apply[T <: Actor](supervisor: ActorRef, name: String)(implicit t: ClassTag[T], + system: ActorSystem): TestActorRef[T] = + apply[T]( + Props({ + system + .asInstanceOf[ExtendedActorSystem] + .dynamicAccess + .createInstanceFor[T](t.runtimeClass, Nil) + .recover(dynamicCreateRecover) + .get + }), + supervisor, + name) /** * Java API: create a TestActorRef in the given system for the given props, * with the given supervisor and name. 
*/ - def create[T <: Actor](system: ActorSystem, props: Props, supervisor: ActorRef, name: String): TestActorRef[T] = apply(props, supervisor, name)(system) + def create[T <: Actor](system: ActorSystem, props: Props, supervisor: ActorRef, name: String): TestActorRef[T] = + apply(props, supervisor, name)(system) /** * Java API: create a TestActorRef in the given system for the given props, * with the given supervisor and a random name. */ - def create[T <: Actor](system: ActorSystem, props: Props, supervisor: ActorRef): TestActorRef[T] = apply(props, supervisor)(system) + def create[T <: Actor](system: ActorSystem, props: Props, supervisor: ActorRef): TestActorRef[T] = + apply(props, supervisor)(system) /** * Java API: create a TestActorRef in the given system for the given props, diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActors.scala b/akka-testkit/src/main/scala/akka/testkit/TestActors.scala index 087b391085..aa91ec5ebd 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActors.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActors.scala @@ -4,7 +4,7 @@ package akka.testkit -import akka.actor.{ Props, Actor, ActorRef } +import akka.actor.{ Actor, ActorRef, Props } /** * A collection of common actor patterns used in tests. 
@@ -36,7 +36,7 @@ object TestActors { */ class ForwardActor(ref: ActorRef) extends Actor { override def receive = { - case message => ref forward message + case message => ref.forward(message) } } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestBarrier.scala b/akka-testkit/src/main/scala/akka/testkit/TestBarrier.scala index cbc91dc02e..d611a54605 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestBarrier.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestBarrier.scala @@ -33,8 +33,8 @@ class TestBarrier(count: Int) { barrier.await(timeout.dilated.toNanos, TimeUnit.NANOSECONDS) } catch { case _: TimeoutException => - throw new TestBarrierTimeoutException("Timeout of %s and time factor of %s" - format (timeout.toString, TestKitExtension(system).TestTimeFactor)) + throw new TestBarrierTimeoutException( + "Timeout of %s and time factor of %s".format(timeout.toString, TestKitExtension(system).TestTimeFactor)) } } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala index 2e923596d0..f6ba42410e 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala @@ -8,9 +8,9 @@ import scala.util.matching.Regex import scala.collection.immutable import scala.concurrent.duration.Duration import scala.reflect.ClassTag -import akka.actor.{ DeadLetter, ActorSystem, UnhandledMessage } +import akka.actor.{ ActorSystem, DeadLetter, UnhandledMessage } import akka.dispatch.sysmsg.{ SystemMessage, Terminate } -import akka.event.Logging.{ Warning, LogEvent, InitializeLogger, Info, Error, Debug, LoggerInitialized } +import akka.event.Logging.{ Debug, Error, Info, InitializeLogger, LogEvent, LoggerInitialized, Warning } import akka.event.Logging import akka.actor.NoSerializationVerificationNeeded import akka.japi.Util.immutableSeq @@ -43,6 +43,7 @@ object TestEvent { def apply(filter: 
EventFilter, filters: EventFilter*): Mute = new Mute(filter +: filters.to(immutable.Seq)) } final case class Mute(filters: immutable.Seq[EventFilter]) extends TestEvent with NoSerializationVerificationNeeded { + /** * Java API: create a Mute command from a list of filters */ @@ -51,7 +52,10 @@ object TestEvent { object UnMute { def apply(filter: EventFilter, filters: EventFilter*): UnMute = new UnMute(filter +: filters.to(immutable.Seq)) } - final case class UnMute(filters: immutable.Seq[EventFilter]) extends TestEvent with NoSerializationVerificationNeeded { + final case class UnMute(filters: immutable.Seq[EventFilter]) + extends TestEvent + with NoSerializationVerificationNeeded { + /** * Java API: create an UnMute command from a list of filters */ @@ -96,17 +100,16 @@ abstract class EventFilter(occurrences: Int) { * `occurrences` parameter specifies. */ def assertDone(max: Duration): Unit = - assert( - awaitDone(max), - if (todo > 0) s"$todo messages outstanding on $this" - else s"received ${-todo} excess messages on $this") + assert(awaitDone(max), + if (todo > 0) s"$todo messages outstanding on $this" + else s"received ${-todo} excess messages on $this") /** * Apply this filter while executing the given code block. Care is taken to * remove the filter when the block is finished or aborted. 
*/ def intercept[T](code: => T)(implicit system: ActorSystem): T = { - system.eventStream publish TestEvent.Mute(this) + system.eventStream.publish(TestEvent.Mute(this)) val leeway = TestKitExtension(system).TestEventFilterLeeway.dilated try { val result = code @@ -116,7 +119,7 @@ abstract class EventFilter(occurrences: Int) { else throw new AssertionError(s"received ${-todo} excess messages on $this") result - } finally system.eventStream publish TestEvent.UnMute(this) + } finally system.eventStream.publish(TestEvent.UnMute(this)) } /* @@ -125,16 +128,17 @@ abstract class EventFilter(occurrences: Int) { protected val source: Option[String] = None protected val message: Either[String, Regex] = Left("") protected val complete: Boolean = false + /** * internal implementation helper, no guaranteed API */ protected def doMatch(src: String, msg: Any) = { val msgstr = if (msg != null) msg.toString else "null" (source.isDefined && source.get == src || source.isEmpty) && - (message match { - case Left(s) => if (complete) msgstr == s else msgstr.startsWith(s) - case Right(p) => p.findFirstIn(msgstr).isDefined - }) + (message match { + case Left(s) => if (complete) msgstr == s else msgstr.startsWith(s) + case Right(p) => p.findFirstIn(msgstr).isDefined + }) } } @@ -172,18 +176,28 @@ object EventFilter { * `null` does NOT work (passing `null` disables the * source filter).'' */ - def apply[A <: Throwable: ClassTag](message: String = null, source: String = null, start: String = "", pattern: String = null, occurrences: Int = Int.MaxValue): EventFilter = - ErrorFilter(implicitly[ClassTag[A]].runtimeClass, Option(source), - if (message ne null) Left(message) else Option(pattern) map (new Regex(_)) toRight start, - message ne null)(occurrences) + def apply[A <: Throwable: ClassTag](message: String = null, + source: String = null, + start: String = "", + pattern: String = null, + occurrences: Int = Int.MaxValue): EventFilter = + ErrorFilter(implicitly[ClassTag[A]].runtimeClass, + 
Option(source), + if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start), + message ne null)(occurrences) /** * Create a filter for Error events. See apply() for more details. */ - def error(message: String = null, source: String = null, start: String = "", pattern: String = null, occurrences: Int = Int.MaxValue): EventFilter = - ErrorFilter(Logging.Error.NoCause.getClass, Option(source), - if (message ne null) Left(message) else Option(pattern) map (new Regex(_)) toRight start, - message ne null)(occurrences) + def error(message: String = null, + source: String = null, + start: String = "", + pattern: String = null, + occurrences: Int = Int.MaxValue): EventFilter = + ErrorFilter(Logging.Error.NoCause.getClass, + Option(source), + if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start), + message ne null)(occurrences) /** * Create a filter for Warning events. Give up to one of start and pattern: @@ -199,11 +213,14 @@ object EventFilter { * `null` does NOT work (passing `null` disables the * source filter).'' */ - def warning(message: String = null, source: String = null, start: String = "", pattern: String = null, occurrences: Int = Int.MaxValue): EventFilter = - WarningFilter( - Option(source), - if (message ne null) Left(message) else Option(pattern) map (new Regex(_)) toRight start, - message ne null)(occurrences) + def warning(message: String = null, + source: String = null, + start: String = "", + pattern: String = null, + occurrences: Int = Int.MaxValue): EventFilter = + WarningFilter(Option(source), + if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start), + message ne null)(occurrences) /** * Create a filter for Info events. 
Give up to one of start and pattern: @@ -219,11 +236,14 @@ object EventFilter { * `null` does NOT work (passing `null` disables the * source filter).'' */ - def info(message: String = null, source: String = null, start: String = "", pattern: String = null, occurrences: Int = Int.MaxValue): EventFilter = - InfoFilter( - Option(source), - if (message ne null) Left(message) else Option(pattern) map (new Regex(_)) toRight start, - message ne null)(occurrences) + def info(message: String = null, + source: String = null, + start: String = "", + pattern: String = null, + occurrences: Int = Int.MaxValue): EventFilter = + InfoFilter(Option(source), + if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start), + message ne null)(occurrences) /** * Create a filter for Debug events. Give up to one of start and pattern: @@ -239,11 +259,14 @@ object EventFilter { * `null` does NOT work (passing `null` disables the * source filter).'' */ - def debug(message: String = null, source: String = null, start: String = "", pattern: String = null, occurrences: Int = Int.MaxValue): EventFilter = - DebugFilter( - Option(source), - if (message ne null) Left(message) else Option(pattern) map (new Regex(_)) toRight start, - message ne null)(occurrences) + def debug(message: String = null, + source: String = null, + start: String = "", + pattern: String = null, + occurrences: Int = Int.MaxValue): EventFilter = + DebugFilter(Option(source), + if (message ne null) Left(message) else Option(pattern).map(new Regex(_)).toRight(start), + message ne null)(occurrences) /** * Create a custom event filter. The filter will affect those events for @@ -274,17 +297,17 @@ object EventFilter { * * If you want to match all Error events, the most efficient is to use Left(""). 
*/ -final case class ErrorFilter( - throwable: Class[_], - override val source: Option[String], - override val message: Either[String, Regex], - override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) { +final case class ErrorFilter(throwable: Class[_], + override val source: Option[String], + override val message: Either[String, Regex], + override val complete: Boolean)(occurrences: Int) + extends EventFilter(occurrences) { def matches(event: LogEvent) = { event match { - case Error(cause, src, _, msg) if (throwable eq Error.NoCause.getClass) || (throwable isInstance cause) => + case Error(cause, src, _, msg) if (throwable eq Error.NoCause.getClass) || (throwable.isInstance(cause)) => (msg == null && cause.getMessage == null && cause.getStackTrace.length == 0) || - doMatch(src, msg) || doMatch(src, cause.getMessage) + doMatch(src, msg) || doMatch(src, cause.getMessage) case _ => false } } @@ -304,12 +327,18 @@ final case class ErrorFilter( * @param complete * whether the event’s message must match the given message string or pattern completely */ - def this(throwable: Class[_], source: String, message: String, pattern: Boolean, complete: Boolean, occurrences: Int) = - this(throwable, Option(source), - if (message eq null) Left("") - else if (pattern) Right(new Regex(message)) - else Left(message), - complete)(occurrences) + def this(throwable: Class[_], + source: String, + message: String, + pattern: Boolean, + complete: Boolean, + occurrences: Int) = + this(throwable, + Option(source), + if (message eq null) Left("") + else if (pattern) Right(new Regex(message)) + else Left(message), + complete)(occurrences) /** * Java API: filter only on the given type of exception @@ -326,10 +355,10 @@ final case class ErrorFilter( * * If you want to match all Warning events, the most efficient is to use Left(""). 
*/ -final case class WarningFilter( - override val source: Option[String], - override val message: Either[String, Regex], - override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) { +final case class WarningFilter(override val source: Option[String], + override val message: Either[String, Regex], + override val complete: Boolean)(occurrences: Int) + extends EventFilter(occurrences) { def matches(event: LogEvent) = { event match { @@ -354,12 +383,11 @@ final case class WarningFilter( * whether the event’s message must match the given message string or pattern completely */ def this(source: String, message: String, pattern: Boolean, complete: Boolean, occurrences: Int) = - this( - Option(source), - if (message eq null) Left("") - else if (pattern) Right(new Regex(message)) - else Left(message), - complete)(occurrences) + this(Option(source), + if (message eq null) Left("") + else if (pattern) Right(new Regex(message)) + else Left(message), + complete)(occurrences) } /** @@ -370,10 +398,10 @@ final case class WarningFilter( * * If you want to match all Info events, the most efficient is to use Left(""). 
*/ -final case class InfoFilter( - override val source: Option[String], - override val message: Either[String, Regex], - override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) { +final case class InfoFilter(override val source: Option[String], + override val message: Either[String, Regex], + override val complete: Boolean)(occurrences: Int) + extends EventFilter(occurrences) { def matches(event: LogEvent) = { event match { @@ -398,12 +426,11 @@ final case class InfoFilter( * whether the event’s message must match the given message string or pattern completely */ def this(source: String, message: String, pattern: Boolean, complete: Boolean, occurrences: Int) = - this( - Option(source), - if (message eq null) Left("") - else if (pattern) Right(new Regex(message)) - else Left(message), - complete)(occurrences) + this(Option(source), + if (message eq null) Left("") + else if (pattern) Right(new Regex(message)) + else Left(message), + complete)(occurrences) } /** @@ -414,10 +441,10 @@ final case class InfoFilter( * * If you want to match all Debug events, the most efficient is to use Left(""). 
*/ -final case class DebugFilter( - override val source: Option[String], - override val message: Either[String, Regex], - override val complete: Boolean)(occurrences: Int) extends EventFilter(occurrences) { +final case class DebugFilter(override val source: Option[String], + override val message: Either[String, Regex], + override val complete: Boolean)(occurrences: Int) + extends EventFilter(occurrences) { def matches(event: LogEvent) = { event match { @@ -442,12 +469,11 @@ final case class DebugFilter( * whether the event’s message must match the given message string or pattern completely */ def this(source: String, message: String, pattern: Boolean, complete: Boolean, occurrences: Int) = - this( - Option(source), - if (message eq null) Left("") - else if (pattern) Right(new Regex(message)) - else Left(message), - complete)(occurrences) + this(Option(source), + if (message eq null) Left("") + else if (pattern) Right(new Regex(message)) + else Left(message), + complete)(occurrences) } /** @@ -455,7 +481,8 @@ final case class DebugFilter( * * If the partial function is defined and returns true, filter the event. */ -final case class CustomEventFilter(test: PartialFunction[LogEvent, Boolean])(occurrences: Int) extends EventFilter(occurrences) { +final case class CustomEventFilter(test: PartialFunction[LogEvent, Boolean])(occurrences: Int) + extends EventFilter(occurrences) { def matches(event: LogEvent) = { test.isDefinedAt(event) && test(event) } @@ -465,6 +492,7 @@ object DeadLettersFilter { def apply[T](implicit t: ClassTag[T]): DeadLettersFilter = new DeadLettersFilter(t.runtimeClass.asInstanceOf[Class[T]])(Int.MaxValue) } + /** * Filter which matches DeadLetter events, if the wrapped message conforms to the * given type. 
@@ -473,7 +501,7 @@ final case class DeadLettersFilter(val messageClass: Class[_])(occurrences: Int) def matches(event: LogEvent) = { event match { - case Warning(_, _, msg) => BoxedType(messageClass) isInstance msg + case Warning(_, _, msg) => BoxedType(messageClass).isInstance(msg) case _ => false } } @@ -499,10 +527,11 @@ class TestEventListener extends Logging.DefaultLogger { override def receive = { case InitializeLogger(bus) => - Seq(classOf[Mute], classOf[UnMute], classOf[DeadLetter], classOf[UnhandledMessage]) foreach (bus.subscribe(context.self, _)) + Seq(classOf[Mute], classOf[UnMute], classOf[DeadLetter], classOf[UnhandledMessage]) + .foreach(bus.subscribe(context.self, _)) sender() ! LoggerInitialized - case Mute(filters) => filters foreach addFilter - case UnMute(filters) => filters foreach removeFilter + case Mute(filters) => filters.foreach(addFilter) + case UnMute(filters) => filters.foreach(removeFilter) case event: LogEvent => if (!filter(event)) print(event) case DeadLetter(msg, snd, rcp) => if (!msg.isInstanceOf[Terminate]) { @@ -522,7 +551,11 @@ class TestEventListener extends Logging.DefaultLogger { case m => print(Debug(context.system.name, this.getClass, m)) } - def filter(event: LogEvent): Boolean = filters exists (f => try { f(event) } catch { case _: Exception => false }) + def filter(event: LogEvent): Boolean = + filters.exists(f => + try { + f(event) + } catch { case _: Exception => false }) def addFilter(filter: EventFilter): Unit = filters ::= filter diff --git a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala index 1f69ebdf97..4d0a1f255a 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala @@ -32,12 +32,9 @@ import scala.reflect.ClassTag * * @since 1.2 */ -class TestFSMRef[S, D, T <: Actor]( - system: ActorSystem, - props: Props, - supervisor: ActorRef, - name: String)(implicit ev: T 
<:< FSM[S, D]) - extends TestActorRef[T](system, props, supervisor, name) { +class TestFSMRef[S, D, T <: Actor](system: ActorSystem, props: Props, supervisor: ActorRef, name: String)( + implicit ev: T <:< FSM[S, D]) + extends TestActorRef[T](system, props, supervisor, name) { private def fsm: T = underlyingActor @@ -57,7 +54,10 @@ class TestFSMRef[S, D, T <: Actor]( * corresponding transition initiated from within the FSM, including timeout * and stop handling. */ - def setState(stateName: S = fsm.stateName, stateData: D = fsm.stateData, timeout: FiniteDuration = null, stopReason: Option[FSM.Reason] = None): Unit = { + def setState(stateName: S = fsm.stateName, + stateData: D = fsm.stateData, + timeout: FiniteDuration = null, + stopReason: Option[FSM.Reason] = None): Unit = { fsm.applyState(FSM.State(stateName, stateData, Option(timeout), stopReason)) } @@ -86,22 +86,28 @@ class TestFSMRef[S, D, T <: Actor]( object TestFSMRef { - def apply[S, D, T <: Actor: ClassTag](factory: => T)(implicit ev: T <:< FSM[S, D], system: ActorSystem): TestFSMRef[S, D, T] = { + def apply[S, D, T <: Actor: ClassTag](factory: => T)(implicit ev: T <:< FSM[S, D], + system: ActorSystem): TestFSMRef[S, D, T] = { val impl = system.asInstanceOf[ActorSystemImpl] new TestFSMRef(impl, Props(factory), impl.guardian.asInstanceOf[InternalActorRef], TestActorRef.randomName) } - def apply[S, D, T <: Actor: ClassTag](factory: => T, name: String)(implicit ev: T <:< FSM[S, D], system: ActorSystem): TestFSMRef[S, D, T] = { + def apply[S, D, T <: Actor: ClassTag](factory: => T, name: String)(implicit ev: T <:< FSM[S, D], + system: ActorSystem): TestFSMRef[S, D, T] = { val impl = system.asInstanceOf[ActorSystemImpl] new TestFSMRef(impl, Props(factory), impl.guardian.asInstanceOf[InternalActorRef], name) } - def apply[S, D, T <: Actor: ClassTag](factory: => T, supervisor: ActorRef, name: String)(implicit ev: T <:< FSM[S, D], system: ActorSystem): TestFSMRef[S, D, T] = { + def apply[S, D, T <: Actor: 
ClassTag](factory: => T, supervisor: ActorRef, name: String)( + implicit ev: T <:< FSM[S, D], + system: ActorSystem): TestFSMRef[S, D, T] = { val impl = system.asInstanceOf[ActorSystemImpl] new TestFSMRef(impl, Props(factory), supervisor.asInstanceOf[InternalActorRef], name) } - def apply[S, D, T <: Actor: ClassTag](factory: => T, supervisor: ActorRef)(implicit ev: T <:< FSM[S, D], system: ActorSystem): TestFSMRef[S, D, T] = { + def apply[S, D, T <: Actor: ClassTag](factory: => T, supervisor: ActorRef)( + implicit ev: T <:< FSM[S, D], + system: ActorSystem): TestFSMRef[S, D, T] = { val impl = system.asInstanceOf[ActorSystemImpl] new TestFSMRef(impl, Props(factory), supervisor.asInstanceOf[InternalActorRef], TestActorRef.randomName) } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index 2d7292351d..5b72aef4d0 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -45,7 +45,8 @@ object TestActor { final case class Watch(ref: ActorRef) extends NoSerializationVerificationNeeded final case class UnWatch(ref: ActorRef) extends NoSerializationVerificationNeeded final case class SetAutoPilot(ap: AutoPilot) extends NoSerializationVerificationNeeded - final case class Spawn(props: Props, name: Option[String] = None, strategy: Option[SupervisorStrategy] = None) extends NoSerializationVerificationNeeded { + final case class Spawn(props: Props, name: Option[String] = None, strategy: Option[SupervisorStrategy] = None) + extends NoSerializationVerificationNeeded { def apply(context: ActorRefFactory): ActorRef = name match { case Some(n) => context.actorOf(props, n) case None => context.actorOf(props) @@ -80,11 +81,20 @@ object TestActor { delegates -= child } - override def processFailure(context: ActorContext, restart: Boolean, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): 
Unit = { + override def processFailure(context: ActorContext, + restart: Boolean, + child: ActorRef, + cause: Throwable, + stats: ChildRestartStats, + children: Iterable[ChildRestartStats]): Unit = { delegate(child).processFailure(context, restart, child, cause, stats, children) } - override def handleFailure(context: ActorContext, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Boolean = { + override def handleFailure(context: ActorContext, + child: ActorRef, + cause: Throwable, + stats: ChildRestartStats, + children: Iterable[ChildRestartStats]): Boolean = { delegate(child).handleFailure(context, child, cause, stats, children) } } @@ -116,13 +126,15 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor { case KeepRunning => autopilot case other => other } - val observe = ignore map (ignoreFunc => !ignoreFunc.applyOrElse(x, FALSE)) getOrElse true + val observe = ignore.map(ignoreFunc => !ignoreFunc.applyOrElse(x, FALSE)).getOrElse(true) if (observe) queue.offerLast(RealMessage(x, sender())) } override def postStop() = { import scala.collection.JavaConverters._ - queue.asScala foreach { m => context.system.deadLetters.tell(DeadLetter(m.msg, m.sender, self), m.sender) } + queue.asScala.foreach { m => + context.system.deadLetters.tell(DeadLetter(m.msg, m.sender, self), m.sender) + } } } @@ -143,7 +155,7 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor { */ trait TestKitBase { - import TestActor.{ Message, RealMessage, NullMessage, Spawn } + import TestActor.{ Message, NullMessage, RealMessage, Spawn } implicit val system: ActorSystem val testKitSettings = TestKitExtension(system) @@ -164,10 +176,8 @@ trait TestKitBase { */ val testActor: ActorRef = { val impl = system.asInstanceOf[ExtendedActorSystem] - val ref = impl.systemActorOf( - TestActor.props(queue) - .withDispatcher(CallingThreadDispatcher.Id), - "%s-%d".format(testActorName, 
TestKit.testActorId.incrementAndGet)) + val ref = impl.systemActorOf(TestActor.props(queue).withDispatcher(CallingThreadDispatcher.Id), + "%s-%d".format(testActorName, TestKit.testActorId.incrementAndGet)) awaitCond(ref match { case r: RepointableRef => r.isStarted case _ => true @@ -270,7 +280,10 @@ trait TestKitBase { * Note that the timeout is scaled using Duration.dilated, * which uses the configuration entry "akka.test.timefactor". */ - def awaitCond(p: => Boolean, max: Duration = Duration.Undefined, interval: Duration = 100.millis, message: String = ""): Unit = { + def awaitCond(p: => Boolean, + max: Duration = Duration.Undefined, + interval: Duration = 100.millis, + message: String = ""): Unit = { val _max = remainingOrDilated(max) val stop = now + _max @@ -357,7 +370,8 @@ trait TestKitBase { val prev_end = end end = start + max_diff - val ret = try f finally end = prev_end + val ret = try f + finally end = prev_end val diff = now - start assert(min <= diff, s"block took ${format(min.unit, diff)}, should at least have been $min") @@ -464,7 +478,8 @@ trait TestKitBase { * @return result of applying partial function to the last received message, * i.e. the first one for which the partial function is defined */ - def fishForSpecificMessage[T](max: Duration = Duration.Undefined, hint: String = "")(f: PartialFunction[Any, T]): T = { + def fishForSpecificMessage[T](max: Duration = Duration.Undefined, hint: String = "")( + f: PartialFunction[Any, T]): T = { val _max = remainingOrDilated(max) val end = now + _max @tailrec @@ -479,7 +494,8 @@ trait TestKitBase { /** * Same as `expectMsgType[T](remainingOrDefault)`, but correctly treating the timeFactor. 
*/ - def expectMsgType[T](implicit t: ClassTag[T]): T = expectMsgClass_internal(remainingOrDefault, t.runtimeClass.asInstanceOf[Class[T]]) + def expectMsgType[T](implicit t: ClassTag[T]): T = + expectMsgClass_internal(remainingOrDefault, t.runtimeClass.asInstanceOf[Class[T]]) /** * Receive one message from the test actor and assert that it conforms to the @@ -488,7 +504,8 @@ trait TestKitBase { * * @return the received object */ - def expectMsgType[T](max: FiniteDuration)(implicit t: ClassTag[T]): T = expectMsgClass_internal(max.dilated, t.runtimeClass.asInstanceOf[Class[T]]) + def expectMsgType[T](max: FiniteDuration)(implicit t: ClassTag[T]): T = + expectMsgClass_internal(max.dilated, t.runtimeClass.asInstanceOf[Class[T]]) /** * Same as `expectMsgClass(remainingOrDefault, c)`, but correctly treating the timeFactor. @@ -507,7 +524,7 @@ trait TestKitBase { private def expectMsgClass_internal[C](max: FiniteDuration, c: Class[C]): C = { val o = receiveOne(max) assert(o ne null, s"timeout ($max) during expectMsgClass waiting for $c") - assert(BoxedType(c) isInstance o, s"expected $c, found ${o.getClass} ($o)") + assert(BoxedType(c).isInstance(o), s"expected $c, found ${o.getClass} ($o)") o.asInstanceOf[C] } @@ -528,7 +545,7 @@ trait TestKitBase { private def expectMsgAnyOf_internal[T](max: FiniteDuration, obj: T*): T = { val o = receiveOne(max) assert(o ne null, s"timeout ($max) during expectMsgAnyOf waiting for ${obj.mkString("(", ", ", ")")}") - assert(obj exists (_ == o), s"found unexpected $o") + assert(obj.exists(_ == o), s"found unexpected $o") o.asInstanceOf[T] } @@ -544,12 +561,13 @@ trait TestKitBase { * * @return the received object */ - def expectMsgAnyClassOf[C](max: FiniteDuration, obj: Class[_ <: C]*): C = expectMsgAnyClassOf_internal(max.dilated, obj: _*) + def expectMsgAnyClassOf[C](max: FiniteDuration, obj: Class[_ <: C]*): C = + expectMsgAnyClassOf_internal(max.dilated, obj: _*) private def expectMsgAnyClassOf_internal[C](max: FiniteDuration, obj: 
Class[_ <: C]*): C = { val o = receiveOne(max) assert(o ne null, s"timeout ($max) during expectMsgAnyClassOf waiting for ${obj.mkString("(", ", ", ")")}") - assert(obj exists (c => BoxedType(c) isInstance o), s"found unexpected $o") + assert(obj.exists(c => BoxedType(c).isInstance(o)), s"found unexpected $o") o.asInstanceOf[C] } @@ -573,18 +591,19 @@ trait TestKitBase { */ def expectMsgAllOf[T](max: FiniteDuration, obj: T*): immutable.Seq[T] = expectMsgAllOf_internal(max.dilated, obj: _*) - private def checkMissingAndUnexpected(missing: Seq[Any], unexpected: Seq[Any], - missingMessage: String, unexpectedMessage: String): Unit = { - assert( - missing.isEmpty && unexpected.isEmpty, - (if (missing.isEmpty) "" else missing.mkString(missingMessage + " [", ", ", "] ")) + - (if (unexpected.isEmpty) "" else unexpected.mkString(unexpectedMessage + " [", ", ", "]"))) + private def checkMissingAndUnexpected(missing: Seq[Any], + unexpected: Seq[Any], + missingMessage: String, + unexpectedMessage: String): Unit = { + assert(missing.isEmpty && unexpected.isEmpty, + (if (missing.isEmpty) "" else missing.mkString(missingMessage + " [", ", ", "] ")) + + (if (unexpected.isEmpty) "" else unexpected.mkString(unexpectedMessage + " [", ", ", "]"))) } private def expectMsgAllOf_internal[T](max: FiniteDuration, obj: T*): immutable.Seq[T] = { val recv = receiveN_internal(obj.size, max) - val missing = obj filterNot (x => recv exists (x == _)) - val unexpected = recv filterNot (x => obj exists (x == _)) + val missing = obj.filterNot(x => recv.exists(x == _)) + val unexpected = recv.filterNot(x => obj.exists(x == _)) checkMissingAndUnexpected(missing, unexpected, "not found", "found unexpected") recv.asInstanceOf[immutable.Seq[T]] } @@ -592,7 +611,8 @@ trait TestKitBase { /** * Same as `expectMsgAllClassOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. 
*/ - def expectMsgAllClassOf[T](obj: Class[_ <: T]*): immutable.Seq[T] = internalExpectMsgAllClassOf(remainingOrDefault, obj: _*) + def expectMsgAllClassOf[T](obj: Class[_ <: T]*): immutable.Seq[T] = + internalExpectMsgAllClassOf(remainingOrDefault, obj: _*) /** * Receive a number of messages from the test actor matching the given @@ -602,12 +622,13 @@ trait TestKitBase { * Wait time is bounded by the given duration, with an AssertionFailure * being thrown in case of timeout. */ - def expectMsgAllClassOf[T](max: FiniteDuration, obj: Class[_ <: T]*): immutable.Seq[T] = internalExpectMsgAllClassOf(max.dilated, obj: _*) + def expectMsgAllClassOf[T](max: FiniteDuration, obj: Class[_ <: T]*): immutable.Seq[T] = + internalExpectMsgAllClassOf(max.dilated, obj: _*) private def internalExpectMsgAllClassOf[T](max: FiniteDuration, obj: Class[_ <: T]*): immutable.Seq[T] = { val recv = receiveN_internal(obj.size, max) - val missing = obj filterNot (x => recv exists (_.getClass eq BoxedType(x))) - val unexpected = recv filterNot (x => obj exists (c => BoxedType(c) eq x.getClass)) + val missing = obj.filterNot(x => recv.exists(_.getClass eq BoxedType(x))) + val unexpected = recv.filterNot(x => obj.exists(c => BoxedType(c) eq x.getClass)) checkMissingAndUnexpected(missing, unexpected, "not found", "found non-matching object(s)") recv.asInstanceOf[immutable.Seq[T]] } @@ -615,7 +636,8 @@ trait TestKitBase { /** * Same as `expectMsgAllConformingOf(remainingOrDefault, obj...)`, but correctly treating the timeFactor. 
*/ - def expectMsgAllConformingOf[T](obj: Class[_ <: T]*): immutable.Seq[T] = internalExpectMsgAllConformingOf(remainingOrDefault, obj: _*) + def expectMsgAllConformingOf[T](obj: Class[_ <: T]*): immutable.Seq[T] = + internalExpectMsgAllConformingOf(remainingOrDefault, obj: _*) /** * Receive a number of messages from the test actor matching the given @@ -628,12 +650,13 @@ trait TestKitBase { * Beware that one object may satisfy all given class constraints, which * may be counter-intuitive. */ - def expectMsgAllConformingOf[T](max: FiniteDuration, obj: Class[_ <: T]*): immutable.Seq[T] = internalExpectMsgAllConformingOf(max.dilated, obj: _*) + def expectMsgAllConformingOf[T](max: FiniteDuration, obj: Class[_ <: T]*): immutable.Seq[T] = + internalExpectMsgAllConformingOf(max.dilated, obj: _*) private def internalExpectMsgAllConformingOf[T](max: FiniteDuration, obj: Class[_ <: T]*): immutable.Seq[T] = { val recv = receiveN_internal(obj.size, max) - val missing = obj filterNot (x => recv exists (BoxedType(x) isInstance _)) - val unexpected = recv filterNot (x => obj exists (c => BoxedType(c) isInstance x)) + val missing = obj.filterNot(x => recv.exists(BoxedType(x).isInstance(_))) + val unexpected = recv.filterNot(x => obj.exists(c => BoxedType(c).isInstance(x))) checkMissingAndUnexpected(missing, unexpected, "not found", "found non-matching object(s)") recv.asInstanceOf[immutable.Seq[T]] } @@ -677,9 +700,7 @@ trait TestKitBase { while (left.toNanos > 0 && elem == null) { //Use of (left / 2) gives geometric series limited by finish time similar to (1/2)^n limited by 1, //so it is very precise - Thread.sleep( - pollInterval.toMillis min (left / 2).toMillis - ) + Thread.sleep(pollInterval.toMillis min (left / 2).toMillis) left = leftNow if (left.toNanos > 0) { elem = queue.peekFirst() @@ -718,7 +739,8 @@ trait TestKitBase { * assert(series == (1 to 7).toList) * }}} */ - def receiveWhile[T](max: Duration = Duration.Undefined, idle: Duration = Duration.Inf, messages: Int = 
Int.MaxValue)(f: PartialFunction[AnyRef, T]): immutable.Seq[T] = { + def receiveWhile[T](max: Duration = Duration.Undefined, idle: Duration = Duration.Inf, messages: Int = Int.MaxValue)( + f: PartialFunction[AnyRef, T]): immutable.Seq[T] = { val stop = now + remainingOrDilated(max) var msg: Message = NullMessage @@ -731,7 +753,7 @@ trait TestKitBase { case NullMessage => lastMessage = msg acc.reverse - case RealMessage(o, _) if (f isDefinedAt o) => + case RealMessage(o, _) if f.isDefinedAt(o) => msg = lastMessage doit(f(o) :: acc, count + 1) case RealMessage(_, _) => @@ -800,10 +822,9 @@ trait TestKitBase { * * If verifySystemShutdown is true, then an exception will be thrown on failure. */ - def shutdown( - actorSystem: ActorSystem = system, - duration: Duration = 10.seconds.dilated.min(10.seconds), - verifySystemShutdown: Boolean = false): Unit = { + def shutdown(actorSystem: ActorSystem = system, + duration: Duration = 10.seconds.dilated.min(10.seconds), + verifySystemShutdown: Boolean = false): Unit = { TestKit.shutdownActorSystem(actorSystem, duration, verifySystemShutdown) } @@ -933,15 +954,16 @@ object TestKit { * * If verifySystemShutdown is true, then an exception will be thrown on failure. 
*/ - def shutdownActorSystem( - actorSystem: ActorSystem, - duration: Duration = 10.seconds, - verifySystemShutdown: Boolean = false): Unit = { + def shutdownActorSystem(actorSystem: ActorSystem, + duration: Duration = 10.seconds, + verifySystemShutdown: Boolean = false): Unit = { actorSystem.terminate() - try Await.ready(actorSystem.whenTerminated, duration) catch { + try Await.ready(actorSystem.whenTerminated, duration) + catch { case _: TimeoutException => - val msg = "Failed to stop [%s] within [%s] \n%s".format(actorSystem.name, duration, - actorSystem.asInstanceOf[ActorSystemImpl].printTree) + val msg = "Failed to stop [%s] within [%s] \n%s".format(actorSystem.name, + duration, + actorSystem.asInstanceOf[ActorSystemImpl].printTree) if (verifySystemShutdown) throw new RuntimeException(msg) else println(msg) } @@ -1014,13 +1036,17 @@ trait DefaultTimeout { this: TestKitBase => * by client code directly. */ @deprecated(message = "The only usage is in JavaTestKit which is deprecated.", since = "2.5.0") -private[testkit] abstract class CachingPartialFunction[A, B <: AnyRef] extends scala.runtime.AbstractPartialFunction[A, B] { +private[testkit] abstract class CachingPartialFunction[A, B <: AnyRef] + extends scala.runtime.AbstractPartialFunction[A, B] { import akka.japi.JavaPartialFunction._ @throws(classOf[Exception]) def `match`(x: A): B var cache: B = _ - final def isDefinedAt(x: A): Boolean = try { cache = `match`(x); true } catch { case NoMatch => cache = null.asInstanceOf[B]; false } + final def isDefinedAt(x: A): Boolean = + try { + cache = `match`(x); true + } catch { case NoMatch => cache = null.asInstanceOf[B]; false } final override def apply(x: A): B = cache } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKitExtension.scala b/akka-testkit/src/main/scala/akka/testkit/TestKitExtension.scala index ba374f72dd..e9120e7aca 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKitExtension.scala +++ 
b/akka-testkit/src/main/scala/akka/testkit/TestKitExtension.scala @@ -6,7 +6,7 @@ package akka.testkit import com.typesafe.config.Config import akka.util.Timeout -import akka.actor.{ ExtensionId, ActorSystem, Extension, ExtendedActorSystem } +import akka.actor.{ ActorSystem, ExtendedActorSystem, Extension, ExtensionId } import scala.concurrent.duration.FiniteDuration object TestKitExtension extends ExtensionId[TestKitSettings] { @@ -18,8 +18,9 @@ class TestKitSettings(val config: Config) extends Extension { import akka.util.Helpers._ - val TestTimeFactor = config.getDouble("akka.test.timefactor"). - requiring(tf => !tf.isInfinite && tf > 0, "akka.test.timefactor must be positive finite double") + val TestTimeFactor = config + .getDouble("akka.test.timefactor") + .requiring(tf => !tf.isInfinite && tf > 0, "akka.test.timefactor must be positive finite double") val SingleExpectDefaultTimeout: FiniteDuration = config.getMillisDuration("akka.test.single-expect-default") val TestEventFilterLeeway: FiniteDuration = config.getMillisDuration("akka.test.filter-leeway") val DefaultTimeout: Timeout = Timeout(config.getMillisDuration("akka.test.default-timeout")) diff --git a/akka-testkit/src/main/scala/akka/testkit/TestLatch.scala b/akka-testkit/src/main/scala/akka/testkit/TestLatch.scala index b5c352a4e2..c9964e4af4 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestLatch.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestLatch.scala @@ -6,8 +6,8 @@ package akka.testkit import scala.concurrent.duration.Duration import akka.actor.ActorSystem -import scala.concurrent.{ CanAwait, Awaitable } -import java.util.concurrent.{ TimeoutException, CountDownLatch, TimeUnit } +import scala.concurrent.{ Awaitable, CanAwait } +import java.util.concurrent.{ CountDownLatch, TimeUnit, TimeoutException } import scala.concurrent.duration.FiniteDuration /** @@ -38,8 +38,9 @@ class TestLatch(count: Int = 1)(implicit system: ActorSystem) extends Awaitable[ case _ => throw new 
IllegalArgumentException("TestLatch does not support waiting for " + atMost) } val opened = latch.await(waitTime.dilated.toNanos, TimeUnit.NANOSECONDS) - if (!opened) throw new TimeoutException( - "Timeout of %s with time factor of %s" format (atMost.toString, TestKitExtension(system).TestTimeFactor)) + if (!opened) + throw new TimeoutException( + "Timeout of %s with time factor of %s".format(atMost.toString, TestKitExtension(system).TestTimeFactor)) this } @throws(classOf[Exception]) @@ -47,4 +48,3 @@ class TestLatch(count: Int = 1)(implicit system: ActorSystem) extends Awaitable[ ready(atMost) } } - diff --git a/akka-testkit/src/main/scala/akka/testkit/TestMessageSerializer.scala b/akka-testkit/src/main/scala/akka/testkit/TestMessageSerializer.scala index 8a81b1e66e..77cbfdcb9c 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestMessageSerializer.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestMessageSerializer.scala @@ -7,7 +7,6 @@ package akka.testkit /** * Copyright (C) 2009-2018 Lightbend Inc. 
*/ - import java.io.{ ByteArrayInputStream, ByteArrayOutputStream, ObjectOutputStream } import akka.actor.ExtendedActorSystem @@ -43,4 +42,3 @@ class TestMessageSerializer(val system: ExtendedActorSystem) extends BaseSeriali obj } } - diff --git a/akka-testkit/src/main/scala/akka/testkit/javadsl/EventFilter.scala b/akka-testkit/src/main/scala/akka/testkit/javadsl/EventFilter.scala index f5175ac284..904f03dffd 100644 --- a/akka-testkit/src/main/scala/akka/testkit/javadsl/EventFilter.scala +++ b/akka-testkit/src/main/scala/akka/testkit/javadsl/EventFilter.scala @@ -12,10 +12,8 @@ import akka.testkit.{ DebugFilter, ErrorFilter, InfoFilter, WarningFilter } class EventFilter(clazz: Class[_], system: ActorSystem) { - require( - classOf[Throwable].isAssignableFrom(clazz) || classOf[Logging.LogEvent].isAssignableFrom(clazz), - "supplied class must either be LogEvent or Throwable" - ) + require(classOf[Throwable].isAssignableFrom(clazz) || classOf[Logging.LogEvent].isAssignableFrom(clazz), + "supplied class must either be LogEvent or Throwable") private val _clazz: Class[_ <: Logging.LogEvent] = if (classOf[Throwable].isAssignableFrom(clazz)) diff --git a/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala index 12775ecad9..5d0a6def7b 100644 --- a/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/javadsl/TestKit.scala @@ -60,7 +60,8 @@ class TestKit(system: ActorSystem) { def duration(s: String): FiniteDuration = { Duration.apply(s) match { case fd: FiniteDuration => fd - case _ => throw new IllegalArgumentException("duration() is only for finite durations, use Duration.Inf() and friends") + case _ => + throw new IllegalArgumentException("duration() is only for finite durations, use Duration.Inf() and friends") } } @@ -427,7 +428,8 @@ class TestKit(system: ActorSystem) { * * @return an arbitrary value that would be returned from awaitAssert 
if successful, if not interested in such value you can return null. */ - def awaitAssert[A](max: java.time.Duration, interval: java.time.Duration, a: Supplier[A]): A = tp.awaitAssert(a.get, max.asScala, interval.asScala) + def awaitAssert[A](max: java.time.Duration, interval: java.time.Duration, a: Supplier[A]): A = + tp.awaitAssert(a.get, max.asScala, interval.asScala) /** * Same as `expectMsg(remainingOrDefault, obj)`, but correctly treating the timeFactor. @@ -771,9 +773,10 @@ class TestKit(system: ActorSystem) { @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.13") def receiveWhile[T](max: Duration, idle: Duration, messages: Int, f: JFunction[AnyRef, T]): JList[T] = { tp.receiveWhile(max, idle, messages)(new CachingPartialFunction[AnyRef, T] { - @throws(classOf[Exception]) - override def `match`(x: AnyRef): T = f.apply(x) - }).asJava + @throws(classOf[Exception]) + override def `match`(x: AnyRef): T = f.apply(x) + }) + .asJava } /** @@ -788,27 +791,33 @@ class TestKit(system: ActorSystem) { * certain characteristics are generated at a certain rate: * */ - def receiveWhile[T](max: java.time.Duration, idle: java.time.Duration, messages: Int, f: JFunction[AnyRef, T]): JList[T] = { + def receiveWhile[T](max: java.time.Duration, + idle: java.time.Duration, + messages: Int, + f: JFunction[AnyRef, T]): JList[T] = { tp.receiveWhile(max.asScala, idle.asScala, messages)(new CachingPartialFunction[AnyRef, T] { - @throws(classOf[Exception]) - override def `match`(x: AnyRef): T = f.apply(x) - }).asJava + @throws(classOf[Exception]) + override def `match`(x: AnyRef): T = f.apply(x) + }) + .asJava } @Deprecated @deprecated("Use the overloaded one which accepts java.time.Duration instead.", since = "2.5.13") def receiveWhile[T](max: Duration, f: JFunction[AnyRef, T]): JList[T] = { tp.receiveWhile(max = max)(new CachingPartialFunction[AnyRef, T] { - @throws(classOf[Exception]) - override def `match`(x: AnyRef): T = f.apply(x) - 
}).asJava + @throws(classOf[Exception]) + override def `match`(x: AnyRef): T = f.apply(x) + }) + .asJava } def receiveWhile[T](max: java.time.Duration, f: JFunction[AnyRef, T]): JList[T] = { tp.receiveWhile(max = max.asScala)(new CachingPartialFunction[AnyRef, T] { - @throws(classOf[Exception]) - override def `match`(x: AnyRef): T = f.apply(x) - }).asJava + @throws(classOf[Exception]) + override def `match`(x: AnyRef): T = f.apply(x) + }) + .asJava } /** @@ -843,10 +852,7 @@ object TestKit { * * If verifySystemShutdown is true, then an exception will be thrown on failure. */ - def shutdownActorSystem( - actorSystem: ActorSystem, - duration: Duration, - verifySystemShutdown: Boolean): Unit = { + def shutdownActorSystem(actorSystem: ActorSystem, duration: Duration, verifySystemShutdown: Boolean): Unit = { akka.testkit.TestKit.shutdownActorSystem(actorSystem, duration, verifySystemShutdown) } @@ -900,6 +906,9 @@ private abstract class CachingPartialFunction[A, B] extends scala.runtime.Abstra def `match`(x: A): B var cache: B = _ - final def isDefinedAt(x: A): Boolean = try { cache = `match`(x); true } catch { case NoMatch => cache = null.asInstanceOf[B]; false } + final def isDefinedAt(x: A): Boolean = + try { + cache = `match`(x); true + } catch { case NoMatch => cache = null.asInstanceOf[B]; false } final override def apply(x: A): B = cache } diff --git a/akka-testkit/src/main/scala/akka/testkit/package.scala b/akka-testkit/src/main/scala/akka/testkit/package.scala index 83ff4507aa..7e0b55b3a1 100644 --- a/akka-testkit/src/main/scala/akka/testkit/package.scala +++ b/akka-testkit/src/main/scala/akka/testkit/package.scala @@ -22,7 +22,9 @@ package object testkit { val testKitSettings = TestKitExtension(system) val stop = now + testKitSettings.TestEventFilterLeeway.dilated.toMillis - val failed = eventFilters filterNot (_.awaitDone(Duration(stop - now, MILLISECONDS))) map ("Timeout (" + testKitSettings.TestEventFilterLeeway.dilated + ") waiting for " + _) + val failed 
= eventFilters + .filterNot(_.awaitDone(Duration(stop - now, MILLISECONDS))) + .map("Timeout (" + testKitSettings.TestEventFilterLeeway.dilated + ") waiting for " + _) if (failed.nonEmpty) throw new AssertionError("Filter completion error:\n" + failed.mkString("\n")) @@ -32,9 +34,11 @@ package object testkit { } } - def filterEvents[T](eventFilters: EventFilter*)(block: => T)(implicit system: ActorSystem): T = filterEvents(eventFilters.toSeq)(block) + def filterEvents[T](eventFilters: EventFilter*)(block: => T)(implicit system: ActorSystem): T = + filterEvents(eventFilters.toSeq)(block) - def filterException[T <: Throwable](block: => Unit)(implicit system: ActorSystem, t: ClassTag[T]): Unit = EventFilter[T]() intercept (block) + def filterException[T <: Throwable](block: => Unit)(implicit system: ActorSystem, t: ClassTag[T]): Unit = + EventFilter[T]().intercept(block) /** * Scala API. Scale timeouts (durations) during tests with the configured diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index f1823a8772..e8a8e6aa07 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -45,11 +45,13 @@ object AkkaSpec { } def getCallerName(clazz: Class[_]): String = { - val s = (Thread.currentThread.getStackTrace map (_.getClassName) drop 1) - .dropWhile(_ matches "(java.lang.Thread|.*AkkaSpec.*|.*\\.StreamSpec.*|.*MultiNodeSpec.*|.*\\.Abstract.*)") + val s = Thread.currentThread.getStackTrace + .map(_.getClassName) + .drop(1) + .dropWhile(_.matches("(java.lang.Thread|.*AkkaSpec.*|.*\\.StreamSpec.*|.*MultiNodeSpec.*|.*\\.Abstract.*)")) val reduced = s.lastIndexWhere(_ == clazz.getName) match { case -1 => s - case z => s drop (z + 1) + case z => s.drop(z + 1) } reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_") } @@ -57,14 +59,18 @@ object AkkaSpec { } abstract class AkkaSpec(_system: ActorSystem) - 
extends TestKit(_system) with WordSpecLike with Matchers with BeforeAndAfterAll with WatchedByCoroner - with TypeCheckedTripleEquals with ScalaFutures { + extends TestKit(_system) + with WordSpecLike + with Matchers + with BeforeAndAfterAll + with WatchedByCoroner + with TypeCheckedTripleEquals + with ScalaFutures { implicit val patience = PatienceConfig(testKitSettings.DefaultTimeout.duration, Span(100, Millis)) - def this(config: Config) = this(ActorSystem( - AkkaSpec.getCallerName(getClass), - ConfigFactory.load(config.withFallback(AkkaSpec.testConf)))) + def this(config: Config) = + this(ActorSystem(AkkaSpec.getCallerName(getClass), ConfigFactory.load(config.withFallback(AkkaSpec.testConf)))) def this(s: String) = this(ConfigFactory.parseString(s)) @@ -104,7 +110,7 @@ abstract class AkkaSpec(_system: ActorSystem) def mute(clazz: Class[_]): Unit = sys.eventStream.publish(Mute(DeadLettersFilter(clazz)(occurrences = Int.MaxValue))) if (messageClasses.isEmpty) mute(classOf[AnyRef]) - else messageClasses foreach mute + else messageClasses.foreach(mute) } // for ScalaTest === compare of Class objects diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala index 84bf1b0079..88e2d418ed 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpecSpec.scala @@ -23,7 +23,7 @@ class AkkaSpecSpec extends WordSpec with Matchers { implicit val system = ActorSystem("AkkaSpec0", AkkaSpec.testConf) try { val a = system.actorOf(Props.empty) - EventFilter.warning(start = "unhandled message", occurrences = 1) intercept { + EventFilter.warning(start = "unhandled message", occurrences = 1).intercept { a ! 
42 } } finally { @@ -34,15 +34,16 @@ class AkkaSpecSpec extends WordSpec with Matchers { "terminate all actors" in { // verbose config just for demonstration purposes, please leave in in case of debugging import scala.collection.JavaConverters._ - val conf = Map( - "akka.actor.debug.lifecycle" -> true, "akka.actor.debug.event-stream" -> true, - "akka.loglevel" -> "DEBUG", "akka.stdout-loglevel" -> "DEBUG") + val conf = Map("akka.actor.debug.lifecycle" -> true, + "akka.actor.debug.event-stream" -> true, + "akka.loglevel" -> "DEBUG", + "akka.stdout-loglevel" -> "DEBUG") val system = ActorSystem("AkkaSpec1", ConfigFactory.parseMap(conf.asJava).withFallback(AkkaSpec.testConf)) var refs = Seq.empty[ActorRef] val spec = new AkkaSpec(system) { refs = Seq(testActor, system.actorOf(Props.empty, "name")) } - refs foreach (_.isTerminated should not be true) + refs.foreach(_.isTerminated should not be true) TestKit.shutdownActorSystem(system) - spec.awaitCond(refs forall (_.isTerminated), 2 seconds) + spec.awaitCond(refs.forall(_.isTerminated), 2 seconds) } "stop correctly when sending PoisonPill to rootGuardian" in { @@ -74,10 +75,10 @@ class AkkaSpecSpec extends WordSpec with Matchers { val probe = new TestProbe(system) probe.ref.tell(42, davyJones) /* - * this will ensure that the message is actually received, otherwise it - * may happen that the system.stop() suspends the testActor before it had - * a chance to put the message into its private queue - */ + * this will ensure that the message is actually received, otherwise it + * may happen that the system.stop() suspends the testActor before it had + * a chance to put the message into its private queue + */ probe.receiveWhile(1 second) { case null => } diff --git a/akka-testkit/src/test/scala/akka/testkit/Coroner.scala b/akka-testkit/src/test/scala/akka/testkit/Coroner.scala index e8b1f1f66b..1647984054 100644 --- a/akka-testkit/src/test/scala/akka/testkit/Coroner.scala +++ 
b/akka-testkit/src/test/scala/akka/testkit/Coroner.scala @@ -7,8 +7,8 @@ package akka.testkit import java.io.PrintStream import java.lang.management.{ ManagementFactory, ThreadInfo } import java.util.Date -import java.util.concurrent.{ TimeoutException, CountDownLatch } -import scala.concurrent.{ Promise, Awaitable, CanAwait, Await } +import java.util.concurrent.{ CountDownLatch, TimeoutException } +import scala.concurrent.{ Await, Awaitable, CanAwait, Promise } import scala.concurrent.duration._ import scala.util.control.NonFatal @@ -31,14 +31,14 @@ object Coroner { * The result of this Awaitable will be `true` if it has been cancelled. */ trait WatchHandle extends Awaitable[Boolean] { + /** * Will try to ensure that the Coroner has finished reporting. */ def cancel(): Unit } - private class WatchHandleImpl(startAndStopDuration: FiniteDuration) - extends WatchHandle { + private class WatchHandleImpl(startAndStopDuration: FiniteDuration) extends WatchHandle { val cancelPromise = Promise[Boolean] val startedLatch = new CountDownLatch(1) val finishedLatch = new CountDownLatch(1) @@ -64,7 +64,9 @@ object Coroner { } override def result(atMost: Duration)(implicit permit: CanAwait): Boolean = - try { Await.result(cancelPromise.future, atMost) } catch { case _: TimeoutException => false } + try { + Await.result(cancelPromise.future, atMost) + } catch { case _: TimeoutException => false } } @@ -77,9 +79,11 @@ object Coroner { * If displayThreadCounts is set to true, then the Coroner will print thread counts during start * and stop. 
*/ - def watch(duration: FiniteDuration, reportTitle: String, out: PrintStream, + def watch(duration: FiniteDuration, + reportTitle: String, + out: PrintStream, startAndStopDuration: FiniteDuration = defaultStartAndStopDuration, - displayThreadCounts: Boolean = false): WatchHandle = { + displayThreadCounts: Boolean = false): WatchHandle = { val watchedHandle = new WatchHandleImpl(startAndStopDuration) @@ -95,7 +99,8 @@ object Coroner { if (!Await.result(watchedHandle, duration)) { watchedHandle.expired() out.println(s"Coroner not cancelled after ${duration.toMillis}ms. Looking for signs of foul play...") - try printReport(reportTitle, out) catch { + try printReport(reportTitle, out) + catch { case NonFatal(ex) => { out.println("Error displaying Coroner's Report") ex.printStackTrace(out) @@ -105,7 +110,8 @@ object Coroner { } finally { if (displayThreadCounts) { val endThreads = threadMx.getThreadCount - out.println(s"Coroner Thread Count started at $startThreads, ended at $endThreads, peaked at ${threadMx.getPeakThreadCount} in $reportTitle") + out.println( + s"Coroner Thread Count started at $startThreads, ended at $endThreads, peaked at ${threadMx.getPeakThreadCount} in $reportTitle") } out.flush() watchedHandle.finished() @@ -137,9 +143,7 @@ object Coroner { #Non-heap usage: ${memMx.getNonHeapMemoryUsage()}""".stripMargin('#')) def dumpAllThreads: Seq[ThreadInfo] = { - threadMx.dumpAllThreads( - threadMx.isObjectMonitorUsageSupported, - threadMx.isSynchronizerUsageSupported) + threadMx.dumpAllThreads(threadMx.isObjectMonitorUsageSupported, threadMx.isSynchronizerUsageSupported) } def findDeadlockedThreads: (Seq[ThreadInfo], String) = { @@ -252,8 +256,11 @@ trait WatchedByCoroner { @volatile private var coronerWatch: Coroner.WatchHandle = _ final def startCoroner(): Unit = { - coronerWatch = Coroner.watch(expectedTestDuration.dilated, getClass.getName, System.err, - startAndStopDuration.dilated, displayThreadCounts) + coronerWatch = 
Coroner.watch(expectedTestDuration.dilated, + getClass.getName, + System.err, + startAndStopDuration.dilated, + displayThreadCounts) } final def stopCoroner(): Unit = { diff --git a/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala b/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala index c2c9e479ed..dead329de3 100644 --- a/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/CoronerSpec.scala @@ -52,7 +52,7 @@ class CoronerSpec extends WordSpec with Matchers { report should include("Coroner Thread Count starts at ") report should include("Coroner Thread Count started at ") report should include("XXXX") - report should not include ("Coroner's Report") + (report should not).include("Coroner's Report") } "display deadlock information in its report" in { @@ -69,7 +69,9 @@ class CoronerSpec extends WordSpec with Matchers { val ready = new Semaphore(0) val proceed = new Semaphore(0) val t = new Thread(new Runnable { - def run = try recursiveLock(initialLocks) catch { case _: InterruptedException => () } + def run = + try recursiveLock(initialLocks) + catch { case _: InterruptedException => () } def recursiveLock(locks: List[ReentrantLock]): Unit = { locks match { @@ -130,8 +132,8 @@ class CoronerSpec extends WordSpec with Matchers { report should include(sectionHeading) val deadlockSection = report.split(sectionHeading)(1) deadlockSection should include("None") - deadlockSection should not include ("deadlock-thread-a") - deadlockSection should not include ("deadlock-thread-b") + (deadlockSection should not).include("deadlock-thread-a") + (deadlockSection should not).include("deadlock-thread-b") } } diff --git a/akka-testkit/src/test/scala/akka/testkit/DefaultTimeoutSpec.scala b/akka-testkit/src/test/scala/akka/testkit/DefaultTimeoutSpec.scala index 8ccc942e69..b263d23336 100644 --- a/akka-testkit/src/test/scala/akka/testkit/DefaultTimeoutSpec.scala +++ 
b/akka-testkit/src/test/scala/akka/testkit/DefaultTimeoutSpec.scala @@ -4,12 +4,11 @@ package akka.testkit -import org.scalatest.{ WordSpec, BeforeAndAfterAll } +import org.scalatest.{ BeforeAndAfterAll, WordSpec } import org.scalatest.Matchers import akka.actor.ActorSystem -class DefaultTimeoutSpec - extends WordSpec with Matchers with BeforeAndAfterAll with TestKitBase with DefaultTimeout { +class DefaultTimeoutSpec extends WordSpec with Matchers with BeforeAndAfterAll with TestKitBase with DefaultTimeout { implicit lazy val system = ActorSystem("AkkaCustomSpec") diff --git a/akka-testkit/src/test/scala/akka/testkit/ImplicitSenderSpec.scala b/akka-testkit/src/test/scala/akka/testkit/ImplicitSenderSpec.scala index 6f9ac18531..7aeef416a6 100644 --- a/akka-testkit/src/test/scala/akka/testkit/ImplicitSenderSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/ImplicitSenderSpec.scala @@ -4,12 +4,11 @@ package akka.testkit -import org.scalatest.{ WordSpec, BeforeAndAfterAll } +import org.scalatest.{ BeforeAndAfterAll, WordSpec } import org.scalatest.Matchers import akka.actor.ActorSystem -class ImplicitSenderSpec - extends WordSpec with Matchers with BeforeAndAfterAll with TestKitBase with ImplicitSender { +class ImplicitSenderSpec extends WordSpec with Matchers with BeforeAndAfterAll with TestKitBase with ImplicitSender { implicit lazy val system = ActorSystem("AkkaCustomSpec") diff --git a/akka-testkit/src/test/scala/akka/testkit/JavaTestKitSpec.scala b/akka-testkit/src/test/scala/akka/testkit/JavaTestKitSpec.scala index 31dcbc8574..086b43e8c2 100644 --- a/akka-testkit/src/test/scala/akka/testkit/JavaTestKitSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/JavaTestKitSpec.scala @@ -36,7 +36,7 @@ class JavaTestKitSpec extends AkkaSpec with DefaultTimeout { val actor = system.actorOf(Props(new Actor { def receive = { case _ => } })) watch(actor) - system stop actor + system.stop(actor) expectTerminated(actor).existenceConfirmed should ===(true) 
watch(actor) diff --git a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala index 99554bae45..1d6a8c655b 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala @@ -8,7 +8,7 @@ import language.{ postfixOps } import org.scalatest.{ BeforeAndAfterEach } import akka.actor._ import akka.event.Logging.Warning -import scala.concurrent.{ Promise, Await } +import scala.concurrent.{ Await, Promise } import scala.concurrent.duration._ import akka.pattern.ask import akka.dispatch.Dispatcher @@ -57,7 +57,7 @@ object TestActorRefSpec { def receiveT = { case "work" => sender() ! "workDone" - context stop self + context.stop(self) case replyTo: Promise[_] => replyTo.asInstanceOf[Promise[Any]].success("complexReply") case replyTo: ActorRef => replyTo ! "complexReply" } @@ -90,11 +90,11 @@ object TestActorRefSpec { } class ReceiveTimeoutActor(target: ActorRef) extends Actor { - context setReceiveTimeout 1.second + context.setReceiveTimeout(1.second) def receive = { case ReceiveTimeout => target ! 
"timeout" - context stop self + context.stop(self) } } @@ -112,7 +112,7 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA override def beforeEach(): Unit = otherthread = null - private def assertThread(): Unit = otherthread should (be(null) or equal(thread)) + private def assertThread(): Unit = otherthread should (be(null).or(equal(thread))) "A TestActorRef should be an ActorRef, hence it" must { @@ -168,13 +168,13 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA } "stop when sent a poison pill" in { - EventFilter[ActorKilledException]() intercept { + EventFilter[ActorKilledException]().intercept { val a = TestActorRef(Props[WorkerActor]) system.actorOf(Props(new Actor { context.watch(a) def receive = { - case t: Terminated => testActor forward WrappedTerminated(t) - case x => testActor forward x + case t: Terminated => testActor.forward(WrappedTerminated(t)) + case x => testActor.forward(x) } })) a.!(PoisonPill)(testActor) @@ -187,7 +187,7 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA } "restart when Kill:ed" in { - EventFilter[ActorKilledException]() intercept { + EventFilter[ActorKilledException]().intercept { counter = 2 val boss = TestActorRef(Props(new TActor { @@ -242,7 +242,7 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA "set receiveTimeout to None" in { val a = TestActorRef[WorkerActor] - a.underlyingActor.context.receiveTimeout should be theSameInstanceAs Duration.Undefined + (a.underlyingActor.context.receiveTimeout should be).theSameInstanceAs(Duration.Undefined) } "set CallingThreadDispatcher" in { @@ -269,7 +269,7 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA } "not throw an exception when parent is passed in the apply" in { - EventFilter[RuntimeException](occurrences = 1, message = "expected") intercept { + EventFilter[RuntimeException](occurrences = 1, message = 
"expected").intercept { val parent = TestProbe() val child = TestActorRef(Props(new Actor { def receive: Receive = { @@ -282,7 +282,7 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA } } "not throw an exception when child is created through childActorOf" in { - EventFilter[RuntimeException](occurrences = 1, message = "expected") intercept { + EventFilter[RuntimeException](occurrences = 1, message = "expected").intercept { val parent = TestProbe() val child = parent.childActorOf(Props(new Actor { def receive: Receive = { diff --git a/akka-testkit/src/test/scala/akka/testkit/TestEventListenerSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestEventListenerSpec.scala index 2186ff9b8d..07819866d6 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestEventListenerSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestEventListenerSpec.scala @@ -69,5 +69,6 @@ class TestEventListenerSpec extends AkkaSpec with ImplicitSender { private def errorWithCause(cause: Throwable) = Error(cause, self.path.toString, this.getClass, "this is an error") private def warningNoCause = Warning(self.path.toString, this.getClass, "this is a warning") - private def warningWithCause(cause: Throwable) = Warning(cause, self.path.toString, this.getClass, "this is a warning", Logging.emptyMDC) + private def warningWithCause(cause: Throwable) = + Warning(cause, self.path.toString, this.getClass, "this is a warning", Logging.emptyMDC) } diff --git a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala index bee0124c6f..e6ede5fa3f 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala @@ -17,11 +17,11 @@ class TestFSMRefSpec extends AkkaSpec { val fsm = TestFSMRef(new Actor with FSM[Int, String] { startWith(1, "") when(1) { - case Event("go", _) => goto(2) using "go" - case 
Event(StateTimeout, _) => goto(2) using "timeout" + case Event("go", _) => goto(2).using("go") + case Event(StateTimeout, _) => goto(2).using("timeout") } when(2) { - case Event("back", _) => goto(1) using "back" + case Event("back", _) => goto(1).using("back") } }, "test-fsm-ref-1") fsm.stateName should ===(1) diff --git a/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala index 450b342503..a1228ea15a 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala @@ -60,10 +60,12 @@ class TestProbeSpec extends AkkaSpec with DefaultTimeout with Eventually { val restarts = new AtomicInteger(0) class FailingActor extends Actor { - override def receive = msg => msg match { - case _ => - throw new RuntimeException("simulated failure") - } + override def receive = + msg => + msg match { + case _ => + throw new RuntimeException("simulated failure") + } override def postRestart(reason: Throwable): Unit = { restarts.incrementAndGet() @@ -194,7 +196,7 @@ class TestProbeSpec extends AkkaSpec with DefaultTimeout with Eventually { })) system.stop(target) probe.ref ! 
"hello" - probe watch target + probe.watch(target) probe.expectMsg(1.seconds, "hello") probe.expectMsg(1.seconds, Terminated(target)(false, false)) } diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/AveragingGauge.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/AveragingGauge.scala index 6bd9a103a8..baf21df146 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/AveragingGauge.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/AveragingGauge.scala @@ -20,13 +20,13 @@ class AveragingGauge extends Gauge[Double] { def add(n: Long): Unit = { count.increment() - sum add n + sum.add(n) } def add(ns: Seq[Long]): Unit = { // takes a mutable Seq on order to allow use with Array's - count add ns.length - sum add ns.sum + count.add(ns.length) + sum.add(ns.sum) } override def getValue: Double = sum.sum().toDouble / count.sum() diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala index 2895764cdf..18eba23dab 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/FileDescriptorMetricSet.scala @@ -6,7 +6,7 @@ package akka.testkit.metrics import java.util import collection.JavaConverters._ -import java.lang.management.{ OperatingSystemMXBean, ManagementFactory } +import java.lang.management.{ ManagementFactory, OperatingSystemMXBean } import com.codahale.metrics.{ Gauge, Metric, MetricSet } import com.codahale.metrics.MetricRegistry._ import com.codahale.metrics.jvm.FileDescriptorRatioGauge @@ -14,20 +14,15 @@ import com.codahale.metrics.jvm.FileDescriptorRatioGauge /** * MetricSet exposing number of open and maximum file descriptors used by the JVM process. 
*/ -private[akka] class FileDescriptorMetricSet(os: OperatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean) extends MetricSet { +private[akka] class FileDescriptorMetricSet(os: OperatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean) + extends MetricSet { override def getMetrics: util.Map[String, Metric] = { - Map[String, Metric]( - - name("file-descriptors", "open") -> new Gauge[Long] { - override def getValue: Long = invoke("getOpenFileDescriptorCount") - }, - - name("file-descriptors", "max") -> new Gauge[Long] { - override def getValue: Long = invoke("getMaxFileDescriptorCount") - }, - - name("file-descriptors", "ratio") -> new FileDescriptorRatioGauge(os)).asJava + Map[String, Metric](name("file-descriptors", "open") -> new Gauge[Long] { + override def getValue: Long = invoke("getOpenFileDescriptorCount") + }, name("file-descriptors", "max") -> new Gauge[Long] { + override def getValue: Long = invoke("getMaxFileDescriptorCount") + }, name("file-descriptors", "ratio") -> new FileDescriptorRatioGauge(os)).asJava } private def invoke(name: String): Long = { diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala index ae8fea86dd..3b1c6030ff 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/HdrHistogram.scala @@ -16,33 +16,32 @@ import org.{ HdrHistogram => hdr } * maintain value resolution and separation. Must be a non-negative * integer between 0 and 5. 
*/ -private[akka] class HdrHistogram( - highestTrackableValue: Long, - numberOfSignificantValueDigits: Int, - val unit: String = "") - extends Metric { +private[akka] class HdrHistogram(highestTrackableValue: Long, + numberOfSignificantValueDigits: Int, + val unit: String = "") + extends Metric { private val hist = new hdr.Histogram(highestTrackableValue, numberOfSignificantValueDigits) def update(value: Long): Unit = { - try - hist.recordValue(value) + try hist.recordValue(value) catch { case ex: ArrayIndexOutOfBoundsException => throw wrapHistogramOutOfBoundsException(value, ex) } } def updateWithCount(value: Long, count: Long): Unit = { - try - hist.recordValueWithCount(value, count) + try hist.recordValueWithCount(value, count) catch { case ex: ArrayIndexOutOfBoundsException => throw wrapHistogramOutOfBoundsException(value, ex) } } - private def wrapHistogramOutOfBoundsException(value: Long, ex: ArrayIndexOutOfBoundsException): IllegalArgumentException = + private def wrapHistogramOutOfBoundsException(value: Long, + ex: ArrayIndexOutOfBoundsException): IllegalArgumentException = new IllegalArgumentException(s"Given value $value can not be stored in this histogram " + - s"(min: ${hist.getLowestDiscernibleValue}, max: ${hist.getHighestTrackableValue}})", ex) + s"(min: ${hist.getLowestDiscernibleValue}, max: ${hist.getHighestTrackableValue}})", + ex) def getData = hist.copy() diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MemoryUsageSnapshotting.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MemoryUsageSnapshotting.scala index 1e545e7ee1..3059309f30 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/MemoryUsageSnapshotting.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MemoryUsageSnapshotting.scala @@ -14,31 +14,28 @@ private[akka] trait MemoryUsageSnapshotting extends MetricsPrefix { def getHeapSnapshot = { val metrics = getMetrics - HeapMemoryUsage( - 
metrics.get(key("heap-init")).asInstanceOf[Gauge[Long]].getValue, - metrics.get(key("heap-used")).asInstanceOf[Gauge[Long]].getValue, - metrics.get(key("heap-max")).asInstanceOf[Gauge[Long]].getValue, - metrics.get(key("heap-committed")).asInstanceOf[Gauge[Long]].getValue, - metrics.get(key("heap-usage")).asInstanceOf[RatioGauge].getValue) + HeapMemoryUsage(metrics.get(key("heap-init")).asInstanceOf[Gauge[Long]].getValue, + metrics.get(key("heap-used")).asInstanceOf[Gauge[Long]].getValue, + metrics.get(key("heap-max")).asInstanceOf[Gauge[Long]].getValue, + metrics.get(key("heap-committed")).asInstanceOf[Gauge[Long]].getValue, + metrics.get(key("heap-usage")).asInstanceOf[RatioGauge].getValue) } def getTotalSnapshot = { val metrics = getMetrics - TotalMemoryUsage( - metrics.get(key("total-init")).asInstanceOf[Gauge[Long]].getValue, - metrics.get(key("total-used")).asInstanceOf[Gauge[Long]].getValue, - metrics.get(key("total-max")).asInstanceOf[Gauge[Long]].getValue, - metrics.get(key("total-committed")).asInstanceOf[Gauge[Long]].getValue) + TotalMemoryUsage(metrics.get(key("total-init")).asInstanceOf[Gauge[Long]].getValue, + metrics.get(key("total-used")).asInstanceOf[Gauge[Long]].getValue, + metrics.get(key("total-max")).asInstanceOf[Gauge[Long]].getValue, + metrics.get(key("total-committed")).asInstanceOf[Gauge[Long]].getValue) } def getNonHeapSnapshot = { val metrics = getMetrics - NonHeapMemoryUsage( - metrics.get(key("non-heap-init")).asInstanceOf[Gauge[Long]].getValue, - metrics.get(key("non-heap-used")).asInstanceOf[Gauge[Long]].getValue, - metrics.get(key("non-heap-max")).asInstanceOf[Gauge[Long]].getValue, - metrics.get(key("non-heap-committed")).asInstanceOf[Gauge[Long]].getValue, - metrics.get(key("non-heap-usage")).asInstanceOf[RatioGauge].getValue) + NonHeapMemoryUsage(metrics.get(key("non-heap-init")).asInstanceOf[Gauge[Long]].getValue, + metrics.get(key("non-heap-used")).asInstanceOf[Gauge[Long]].getValue, + 
metrics.get(key("non-heap-max")).asInstanceOf[Gauge[Long]].getValue, + metrics.get(key("non-heap-committed")).asInstanceOf[Gauge[Long]].getValue, + metrics.get(key("non-heap-usage")).asInstanceOf[RatioGauge].getValue) } private def key(k: String) = prefix + "." + k @@ -48,32 +45,29 @@ private[akka] trait MemoryUsageSnapshotting extends MetricsPrefix { private[akka] case class TotalMemoryUsage(init: Long, used: Long, max: Long, committed: Long) { def diff(other: TotalMemoryUsage): TotalMemoryUsage = - TotalMemoryUsage( - this.init - other.init, - this.used - other.used, - this.max - other.max, - this.committed - other.committed) + TotalMemoryUsage(this.init - other.init, + this.used - other.used, + this.max - other.max, + this.committed - other.committed) } private[akka] case class HeapMemoryUsage(init: Long, used: Long, max: Long, committed: Long, usage: Double) { def diff(other: HeapMemoryUsage): HeapMemoryUsage = - HeapMemoryUsage( - this.init - other.init, - this.used - other.used, - this.max - other.max, - this.committed - other.committed, - this.usage - other.usage) + HeapMemoryUsage(this.init - other.init, + this.used - other.used, + this.max - other.max, + this.committed - other.committed, + this.usage - other.usage) } private[akka] case class NonHeapMemoryUsage(init: Long, used: Long, max: Long, committed: Long, usage: Double) { def diff(other: NonHeapMemoryUsage): NonHeapMemoryUsage = - NonHeapMemoryUsage( - this.init - other.init, - this.used - other.used, - this.max - other.max, - this.committed - other.committed, - this.usage - other.usage) + NonHeapMemoryUsage(this.init - other.init, + this.used - other.used, + this.max - other.max, + this.committed - other.committed, + this.usage - other.usage) } diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala index 86be4b58a3..2f0d4e00b0 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala +++ 
b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKit.scala @@ -67,7 +67,7 @@ private[akka] trait MetricsKit extends MetricsKitOps { * Schedule metric reports execution iterval. Should not be used multiple times */ def scheduleMetricReports(every: FiniteDuration): Unit = { - reporters foreach { _.start(every.toMillis, TimeUnit.MILLISECONDS) } + reporters.foreach { _.start(every.toMillis, TimeUnit.MILLISECONDS) } } def registeredMetrics = registry.getMetrics.asScala @@ -92,7 +92,7 @@ private[akka] trait MetricsKit extends MetricsKitOps { */ def reportMetrics(): Unit = { if (reportMetricsEnabled) - reporters foreach { _.report() } + reporters.foreach { _.report() } } /** @@ -103,7 +103,7 @@ private[akka] trait MetricsKit extends MetricsKitOps { def reportMemoryMetrics(): Unit = { val gauges = registry.getGauges(MemMetricsFilter) - reporters foreach { _.report(gauges, empty, empty, empty, empty) } + reporters.foreach { _.report(gauges, empty, empty, empty, empty) } } /** @@ -114,7 +114,7 @@ private[akka] trait MetricsKit extends MetricsKitOps { def reportGcMetrics(): Unit = { val gauges = registry.getGauges(GcMetricsFilter) - reporters foreach { _.report(gauges, empty, empty, empty, empty) } + reporters.foreach { _.report(gauges, empty, empty, empty, empty) } } /** @@ -125,7 +125,7 @@ private[akka] trait MetricsKit extends MetricsKitOps { def reportFileDescriptorMetrics(): Unit = { val gauges = registry.getGauges(FileDescriptorMetricsFilter) - reporters foreach { _.report(gauges, empty, empty, empty, empty) } + reporters.foreach { _.report(gauges, empty, empty, empty, empty) } } /** @@ -144,15 +144,18 @@ private[akka] trait MetricsKit extends MetricsKitOps { * MUST be called after all tests have finished. 
*/ def shutdownMetrics(): Unit = { - reporters foreach { _.stop() } + reporters.foreach { _.stop() } } private[metrics] def getOrRegister[M <: Metric](key: String, metric: => M)(implicit tag: ClassTag[M]): M = { import collection.JavaConverters._ registry.getMetrics.asScala.find(_._1 == key).map(_._2) match { case Some(existing: M) => existing - case Some(existing) => throw new IllegalArgumentException("Key: [%s] is already for different kind of metric! Was [%s], expected [%s]".format(key, metric.getClass.getSimpleName, tag.runtimeClass.getSimpleName)) - case _ => registry.register(key, metric) + case Some(existing) => + throw new IllegalArgumentException( + "Key: [%s] is already for different kind of metric! Was [%s], expected [%s]" + .format(key, metric.getClass.getSimpleName, tag.runtimeClass.getSimpleName)) + case _ => registry.register(key, metric) } } @@ -204,7 +207,8 @@ private[akka] class MetricsKitSettings(config: Config) { val Reporters = config.getStringList("akka.test.metrics.reporters") object ConsoleReporter { - val ScheduledReportInterval = config.getMillisDuration("akka.test.metrics.reporter.console.scheduled-report-interval") + val ScheduledReportInterval = + config.getMillisDuration("akka.test.metrics.reporter.console.scheduled-report-interval") val Verbose = config.getBoolean("akka.test.metrics.reporter.console.verbose") } diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala index 2a846f09cc..5aa6fdde30 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitOps.scala @@ -33,7 +33,8 @@ private[akka] trait MetricsKitOps extends MetricKeyDSL { */ def timedWithKnownOps[T](key: MetricKey, ops: Long)(run: => T): T = { val c = getOrRegister(key.toString, new KnownOpsInTimespanTimer(expectedOps = ops)) - try run finally c.stop() + try run + finally c.stop() } /** @@ 
-43,8 +44,12 @@ private[akka] trait MetricsKitOps extends MetricKeyDSL { * * @param unitString just for human readable output, during console printing */ - def hdrHistogram(key: MetricKey, highestTrackableValue: Long, numberOfSignificantValueDigits: Int, unitString: String = ""): HdrHistogram = - getOrRegister((key / "hdr-histogram").toString, new HdrHistogram(highestTrackableValue, numberOfSignificantValueDigits, unitString)) + def hdrHistogram(key: MetricKey, + highestTrackableValue: Long, + numberOfSignificantValueDigits: Int, + unitString: String = ""): HdrHistogram = + getOrRegister((key / "hdr-histogram").toString, + new HdrHistogram(highestTrackableValue, numberOfSignificantValueDigits, unitString)) /** * Use when measuring for 9x'th percentiles as well as min / max / mean values. diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitSpec.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitSpec.scala index 86c97e038b..e663895bef 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/MetricsKitSpec.scala @@ -7,8 +7,7 @@ package akka.testkit.metrics import org.scalatest._ import com.typesafe.config.ConfigFactory -class MetricsKitSpec extends WordSpec with Matchers with BeforeAndAfter with BeforeAndAfterAll - with MetricsKit { +class MetricsKitSpec extends WordSpec with Matchers with BeforeAndAfter with BeforeAndAfterAll with MetricsKit { import scala.concurrent.duration._ diff --git a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala index 4c1b7c1c04..777a9e07ee 100644 --- a/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala +++ b/akka-testkit/src/test/scala/akka/testkit/metrics/reporter/AkkaConsoleReporter.scala @@ -14,15 +14,20 @@ import scala.reflect.ClassTag /** * Used to report 
`akka.testkit.metric.Metric` types that the original `com.codahale.metrics.ConsoleReporter` is unaware of (cannot re-use directly because of private constructor). */ -class AkkaConsoleReporter( - registry: AkkaMetricRegistry, - verbose: Boolean, - output: PrintStream = System.out) - extends ScheduledReporter(registry.asInstanceOf[MetricRegistry], "akka-console-reporter", MetricFilter.ALL, TimeUnit.SECONDS, TimeUnit.NANOSECONDS) { +class AkkaConsoleReporter(registry: AkkaMetricRegistry, verbose: Boolean, output: PrintStream = System.out) + extends ScheduledReporter(registry.asInstanceOf[MetricRegistry], + "akka-console-reporter", + MetricFilter.ALL, + TimeUnit.SECONDS, + TimeUnit.NANOSECONDS) { private final val ConsoleWidth = 80 - override def report(gauges: util.SortedMap[String, Gauge[_]], counters: util.SortedMap[String, Counter], histograms: util.SortedMap[String, Histogram], meters: util.SortedMap[String, Meter], timers: util.SortedMap[String, Timer]): Unit = { + override def report(gauges: util.SortedMap[String, Gauge[_]], + counters: util.SortedMap[String, Counter], + histograms: util.SortedMap[String, Histogram], + meters: util.SortedMap[String, Meter], + timers: util.SortedMap[String, Timer]): Unit = { import collection.JavaConverters._ // default Metrics types @@ -41,7 +46,8 @@ class AkkaConsoleReporter( output.flush() } - def printMetrics[T <: Metric](metrics: Iterable[(String, T)], printer: T => Unit)(implicit clazz: ClassTag[T]): Unit = { + def printMetrics[T <: Metric](metrics: Iterable[(String, T)], printer: T => Unit)( + implicit clazz: ClassTag[T]): Unit = { if (!metrics.isEmpty) { printWithBanner(s"-- ${simpleName(metrics.head._2.getClass)}", '-') for ((key, metric) <- metrics) { @@ -95,11 +101,16 @@ class AkkaConsoleReporter( output.print(" mean = %2.2f %s%n".format(convertDuration(snapshot.getMean), getDurationUnit)) output.print(" stddev = %2.2f %s%n".format(convertDuration(snapshot.getStdDev), getDurationUnit)) output.print(" median = %2.2f 
%s%n".format(convertDuration(snapshot.getMedian), getDurationUnit)) - output.print(" 75%% <= %2.2f %s%n".format(convertDuration(snapshot.get75thPercentile), getDurationUnit)) - output.print(" 95%% <= %2.2f %s%n".format(convertDuration(snapshot.get95thPercentile), getDurationUnit)) - output.print(" 98%% <= %2.2f %s%n".format(convertDuration(snapshot.get98thPercentile), getDurationUnit)) - output.print(" 99%% <= %2.2f %s%n".format(convertDuration(snapshot.get99thPercentile), getDurationUnit)) - output.print(" 99.9%% <= %2.2f %s%n".format(convertDuration(snapshot.get999thPercentile), getDurationUnit)) + output.print( + " 75%% <= %2.2f %s%n".format(convertDuration(snapshot.get75thPercentile), getDurationUnit)) + output.print( + " 95%% <= %2.2f %s%n".format(convertDuration(snapshot.get95thPercentile), getDurationUnit)) + output.print( + " 98%% <= %2.2f %s%n".format(convertDuration(snapshot.get98thPercentile), getDurationUnit)) + output.print( + " 99%% <= %2.2f %s%n".format(convertDuration(snapshot.get99thPercentile), getDurationUnit)) + output.print( + " 99.9%% <= %2.2f %s%n".format(convertDuration(snapshot.get999thPercentile), getDurationUnit)) } private def printKnownOpsInTimespanCounter(counter: KnownOpsInTimespanTimer): Unit = { @@ -151,4 +162,3 @@ class AkkaConsoleReporter( } } -