diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 5303f47a78..4acad45972 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -6,10 +6,11 @@ package akka.actor import java.util.concurrent.ConcurrentHashMap +import akka.annotation.InternalApi + import scala.annotation.tailrec import scala.collection.immutable import scala.util.control.NonFatal - import akka.dispatch._ import akka.dispatch.sysmsg._ import akka.event.AddressTerminatedTopic @@ -138,7 +139,7 @@ abstract class ActorRef extends java.lang.Comparable[ActorRef] with Serializable * The contract is that if this method returns true, then it will never be false again. * But you cannot rely on that it is alive if it returns false, since this by nature is a racy method. */ - @deprecated("Use context.watch(actor) and receive Terminated(actor)", "2.2") + @InternalApi private[akka] def isTerminated: Boolean final override def hashCode: Int = { @@ -337,6 +338,7 @@ private[akka] class LocalActorRef private[akka] ( * If this method returns true, it will never return false again, but if it * returns false, you cannot be sure if it's alive still (race condition) */ + @InternalApi override private[akka] def isTerminated: Boolean = actorCell.isTerminated /** diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala index db94a34544..ce56547772 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala @@ -11,6 +11,7 @@ import akka.dispatch.sysmsg._ import akka.event.Logging.Error import akka.util.Unsafe import akka.actor._ +import akka.annotation.InternalApi import akka.serialization.{ DisabledJavaSerializer, SerializationExtension, Serializers } import scala.util.control.{ NoStackTrace, NonFatal } @@ -28,6 +29,10 @@ final 
case class SerializationCheckFailedException private (msg: Object, cause: "To avoid this error, either disable 'akka.actor.serialize-messages', mark the message with 'akka.actor.NoSerializationVerificationNeeded', or configure serialization to support this message", cause) +/** + * INTERNAL API + */ +@InternalApi private[akka] trait Dispatch { this: ActorCell => @silent @volatile private var _mailboxDoNotCallMeDirectly diff --git a/akka-cluster/src/main/mima-filters/2.5.22.backwards.excludes b/akka-cluster/src/main/mima-filters/2.5.22.backwards.excludes new file mode 100644 index 0000000000..a9bd5215c0 --- /dev/null +++ b/akka-cluster/src/main/mima-filters/2.5.22.backwards.excludes @@ -0,0 +1,2 @@ +# Remove warnings, changing type in private class +ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.cluster.CrossDcHeartbeatingState.init") diff --git a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala index 3bb501cc6f..35db8353d5 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AutoDown.scala @@ -12,6 +12,7 @@ import akka.cluster.ClusterEvent._ import scala.concurrent.duration.Duration import akka.actor.ActorLogging +import com.github.ghik.silencer.silent /** * INTERNAL API @@ -31,6 +32,7 @@ final class AutoDowning(system: ActorSystem) extends DowningProvider { private def clusterSettings = Cluster(system).settings + @silent override def downRemovalMargin: FiniteDuration = clusterSettings.DownRemovalMargin override def downingActorProps: Option[Props] = diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala index 3c5681e1cc..e218676bcc 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala @@ -6,6 +6,7 @@ package akka.cluster 
import akka.ConfigurationException import akka.actor.{ ActorRef, ActorSystem, ActorSystemImpl, Deploy, DynamicAccess, NoScopeGiven, Scope } +import akka.annotation.InternalApi import akka.cluster.routing.{ ClusterRouterGroup, ClusterRouterGroupSettings, @@ -16,6 +17,7 @@ import akka.event.EventStream import akka.remote.{ RemoteActorRefProvider, RemoteDeployer } import akka.remote.routing.RemoteRouterConfig import akka.routing.{ Group, Pool } +import com.github.ghik.silencer.silent import com.typesafe.config.Config import com.typesafe.config.ConfigFactory @@ -26,6 +28,7 @@ import com.typesafe.config.ConfigFactory * extension, i.e. the cluster will automatically be started when * the `ClusterActorRefProvider` is used. */ +@InternalApi private[akka] class ClusterActorRefProvider( _systemName: String, _settings: ActorSystem.Settings, @@ -68,6 +71,7 @@ private[akka] class ClusterActorRefProvider( * * Deployer of cluster aware routers. */ +@InternalApi private[akka] class ClusterDeployer(_settings: ActorSystem.Settings, _pm: DynamicAccess) extends RemoteDeployer(_settings, _pm) { @@ -114,6 +118,7 @@ private[akka] class ClusterDeployer(_settings: ActorSystem.Settings, _pm: Dynami } +@silent @SerialVersionUID(1L) abstract class ClusterScope extends Scope diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index 56a15e4028..b1ae384080 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -11,7 +11,6 @@ import akka.cluster.MemberStatus._ import akka.cluster.ClusterEvent._ import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics } import akka.Done -import akka.actor.CoordinatedShutdown.Reason import akka.pattern.ask import akka.remote.QuarantinedEvent import akka.util.Timeout diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala 
b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index 611545a716..95864b6974 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -369,6 +369,7 @@ object ClusterEvent { * INTERNAL API * The nodes that have seen current version of the Gossip. */ + @ccompatUsedUntil213 private[cluster] final case class SeenChanged(convergence: Boolean, seenBy: Set[Address]) extends ClusterDomainEvent /** @@ -484,7 +485,6 @@ object ClusterEvent { if newMember.status != oldMember.status || newMember.upNumber != oldMember.upNumber => newMember } - import akka.util.ccompat._ val memberEvents = (newMembers ++ changedMembers).unsorted.collect { case m if m.status == Joining => MemberJoined(m) case m if m.status == WeaklyUp => MemberWeaklyUp(m) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala index 52e58a4e11..ac3208638a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala @@ -22,6 +22,7 @@ import akka.util.ccompat._ * Receives Heartbeat messages and replies. 
*/ @InternalApi +@ccompatUsedUntil213 private[cluster] final class ClusterHeartbeatReceiver extends Actor with ActorLogging { import ClusterHeartbeatSender._ diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala index b4d18df189..487e2db400 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala @@ -54,7 +54,7 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { override def preStart(): Unit = cluster.subscribe(self, classOf[ClusterDomainEvent]) override def postStop(): Unit = cluster.unsubscribe(self) - def receive = { + def receive: Receive = { case e: ClusterDomainEvent => e match { case SeenChanged(_, seenBy) => diff --git a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala index 603bb8c30e..ab499d217c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/CrossDcClusterHeartbeat.scala @@ -33,6 +33,7 @@ import scala.collection.immutable * nodes which aggressively come and go as the traffic in the service changes. 
*/ @InternalApi +@ccompatUsedUntil213 private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogging { import CrossDcHeartbeatSender._ @@ -61,7 +62,7 @@ private[cluster] final class CrossDcHeartbeatSender extends Actor with ActorLogg selfDataCenter, crossDcFailureDetector, crossDcSettings.NrOfMonitoringActors, - SortedSet.empty) + immutable.SortedSet.empty) // start periodic heartbeat to other nodes in cluster val heartbeatTask = scheduler.schedule( @@ -218,7 +219,7 @@ private[cluster] final case class CrossDcHeartbeatingState( selfDataCenter: DataCenter, failureDetector: FailureDetectorRegistry[Address], nrOfMonitoredNodesPerDc: Int, - state: Map[ClusterSettings.DataCenter, SortedSet[Member]]) { + state: Map[ClusterSettings.DataCenter, immutable.SortedSet[Member]]) { import CrossDcHeartbeatingState._ /** @@ -242,7 +243,8 @@ private[cluster] final case class CrossDcHeartbeatingState( // we need to remove the member first, to avoid having "duplicates" // this is because the removal and uniqueness we need is only by uniqueAddress // which is not used by the `ageOrdering` - val oldMembersWithoutM = state.getOrElse(dc, emptyMembersSortedSet).filterNot(_.uniqueAddress == m.uniqueAddress) + val oldMembersWithoutM: immutable.SortedSet[Member] = + state.getOrElse(dc, emptyMembersSortedSet).filterNot(_.uniqueAddress == m.uniqueAddress) val updatedMembers = oldMembersWithoutM + m val updatedState = this.copy(state = state.updated(dc, updatedMembers)) @@ -307,7 +309,7 @@ private[cluster] final case class CrossDcHeartbeatingState( private[cluster] object CrossDcHeartbeatingState { /** Sorted by age */ - private def emptyMembersSortedSet: SortedSet[Member] = SortedSet.empty[Member](Member.ageOrdering) + private def emptyMembersSortedSet: immutable.SortedSet[Member] = immutable.SortedSet.empty[Member](Member.ageOrdering) // Since we need ordering of oldests guaranteed, we must only look at Up (or Leaving, Exiting...) 
nodes def atLeastInUpState(m: Member): Boolean = @@ -317,7 +319,7 @@ private[cluster] object CrossDcHeartbeatingState { selfDataCenter: DataCenter, crossDcFailureDetector: FailureDetectorRegistry[Address], nrOfMonitoredNodesPerDc: Int, - members: SortedSet[Member]): CrossDcHeartbeatingState = { + members: immutable.SortedSet[Member]): CrossDcHeartbeatingState = { new CrossDcHeartbeatingState(selfDataCenter, crossDcFailureDetector, nrOfMonitoredNodesPerDc, state = { // TODO unduplicate this with the logic in MembershipState.ageSortedTopOldestMembersPerDc val groupedByDc = members.filter(atLeastInUpState).groupBy(_.dataCenter) @@ -329,7 +331,7 @@ private[cluster] object CrossDcHeartbeatingState { // we need to enforce the ageOrdering for the SortedSet in each DC groupedByDc.map { case (dc, ms) => - dc -> (SortedSet.empty[Member](Member.ageOrdering).union(ms)) + dc -> immutable.SortedSet.empty[Member](Member.ageOrdering).union(ms) } } }) diff --git a/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala b/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala index 4246da8d96..04a9338d50 100644 --- a/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala +++ b/akka-cluster/src/main/scala/akka/cluster/DowningProvider.scala @@ -6,6 +6,7 @@ package akka.cluster import akka.ConfigurationException import akka.actor.{ ActorSystem, ExtendedActorSystem, Props } +import com.github.ghik.silencer.silent import scala.concurrent.duration.FiniteDuration @@ -64,6 +65,7 @@ abstract class DowningProvider { * is not enabled. 
*/ final class NoDowning(system: ActorSystem) extends DowningProvider { + @silent override def downRemovalMargin: FiniteDuration = Cluster(system).settings.DownRemovalMargin override val downingActorProps: Option[Props] = None } diff --git a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala index 96ff95814c..0b20191e9e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala +++ b/akka-cluster/src/main/scala/akka/cluster/JoinConfigCompatChecker.scala @@ -93,6 +93,7 @@ object JoinConfigCompatChecker { * information that users may have added to their configuration. */ @InternalApi + @ccompatUsedUntil213 private[cluster] def filterWithKeys(requiredKeys: im.Seq[String], config: Config): Config = { val filtered = diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index 1d97121216..4dd222ea54 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -8,6 +8,7 @@ import akka.actor.Address import MemberStatus._ import akka.annotation.InternalApi import akka.cluster.ClusterSettings.DataCenter +import com.github.ghik.silencer.silent import scala.runtime.AbstractFunction2 @@ -317,6 +318,7 @@ final case class UniqueAddress(address: Address, longUid: Long) extends Ordered[ * Stops `copy(Address, Long)` copy from being generated, use `apply` instead. 
*/ @deprecated("Use Long UID constructor instead", since = "2.4.11") + @silent def copy(address: Address = address, uid: Int = uid) = new UniqueAddress(address, uid.toLong) } diff --git a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala index 143357cb1f..7df5094193 100644 --- a/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala +++ b/akka-cluster/src/main/scala/akka/cluster/MembershipState.scala @@ -19,6 +19,7 @@ import scala.util.Random /** * INTERNAL API */ +@ccompatUsedUntil213 @InternalApi private[akka] object MembershipState { import MemberStatus._ private val leaderMemberStatus = Set[MemberStatus](Up, Leaving) @@ -107,8 +108,8 @@ import scala.util.Random /** * @return Up to `crossDcConnections` oldest members for each DC */ - lazy val ageSortedTopOldestMembersPerDc: Map[DataCenter, SortedSet[Member]] = { - latestGossip.members.foldLeft(Map.empty[DataCenter, SortedSet[Member]]) { (acc, member) => + lazy val ageSortedTopOldestMembersPerDc: Map[DataCenter, immutable.SortedSet[Member]] = { + latestGossip.members.foldLeft(Map.empty[DataCenter, immutable.SortedSet[Member]]) { (acc, member) => acc.get(member.dataCenter) match { case Some(set) => if (set.size < crossDcConnections) { @@ -121,7 +122,7 @@ import scala.util.Random } } case None => - acc + (member.dataCenter -> (SortedSet.empty(Member.ageOrdering) + member)) + acc + (member.dataCenter -> (immutable.SortedSet.empty(Member.ageOrdering) + member)) } } } diff --git a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala index de567464d1..4bf7f4dd3d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Reachability.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Reachability.scala @@ -12,6 +12,7 @@ import akka.util.ccompat._ /** * INTERNAL API */ +@ccompatUsedUntil213 private[cluster] object Reachability { val empty = new Reachability(Vector.empty, 
Map.empty) diff --git a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala index bb4aba3a48..b87ebdbd90 100644 --- a/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala +++ b/akka-cluster/src/main/scala/akka/cluster/protobuf/ClusterMessageSerializer.scala @@ -12,23 +12,25 @@ import akka.cluster._ import akka.cluster.protobuf.msg.{ ClusterMessages => cm } import akka.serialization._ import akka.protobuf.{ ByteString, MessageLite } + import scala.annotation.tailrec import scala.collection.immutable import scala.collection.JavaConverters._ import scala.concurrent.duration.Deadline - import akka.annotation.InternalApi import akka.cluster.InternalClusterAction._ import akka.cluster.routing.{ ClusterRouterPool, ClusterRouterPoolSettings } import akka.routing.Pool import akka.util.ccompat._ import akka.util.ccompat._ +import com.github.ghik.silencer.silent import com.typesafe.config.{ Config, ConfigFactory, ConfigRenderOptions } /** * INTERNAL API */ @InternalApi +@ccompatUsedUntil213 private[akka] object ClusterMessageSerializer { // FIXME use short manifests when we can break wire compatibility // needs to be full class names for backwards compatibility @@ -191,7 +193,8 @@ final class ClusterMessageSerializer(val system: ExtendedActorSystem) .addAllUseRoles(settings.useRoles.asJava) // for backwards compatibility - settings.useRole.foreach(builder.setUseRole) + @silent + val _ = settings.useRole.foreach(builder.setUseRole) builder.build() } diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala index c75e1089eb..b0424176a2 100644 --- a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala +++ b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala @@ -24,6 +24,7 @@ import 
akka.routing.RouterActor import akka.routing.RouterConfig import akka.routing.RouterPoolActor import akka.routing.RoutingLogic +import com.github.ghik.silencer.silent import com.typesafe.config.Config import com.typesafe.config.ConfigFactory @@ -100,6 +101,7 @@ final case class ClusterRouterGroupSettings( // For binary compatibility @deprecated("Use constructor with useRoles instead", since = "2.5.4") + @silent def copy( totalInstances: Int = totalInstances, routeesPaths: immutable.Seq[String] = routeesPaths, @@ -194,6 +196,7 @@ final case class ClusterRouterPoolSettings( // For binary compatibility @deprecated("Use copy with useRoles instead", since = "2.5.4") + @silent def copy( totalInstances: Int = totalInstances, maxInstancesPerNode: Int = maxInstancesPerNode, @@ -449,7 +452,7 @@ private[akka] class ClusterRouterGroupActor(val settings: ClusterRouterGroupSett def doAddRoutees(): Unit = selectDeploymentTarget match { case None => // done case Some((address, path)) => - val routee = group.routeeFor(address + path, context) + val routee = group.routeeFor(address.toString + path, context) usedRouteePaths = usedRouteePaths.updated(address, usedRouteePaths.getOrElse(address, Set.empty) + path) // must register each one, since registered routees are used in selectDeploymentTarget cell.addRoutee(routee) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala index bc76143d09..e1f7c7d5a4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala @@ -14,8 +14,10 @@ import akka.testkit.TestEvent._ import akka.actor._ import akka.remote.RemoteActorRef import java.util.concurrent.TimeoutException + import akka.remote.RemoteWatcher import akka.cluster.MultiNodeClusterSpec.EndActor +import org.scalatest.concurrent.ScalaFutures object ClusterDeathWatchMultiJvmSpec 
extends MultiNodeConfig { val first = role("first") @@ -43,7 +45,8 @@ class ClusterDeathWatchMultiJvmNode5 extends ClusterDeathWatchSpec abstract class ClusterDeathWatchSpec extends MultiNodeSpec(ClusterDeathWatchMultiJvmSpec) with MultiNodeClusterSpec - with ImplicitSender { + with ImplicitSender + with ScalaFutures { import ClusterDeathWatchMultiJvmSpec._ @@ -138,23 +141,6 @@ abstract class ClusterDeathWatchSpec } - "receive Terminated when watched path doesn't exist" ignore { - Thread.sleep(5000) - runOn(first) { - val path = RootActorPath(second) / "user" / "non-existing" - system.actorOf(Props(new Actor { - context.watch(context.actorFor(path)) - def receive = { - case t: Terminated => testActor ! t.actor.path - } - }).withDeploy(Deploy.local), name = "observer3") - - expectMsg(path) - } - - enterBarrier("after-2") - } - "be able to watch actor before node joins cluster, ClusterRemoteWatcher takes over from RemoteWatcher" in within( 20 seconds) { runOn(fifth) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index e25ec41745..2ecb8981ac 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -97,7 +97,7 @@ abstract class ConvergenceSpec(multiNodeConfig: ConvergenceMultiNodeConfig) enterBarrier("after-join") runOn(first, second, fourth) { - for (n <- 1 to 5) { + for (_ <- 1 to 5) { awaitAssert(clusterView.members.size should ===(4)) awaitSeenSameState(first, second, fourth) memberStatus(first) should ===(Some(MemberStatus.Up)) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala index 4efdf51255..7b23ac8ad5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeMessageClusterSpec.scala @@ -5,7 +5,6 @@ package akka.cluster import scala.concurrent.duration._ - import akka.actor.ActorIdentity import akka.actor.ActorRef import akka.actor.ExtendedActorSystem @@ -17,6 +16,7 @@ import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.serialization.SerializerWithStringManifest import akka.testkit._ +import akka.util.unused import com.typesafe.config.ConfigFactory object LargeMessageClusterMultiJvmSpec extends MultiNodeConfig { @@ -61,7 +61,7 @@ object LargeMessageClusterMultiJvmSpec extends MultiNodeConfig { final case class Slow(payload: Array[Byte]) - class SlowSerializer(system: ExtendedActorSystem) extends SerializerWithStringManifest { + class SlowSerializer(@unused system: ExtendedActorSystem) extends SerializerWithStringManifest { override def identifier = 999 override def manifest(o: AnyRef) = "a" override def toBinary(o: AnyRef) = o match { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 2e15efba16..618e093ee6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -67,7 +67,6 @@ abstract class LeaderElectionSpec(multiNodeConfig: LeaderElectionMultiNodeConfig myself match { case `controller` => - val leaderAddress = address(leader) enterBarrier("before-shutdown" + n) testConductor.exit(leader, 0).await enterBarrier("after-shutdown" + n, "after-unavailable" + n, "after-down" + n, "completed" + n) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala index 3079182547..97b1b6ef9c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala @@ -12,6 +12,7 @@ import akka.testkit._ import akka.cluster.MemberStatus._ import akka.util.ccompat._ +@ccompatUsedUntil213 object MinMembersBeforeUpMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala index 130ef0a606..8465a7a5f1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcHeartbeatTakingOverSpec.scala @@ -16,6 +16,7 @@ import scala.collection.immutable.SortedSet import scala.concurrent.duration._ import akka.util.ccompat._ +@ccompatUsedUntil213 object MultiDcHeartbeatTakingOverSpecMultiJvmSpec extends MultiNodeConfig { val first = role("first") // alpha val second = role("second") // alpha @@ -91,8 +92,7 @@ abstract class MultiDcHeartbeatTakingOverSpec expectedBetaHeartbeaterNodes = takeNOldestMembers(dataCenter = "beta", 2) expectedBetaHeartbeaterRoles = membersAsRoles(expectedBetaHeartbeaterNodes) - expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles.union( - expectedBetaHeartbeaterRoles)) + expectedNoActiveHeartbeatSenderRoles = roles.toSet -- (expectedAlphaHeartbeaterRoles ++ expectedBetaHeartbeaterRoles) } "collect information on oldest nodes" taggedAs LongRunningTest in { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala index 9bf78ced27..7c329aaf9a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSunnyWeatherSpec.scala @@ -96,15 +96,15 @@ abstract class MultiDcSunnyWeatherSpec implicit val sender = observer.ref
runOn(expectedAlphaHeartbeaterRoles.toList: _*) { selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() - val status = observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) } runOn(expectedBetaHeartbeaterRoles.toList: _*) { selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() - val status = observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringActive](5.seconds) } runOn(expectedNoActiveHeartbeatSenderRoles.toList: _*) { selectCrossDcHeartbeatSender ! CrossDcHeartbeatSender.ReportStatus() - val status = observer.expectMsgType[CrossDcHeartbeatSender.MonitoringDormant](5.seconds) + observer.expectMsgType[CrossDcHeartbeatSender.MonitoringDormant](5.seconds) } enterBarrier("done") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index e89ed9fabf..1a1d1ff55e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -29,6 +29,7 @@ import akka.util.ccompat._ import scala.concurrent.Await +@ccompatUsedUntil213 object MultiNodeClusterSpec { def clusterConfigWithFailureDetectorPuppet: Config = diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala index 1d53ff7cf8..0e9f198f37 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeChurnSpec.scala @@ -143,9 +143,9 @@ abstract class NodeChurnSpec enterBarrier("end-round-" + n) log.info("end of round-" + n) // log listener will send to testActor if payload size exceed configured log-frame-size-exceeding - expectNoMsg(2.seconds) + 
expectNoMessage(2.seconds) } - expectNoMsg(5.seconds) + expectNoMessage(5.seconds) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 03e7a0e9b6..9ee243ad30 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -9,6 +9,7 @@ import akka.remote.testkit.MultiNodeSpec import akka.testkit._ import akka.util.ccompat._ +@ccompatUsedUntil213 object NodeMembershipMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index fb97e948c8..6343a79471 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -65,7 +65,7 @@ abstract class NodeUpSpec extends MultiNodeSpec(NodeUpMultiJvmSpec) with MultiNo enterBarrier("joined-again") // let it run for a while to make sure that nothing bad happens - for (n <- 1 to 20) { + for (_ <- 1 to 20) { Thread.sleep(100.millis.dilated.toMillis) unexpected.get should ===(SortedSet.empty) clusterView.members.forall(_.status == MemberStatus.Up) should ===(true) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala index c5e9464294..3fcfc8542a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartFirstSeedNodeSpec.scala @@ -21,6 +21,7 @@ import akka.cluster.MemberStatus._ import akka.actor.Deploy import akka.util.ccompat._ +@ccompatUsedUntil213 object RestartFirstSeedNodeMultiJvmSpec extends MultiNodeConfig { val seed1 = role("seed1") val seed2 = 
role("seed2") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala index bca5c3d0e1..b6a34bb808 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode2Spec.scala @@ -20,6 +20,7 @@ import akka.testkit._ import com.typesafe.config.ConfigFactory import akka.util.ccompat._ +@ccompatUsedUntil213 object RestartNode2SpecMultiJvmSpec extends MultiNodeConfig { val seed1 = role("seed1") val seed2 = role("seed2") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala index 1ce17e13fc..3942635dc8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNode3Spec.scala @@ -20,6 +20,7 @@ import akka.testkit._ import com.typesafe.config.ConfigFactory import akka.util.ccompat._ +@ccompatUsedUntil213 object RestartNode3MultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala index 6e2f2e5b72..377db036d9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/RestartNodeSpec.scala @@ -25,6 +25,7 @@ import akka.testkit._ import com.typesafe.config.ConfigFactory import akka.util.ccompat._ +@ccompatUsedUntil213 object RestartNodeMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") @@ -49,7 +50,7 @@ object RestartNodeMultiJvmSpec extends MultiNodeConfig { case ActorIdentity(None, Some(ref)) => context.watch(ref) replyTo ! 
Done - case t: Terminated => + case _: Terminated => } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala index 18e89e3df0..40523e4a56 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala @@ -245,7 +245,6 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { extends Actor with ActorLogging { import settings.infolog - private val cluster = Cluster(context.system) private var reportTo: Option[ActorRef] = None private var results = Vector.empty[ClusterResult] private var phiValuesObservedByNode = { @@ -548,7 +547,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { */ class Worker extends Actor with ActorLogging { def receive = { - case SimpleJob(id, payload) => sender() ! Ack(id) + case SimpleJob(id, _) => sender() ! Ack(id) case TreeJob(id, payload, idx, levels, width) => // create the actors when first TreeJob message is received val totalActors = ((width * math.pow(width, levels) - 1) / (width - 1)).toInt @@ -563,7 +562,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig { } def treeWorker(tree: ActorRef): Receive = { - case SimpleJob(id, payload) => sender() ! Ack(id) + case SimpleJob(id, _) => sender() ! 
Ack(id) case TreeJob(id, payload, idx, _, _) => tree.forward((idx, SimpleJob(id, payload))) } @@ -1106,7 +1105,7 @@ abstract class StressSpec within(duration + 10.seconds) { val rounds = (duration.toMillis / oneIteration.toMillis).max(1).toInt val supervisor = system.actorOf(Props[Supervisor], "supervisor") - for (count <- 0 until rounds) { + for (_ <- 0 until rounds) { createResultAggregator(title, expectedResults = nbrUsedRoles, includeInHistory = false) val (masterRoles, otherRoles) = roles.take(nbrUsedRoles).splitAt(3) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala index 007364f8a2..5a8a1c09dc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeJoinsAgainSpec.scala @@ -21,6 +21,7 @@ import akka.cluster.MultiNodeClusterSpec.EndActor import akka.remote.RARP import akka.util.ccompat._ +@ccompatUsedUntil213 object UnreachableNodeJoinsAgainMultiNodeConfig extends MultiNodeConfig { val first = role("first") val second = role("second") @@ -89,7 +90,6 @@ abstract class UnreachableNodeJoinsAgainSpec within(30 seconds) { // victim becomes all alone awaitAssert { - val members = clusterView.members clusterView.unreachableMembers.size should ===(roles.size - 1) } clusterView.unreachableMembers.map(_.address) should ===(allButVictim.map(address).toSet) @@ -101,7 +101,6 @@ abstract class UnreachableNodeJoinsAgainSpec within(30 seconds) { // victim becomes unreachable awaitAssert { - val members = clusterView.members clusterView.unreachableMembers.size should ===(1) } awaitSeenSameState(allButVictim.map(address): _*) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala index 
a7e26869e6..dc416ed0ef 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingGroupSpec.scala @@ -4,7 +4,7 @@ package akka.cluster.routing -import akka.actor.{ Actor, ActorRef, Address, Props } +import akka.actor.{ Actor, ActorRef, Props } import akka.cluster.MultiNodeClusterSpec import akka.pattern.ask import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec } @@ -46,14 +46,6 @@ abstract class ClusterConsistentHashingGroupSpec with DefaultTimeout { import ClusterConsistentHashingGroupMultiJvmSpec._ - /** - * Fills in self address for local ActorRef - */ - private def fullAddress(actorRef: ActorRef): Address = actorRef.path.address match { - case Address(_, _, None, None) => cluster.selfAddress - case a => a - } - def currentRoutees(router: ActorRef) = Await.result(router ? GetRoutees, timeout.duration).asInstanceOf[Routees].routees diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala index 34174cb309..b038c656f0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinSpec.scala @@ -48,10 +48,7 @@ object ClusterRoundRobinMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig( - debugConfig(on = false) - .withFallback( - ConfigFactory.parseString(""" + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(s""" akka.actor { allow-java-serialization = off serialize-creators = off @@ -94,8 +91,7 @@ object ClusterRoundRobinMultiJvmSpec extends MultiNodeConfig { } } } - """)) - .withFallback(MultiNodeClusterSpec.clusterConfig)) + """)).withFallback(MultiNodeClusterSpec.clusterConfig)) nodeConfig(first, 
second)(ConfigFactory.parseString("""akka.cluster.roles =["a", "c"]""")) nodeConfig(third)(ConfigFactory.parseString("""akka.cluster.roles =["b", "c"]""")) @@ -129,9 +125,9 @@ abstract class ClusterRoundRobinSpec def receiveReplies(routeeType: RouteeType, expectedReplies: Int): Map[Address, Int] = { val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0) - (receiveWhile(5 seconds, messages = expectedReplies) { + receiveWhile(5 seconds, messages = expectedReplies) { case Reply(`routeeType`, ref) => fullAddress(ref) - }).foldLeft(zero) { + }.foldLeft(zero) { case (replyMap, address) => replyMap + (address -> (replyMap(address) + 1)) } } @@ -162,14 +158,14 @@ abstract class ClusterRoundRobinSpec awaitAssert(currentRoutees(router1).size should ===(4)) val iterationCount = 10 - for (i <- 0 until iterationCount) { + for (_ <- 0 until iterationCount) { router1 ! "hit" } val replies = receiveReplies(PoolRoutee, iterationCount) - replies(first) should be > (0) - replies(second) should be > (0) + replies(first) should be > 0 + replies(second) should be > 0 replies(third) should ===(0) replies(fourth) should ===(0) replies.values.sum should ===(iterationCount) @@ -193,14 +189,14 @@ abstract class ClusterRoundRobinSpec } val iterationCount = 10 - for (i <- 0 until iterationCount) { + for (_ <- 0 until iterationCount) { router4 ! "hit" } val replies = receiveReplies(GroupRoutee, iterationCount) - replies(first) should be > (0) - replies(second) should be > (0) + replies(first) should be > 0 + replies(second) should be > 0 replies(third) should ===(0) replies(fourth) should ===(0) replies.values.sum should ===(iterationCount) @@ -219,13 +215,13 @@ abstract class ClusterRoundRobinSpec awaitAssert(currentRoutees(router1).size should ===(8)) val iterationCount = 10 - for (i <- 0 until iterationCount) { + for (_ <- 0 until iterationCount) { router1 ! 
"hit" } val replies = receiveReplies(PoolRoutee, iterationCount) - replies.values.foreach { _ should be > (0) } + replies.values.foreach { _ should be > 0 } replies.values.sum should ===(iterationCount) } @@ -241,13 +237,13 @@ abstract class ClusterRoundRobinSpec awaitAssert(currentRoutees(router4).size should ===(8)) val iterationCount = 10 - for (i <- 0 until iterationCount) { + for (_ <- 0 until iterationCount) { router4 ! "hit" } val replies = receiveReplies(GroupRoutee, iterationCount) - replies.values.foreach { _ should be > (0) } + replies.values.foreach { _ should be > 0 } replies.values.sum should ===(iterationCount) } @@ -261,16 +257,16 @@ abstract class ClusterRoundRobinSpec awaitAssert(currentRoutees(router3).size should ===(3)) val iterationCount = 10 - for (i <- 0 until iterationCount) { + for (_ <- 0 until iterationCount) { router3 ! "hit" } val replies = receiveReplies(PoolRoutee, iterationCount) replies(first) should ===(0) - replies(second) should be > (0) - replies(third) should be > (0) - replies(fourth) should be > (0) + replies(second) should be > 0 + replies(third) should be > 0 + replies(fourth) should be > 0 replies.values.sum should ===(iterationCount) } @@ -283,14 +279,14 @@ abstract class ClusterRoundRobinSpec awaitAssert(currentRoutees(router5).size should ===(2)) val iterationCount = 10 - for (i <- 0 until iterationCount) { + for (_ <- 0 until iterationCount) { router5 ! "hit" } val replies = receiveReplies(PoolRoutee, iterationCount) - replies(first) should be > (0) - replies(second) should be > (0) + replies(first) should be > 0 + replies(second) should be > 0 replies(third) should ===(0) replies(fourth) should ===(0) replies.values.sum should ===(iterationCount) @@ -308,7 +304,7 @@ abstract class ClusterRoundRobinSpec awaitAssert(currentRoutees(router2).size should ===(3)) val iterationCount = 10 - for (i <- 0 until iterationCount) { + for (_ <- 0 until iterationCount) { router2 ! 
"hit" } @@ -339,7 +335,7 @@ abstract class ClusterRoundRobinSpec testConductor.blackhole(first, second, Direction.Both).await awaitAssert(routees.size should ===(6)) - routeeAddresses should not contain (address(second)) + routeeAddresses should not contain address(second) testConductor.passThrough(first, second, Direction.Both).await awaitAssert(routees.size should ===(8)) @@ -368,11 +364,11 @@ abstract class ClusterRoundRobinSpec expectMsgType[Terminated](15.seconds).actor should ===(downRouteeRef) awaitAssert { routeeAddresses should contain(notUsedAddress) - routeeAddresses should not contain (downAddress) + routeeAddresses should not contain downAddress } val iterationCount = 10 - for (i <- 0 until iterationCount) { + for (_ <- 0 until iterationCount) { router2 ! "hit" } diff --git a/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala index 4a03d1998b..71be27900e 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AutoDownSpec.scala @@ -62,7 +62,7 @@ class AutoDownSpec extends AkkaSpec("akka.actor.provider=remote") { val a = autoDownActor(Duration.Zero) a ! LeaderChanged(Some(memberB.address)) a ! UnreachableMember(memberC) - expectNoMsg(1.second) + expectNoMessage(1.second) } "down unreachable when becoming leader" in { @@ -77,7 +77,7 @@ class AutoDownSpec extends AkkaSpec("akka.actor.provider=remote") { val a = autoDownActor(2.seconds) a ! LeaderChanged(Some(memberA.address)) a ! UnreachableMember(memberB) - expectNoMsg(1.second) + expectNoMessage(1.second) expectMsg(DownCalled(memberB.address)) } @@ -86,7 +86,7 @@ class AutoDownSpec extends AkkaSpec("akka.actor.provider=remote") { a ! LeaderChanged(Some(memberB.address)) a ! UnreachableMember(memberC) a ! 
LeaderChanged(Some(memberA.address)) - expectNoMsg(1.second) + expectNoMessage(1.second) expectMsg(DownCalled(memberC.address)) } @@ -95,7 +95,7 @@ class AutoDownSpec extends AkkaSpec("akka.actor.provider=remote") { a ! LeaderChanged(Some(memberA.address)) a ! UnreachableMember(memberC) a ! LeaderChanged(Some(memberB.address)) - expectNoMsg(3.second) + expectNoMessage(3.second) } "not down when unreachable become reachable in-between detection and specified duration" taggedAs TimingTest in { @@ -103,7 +103,7 @@ class AutoDownSpec extends AkkaSpec("akka.actor.provider=remote") { a ! LeaderChanged(Some(memberA.address)) a ! UnreachableMember(memberB) a ! ReachableMember(memberB) - expectNoMsg(3.second) + expectNoMessage(3.second) } "not down when unreachable is removed in-between detection and specified duration" taggedAs TimingTest in { @@ -111,14 +111,14 @@ class AutoDownSpec extends AkkaSpec("akka.actor.provider=remote") { a ! LeaderChanged(Some(memberA.address)) a ! UnreachableMember(memberB) a ! MemberRemoved(memberB.copy(Removed), previousStatus = Exiting) - expectNoMsg(3.second) + expectNoMessage(3.second) } "not down when unreachable is already Down" in { val a = autoDownActor(Duration.Zero) a ! LeaderChanged(Some(memberA.address)) a ! 
UnreachableMember(memberB.copy(Down)) - expectNoMsg(1.second) + expectNoMessage(1.second) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 55665f3480..f745c31f2c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -6,15 +6,13 @@ package akka.cluster import language.postfixOps import scala.concurrent.duration._ - import com.typesafe.config.ConfigFactory - import akka.testkit.AkkaSpec import akka.dispatch.Dispatchers - import akka.remote.PhiAccrualFailureDetector import akka.util.Helpers.ConfigOps import akka.actor.Address +import com.github.ghik.silencer.silent class ClusterConfigSpec extends AkkaSpec { @@ -43,7 +41,8 @@ class ClusterConfigSpec extends AkkaSpec { UnreachableNodesReaperInterval should ===(1 second) PublishStatsInterval should ===(Duration.Undefined) AutoDownUnreachableAfter should ===(Duration.Undefined) - DownRemovalMargin should ===(Duration.Zero) + @silent + val _ = DownRemovalMargin should ===(Duration.Zero) MinNrOfMembers should ===(1) MinNrOfMembersOfRole should ===(Map.empty[String, Int]) SelfDataCenter should ===("default") diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala index 76cf4439ae..c3463ffdfa 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala @@ -19,7 +19,7 @@ import akka.testkit.ImplicitSender import akka.actor.ActorRef import akka.remote.RARP import akka.testkit.TestProbe -import akka.cluster.ClusterSettings.{ DataCenter, DefaultDataCenter } +import akka.cluster.ClusterSettings.DefaultDataCenter object ClusterDomainEventPublisherSpec { val config = """ @@ -55,44 +55,44 @@ class 
ClusterDomainEventPublisherSpec val dUp = TestMember(Address(protocol, "sys", "d", 2552), Up, Set("GRP")) val eUp = TestMember(Address(protocol, "sys", "e", 2552), Up, Set("GRP"), OtherDataCenter) - private def state(gossip: Gossip, self: UniqueAddress, dc: DataCenter) = + private def state(gossip: Gossip, self: UniqueAddress) = MembershipState(gossip, self, DefaultDataCenter, crossDcConnections = 5) - val emptyMembershipState = state(Gossip.empty, aUp.uniqueAddress, DefaultDataCenter) + val emptyMembershipState = state(Gossip.empty, aUp.uniqueAddress) val g0 = Gossip(members = SortedSet(aUp)).seen(aUp.uniqueAddress) - val state0 = state(g0, aUp.uniqueAddress, DefaultDataCenter) + val state0 = state(g0, aUp.uniqueAddress) val g1 = Gossip(members = SortedSet(aUp, cJoining)).seen(aUp.uniqueAddress).seen(cJoining.uniqueAddress) - val state1 = state(g1, aUp.uniqueAddress, DefaultDataCenter) + val state1 = state(g1, aUp.uniqueAddress) val g2 = Gossip(members = SortedSet(aUp, bExiting, cUp)).seen(aUp.uniqueAddress) - val state2 = state(g2, aUp.uniqueAddress, DefaultDataCenter) + val state2 = state(g2, aUp.uniqueAddress) val g3 = g2.seen(bExiting.uniqueAddress).seen(cUp.uniqueAddress) - val state3 = state(g3, aUp.uniqueAddress, DefaultDataCenter) + val state3 = state(g3, aUp.uniqueAddress) val g4 = Gossip(members = SortedSet(a51Up, aUp, bExiting, cUp)).seen(aUp.uniqueAddress) - val state4 = state(g4, aUp.uniqueAddress, DefaultDataCenter) + val state4 = state(g4, aUp.uniqueAddress) val g5 = Gossip(members = SortedSet(a51Up, aUp, bExiting, cUp)) .seen(aUp.uniqueAddress) .seen(bExiting.uniqueAddress) .seen(cUp.uniqueAddress) .seen(a51Up.uniqueAddress) - val state5 = state(g5, aUp.uniqueAddress, DefaultDataCenter) + val state5 = state(g5, aUp.uniqueAddress) val g6 = Gossip(members = SortedSet(aLeaving, bExiting, cUp)).seen(aUp.uniqueAddress) - val state6 = state(g6, aUp.uniqueAddress, DefaultDataCenter) + val state6 = state(g6, aUp.uniqueAddress) val g7 = Gossip(members = 
SortedSet(aExiting, bExiting, cUp)).seen(aUp.uniqueAddress) - val state7 = state(g7, aUp.uniqueAddress, DefaultDataCenter) + val state7 = state(g7, aUp.uniqueAddress) val g8 = Gossip( members = SortedSet(aUp, bExiting, cUp, dUp), overview = GossipOverview(reachability = Reachability.empty.unreachable(aUp.uniqueAddress, dUp.uniqueAddress))) .seen(aUp.uniqueAddress) - val state8 = state(g8, aUp.uniqueAddress, DefaultDataCenter) + val state8 = state(g8, aUp.uniqueAddress) val g9 = Gossip( members = SortedSet(aUp, bExiting, cUp, dUp, eUp), overview = GossipOverview(reachability = Reachability.empty.unreachable(aUp.uniqueAddress, eUp.uniqueAddress))) - val state9 = state(g9, aUp.uniqueAddress, DefaultDataCenter) + val state9 = state(g9, aUp.uniqueAddress) val g10 = Gossip( members = SortedSet(aUp, bExiting, cUp, dUp, eUp), overview = GossipOverview(reachability = Reachability.empty)) - val state10 = state(g10, aUp.uniqueAddress, DefaultDataCenter) + val state10 = state(g10, aUp.uniqueAddress) // created in beforeEach var memberSubscriber: TestProbe = _ @@ -129,7 +129,7 @@ class ClusterDomainEventPublisherSpec memberSubscriber.expectMsg(MemberExited(bExiting)) memberSubscriber.expectMsg(MemberUp(cUp)) memberSubscriber.expectMsg(LeaderChanged(Some(a51Up.address))) - memberSubscriber.expectNoMsg(500 millis) + memberSubscriber.expectNoMessage(500 millis) } "publish leader changed when old leader leaves and is removed" in { @@ -141,7 +141,7 @@ class ClusterDomainEventPublisherSpec publisher ! PublishChanges(state7) memberSubscriber.expectMsg(MemberExited(aExiting)) memberSubscriber.expectMsg(LeaderChanged(Some(cUp.address))) - memberSubscriber.expectNoMsg(500 millis) + memberSubscriber.expectNoMessage(500 millis) // at the removed member a an empty gossip is the last thing publisher ! 
PublishChanges(emptyMembershipState) memberSubscriber.expectMsg(MemberRemoved(aRemoved, Exiting)) @@ -158,19 +158,18 @@ class ClusterDomainEventPublisherSpec memberSubscriber.expectMsg(LeaderChanged(Some(a51Up.address))) publisher ! PublishChanges(state5) - memberSubscriber.expectNoMsg(500 millis) + memberSubscriber.expectNoMessage(500 millis) } "publish role leader changed" in { val subscriber = TestProbe() publisher ! Subscribe(subscriber.ref, InitialStateAsSnapshot, Set(classOf[RoleLeaderChanged])) subscriber.expectMsgType[CurrentClusterState] - publisher ! PublishChanges( - state(Gossip(members = SortedSet(cJoining, dUp)), dUp.uniqueAddress, DefaultDataCenter)) + publisher ! PublishChanges(state(Gossip(members = SortedSet(cJoining, dUp)), dUp.uniqueAddress)) subscriber.expectMsgAllOf( RoleLeaderChanged("GRP", Some(dUp.address)), RoleLeaderChanged(ClusterSettings.DcRolePrefix + ClusterSettings.DefaultDataCenter, Some(dUp.address))) - publisher ! PublishChanges(state(Gossip(members = SortedSet(cUp, dUp)), dUp.uniqueAddress, DefaultDataCenter)) + publisher ! PublishChanges(state(Gossip(members = SortedSet(cUp, dUp)), dUp.uniqueAddress)) subscriber.expectMsg(RoleLeaderChanged("GRP", Some(cUp.address))) } @@ -179,7 +178,7 @@ class ClusterDomainEventPublisherSpec publisher ! Subscribe(subscriber.ref, InitialStateAsSnapshot, Set(classOf[ClusterDomainEvent])) subscriber.expectMsgType[CurrentClusterState] // but only to the new subscriber - memberSubscriber.expectNoMsg(500 millis) + memberSubscriber.expectNoMessage(500 millis) } "send events corresponding to current state when subscribe" in { @@ -188,7 +187,7 @@ class ClusterDomainEventPublisherSpec publisher ! 
Subscribe(subscriber.ref, InitialStateAsEvents, Set(classOf[MemberEvent], classOf[ReachabilityEvent])) subscriber.receiveN(4).toSet should be(Set(MemberUp(aUp), MemberUp(cUp), MemberUp(dUp), MemberExited(bExiting))) subscriber.expectMsg(UnreachableMember(dUp)) - subscriber.expectNoMsg(500 millis) + subscriber.expectNoMessage(500 millis) } "send datacenter reachability events" in { @@ -196,10 +195,10 @@ class ClusterDomainEventPublisherSpec publisher ! PublishChanges(state9) publisher ! Subscribe(subscriber.ref, InitialStateAsEvents, Set(classOf[DataCenterReachabilityEvent])) subscriber.expectMsg(UnreachableDataCenter(OtherDataCenter)) - subscriber.expectNoMsg(500 millis) + subscriber.expectNoMessage(500 millis) publisher ! PublishChanges(state10) subscriber.expectMsg(ReachableDataCenter(OtherDataCenter)) - subscriber.expectNoMsg(500 millis) + subscriber.expectNoMessage(500 millis) } "support unsubscribe" in { @@ -208,7 +207,7 @@ class ClusterDomainEventPublisherSpec subscriber.expectMsgType[CurrentClusterState] publisher ! Unsubscribe(subscriber.ref, Some(classOf[MemberEvent])) publisher ! PublishChanges(state3) - subscriber.expectNoMsg(500 millis) + subscriber.expectNoMessage(500 millis) // but memberSubscriber is still subscriber memberSubscriber.expectMsg(MemberExited(bExiting)) memberSubscriber.expectMsg(MemberUp(cUp)) @@ -220,10 +219,10 @@ class ClusterDomainEventPublisherSpec subscriber.expectMsgType[CurrentClusterState] publisher ! PublishChanges(state2) subscriber.expectMsgType[SeenChanged] - subscriber.expectNoMsg(500 millis) + subscriber.expectNoMessage(500 millis) publisher ! 
PublishChanges(state3) subscriber.expectMsgType[SeenChanged] - subscriber.expectNoMsg(500 millis) + subscriber.expectNoMessage(500 millis) } "publish ClusterShuttingDown and Removed when stopped" in { diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala index ff062acebf..3ecb315bb0 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala @@ -202,7 +202,6 @@ class ClusterHeartbeatSenderStateSpec extends WordSpec with Matchers { case HeartbeatRsp => if (node != selfUniqueAddress && state.ring.nodes.contains(node)) { val oldUnreachable = state.oldReceiversNowUnreachable - val oldReceivers = state.activeReceivers val oldRingReceivers = state.ring.myReceivers state = state.heartbeatRsp(node) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index 27fd842115..b864f5f3c4 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -142,8 +142,6 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { } "allow to resolve remotePathOf any actor" in { - val remotePath = cluster.remotePathOf(testActor) - testActor.path.address.host should ===(None) cluster.remotePathOf(testActor).uid should ===(testActor.path.uid) cluster.remotePathOf(testActor).address should ===(selfAddress) diff --git a/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala b/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala index 5e337e0bc2..59db80ced8 100644 --- a/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/DowningProviderSpec.scala @@ -9,19 +9,20 @@ import 
java.util.concurrent.atomic.AtomicBoolean import akka.ConfigurationException import akka.actor.{ ActorSystem, Props } import akka.testkit.TestKit.{ awaitCond, shutdownActorSystem } +import akka.util.unused import com.typesafe.config.ConfigFactory import org.scalatest.{ Matchers, WordSpec } import scala.concurrent.duration._ -class FailingDowningProvider(system: ActorSystem) extends DowningProvider { +class FailingDowningProvider(@unused system: ActorSystem) extends DowningProvider { override val downRemovalMargin: FiniteDuration = 20.seconds override def downingActorProps: Option[Props] = { throw new ConfigurationException("this provider never works") } } -class DummyDowningProvider(system: ActorSystem) extends DowningProvider { +class DummyDowningProvider(@unused system: ActorSystem) extends DowningProvider { override val downRemovalMargin: FiniteDuration = 20.seconds val actorPropsAccessed = new AtomicBoolean(false) diff --git a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala index c48b04a949..e337cabbf4 100644 --- a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala +++ b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala @@ -5,14 +5,16 @@ package akka.cluster import java.util.concurrent.atomic.AtomicReference + import akka.remote.FailureDetector import com.typesafe.config.Config import akka.event.EventStream +import akka.util.unused /** * User controllable "puppet" failure detector. 
*/ -class FailureDetectorPuppet(config: Config, ev: EventStream) extends FailureDetector { +class FailureDetectorPuppet(@unused config: Config, @unused ev: EventStream) extends FailureDetector { trait Status object Up extends Status diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 42b6b9ff21..1d843e8d48 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -491,7 +491,6 @@ class GossipSpec extends WordSpec with Matchers { val g = Gossip(members = SortedSet(dc1a1, joining)) g.member(joining.uniqueAddress).status should ===(Joining) - val oldMembers = g.members val updated = g.update(SortedSet(joining.copy(status = Up))) diff --git a/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingPerfSpec.scala b/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingPerfSpec.scala index 7be8da5274..51c34137c1 100644 --- a/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingPerfSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingPerfSpec.scala @@ -23,7 +23,7 @@ class HeartbeatNodeRingPerfSpec extends WordSpec with Matchers { val heartbeatNodeRing = createHeartbeatNodeRingOfSize(nodesSize) private def checkThunkForRing(ring: HeartbeatNodeRing, thunk: HeartbeatNodeRing => Unit, times: Int): Unit = - for (i <- 1 to times) thunk(ring) + for (_ <- 1 to times) thunk(ring) private def myReceivers(ring: HeartbeatNodeRing): Unit = { val r = HeartbeatNodeRing(ring.selfAddress, ring.nodes, Set.empty, ring.monitoredByNrOfMembers) diff --git a/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala index d20b87dfd9..ea57848116 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ReachabilityPerfSpec.scala @@ -7,6 +7,7 @@ package 
akka.cluster import org.scalatest.WordSpec import org.scalatest.Matchers import akka.actor.Address +import com.github.ghik.silencer.silent class ReachabilityPerfSpec extends WordSpec with Matchers { @@ -26,6 +27,7 @@ class ReachabilityPerfSpec extends WordSpec with Matchers { r.unreachable(observer, subject).reachable(observer, subject) } + @silent private def addUnreachable(base: Reachability, count: Int): Reachability = { val observers = base.versions.keySet.take(count) val subjects = Stream.continually(base.versions.keySet).flatten.iterator @@ -45,13 +47,13 @@ class ReachabilityPerfSpec extends WordSpec with Matchers { r2: Reachability, thunk: (Reachability, Reachability) => Unit, times: Int): Unit = { - for (i <- 1 to times) { + for (_ <- 1 to times) { thunk(Reachability(r1.records, r1.versions), Reachability(r2.records, r2.versions)) } } private def checkThunkFor(r1: Reachability, thunk: Reachability => Unit, times: Int): Unit = { - for (i <- 1 to times) { + for (_ <- 1 to times) { thunk(Reachability(r1.records, r1.versions)) } } @@ -71,12 +73,10 @@ class ReachabilityPerfSpec extends WordSpec with Matchers { } private def allUnreachableOrTerminated(r1: Reachability): Unit = { - val record = r1.records.head r1.allUnreachableOrTerminated.isEmpty should ===(false) } private def allUnreachable(r1: Reachability): Unit = { - val record = r1.records.head r1.allUnreachable.isEmpty should ===(false) } diff --git a/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala b/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala index dae8edbf90..6262c99f2b 100644 --- a/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/StartupWithOneThreadSpec.scala @@ -60,7 +60,7 @@ class StartupWithOneThreadSpec(startTime: Long) extends AkkaSpec(StartupWithOneT system.actorOf(testProps) ! "hello" system.actorOf(testProps) ! 
"hello" - val cluster = Cluster(system) + Cluster(system) (System.nanoTime - startTime).nanos.toMillis should be < (system.settings.CreationTimeout.duration - 2.second).toMillis diff --git a/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala b/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala index 63ca48dd02..796851750c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/VectorClockPerfSpec.scala @@ -49,7 +49,7 @@ class VectorClockPerfSpec extends WordSpec with Matchers { def checkThunkFor(vc1: VectorClock, vc2: VectorClock, thunk: (VectorClock, VectorClock) => Unit, times: Int): Unit = { val vcc1 = copyVectorClock(vc1) val vcc2 = copyVectorClock(vc2) - for (i <- 1 to times) { + for (_ <- 1 to times) { thunk(vcc1, vcc2) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala index 3783a24708..f1e99ff6d0 100644 --- a/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/protobuf/ClusterMessageSerializerSpec.scala @@ -12,6 +12,7 @@ import akka.routing.RoundRobinPool import collection.immutable.SortedSet import akka.testkit.{ AkkaSpec, TestKit } +import com.github.ghik.silencer.silent import com.typesafe.config.ConfigFactory class ClusterMessageSerializerSpec extends AkkaSpec("akka.actor.provider = cluster") { @@ -147,7 +148,8 @@ class ClusterMessageSerializerSpec extends AkkaSpec("akka.actor.provider = clust pool.settings.totalInstances should ===(123) pool.settings.maxInstancesPerNode should ===(345) pool.settings.allowLocalRoutees should ===(true) - pool.settings.useRole should ===(Some("role ABC")) + @silent + val _ = pool.settings.useRole should ===(Some("role ABC")) pool.settings.useRoles should ===(Set("role ABC")) } } finally { diff --git 
a/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala index 99457c6b79..a6fa3be306 100644 --- a/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/routing/ClusterRouterSupervisorSpec.scala @@ -11,7 +11,7 @@ import akka.actor.OneForOneStrategy object ClusterRouterSupervisorSpec { - class KillableActor(testActor: ActorRef) extends Actor { + class KillableActor() extends Actor { def receive = { case "go away" => @@ -41,7 +41,7 @@ class ClusterRouterSupervisorSpec extends AkkaSpec(""" SupervisorStrategy.Stop }), ClusterRouterPoolSettings(totalInstances = 1, maxInstancesPerNode = 1, allowLocalRoutees = true)) - .props(Props(classOf[KillableActor], testActor)), + .props(Props(classOf[KillableActor])), name = "therouter") router ! "go away" diff --git a/project/AkkaDisciplinePlugin.scala b/project/AkkaDisciplinePlugin.scala index c7d3ec9f53..6539e60a1a 100644 --- a/project/AkkaDisciplinePlugin.scala +++ b/project/AkkaDisciplinePlugin.scala @@ -26,7 +26,6 @@ object AkkaDisciplinePlugin extends AutoPlugin with ScalafixSupport { "akka-contrib", // To be reviewed "akka-actor-typed-tests", - "akka-cluster", "akka-bench-jmh", "akka-bench-jmh-typed", "akka-multi-node-testkit",