Introduce warning silencer plugin (#26588)

So now we can compile akka-distributed-data with
-Xfatal-warnings, though I'm not yet sure about
enabling the (other) undisciplineScalacOptions

* Fix multi-node silencing
* Fix scaladoc warnings
* Introduce annotation to declare ccompat use
* Add explicit toString
* Fix deprecation on 2.13
* Move 'immutable' ccompat helpers to shared ccompat package
* Add MiMa for internal scala 2.13 compatibility class
* Internal API markers
* Fix scaladoc generation
Got bitten by https://github.com/scala/bug/issues/11021
Author: Arnout Engelen, 2019-03-26 14:41:29 +01:00
Committed by: Patrik Nordwall
parent 140e5e0faa
commit d390fcf183
43 changed files with 130 additions and 74 deletions
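
The build-level setup is not part of this diff; for context, wiring the silencer
compiler plugin into an sbt build looks roughly like this (a sketch assuming
silencer 1.3.x coordinates, not taken from this commit):

    // build.sbt -- sketch, not from this commit; the version is an assumption
    val SilencerVersion = "1.3.1"
    libraryDependencies ++= Seq(
      compilerPlugin("com.github.ghik" %% "silencer-plugin" % SilencerVersion),
      "com.github.ghik" %% "silencer-lib" % SilencerVersion % Provided
    )
    // the point of the exercise: warnings become compile errors
    scalacOptions += "-Xfatal-warnings"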

@@ -17,7 +17,9 @@ import akka.util.ccompat._
  * INTERNAL API: Used by the Replicator actor.
  * Extracted to separate trait to make it easy to test.
  */
-@InternalApi private[akka] trait DeltaPropagationSelector {
+@ccompatUsedUntil213
+@InternalApi
+private[akka] trait DeltaPropagationSelector {
   private var _propagationCount = 0L
   def propagationCount: Long = _propagationCount
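
Note: the definition of `@ccompatUsedUntil213` is not shown in this diff. A
minimal sketch of what such a marker annotation could look like (the real one
lives in `akka.util.ccompat` and may differ):

    package akka.util.ccompat

    import scala.annotation.StaticAnnotation

    // Hypothetical sketch: marks a scope that imports the ccompat shims,
    // which do real work on 2.11/2.12 but are largely no-ops on 2.13,
    // so the "unused import" warning can be filtered for annotated scopes.
    private[akka] class ccompatUsedUntil213 extends StaticAnnotation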

@@ -216,7 +216,7 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
   /**
    * Adds an entry to the map
-   * @see [[#put]]
+   * @see [[ORMap#put(node:akka\.cluster\.ddata\.SelfUniqueAddress*]]
    */
   def :+(entry: (A, B))(implicit node: SelfUniqueAddress): ORMap[A, B] = {
     val (key, value) = entry
@@ -235,12 +235,13 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
    * on other nodes and the outcome depends on what `ReplicatedData`
    * type that is used.
    *
-   * Consider using [[#updated]] instead of `put` if you want modify
-   * existing entry.
+   * Consider using [[ORMap#updated(node:akka\.cluster\.ddata\.SelfUniqueAddress*]]
+   * instead of `put` if you want modify existing entry.
    *
    * `IllegalArgumentException` is thrown if you try to replace an existing `ORSet`
    * value, because important history can be lost when replacing the `ORSet` and
-   * undesired effects of merging will occur. Use [[ORMultiMap]] or [[#updated]] instead.
+   * undesired effects of merging will occur. Use [[ORMultiMap]] or
+   * [[ORMap#updated(node:akka\.cluster\.ddata\.SelfUniqueAddress*]] instead.
    */
   def put(node: SelfUniqueAddress, key: A, value: B): ORMap[A, B] = put(node.uniqueAddress, key, value)
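
These `@see` rewrites disambiguate overloaded methods for scaladoc: a bare
`[[#put]]` can resolve to several overloads, so the link names the class, the
method, and the (backslash-escaped) start of one overload's signature, with a
trailing `*` standing in for the rest. A toy illustration with hypothetical
names:

    object Greeter {
      /**
       * [[#greet]] would be ambiguous between the two overloads below;
       * [[Greeter#greet(name:String*]] selects the one-argument version.
       */
      def greet(): Unit = println("hello")
      def greet(name: String): Unit = println(s"hello $name")
    }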

@@ -136,7 +136,7 @@ final class ORMultiMap[A, B] private[akka] (
   /**
    * Convenience for put. Requires an implicit SelfUniqueAddress.
-   * @see [[#put]]
+   * @see [[ORMultiMap#put(node:akka\.cluster\.ddata\.SelfUniqueAddress,key:A,value:Set*]]
    */
   def :+(entry: (A, Set[B]))(implicit node: SelfUniqueAddress): ORMultiMap[A, B] = {
     val (key, value) = entry
@@ -196,7 +196,7 @@ final class ORMultiMap[A, B] private[akka] (
   /**
    * Convenience for remove. Requires an implicit Cluster.
-   * @see [[#remove]]
+   * @see [[ORMultiMap#remove(node:akka\.cluster\.ddata\.SelfUniqueAddress*]]
    */
   @deprecated("Use `remove` that takes a `SelfUniqueAddress` parameter instead.", since = "2.5.20")
   def -(key: A)(implicit node: Cluster): ORMultiMap[A, B] = remove(node.selfUniqueAddress, key)

@@ -388,8 +388,9 @@ final class ORSet[A] private[akka] (
   /**
    * Removes all elements from the set, but keeps the history.
-   * This has the same result as using [[#remove]] for each
-   * element, but it is more efficient.
+   * This has the same result as using
+   * [[ORSet#remove(node:akka\.cluster\.ddata\.SelfUniqueAddress*]]
+   * for each element, but it is more efficient.
    */
   def clear(@unused node: SelfUniqueAddress): ORSet[A] = clear()
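
`@unused` silences the "parameter never used" lint for `node`, which is kept
only for API symmetry. The import backing `@unused` is not visible in this
excerpt; on 2.13 the standard library ships an equivalent, used in this
sketch:

    import scala.annotation.unused

    // The parameter exists for source compatibility; the body ignores it.
    // @unused keeps the unused-parameter warning (and -Xfatal-warnings) quiet.
    def clear(@unused node: String): Set[Int] = Set.empty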

@@ -55,6 +55,7 @@ import scala.annotation.varargs
 import akka.util.JavaDurationConverters._
 import akka.util.ccompat._
+@ccompatUsedUntil213
 object ReplicatorSettings {
   /**

@@ -386,5 +386,5 @@ final case class ManyVersionVector(versions: TreeMap[UniqueAddress, Long]) exten
     else this
   override def toString: String =
-    versions.map { case ((n, v)) => n + " -> " + v }.mkString("VersionVector(", ", ", ")")
+    versions.map { case ((n, v)) => n.toString + " -> " + v }.mkString("VersionVector(", ", ", ")")
 }
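
The extra `.toString` sidesteps a 2.13 deprecation: `n + " -> " + v` with a
non-`String` `n` goes through `Predef.any2stringadd`, which is deprecated in
2.13.0 and therefore fatal under `-Xfatal-warnings`. In isolation:

    val n = new Object
    val bad  = n + " -> 1"           // deprecated any2stringadd conversion on 2.13
    val good = n.toString + " -> 1"  // explicit conversion, no warning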

@@ -25,11 +25,14 @@ import akka.protobuf.{ ByteString, GeneratedMessage }
 import akka.util.ByteString.UTF_8
 import java.io.NotSerializableException
+import com.github.ghik.silencer.silent
 import akka.actor.ActorRef
 import akka.cluster.ddata.protobuf.msg.ReplicatorMessages.OtherMessage
 import akka.serialization.Serialization
 import akka.util.ccompat._
+@ccompatUsedUntil213
 private object ReplicatedDataSerializer {
   /*
    * Generic superclass to allow to compare Entry types used in protobuf.
@@ -43,6 +46,8 @@ private object ReplicatedDataSerializer {
    */
   def getKey(entry: A): Any
   final def compare(x: A, y: A): Int = compareKeys(getKey(x), getKey(y))
+  @silent
   private final def compareKeys(t1: Any, t2: Any): Int = (t1, t2) match {
     case (k1: String, k2: String) => k1.compareTo(k2)
     case (k1: String, k2)         => -1
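
`@silent` (from the silencer library imported above) scopes the suppression to
a single definition rather than the whole compilation unit; later silencer
versions also accept a message-pattern regex such as `@silent("deprecated")`.
A self-contained sketch:

    import com.github.ghik.silencer.silent

    @deprecated("use renderNew instead", "1.0")
    def renderOld(): String = "old"

    // Calling the deprecated method would fail under -Xfatal-warnings;
    // @silent confines the suppression to this one definition.
    @silent
    def render(): String = renderOld()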

@@ -37,7 +37,9 @@ import akka.util.ccompat._
 /**
  * INTERNAL API
  */
-@InternalApi private[akka] object ReplicatorMessageSerializer {
+@ccompatUsedUntil213
+@InternalApi
+private[akka] object ReplicatorMessageSerializer {
   /**
    * A cache that is designed for a small number (&lt;= 32) of

@@ -25,6 +25,7 @@ import akka.util.ccompat._
 /**
  * Some useful serialization helper methods.
  */
+@ccompatUsedUntil213
 trait SerializationSupport {
   private final val BufferSize = 1024 * 4

@@ -16,8 +16,9 @@ import akka.actor.ActorSystem
 import akka.actor.ActorRef
 import scala.concurrent.Await
 import akka.cluster.MemberStatus
-import akka.util.ccompat.imm._
+import akka.util.ccompat._
+@ccompatUsedUntil213
 object DurablePruningSpec extends MultiNodeConfig {
   val first = role("first")
   val second = role("second")
@@ -75,6 +76,7 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN
     val sys2 = ActorSystem(system.name, system.settings.config)
     val cluster2 = Cluster(sys2)
+    val distributedData2 = DistributedData(sys2)
     val replicator2 = startReplicator(sys2)
     val probe2 = TestProbe()(sys2)
     Cluster(sys2).join(node(first).address)
@@ -98,7 +100,9 @@ class DurablePruningSpec extends MultiNodeSpec(DurablePruningSpec) with STMultiN
     replicator ! Update(KeyA, GCounter(), WriteLocal)(_ :+ 3)
     expectMsg(UpdateSuccess(KeyA, None))
-    replicator2.tell(Update(KeyA, GCounter(), WriteLocal)(_.increment(cluster2, 2)), probe2.ref)
+    replicator2.tell(
+      Update(KeyA, GCounter(), WriteLocal)(_.increment(distributedData2.selfUniqueAddress, 2)),
+      probe2.ref)
     probe2.expectMsg(UpdateSuccess(KeyA, None))
     enterBarrier("updates-done")

@@ -331,7 +331,7 @@ class ReplicatorMapDeltaSpec extends MultiNodeSpec(ReplicatorMapDeltaSpec) with
       enterBarrier("replicated-2")
       // no OversizedPayloadException logging
-      errorLogProbe.expectNoMsg(100.millis)
+      errorLogProbe.expectNoMessage(100.millis)
       enterBarrierAfterTestStep()
     }
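
`expectNoMsg` was deprecated in the TestKit earlier in the 2.5 series;
`expectNoMessage` is the drop-in replacement, so this change and the one below
are mechanical renames:

    import akka.actor.ActorSystem
    import akka.testkit.TestProbe
    import scala.concurrent.duration._

    implicit val system: ActorSystem = ActorSystem("example")
    val probe = TestProbe()
    probe.expectNoMessage(100.millis)  // was: probe.expectNoMsg(100.millis)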

@@ -239,7 +239,7 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST
       // after full replication should still not be able to update with data from removed node
       // but it would not work after removal of the PruningPerformed markers
-      expectNoMsg(maxPruningDissemination + 3.seconds)
+      expectNoMessage(maxPruningDissemination + 3.seconds)
       runOn(first) {
         updateAfterPruning(expectedValue = 12)

@@ -24,7 +24,7 @@ object LotsOfDataBot {
     if (args.isEmpty)
       startup(Seq("2551", "2552", "0"))
     else
-      startup(args)
+      startup(args.toIndexedSeq)
   }
   def startup(ports: Seq[String]): Unit = {
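
On 2.13, `Seq` means `immutable.Seq` and the implicit copying conversion from
`Array` (`Predef.copyArrayToImmutableIndexedSeq`) is deprecated, so
`args: Array[String]` no longer flows silently into a `Seq[String]` parameter.
The explicit conversion compiles cleanly on both 2.12 and 2.13:

    def startup(ports: Seq[String]): Unit = ports.foreach(println)

    def main(args: Array[String]): Unit =
      startup(args.toIndexedSeq)  // explicit copy instead of the deprecated implicit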