replace unicode arrows

* ⇒, →, ←
* because we don't want to show them in documentation snippets, and it's
  complicated to avoid that when the snippets are located in src/test/scala
  in individual modules
* don't replace the object `→` in FSM.scala and PersistentFSM.scala
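
The change is purely mechanical: each unicode operator is swapped for its ASCII
equivalent (⇒ becomes =>, → becomes ->, ← becomes <-) with identical semantics.
A minimal before/after sketch of the pattern, taken from the first hunk below:

    // before: unicode arrows in pattern matches
    def currentVersion(key: KeyId): Long = deltaCounter.get(key) match {
      case Some(v) ⇒ v
      case None    ⇒ 0L
    }

    // after: plain ASCII arrows, same behavior
    def currentVersion(key: KeyId): Long = deltaCounter.get(key) match {
      case Some(v) => v
      case None    => 0L
    }

The same substitution applies to → in tuple construction (key → value becomes
key -> value) and to ← in for-comprehension generators.
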
Patrik Nordwall 2019-02-09 15:25:39 +01:00
parent e4d38f92a4
commit 5c96a5f556
1521 changed files with 18846 additions and 18786 deletions

@@ -35,21 +35,21 @@ import akka.util.ccompat._
def maxDeltaSize: Int
def currentVersion(key: KeyId): Long = deltaCounter.get(key) match {
case Some(v) ⇒ v
case None ⇒ 0L
case Some(v) => v
case None => 0L
}
def update(key: KeyId, delta: ReplicatedData): Unit = {
// bump the counter for each update
val version = deltaCounter.get(key) match {
case Some(c) ⇒ c + 1
case None ⇒ 1L
case Some(c) => c + 1
case None => 1L
}
deltaCounter = deltaCounter.updated(key, version)
val deltaEntriesForKey = deltaEntries.get(key) match {
case Some(m) ⇒ m
case None ⇒ TreeMap.empty[Long, ReplicatedData]
case Some(m) => m
case None => TreeMap.empty[Long, ReplicatedData]
}
deltaEntries = deltaEntries.updated(key, deltaEntriesForKey.updated(version, delta))
@@ -91,12 +91,12 @@ import akka.util.ccompat._
var result = Map.empty[Address, DeltaPropagation]
var cache = Map.empty[(KeyId, Long, Long), ReplicatedData]
slice.foreach { node ⇒
slice.foreach { node =>
// collect the deltas that have not already been sent to the node and merge
// them into a delta group
var deltas = Map.empty[KeyId, (ReplicatedData, Long, Long)]
deltaEntries.foreach {
case (key, entries) ⇒
case (key, entries) =>
val deltaSentToNodeForKey = deltaSentToNode.getOrElse(key, TreeMap.empty[Address, Long])
val j = deltaSentToNodeForKey.getOrElse(node, 0L)
val deltaEntriesAfterJ = deltaEntriesAfter(entries, j)
@@ -107,25 +107,25 @@ import akka.util.ccompat._
// so we cache the merged results
val cacheKey = (key, fromSeqNr, toSeqNr)
val deltaGroup = cache.get(cacheKey) match {
case None ⇒
case None =>
val group = deltaEntriesAfterJ.valuesIterator.reduceLeft {
(d1, d2) ⇒
(d1, d2) =>
val merged = d2 match {
case NoDeltaPlaceholder ⇒ NoDeltaPlaceholder
case _ ⇒
case NoDeltaPlaceholder => NoDeltaPlaceholder
case _ =>
// this is fine also if d1 is a NoDeltaPlaceholder
d1.merge(d2.asInstanceOf[d1.T])
}
merged match {
case s: ReplicatedDeltaSize if s.deltaSize >= maxDeltaSize ⇒
case s: ReplicatedDeltaSize if s.deltaSize >= maxDeltaSize =>
// discard too large deltas
NoDeltaPlaceholder
case _ ⇒ merged
case _ => merged
}
}
cache = cache.updated(cacheKey, group)
group
case Some(group) ⇒ group
case Some(group) => group
}
deltas = deltas.updated(key, (deltaGroup, fromSeqNr, toSeqNr))
deltaSentToNode = deltaSentToNode.updated(key, deltaSentToNodeForKey.updated(node, deltaEntriesAfterJ.lastKey))
@@ -146,24 +146,24 @@ import akka.util.ccompat._
private def deltaEntriesAfter(entries: TreeMap[Long, ReplicatedData], version: Long): TreeMap[Long, ReplicatedData] =
entries.rangeFrom(version) match {
case ntrs if ntrs.isEmpty ⇒ ntrs
case ntrs if ntrs.firstKey == version ⇒ ntrs.tail // exclude first, i.e. version j that was already sent
case ntrs ⇒ ntrs
case ntrs if ntrs.isEmpty => ntrs
case ntrs if ntrs.firstKey == version => ntrs.tail // exclude first, i.e. version j that was already sent
case ntrs => ntrs
}
def hasDeltaEntries(key: KeyId): Boolean = {
deltaEntries.get(key) match {
case Some(m) ⇒ m.nonEmpty
case None ⇒ false
case Some(m) => m.nonEmpty
case None => false
}
}
private def findSmallestVersionPropagatedToAllNodes(key: KeyId, all: Vector[Address]): Long = {
deltaSentToNode.get(key) match {
case None ⇒ 0L
case Some(deltaSentToNodeForKey) ⇒
case None => 0L
case Some(deltaSentToNodeForKey) =>
if (deltaSentToNodeForKey.isEmpty) 0L
else if (all.exists(node ⇒ !deltaSentToNodeForKey.contains(node))) 0L
else if (all.exists(node => !deltaSentToNodeForKey.contains(node))) 0L
else deltaSentToNodeForKey.valuesIterator.min
}
}
@@ -174,22 +174,22 @@ import akka.util.ccompat._
deltaEntries = Map.empty
else {
deltaEntries = deltaEntries.map {
case (key, entries) ⇒
case (key, entries) =>
val minVersion = findSmallestVersionPropagatedToAllNodes(key, all)
val deltaEntriesAfterMin = deltaEntriesAfter(entries, minVersion)
// TODO perhaps also remove oldest when deltaCounter is too far ahead (e.g. 10 cycles)
key → deltaEntriesAfterMin
key -> deltaEntriesAfterMin
}
}
}
def cleanupRemovedNode(address: Address): Unit = {
deltaSentToNode = deltaSentToNode.map {
case (key, deltaSentToNodeForKey) ⇒
key → (deltaSentToNodeForKey - address)
case (key, deltaSentToNodeForKey) =>
key -> (deltaSentToNodeForKey - address)
}
}
}

@@ -91,8 +91,8 @@ object DurableStore {
override def toString(): String = s"DurableDataEnvelope($data)"
override def hashCode(): Int = data.hashCode
override def equals(o: Any): Boolean = o match {
case other: DurableDataEnvelope ⇒ data == other.data
case _ ⇒ false
case other: DurableDataEnvelope => data == other.data
case _ => false
}
}
}
@@ -120,14 +120,14 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
val manifest = serializer.manifest(new DurableDataEnvelope(Replicator.Internal.DeletedData))
val writeBehindInterval = config.getString("lmdb.write-behind-interval").toLowerCase match {
case "off" Duration.Zero
case _ config.getDuration("lmdb.write-behind-interval", MILLISECONDS).millis
case "off" => Duration.Zero
case _ => config.getDuration("lmdb.write-behind-interval", MILLISECONDS).millis
}
val dir = config.getString("lmdb.dir") match {
case path if path.endsWith("ddata") ⇒
case path if path.endsWith("ddata") =>
new File(s"$path-${context.system.name}-${self.path.parent.name}-${Cluster(context.system).selfAddress.port.get}")
case path ⇒
case path =>
new File(path)
}
@@ -135,8 +135,8 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
private var _lmdb: OptionVal[Lmdb] = OptionVal.None
private def lmdb(): Lmdb = _lmdb match {
case OptionVal.Some(l) ⇒ l
case OptionVal.None ⇒
case OptionVal.Some(l) => l
case OptionVal.None =>
val t0 = System.nanoTime()
log.info("Using durable data in LMDB directory [{}]", dir.getCanonicalPath)
val env = {
@@ -195,7 +195,7 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
def receive = init
def init: Receive = {
case LoadAll ⇒
case LoadAll =>
if (dir.exists && dir.list().length > 0) {
val l = lmdb()
val t0 = System.nanoTime()
@@ -204,7 +204,7 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
val iter = l.db.iterate(tx)
try {
var n = 0
val loadData = LoadData(iter.asScala.map { entry ⇒
val loadData = LoadData(iter.asScala.map { entry =>
n += 1
val keyArray = new Array[Byte](entry.key.remaining)
entry.key.get(keyArray)
@@ -212,7 +212,7 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
val valArray = new Array[Byte](entry.`val`.remaining)
entry.`val`.get(valArray)
val envelope = serializer.fromBinary(valArray, manifest).asInstanceOf[DurableDataEnvelope]
key → envelope
key -> envelope
}.toMap)
if (loadData.data.nonEmpty)
sender() ! loadData
@@ -225,7 +225,7 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
Try(iter.close())
}
} catch {
case NonFatal(e) ⇒
case NonFatal(e) =>
throw new LoadFailed("failed to load durable distributed-data", e)
} finally {
Try(tx.close())
@@ -238,7 +238,7 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
}
def active: Receive = {
case Store(key, data, reply) ⇒
case Store(key, data, reply) =>
try {
lmdb() // init
if (writeBehindInterval.length == 0) {
@@ -249,21 +249,21 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
pending.put(key, data)
}
reply match {
case Some(StoreReply(successMsg, _, replyTo)) ⇒
case Some(StoreReply(successMsg, _, replyTo)) =>
replyTo ! successMsg
case None ⇒
case None =>
}
} catch {
case NonFatal(e) ⇒
case NonFatal(e) =>
log.error(e, "failed to store [{}]", key)
reply match {
case Some(StoreReply(_, failureMsg, replyTo)) ⇒
case Some(StoreReply(_, failureMsg, replyTo)) =>
replyTo ! failureMsg
case None ⇒
case None =>
}
}
case WriteBehind ⇒
case WriteBehind =>
writeBehind()
}
@@ -275,8 +275,8 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
l.keyBuffer.put(key.getBytes(ByteString.UTF_8)).flip()
l.valueBuffer.put(value).flip()
tx match {
case OptionVal.None ⇒ l.db.put(l.keyBuffer, l.valueBuffer)
case OptionVal.Some(t) ⇒ l.db.put(t, l.keyBuffer, l.valueBuffer)
case OptionVal.None => l.db.put(l.keyBuffer, l.valueBuffer)
case OptionVal.Some(t) => l.db.put(t, l.keyBuffer, l.valueBuffer)
}
} finally {
val l = lmdb()
@@ -300,7 +300,7 @@ final class LmdbDurableStore(config: Config) extends Actor with ActorLogging {
log.debug("store and commit of [{}] entries took [{} ms]", pending.size,
TimeUnit.NANOSECONDS.toMillis(System.nanoTime - t0))
} catch {
case NonFatal(e) ⇒
case NonFatal(e) =>
import scala.collection.JavaConverters._
log.error(e, "failed to store [{}]", pending.keySet.asScala.mkString(","))
tx.abort()

@@ -22,7 +22,7 @@ import akka.annotation.InternalApi
* i.e. if used outside the Replicator infrastructure, but the worst thing that can happen is that
* a full merge is performed instead of the fast forward merge.
*/
@InternalApi private[akka] trait FastMerge { self: ReplicatedData ⇒
@InternalApi private[akka] trait FastMerge { self: ReplicatedData =>
private var ancestor: FastMerge = null

@@ -54,7 +54,7 @@ final class GCounter private[akka] (
/**
* Scala API: Current total value of the counter.
*/
def value: BigInt = state.values.foldLeft(Zero) { (acc, v) ⇒ acc + v }
def value: BigInt = state.values.foldLeft(Zero) { (acc, v) => acc + v }
/**
* Java API: Current total value of the counter.
@@ -92,14 +92,14 @@ final class GCounter private[akka] (
if (n == 0) this
else {
val nextValue = state.get(key) match {
case Some(v) ⇒ v + n
case None ⇒ n
case Some(v) => v + n
case None => n
}
val newDelta = delta match {
case None ⇒ new GCounter(Map(key → nextValue))
case Some(d) ⇒ new GCounter(d.state + (key → nextValue))
case None => new GCounter(Map(key -> nextValue))
case Some(d) => new GCounter(d.state + (key -> nextValue))
}
assignAncestor(new GCounter(state + (key → nextValue), Some(newDelta)))
assignAncestor(new GCounter(state + (key -> nextValue), Some(newDelta)))
}
}
@@ -108,7 +108,7 @@ final class GCounter private[akka] (
else if (this.isAncestorOf(that)) that.clearAncestor()
else {
var merged = that.state
for ((key, thisValue) ← state) {
for ((key, thisValue) <- state) {
val thatValue = merged.getOrElse(key, Zero)
if (thisValue > thatValue)
merged = merged.updated(key, thisValue)
@@ -132,8 +132,8 @@ final class GCounter private[akka] (
override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): GCounter =
state.get(removedNode) match {
case Some(value) ⇒ new GCounter(state - removedNode).increment(collapseInto, value)
case None ⇒ this
case Some(value) => new GCounter(state - removedNode).increment(collapseInto, value)
case None => this
}
override def pruningCleanup(removedNode: UniqueAddress): GCounter =
@@ -144,8 +144,8 @@ final class GCounter private[akka] (
override def toString: String = s"GCounter($value)"
override def equals(o: Any): Boolean = o match {
case other: GCounter ⇒ state == other.state
case _ ⇒ false
case other: GCounter => state == other.state
case _ => false
}
override def hashCode: Int = state.hashCode

@@ -61,8 +61,8 @@ final case class GSet[A] private (elements: Set[A])(override val delta: Option[G
*/
def add(element: A): GSet[A] = {
val newDelta = delta match {
case Some(e) ⇒ Some(new GSet(e.elements + element)(None))
case None ⇒ Some(new GSet[A](Set.apply[A](element))(None))
case Some(e) => Some(new GSet(e.elements + element)(None))
case None => Some(new GSet[A](Set.apply[A](element))(None))
}
assignAncestor(new GSet[A](elements + element)(newDelta))
}

@@ -27,8 +27,8 @@ object Key {
abstract class Key[+T <: ReplicatedData](val id: Key.KeyId) extends Serializable {
override final def equals(o: Any): Boolean = o match {
case k: Key[_] ⇒ id == k.id
case _ ⇒ false
case k: Key[_] => id == k.id
case _ => false
}
override final def hashCode: Int = id.hashCode

@@ -66,7 +66,7 @@ final class LWWMap[A, B] private[akka] (
/**
* Scala API: All entries of the map.
*/
def entries: Map[A, B] = underlying.entries.map { case (k, r) ⇒ k → r.value }
def entries: Map[A, B] = underlying.entries.map { case (k, r) => k -> r.value }
/**
* Java API: All entries of the map.
@@ -140,8 +140,8 @@ final class LWWMap[A, B] private[akka] (
*/
@InternalApi private[akka] def put(node: UniqueAddress, key: A, value: B, clock: Clock[B]): LWWMap[A, B] = {
val newRegister = underlying.get(key) match {
case Some(r) ⇒ r.withValue(node, value, clock)
case None ⇒ LWWRegister(node, value, clock)
case Some(r) => r.withValue(node, value, clock)
case None => LWWRegister(node, value, clock)
}
new LWWMap(underlying.put(node, key, newRegister))
}
@@ -200,8 +200,8 @@ final class LWWMap[A, B] private[akka] (
override def toString: String = s"LWW$entries" //e.g. LWWMap(a -> 1, b -> 2)
override def equals(o: Any): Boolean = o match {
case other: LWWMap[_, _] ⇒ underlying == other.underlying
case _ ⇒ false
case other: LWWMap[_, _] => underlying == other.underlying
case _ => false
}
override def hashCode: Int = underlying.hashCode

@@ -204,9 +204,9 @@ final class LWWRegister[A] private[akka] (
override def toString: String = s"LWWRegister($value)"
override def equals(o: Any): Boolean = o match {
case other: LWWRegister[_] ⇒
case other: LWWRegister[_] =>
timestamp == other.timestamp && value == other.value && node == other.node
case _ ⇒ false
case _ => false
}
override def hashCode: Int = {

@@ -57,8 +57,8 @@ object ORMap {
def zeroTag: ZeroTag
override def zero: DeltaReplicatedData = zeroTag.zero
override def merge(that: DeltaOp): DeltaOp = that match {
case other: AtomicDeltaOp[A, B] ⇒ DeltaGroup(Vector(this, other))
case DeltaGroup(ops) ⇒ DeltaGroup(this +: ops)
case other: AtomicDeltaOp[A, B] => DeltaGroup(Vector(this, other))
case DeltaGroup(ops) => DeltaGroup(this +: ops)
}
override def deltaSize: Int = 1
}
@@ -67,21 +67,21 @@ object ORMap {
/** INTERNAL API */
@InternalApi private[akka] final case class PutDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp, value: (A, B), zeroTag: ZeroTag) extends AtomicDeltaOp[A, B] {
override def merge(that: DeltaOp): DeltaOp = that match {
case put: PutDeltaOp[A, B] if this.value._1 == put.value._1 ⇒
case put: PutDeltaOp[A, B] if this.value._1 == put.value._1 =>
new PutDeltaOp[A, B](this.underlying.merge(put.underlying), put.value, zeroTag)
case update: UpdateDeltaOp[A, B] if update.values.size == 1 && update.values.contains(this.value._1) ⇒
case update: UpdateDeltaOp[A, B] if update.values.size == 1 && update.values.contains(this.value._1) =>
val (key, elem1) = this.value
val newValue = elem1 match {
case e1: DeltaReplicatedData ⇒
case e1: DeltaReplicatedData =>
val e2 = update.values.head._2.asInstanceOf[e1.D]
(key, e1.mergeDelta(e2).asInstanceOf[B])
case _ ⇒
case _ =>
val elem2 = update.values.head._2.asInstanceOf[elem1.T]
(key, elem1.merge(elem2).asInstanceOf[B])
}
new PutDeltaOp[A, B](this.underlying.merge(update.underlying), newValue, zeroTag)
case other: AtomicDeltaOp[A, B] ⇒ DeltaGroup(Vector(this, other))
case DeltaGroup(ops) ⇒ DeltaGroup(this +: ops)
case other: AtomicDeltaOp[A, B] => DeltaGroup(Vector(this, other))
case DeltaGroup(ops) => DeltaGroup(this +: ops)
}
}
@@ -89,23 +89,23 @@ object ORMap {
/** INTERNAL API */
@InternalApi private[akka] final case class UpdateDeltaOp[A, B <: ReplicatedData](underlying: ORSet.DeltaOp, values: Map[A, B], zeroTag: ZeroTag) extends AtomicDeltaOp[A, B] {
override def merge(that: DeltaOp): DeltaOp = that match {
case update: UpdateDeltaOp[A, B] ⇒
case update: UpdateDeltaOp[A, B] =>
new UpdateDeltaOp[A, B](
this.underlying.merge(update.underlying),
update.values.foldLeft(this.values) {
(map, pair) ⇒
(map, pair) =>
val (key, value) = pair
if (this.values.contains(key)) {
val elem1 = this.values(key)
val elem2 = value.asInstanceOf[elem1.T]
map + (key → elem1.merge(elem2).asInstanceOf[B])
map + (key -> elem1.merge(elem2).asInstanceOf[B])
} else map + pair
},
zeroTag)
case put: PutDeltaOp[A, B] if this.values.size == 1 && this.values.contains(put.value._1) ⇒
case put: PutDeltaOp[A, B] if this.values.size == 1 && this.values.contains(put.value._1) =>
new PutDeltaOp[A, B](this.underlying.merge(put.underlying), put.value, zeroTag)
case other: AtomicDeltaOp[A, B] ⇒ DeltaGroup(Vector(this, other))
case DeltaGroup(ops) ⇒ DeltaGroup(this +: ops)
case other: AtomicDeltaOp[A, B] => DeltaGroup(Vector(this, other))
case DeltaGroup(ops) => DeltaGroup(this +: ops)
}
}
@@ -122,23 +122,23 @@ object ORMap {
@InternalApi private[akka] final case class DeltaGroup[A, B <: ReplicatedData](ops: immutable.IndexedSeq[DeltaOp])
extends DeltaOp with ReplicatedDeltaSize {
override def merge(that: DeltaOp): DeltaOp = that match {
case that: AtomicDeltaOp[A, B] ⇒
case that: AtomicDeltaOp[A, B] =>
ops.last match {
case thisPut: PutDeltaOp[A, B] ⇒
case thisPut: PutDeltaOp[A, B] =>
val merged = thisPut.merge(that)
merged match {
case op: AtomicDeltaOp[A, B] ⇒ DeltaGroup(ops.dropRight(1) :+ op)
case DeltaGroup(thatOps) ⇒ DeltaGroup(ops.dropRight(1) ++ thatOps)
case op: AtomicDeltaOp[A, B] => DeltaGroup(ops.dropRight(1) :+ op)
case DeltaGroup(thatOps) => DeltaGroup(ops.dropRight(1) ++ thatOps)
}
case thisUpdate: UpdateDeltaOp[A, B] ⇒
case thisUpdate: UpdateDeltaOp[A, B] =>
val merged = thisUpdate.merge(that)
merged match {
case op: AtomicDeltaOp[A, B] ⇒ DeltaGroup(ops.dropRight(1) :+ op)
case DeltaGroup(thatOps) ⇒ DeltaGroup(ops.dropRight(1) ++ thatOps)
case op: AtomicDeltaOp[A, B] => DeltaGroup(ops.dropRight(1) :+ op)
case DeltaGroup(thatOps) => DeltaGroup(ops.dropRight(1) ++ thatOps)
}
case _ ⇒ DeltaGroup(ops :+ that)
case _ => DeltaGroup(ops :+ that)
}
case DeltaGroup(thatOps) ⇒ DeltaGroup(ops ++ thatOps)
case DeltaGroup(thatOps) => DeltaGroup(ops ++ thatOps)
}
override def zero: DeltaReplicatedData = ops.headOption.fold(ORMap.empty[A, B].asInstanceOf[DeltaReplicatedData])(_.zero)
@@ -187,7 +187,7 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
* Scala API: Get the value associated with the key if there is one,
* else return the given default.
*/
def getOrElse(key: A, default: ⇒ B): B = values.getOrElse(key, default)
def getOrElse(key: A, default: => B): B = values.getOrElse(key, default)
def contains(key: A): Boolean = values.contains(key)
@@ -239,7 +239,7 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
"undesired effects of merging will occur. Use `ORMultiMap` or `ORMap.updated` instead.")
else {
val newKeys = keys.resetDelta.add(node, key)
val putDeltaOp = PutDeltaOp(newKeys.delta.get, key → value, zeroTag)
val putDeltaOp = PutDeltaOp(newKeys.delta.get, key -> value, zeroTag)
// put forcibly damages history, so we consciously propagate full value that will overwrite previous value
new ORMap(newKeys, values.updated(key, value), zeroTag, Some(newDelta(putDeltaOp)))
}
@@ -250,11 +250,11 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
* If there is no current value for the `key` the `initial` value will be
* passed to the `modify` function.
*/
def updated(node: SelfUniqueAddress, key: A, initial: B)(modify: B ⇒ B): ORMap[A, B] =
def updated(node: SelfUniqueAddress, key: A, initial: B)(modify: B => B): ORMap[A, B] =
updated(node.uniqueAddress, key, initial)(modify)
@deprecated("Use `updated` that takes a `SelfUniqueAddress` parameter instead.", since = "2.5.20")
def updated(node: Cluster, key: A, initial: B)(modify: B ⇒ B): ORMap[A, B] =
def updated(node: Cluster, key: A, initial: B)(modify: B => B): ORMap[A, B] =
updated(node.selfUniqueAddress, key, initial)(modify)
/**
@@ -266,7 +266,7 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
@Deprecated
@deprecated("use update for the Java API as updated is ambiguous with the Scala API", "2.5.20")
def updated(node: Cluster, key: A, initial: B, modify: java.util.function.Function[B, B]): ORMap[A, B] =
updated(node.selfUniqueAddress, key, initial)(value ⇒ modify.apply(value))
updated(node.selfUniqueAddress, key, initial)(value => modify.apply(value))
/**
* Java API: Replace a value by applying the `modify` function on the existing value.
@@ -275,20 +275,20 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
* passed to the `modify` function.
*/
def update(node: SelfUniqueAddress, key: A, initial: B, modify: java.util.function.Function[B, B]): ORMap[A, B] =
updated(node.uniqueAddress, key, initial)(value ⇒ modify.apply(value))
updated(node.uniqueAddress, key, initial)(value => modify.apply(value))
@Deprecated
@deprecated("Use `update` that takes a `SelfUniqueAddress` parameter instead.", since = "2.5.20")
def update(node: Cluster, key: A, initial: B, modify: java.util.function.Function[B, B]): ORMap[A, B] =
updated(node, key, initial)(value ⇒ modify.apply(value))
updated(node, key, initial)(value => modify.apply(value))
/**
* INTERNAL API
*/
@InternalApi private[akka] def updated(node: UniqueAddress, key: A, initial: B, valueDeltas: Boolean = false)(modify: B ⇒ B): ORMap[A, B] = {
@InternalApi private[akka] def updated(node: UniqueAddress, key: A, initial: B, valueDeltas: Boolean = false)(modify: B => B): ORMap[A, B] = {
val (oldValue, hasOldValue) = values.get(key) match {
case Some(old) ⇒ (old, true)
case _ ⇒ (initial, false)
case Some(old) => (old, true)
case _ => (initial, false)
}
// Optimization: for some types - like GSet, GCounter, PNCounter and ORSet - that are delta based
// we can emit (and later merge) their deltas instead of full updates.
@@ -297,17 +297,17 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
// before removing the key - like e.g. ORMultiMap.emptyWithValueDeltas does
val newKeys = keys.resetDelta.add(node, key)
oldValue match {
case _: DeltaReplicatedData if valueDeltas ⇒
case _: DeltaReplicatedData if valueDeltas =>
val newValue = modify(oldValue.asInstanceOf[DeltaReplicatedData].resetDelta.asInstanceOf[B])
val newValueDelta = newValue.asInstanceOf[DeltaReplicatedData].delta
val deltaOp = newValueDelta match {
case Some(d) if hasOldValue ⇒ UpdateDeltaOp(newKeys.delta.get, Map(key → d), zeroTag)
case _ ⇒ PutDeltaOp(newKeys.delta.get, key → newValue, zeroTag)
case Some(d) if hasOldValue => UpdateDeltaOp(newKeys.delta.get, Map(key -> d), zeroTag)
case _ => PutDeltaOp(newKeys.delta.get, key -> newValue, zeroTag)
}
new ORMap(newKeys, values.updated(key, newValue), zeroTag, Some(newDelta(deltaOp)))
case _ ⇒
case _ =>
val newValue = modify(oldValue)
val deltaOp = PutDeltaOp(newKeys.delta.get, key → newValue, zeroTag)
val deltaOp = PutDeltaOp(newKeys.delta.get, key -> newValue, zeroTag)
new ORMap(newKeys, values.updated(key, newValue), zeroTag, Some(newDelta(deltaOp)))
}
}
@@ -357,9 +357,9 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
private def dryMerge(that: ORMap[A, B], mergedKeys: ORSet[A], valueKeysIterator: Iterator[A]): ORMap[A, B] = {
var mergedValues = Map.empty[A, B]
valueKeysIterator.foreach { key ⇒
valueKeysIterator.foreach { key =>
(this.values.get(key), that.values.get(key)) match {
case (Some(thisValue), Some(thatValue)) ⇒
case (Some(thisValue), Some(thatValue)) =>
if (thisValue.getClass != thatValue.getClass) {
val errMsg = s"Wrong type for merging [$key] in [${getClass.getName}], existing type " +
s"[${thisValue.getClass.getName}], got [${thatValue.getClass.getName}]"
@@ -368,15 +368,15 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
// TODO can we get rid of these (safe) casts?
val mergedValue = thisValue.merge(thatValue.asInstanceOf[thisValue.T]).asInstanceOf[B]
mergedValues = mergedValues.updated(key, mergedValue)
case (Some(thisValue), None) ⇒
case (Some(thisValue), None) =>
if (mergedKeys.contains(key))
mergedValues = mergedValues.updated(key, thisValue)
// else thisValue is a tombstone, but we don't want to carry it forward, as the other side does not have the element at all
case (None, Some(thatValue)) ⇒
case (None, Some(thatValue)) =>
if (mergedKeys.contains(key))
mergedValues = mergedValues.updated(key, thatValue)
// else thatValue is a tombstone, but we don't want to carry it forward, as the other side does not have the element at all
case (None, None) ⇒ throw new IllegalStateException(s"missing value for $key")
case (None, None) => throw new IllegalStateException(s"missing value for $key")
}
}
new ORMap(mergedKeys, mergedValues, zeroTag = zeroTag)
@@ -404,52 +404,52 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
private def dryMergeDelta(thatDelta: ORMap.DeltaOp, withValueDeltas: Boolean = false): ORMap[A, B] = {
def mergeValue(lvalue: ReplicatedData, rvalue: ReplicatedData): B =
(lvalue, rvalue) match {
case (v: DeltaReplicatedData, delta: ReplicatedDelta) ⇒
case (v: DeltaReplicatedData, delta: ReplicatedDelta) =>
v.mergeDelta(delta.asInstanceOf[v.D]).asInstanceOf[B]
case _ ⇒
case _ =>
lvalue.merge(rvalue.asInstanceOf[lvalue.T]).asInstanceOf[B]
}
var mergedKeys: ORSet[A] = this.keys
var (mergedValues, tombstonedVals): (Map[A, B], Map[A, B]) = this.values.partition { case (k, _) ⇒ this.keys.contains(k) }
var (mergedValues, tombstonedVals): (Map[A, B], Map[A, B]) = this.values.partition { case (k, _) => this.keys.contains(k) }
val processDelta: PartialFunction[ORMap.DeltaOp, Unit] = {
case putOp: PutDeltaOp[A, B] ⇒
case putOp: PutDeltaOp[A, B] =>
val keyDelta = putOp.underlying
mergedKeys = mergedKeys.mergeDelta(keyDelta)
mergedValues = mergedValues + putOp.value // put is destructive and propagates only full values of B!
case removeOp: RemoveDeltaOp[A, B] ⇒
case removeOp: RemoveDeltaOp[A, B] =>
val removedKey = removeOp.underlying match {
// if op is RemoveDeltaOp then it must have exactly one element in the elements
case op: ORSet.RemoveDeltaOp[_] ⇒ op.underlying.elements.head.asInstanceOf[A]
case _ ⇒ throw new IllegalArgumentException("ORMap.RemoveDeltaOp must contain ORSet.RemoveDeltaOp inside")
case op: ORSet.RemoveDeltaOp[_] => op.underlying.elements.head.asInstanceOf[A]
case _ => throw new IllegalArgumentException("ORMap.RemoveDeltaOp must contain ORSet.RemoveDeltaOp inside")
}
mergedValues = mergedValues - removedKey
mergedKeys = mergedKeys.mergeDelta(removeOp.underlying)
// please note that if RemoveDeltaOp is not preceded by update clearing the value
// anomalies may result
case removeKeyOp: RemoveKeyDeltaOp[A, B] ⇒
case removeKeyOp: RemoveKeyDeltaOp[A, B] =>
// removeKeyOp tombstones values for later use
if (mergedValues.contains(removeKeyOp.removedKey)) {
tombstonedVals = tombstonedVals + (removeKeyOp.removedKey → mergedValues(removeKeyOp.removedKey))
tombstonedVals = tombstonedVals + (removeKeyOp.removedKey -> mergedValues(removeKeyOp.removedKey))
}
mergedValues = mergedValues - removeKeyOp.removedKey
mergedKeys = mergedKeys.mergeDelta(removeKeyOp.underlying)
case updateOp: UpdateDeltaOp[A, _] ⇒
case updateOp: UpdateDeltaOp[A, _] =>
mergedKeys = mergedKeys.mergeDelta(updateOp.underlying)
updateOp.values.foreach {
case (key, value) ⇒
case (key, value) =>
if (mergedKeys.contains(key)) {
if (mergedValues.contains(key)) {
mergedValues = mergedValues + (key → mergeValue(mergedValues(key), value))
mergedValues = mergedValues + (key -> mergeValue(mergedValues(key), value))
} else if (tombstonedVals.contains(key)) {
mergedValues = mergedValues + (key → mergeValue(tombstonedVals(key), value))
mergedValues = mergedValues + (key -> mergeValue(tombstonedVals(key), value))
} else {
value match {
case _: ReplicatedDelta ⇒
mergedValues = mergedValues + (key → mergeValue(value.asInstanceOf[ReplicatedDelta].zero, value))
case _ ⇒
mergedValues = mergedValues + (key → value.asInstanceOf[B])
case _: ReplicatedDelta =>
mergedValues = mergedValues + (key -> mergeValue(value.asInstanceOf[ReplicatedDelta].zero, value))
case _ =>
mergedValues = mergedValues + (key -> value.asInstanceOf[B])
}
}
}
@@ -457,10 +457,10 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
}
val processNestedDelta: PartialFunction[ORMap.DeltaOp, Unit] = {
case ORMap.DeltaGroup(ops) ⇒
case ORMap.DeltaGroup(ops) =>
ops.foreach {
processDelta.orElse {
case ORMap.DeltaGroup(args) ⇒
case ORMap.DeltaGroup(args) =>
throw new IllegalStateException("Cannot nest DeltaGroups")
}
}
@@ -490,32 +490,32 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
}
private def newDelta(deltaOp: ORMap.DeltaOp) = delta match {
case Some(d) ⇒
case Some(d) =>
d.merge(deltaOp)
case None ⇒
case None =>
deltaOp
}
override def modifiedByNodes: Set[UniqueAddress] = {
keys.modifiedByNodes union values.foldLeft(Set.empty[UniqueAddress]) {
case (acc, (_, data: RemovedNodePruning)) ⇒ acc union data.modifiedByNodes
case (acc, _) ⇒ acc
case (acc, (_, data: RemovedNodePruning)) => acc union data.modifiedByNodes
case (acc, _) => acc
}
}
override def needPruningFrom(removedNode: UniqueAddress): Boolean = {
keys.needPruningFrom(removedNode) || values.exists {
case (_, data: RemovedNodePruning) ⇒ data.needPruningFrom(removedNode)
case _ ⇒ false
case (_, data: RemovedNodePruning) => data.needPruningFrom(removedNode)
case _ => false
}
}
override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): ORMap[A, B] = {
val prunedKeys = keys.prune(removedNode, collapseInto)
val prunedValues = values.foldLeft(values) {
case (acc, (key, data: RemovedNodePruning)) if data.needPruningFrom(removedNode) ⇒
case (acc, (key, data: RemovedNodePruning)) if data.needPruningFrom(removedNode) =>
acc.updated(key, data.prune(removedNode, collapseInto).asInstanceOf[B])
case (acc, _) ⇒ acc
case (acc, _) => acc
}
new ORMap(prunedKeys, prunedValues, zeroTag = zeroTag)
}
@@ -523,9 +523,9 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
override def pruningCleanup(removedNode: UniqueAddress): ORMap[A, B] = {
val pruningCleanupedKeys = keys.pruningCleanup(removedNode)
val pruningCleanupedValues = values.foldLeft(values) {
case (acc, (key, data: RemovedNodePruning)) if data.needPruningFrom(removedNode) ⇒
case (acc, (key, data: RemovedNodePruning)) if data.needPruningFrom(removedNode) =>
acc.updated(key, data.pruningCleanup(removedNode).asInstanceOf[B])
case (acc, _) ⇒ acc
case (acc, _) => acc
}
new ORMap(pruningCleanupedKeys, pruningCleanupedValues, zeroTag = zeroTag)
}
@@ -535,8 +535,8 @@ final class ORMap[A, B <: ReplicatedData] private[akka] (
override def toString: String = s"OR$entries"
override def equals(o: Any): Boolean = o match {
case other: ORMap[_, _] ⇒ keys == other.keys && values == other.values
case _ ⇒ false
case other: ORMap[_, _] => keys == other.keys && values == other.values
case _ => false
}
override def hashCode: Int = {

@@ -48,8 +48,8 @@ object ORMultiMap {
* Extract the [[ORMultiMap#entries]] of an `ORMultiMap`.
*/
def unapply[A, B <: ReplicatedData](value: Any): Option[Map[A, Set[B]]] = value match {
case m: ORMultiMap[A, B] @unchecked ⇒ Some(m.entries)
case _ ⇒ None
case m: ORMultiMap[A, B] @unchecked => Some(m.entries)
case _ => None
}
}
@@ -75,7 +75,7 @@ final class ORMultiMap[A, B] private[akka] (
if (withValueDeltas) {
val newUnderlying = underlying.mergeRetainingDeletedValues(that.underlying)
// Garbage collect the tombstones we no longer need, i.e. those that have Set() as a value.
val newValues = newUnderlying.values.filterNot { case (key, value) ⇒ !newUnderlying.keys.contains(key) && value.isEmpty }
val newValues = newUnderlying.values.filterNot { case (key, value) => !newUnderlying.keys.contains(key) && value.isEmpty }
new ORMultiMap[A, B](new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta), withValueDeltas)
} else
new ORMultiMap(underlying.merge(that.underlying), withValueDeltas)
@@ -85,9 +85,9 @@ final class ORMultiMap[A, B] private[akka] (
* Scala API: All entries of a multimap where keys are strings and values are sets.
*/
def entries: Map[A, Set[B]] = if (withValueDeltas)
underlying.entries.collect { case (k, v) if underlying.keys.elements.contains(k) ⇒ k → v.elements }
underlying.entries.collect { case (k, v) if underlying.keys.elements.contains(k) => k -> v.elements }
else
underlying.entries.map { case (k, v) ⇒ k → v.elements }
underlying.entries.map { case (k, v) => k -> v.elements }
/**
* Java API: All entries of a multimap where keys are strings and values are sets.
@@ -96,9 +96,9 @@ final class ORMultiMap[A, B] private[akka] (
import scala.collection.JavaConverters._
val result = new java.util.HashMap[A, java.util.Set[B]]
if (withValueDeltas)
underlying.entries.foreach { case (k, v) ⇒ if (underlying.keys.elements.contains(k)) result.put(k, v.elements.asJava) }
underlying.entries.foreach { case (k, v) => if (underlying.keys.elements.contains(k)) result.put(k, v.elements.asJava) }
else
underlying.entries.foreach { case (k, v) ⇒ result.put(k, v.elements.asJava) }
underlying.entries.foreach { case (k, v) => result.put(k, v.elements.asJava) }
result
}
@@ -115,7 +115,7 @@ final class ORMultiMap[A, B] private[akka] (
* Scala API: Get the set associated with the key if there is one,
* else return the given default.
*/
def getOrElse(key: A, default: ⇒ Set[B]): Set[B] =
def getOrElse(key: A, default: => Set[B]): Set[B] =
get(key).getOrElse(default)
def contains(key: A): Boolean = underlying.keys.elements.contains(key)
@@ -170,8 +170,8 @@ final class ORMultiMap[A, B] private[akka] (
* INTERNAL API
*/
@InternalApi private[akka] def put(node: UniqueAddress, key: A, value: Set[B]): ORMultiMap[A, B] = {
val newUnderlying = underlying.updated(node, key, ORSet.empty[B], valueDeltas = withValueDeltas) { existing ⇒
value.foldLeft(existing.clear(node)) { (s, element) ⇒ s.add(node, element) }
val newUnderlying = underlying.updated(node, key, ORSet.empty[B], valueDeltas = withValueDeltas) { existing =>
value.foldLeft(existing.clear(node)) { (s, element) => s.add(node, element) }
}
new ORMultiMap(newUnderlying, withValueDeltas)
}
@@ -203,7 +203,7 @@ final class ORMultiMap[A, B] private[akka] (
*/
@InternalApi private[akka] def remove(node: UniqueAddress, key: A): ORMultiMap[A, B] = {
if (withValueDeltas) {
val u = underlying.updated(node, key, ORSet.empty[B], valueDeltas = true) { existing ⇒ existing.clear(node) }
val u = underlying.updated(node, key, ORSet.empty[B], valueDeltas = true) { existing => existing.clear(node) }
new ORMultiMap(u.removeKey(node, key), withValueDeltas)
} else {
new ORMultiMap(underlying.remove(node, key), withValueDeltas)
@@ -263,12 +263,12 @@ final class ORMultiMap[A, B] private[akka] (
val newUnderlying = {
val u = underlying.updated(node, key, ORSet.empty[B], valueDeltas = withValueDeltas)(_.remove(node, element))
u.get(key) match {
case Some(s) if s.isEmpty ⇒
case Some(s) if s.isEmpty =>
if (withValueDeltas)
u.removeKey(node, key)
else
u.remove(node, key)
case _ ⇒ u
case _ => u
}
}
new ORMultiMap(newUnderlying, withValueDeltas)
@@ -307,7 +307,7 @@ final class ORMultiMap[A, B] private[akka] (
if (withValueDeltas) {
val newUnderlying = underlying.mergeDeltaRetainingDeletedValues(thatDelta)
// Garbage collect the tombstones we no longer need, i.e. those that have Set() as a value.
val newValues = newUnderlying.values.filterNot { case (key, value) ⇒ !newUnderlying.keys.contains(key) && value.isEmpty }
val newValues = newUnderlying.values.filterNot { case (key, value) => !newUnderlying.keys.contains(key) && value.isEmpty }
new ORMultiMap[A, B](new ORMap(newUnderlying.keys, newValues, newUnderlying.zeroTag, newUnderlying.delta), withValueDeltas)
} else
new ORMultiMap(underlying.mergeDelta(thatDelta), withValueDeltas)
@@ -329,8 +329,8 @@ final class ORMultiMap[A, B] private[akka] (
override def toString: String = s"ORMulti$entries"
override def equals(o: Any): Boolean = o match {
case other: ORMultiMap[_, _] ⇒ underlying == other.underlying
case _ ⇒ false
case other: ORMultiMap[_, _] => underlying == other.underlying
case _ => false
}
override def hashCode: Int = underlying.hashCode

@@ -30,8 +30,8 @@ object ORSet {
* Extract the [[ORSet#elements]] of an `ORSet`.
*/
def unapply(a: ReplicatedData): Option[Set[Any]] = a match {
case s: ORSet[Any] @unchecked ⇒ Some(s.elements)
case _ ⇒ None
case s: ORSet[Any] @unchecked => Some(s.elements)
case _ => None
}
/**
@@ -56,13 +56,13 @@ object ORSet {
@InternalApi private[akka] final case class AddDeltaOp[A](underlying: ORSet[A]) extends AtomicDeltaOp[A] {
override def merge(that: DeltaOp): DeltaOp = that match {
case AddDeltaOp(u) ⇒
case AddDeltaOp(u) =>
// Note that we only merge deltas originating from the same node
AddDeltaOp(new ORSet(
concatElementsMap(u.elementsMap.asInstanceOf[Map[A, Dot]]),
underlying.vvector.merge(u.vvector)))
case _: AtomicDeltaOp[A] ⇒ DeltaGroup(Vector(this, that))
case DeltaGroup(ops) ⇒ DeltaGroup(this +: ops)
case _: AtomicDeltaOp[A] => DeltaGroup(Vector(this, that))
case DeltaGroup(ops) => DeltaGroup(this +: ops)
}
private def concatElementsMap(thatMap: Map[A, Dot]): Map[A, Dot] = {
@@ -80,16 +80,16 @@ object ORSet {
throw new IllegalArgumentException(s"RemoveDeltaOp should contain one removed element, but was $underlying")
override def merge(that: DeltaOp): DeltaOp = that match {
case _: AtomicDeltaOp[A] ⇒ DeltaGroup(Vector(this, that)) // keep it simple for removals
case DeltaGroup(ops) ⇒ DeltaGroup(this +: ops)
case _: AtomicDeltaOp[A] => DeltaGroup(Vector(this, that)) // keep it simple for removals
case DeltaGroup(ops) => DeltaGroup(this +: ops)
}
}
/** INTERNAL API: Used for `clear` but could be used for other cases also */
@InternalApi private[akka] final case class FullStateDeltaOp[A](underlying: ORSet[A]) extends AtomicDeltaOp[A] {
override def merge(that: DeltaOp): DeltaOp = that match {
case _: AtomicDeltaOp[A] ⇒ DeltaGroup(Vector(this, that))
case DeltaGroup(ops) ⇒ DeltaGroup(this +: ops)
case _: AtomicDeltaOp[A] => DeltaGroup(Vector(this, that))
case DeltaGroup(ops) => DeltaGroup(this +: ops)
}
}
@@ -99,14 +99,14 @@ object ORSet {
@InternalApi private[akka] final case class DeltaGroup[A](ops: immutable.IndexedSeq[DeltaOp])
extends DeltaOp with ReplicatedDeltaSize {
override def merge(that: DeltaOp): DeltaOp = that match {
case thatAdd: AddDeltaOp[A] ⇒
case thatAdd: AddDeltaOp[A] =>
// merge AddDeltaOp into last AddDeltaOp in the group, if possible
ops.last match {
case thisAdd: AddDeltaOp[A] ⇒ DeltaGroup(ops.dropRight(1) :+ thisAdd.merge(thatAdd))
case _ ⇒ DeltaGroup(ops :+ thatAdd)
case thisAdd: AddDeltaOp[A] => DeltaGroup(ops.dropRight(1) :+ thisAdd.merge(thatAdd))
case _ => DeltaGroup(ops :+ thatAdd)
}
case DeltaGroup(thatOps) ⇒ DeltaGroup(ops ++ thatOps)
case _ ⇒ DeltaGroup(ops :+ that)
case DeltaGroup(thatOps) => DeltaGroup(ops ++ thatOps)
case _ => DeltaGroup(ops :+ that)
}
override def zero: ORSet[A] = ORSet.empty
@@ -127,8 +127,8 @@ object ORSet {
@tailrec def dropDots(remaining: List[(UniqueAddress, Long)], acc: List[(UniqueAddress, Long)]): List[(UniqueAddress, Long)] =
remaining match {
case Nil ⇒ acc
case (d @ (node, v1)) :: rest ⇒
case Nil => acc
case (d @ (node, v1)) :: rest =>
val v2 = vvector.versionAt(node)
if (v2 >= v1)
// dot is dominated by version vector, drop it
@@ -141,12 +141,12 @@ object ORSet {
VersionVector.empty
else {
dot match {
case OneVersionVector(node, v1) ⇒
case OneVersionVector(node, v1) =>
// if dot is dominated by version vector, drop it
if (vvector.versionAt(node) >= v1) VersionVector.empty
else dot
case ManyVersionVector(vs) ⇒
case ManyVersionVector(vs) =>
val remaining = vs.toList
val newDots = dropDots(remaining, Nil)
VersionVector(newDots)
@@ -163,11 +163,11 @@ object ORSet {
private def mergeCommonKeys[A](commonKeys: Iterator[A], lhs: ORSet[A], rhs: ORSet[A]): Map[A, ORSet.Dot] = {
commonKeys.foldLeft(Map.empty[A, ORSet.Dot]) {
case (acc, k) ⇒
case (acc, k) =>
val lhsDots = lhs.elementsMap(k)
val rhsDots = rhs.elementsMap(k)
(lhsDots, rhsDots) match {
case (OneVersionVector(n1, v1), OneVersionVector(n2, v2)) ⇒
case (OneVersionVector(n1, v1), OneVersionVector(n2, v2)) =>
if (n1 == n2 && v1 == v2)
// one single common dot
acc.updated(k, lhsDots)
@@ -180,9 +180,9 @@ object ORSet {
if (merged.isEmpty) acc
else acc.updated(k, merged)
}
case (ManyVersionVector(lhsVs), ManyVersionVector(rhsVs)) ⇒
case (ManyVersionVector(lhsVs), ManyVersionVector(rhsVs)) =>
val commonDots = lhsVs.filter {
case (thisDotNode, v) ⇒ rhsVs.get(thisDotNode).exists(_ == v)
case (thisDotNode, v) => rhsVs.get(thisDotNode).exists(_ == v)
}
val commonDotsKeys = commonDots.keys
val lhsUniqueDots = lhsVs -- commonDotsKeys
@@ -193,9 +193,9 @@ object ORSet {
// Perfectly possible that an item in both sets should be dropped
if (merged.isEmpty) acc
else acc.updated(k, merged)
case (ManyVersionVector(lhsVs), OneVersionVector(n2, v2)) ⇒
case (ManyVersionVector(lhsVs), OneVersionVector(n2, v2)) =>
val commonDots = lhsVs.filter {
case (n1, v1) ⇒ v1 == v2 && n1 == n2
case (n1, v1) => v1 == v2 && n1 == n2
}
val commonDotsKeys = commonDots.keys
val lhsUniqueDots = lhsVs -- commonDotsKeys
@@ -206,9 +206,9 @@ object ORSet {
// Perfectly possible that an item in both sets should be dropped
if (merged.isEmpty) acc
else acc.updated(k, merged)
case (OneVersionVector(n1, v1), ManyVersionVector(rhsVs)) ⇒
case (OneVersionVector(n1, v1), ManyVersionVector(rhsVs)) =>
val commonDots = rhsVs.filter {
case (n2, v2) ⇒ v1 == v2 && n1 == n2
case (n2, v2) => v1 == v2 && n1 == n2
}
val commonDotsKeys = commonDots.keys
val lhsUnique = if (commonDotsKeys.isEmpty) lhsDots else VersionVector.empty
@@ -235,7 +235,7 @@ object ORSet {
private def mergeDisjointKeys[A](keys: Iterator[A], elementsMap: Map[A, ORSet.Dot], vvector: VersionVector,
accumulator: Map[A, ORSet.Dot]): Map[A, ORSet.Dot] = {
keys.foldLeft(accumulator) {
case (acc, k) ⇒
case (acc, k) =>
val dots = elementsMap(k)
if (vvector > dots || vvector == dots)
acc
@@ -327,12 +327,12 @@ final class ORSet[A] private[akka] (
val newVvector = vvector + node
val newDot = VersionVector(node, newVvector.versionAt(node))
val newDelta = delta match {
case None ⇒
ORSet.AddDeltaOp(new ORSet(Map(element → newDot), newDot))
case Some(existing: ORSet.AddDeltaOp[A]) ⇒
existing.merge(ORSet.AddDeltaOp(new ORSet(Map(element → newDot), newDot)))
case Some(d) ⇒
d.merge(ORSet.AddDeltaOp(new ORSet(Map(element → newDot), newDot)))
case None =>
ORSet.AddDeltaOp(new ORSet(Map(element -> newDot), newDot))
case Some(existing: ORSet.AddDeltaOp[A]) =>
existing.merge(ORSet.AddDeltaOp(new ORSet(Map(element -> newDot), newDot)))
case Some(d) =>
d.merge(ORSet.AddDeltaOp(new ORSet(Map(element -> newDot), newDot)))
}
assignAncestor(new ORSet(elementsMap.updated(element, newDot), newVvector, Some(newDelta)))
}
@@ -366,10 +366,10 @@ final class ORSet[A] private[akka] (
*/
@InternalApi private[akka] def remove(node: UniqueAddress, element: A): ORSet[A] = {
val deltaDot = VersionVector(node, vvector.versionAt(node))
val rmOp = ORSet.RemoveDeltaOp(new ORSet(Map(element → deltaDot), vvector))
val rmOp = ORSet.RemoveDeltaOp(new ORSet(Map(element -> deltaDot), vvector))
val newDelta = delta match {
case None ⇒ rmOp
case Some(d) ⇒ d.merge(rmOp)
case None => rmOp
case Some(d) => d.merge(rmOp)
}
assignAncestor(copy(elementsMap = elementsMap - element, delta = Some(newDelta)))
}
@@ -391,8 +391,8 @@ final class ORSet[A] private[akka] (
val newFullState = new ORSet[A](elementsMap = Map.empty, vvector)
val clearOp = ORSet.FullStateDeltaOp(newFullState)
val newDelta = delta match {
case None ⇒ clearOp
case Some(d) ⇒ d.merge(clearOp)
case None => clearOp
case Some(d) => d.merge(clearOp)
}
assignAncestor(newFullState.copy(delta = Some(newDelta)))
}
@@ -426,7 +426,7 @@ final class ORSet[A] private[akka] (
val entries00 = ORSet.mergeCommonKeys(commonKeys, this, that)
val entries0 =
if (addDeltaOp)
entries00 ++ this.elementsMap.filter { case (elem, _) ⇒ !that.elementsMap.contains(elem) }
entries00 ++ this.elementsMap.filter { case (elem, _) => !that.elementsMap.contains(elem) }
else {
val thisUniqueKeys = this.elementsMap.keysIterator.filterNot(that.elementsMap.contains)
ORSet.mergeDisjointKeys(thisUniqueKeys, this.elementsMap, that.vvector, entries00)
@@ -441,15 +441,15 @@ final class ORSet[A] private[akka] (
override def mergeDelta(thatDelta: ORSet.DeltaOp): ORSet[A] = {
thatDelta match {
case d: ORSet.AddDeltaOp[A] ⇒ dryMerge(d.underlying, addDeltaOp = true)
case d: ORSet.RemoveDeltaOp[A] ⇒ mergeRemoveDelta(d)
case d: ORSet.FullStateDeltaOp[A] ⇒ dryMerge(d.underlying, addDeltaOp = false)
case ORSet.DeltaGroup(ops) ⇒
case d: ORSet.AddDeltaOp[A] => dryMerge(d.underlying, addDeltaOp = true)
case d: ORSet.RemoveDeltaOp[A] => mergeRemoveDelta(d)
case d: ORSet.FullStateDeltaOp[A] => dryMerge(d.underlying, addDeltaOp = false)
case ORSet.DeltaGroup(ops) =>
ops.foldLeft(this) {
case (acc, op: ORSet.AddDeltaOp[A]) ⇒ acc.dryMerge(op.underlying, addDeltaOp = true)
case (acc, op: ORSet.RemoveDeltaOp[A]) ⇒ acc.mergeRemoveDelta(op)
case (acc, op: ORSet.FullStateDeltaOp[A]) ⇒ acc.dryMerge(op.underlying, addDeltaOp = false)
case (acc, op: ORSet.DeltaGroup[A]) ⇒
case (acc, op: ORSet.AddDeltaOp[A]) => acc.dryMerge(op.underlying, addDeltaOp = true)
case (acc, op: ORSet.RemoveDeltaOp[A]) => acc.mergeRemoveDelta(op)
case (acc, op: ORSet.FullStateDeltaOp[A]) => acc.dryMerge(op.underlying, addDeltaOp = false)
case (acc, op: ORSet.DeltaGroup[A]) =>
throw new IllegalArgumentException("ORSet.DeltaGroup should not be nested")
}
}
@@ -459,23 +459,23 @@ final class ORSet[A] private[akka] (
val that = thatDelta.underlying
val (elem, thatDot) = that.elementsMap.head
def deleteDots = that.vvector.versionsIterator
def deleteDotsNodes = deleteDots.map { case (dotNode, _) ⇒ dotNode }
def deleteDotsNodes = deleteDots.map { case (dotNode, _) => dotNode }
val newElementsMap = {
val thisDotOption = this.elementsMap.get(elem)
val deleteDotsAreGreater = deleteDots.forall {
case (dotNode, dotV) ⇒
case (dotNode, dotV) =>
thisDotOption match {
case Some(thisDot) ⇒ thisDot.versionAt(dotNode) <= dotV
case None ⇒ false
case Some(thisDot) => thisDot.versionAt(dotNode) <= dotV
case None => false
}
}
if (deleteDotsAreGreater) {
thisDotOption match {
case Some(thisDot) ⇒
if (thisDot.versionsIterator.forall { case (thisDotNode, _) ⇒ deleteDotsNodes.contains(thisDotNode) })
case Some(thisDot) =>
if (thisDot.versionsIterator.forall { case (thisDotNode, _) => deleteDotsNodes.contains(thisDotNode) })
elementsMap - elem
else elementsMap
case None ⇒
case None =>
elementsMap
}
} else
@@ -499,7 +499,7 @@ final class ORSet[A] private[akka] (
override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): ORSet[A] = {
val pruned = elementsMap.foldLeft(Map.empty[A, ORSet.Dot]) {
case (acc, (elem, dot)) ⇒
case (acc, (elem, dot)) =>
if (dot.needPruningFrom(removedNode)) acc.updated(elem, dot.prune(removedNode, collapseInto))
else acc
}
@@ -509,14 +509,14 @@ final class ORSet[A] private[akka] (
// re-add elements that were pruned, to bump dots to right vvector
val newSet = new ORSet(elementsMap = elementsMap ++ pruned, vvector = vvector.prune(removedNode, collapseInto))
pruned.keys.foldLeft(newSet) {
case (s, elem) ⇒ s.add(collapseInto, elem)
case (s, elem) => s.add(collapseInto, elem)
}
}
}
override def pruningCleanup(removedNode: UniqueAddress): ORSet[A] = {
val updated = elementsMap.foldLeft(elementsMap) {
case (acc, (elem, dot)) ⇒
case (acc, (elem, dot)) =>
if (dot.needPruningFrom(removedNode)) acc.updated(elem, dot.pruningCleanup(removedNode))
else acc
}
@@ -532,8 +532,8 @@ final class ORSet[A] private[akka] (
override def toString: String = s"OR$elements"
override def equals(o: Any): Boolean = o match {
case other: ORSet[_] ⇒ vvector == other.vvector && elementsMap == other.elementsMap
case _ ⇒ false
case other: ORSet[_] => vvector == other.vvector && elementsMap == other.elementsMap
case _ => false
}
override def hashCode: Int = {

@@ -177,12 +177,12 @@ final class PNCounter private[akka] (
override def delta: Option[PNCounter] = {
val incrementsDelta = increments.delta match {
case Some(d) ⇒ d
case None ⇒ GCounter.empty
case Some(d) => d
case None => GCounter.empty
}
val decrementsDelta = decrements.delta match {
case Some(d) ⇒ d
case None ⇒ GCounter.empty
case Some(d) => d
case None => GCounter.empty
}
Some(new PNCounter(incrementsDelta, decrementsDelta))
}
@@ -219,9 +219,9 @@ final class PNCounter private[akka] (
override def toString: String = s"PNCounter($value)"
override def equals(o: Any): Boolean = o match {
case other: PNCounter ⇒
case other: PNCounter =>
increments == other.increments && decrements == other.decrements
case _ ⇒ false
case _ => false
}
override def hashCode: Int = {

@@ -47,12 +47,12 @@ final class PNCounterMap[A] private[akka] (
type D = ORMap.DeltaOp
/** Scala API */
def entries: Map[A, BigInt] = underlying.entries.map { case (k, c) ⇒ k → c.value }
def entries: Map[A, BigInt] = underlying.entries.map { case (k, c) => k -> c.value }
/** Java API */
def getEntries: java.util.Map[A, BigInteger] = {
import scala.collection.JavaConverters._
underlying.entries.map { case (k, c) ⇒ k → c.value.bigInteger }.asJava
underlying.entries.map { case (k, c) => k -> c.value.bigInteger }.asJava
}
/**
@@ -186,8 +186,8 @@ final class PNCounterMap[A] private[akka] (
override def toString: String = s"PNCounter$entries"
override def equals(o: Any): Boolean = o match {
case other: PNCounterMap[A] ⇒ underlying == other.underlying
case _ ⇒ false
case other: PNCounterMap[A] => underlying == other.underlying
case _ => false
}
override def hashCode: Int = underlying.hashCode

@@ -32,10 +32,10 @@ import akka.annotation.InternalApi
def merge(that: PruningState): PruningState =
(this, that) match {
case (p1: PruningPerformed, p2: PruningPerformed) ⇒ if (p1.obsoleteTime >= p2.obsoleteTime) this else that
case (_: PruningPerformed, _) ⇒ this
case (_, _: PruningPerformed) ⇒ that
case (PruningInitialized(thisOwner, thisSeen), PruningInitialized(thatOwner, thatSeen)) ⇒
case (p1: PruningPerformed, p2: PruningPerformed) => if (p1.obsoleteTime >= p2.obsoleteTime) this else that
case (_: PruningPerformed, _) => this
case (_, _: PruningPerformed) => that
case (PruningInitialized(thisOwner, thisSeen), PruningInitialized(thatOwner, thatSeen)) =>
if (thisOwner == thatOwner)
PruningInitialized(thisOwner, thisSeen union thatSeen)
else if (Member.addressOrdering.compare(thisOwner.address, thatOwner.address) > 0)

@@ -33,7 +33,7 @@ import akka.cluster.UniqueAddress
import akka.serialization.SerializationExtension
import akka.util.ByteString
import com.typesafe.config.Config
import java.util.function.{ Function ⇒ JFunction }
import java.util.function.{ Function => JFunction }
import akka.dispatch.Dispatchers
import akka.actor.DeadLetterSuppression
import akka.cluster.ddata.Key.KeyR
@@ -70,13 +70,13 @@ object ReplicatorSettings {
*/
def apply(config: Config): ReplicatorSettings = {
val dispatcher = config.getString("use-dispatcher") match {
case "" Dispatchers.DefaultDispatcherId
case id id
case "" => Dispatchers.DefaultDispatcherId
case id => id
}
val pruningInterval = toRootLowerCase(config.getString("pruning-interval")) match {
case "off" | "false" Duration.Zero
case _ config.getDuration("pruning-interval", MILLISECONDS).millis
case "off" | "false" => Duration.Zero
case _ => config.getDuration("pruning-interval", MILLISECONDS).millis
}
import scala.collection.JavaConverters._
@@ -108,7 +108,7 @@ object ReplicatorSettings {
*/
@InternalApi private[akka] def name(system: ActorSystem, modifier: Option[String]): String = {
val name = system.settings.config.getString("akka.cluster.distributed-data.name")
modifier.map(s ⇒ s + name.take(1).toUpperCase + name.drop(1)).getOrElse(name)
modifier.map(s => s + name.take(1).toUpperCase + name.drop(1)).getOrElse(name)
}
}
@@ -221,8 +221,8 @@ final class ReplicatorSettings(
def withDispatcher(dispatcher: String): ReplicatorSettings = {
val d = dispatcher match {
case "" Dispatchers.DefaultDispatcherId
case id id
case "" => Dispatchers.DefaultDispatcherId
case id => id
}
copy(dispatcher = d)
}
@@ -504,12 +504,12 @@ object Replicator {
*/
def apply[A <: ReplicatedData](
key: Key[A], initial: A, writeConsistency: WriteConsistency,
request: Option[Any] = None)(modify: A ⇒ A): Update[A] =
request: Option[Any] = None)(modify: A => A): Update[A] =
Update(key, writeConsistency, request)(modifyWithInitial(initial, modify))
private def modifyWithInitial[A <: ReplicatedData](initial: A, modify: A ⇒ A): Option[A] ⇒ A = {
case Some(data) ⇒ modify(data)
case None ⇒ modify(initial)
private def modifyWithInitial[A <: ReplicatedData](initial: A, modify: A => A): Option[A] => A = {
case Some(data) => modify(data)
case None => modify(initial)
}
}
/**
@@ -529,7 +529,7 @@ object Replicator {
* for example not access `sender()` reference of an enclosing actor.
*/
final case class Update[A <: ReplicatedData](key: Key[A], writeConsistency: WriteConsistency,
request: Option[Any])(val modify: Option[A] ⇒ A)
request: Option[Any])(val modify: Option[A] => A)
extends Command[A] with NoSerializationVerificationNeeded {
/**
@@ -541,7 +541,7 @@ object Replicator {
*/
def this(
key: Key[A], initial: A, writeConsistency: WriteConsistency, modify: JFunction[A, A]) =
this(key, writeConsistency, None)(Update.modifyWithInitial(initial, data ⇒ modify.apply(data)))
this(key, writeConsistency, None)(Update.modifyWithInitial(initial, data => modify.apply(data)))
/**
* Java API: Modify value of local `Replicator` and replicate with given `writeConsistency`.
@@ -556,7 +556,7 @@ object Replicator {
*/
def this(
key: Key[A], initial: A, writeConsistency: WriteConsistency, request: Optional[Any], modify: JFunction[A, A]) =
this(key, writeConsistency, Option(request.orElse(null)))(Update.modifyWithInitial(initial, data ⇒ modify.apply(data)))
this(key, writeConsistency, Option(request.orElse(null)))(Update.modifyWithInitial(initial, data => modify.apply(data)))
}
@@ -720,8 +720,8 @@ object Replicator {
def needPruningFrom(removedNode: UniqueAddress): Boolean =
data match {
case r: RemovedNodePruning ⇒ r.needPruningFrom(removedNode)
case _ ⇒ false
case r: RemovedNodePruning => r.needPruningFrom(removedNode)
case _ => false
}
def initRemovedNodePruning(removed: UniqueAddress, owner: UniqueAddress): DataEnvelope = {
@@ -732,18 +732,18 @@ object Replicator {
def prune(from: UniqueAddress, pruningPerformed: PruningPerformed): DataEnvelope = {
data match {
case dataWithRemovedNodePruning: RemovedNodePruning ⇒
case dataWithRemovedNodePruning: RemovedNodePruning =>
require(pruning.contains(from))
pruning(from) match {
case PruningInitialized(owner, _) ⇒
case PruningInitialized(owner, _) =>
val prunedData = dataWithRemovedNodePruning.prune(from, owner)
copy(data = prunedData, pruning = pruning.updated(from, pruningPerformed),
deltaVersions = cleanedDeltaVersions(from))
case _ ⇒
case _ =>
this
}
case _ ⇒ this
case _ => this
}
}
@@ -752,11 +752,11 @@ object Replicator {
else {
val mergedPruning =
pruning.foldLeft(other.pruning) {
case (acc, (key, thisValue)) ⇒
case (acc, (key, thisValue)) =>
acc.get(key) match {
case None ⇒
case None =>
acc.updated(key, thisValue)
case Some(thatValue) ⇒
case Some(thatValue) =>
acc.updated(key, thisValue merge thatValue)
}
}
@@ -765,16 +765,16 @@ object Replicator {
else {
val currentTime = System.currentTimeMillis()
mergedPruning.filter {
case (_, p: PruningPerformed) ⇒ !p.isObsolete(currentTime)
case _ ⇒ true
case (_, p: PruningPerformed) => !p.isObsolete(currentTime)
case _ => true
}
}
}
// cleanup and merge deltaVersions
val removedNodes = filteredMergedPruning.keys
val cleanedDV = removedNodes.foldLeft(deltaVersions) { (acc, node) ⇒ acc.pruningCleanup(node) }
val cleanedOtherDV = removedNodes.foldLeft(other.deltaVersions) { (acc, node) ⇒ acc.pruningCleanup(node) }
val cleanedDV = removedNodes.foldLeft(deltaVersions) { (acc, node) => acc.pruningCleanup(node) }
val cleanedOtherDV = removedNodes.foldLeft(other.deltaVersions) { (acc, node) => acc.pruningCleanup(node) }
val mergedDeltaVersions = cleanedDV.merge(cleanedOtherDV)
// cleanup both sides before merging, `merge(otherData: ReplicatedData)` will cleanup other.data
@@ -789,11 +789,11 @@ object Replicator {
else {
val mergedData =
cleaned(otherData, pruning) match {
case d: ReplicatedDelta ⇒ data match {
case drd: DeltaReplicatedData ⇒ drd.mergeDelta(d.asInstanceOf[drd.D])
case _ ⇒ throw new IllegalArgumentException("Expected DeltaReplicatedData")
case d: ReplicatedDelta => data match {
case drd: DeltaReplicatedData => drd.mergeDelta(d.asInstanceOf[drd.D])
case _ => throw new IllegalArgumentException("Expected DeltaReplicatedData")
}
case c ⇒ data.merge(c.asInstanceOf[data.T])
case c => data.merge(c.asInstanceOf[data.T])
}
if (data.getClass != mergedData.getClass)
throw new IllegalArgumentException(
@@ -803,15 +803,15 @@ object Replicator {
}
private def cleaned(c: ReplicatedData, p: Map[UniqueAddress, PruningState]): ReplicatedData = p.foldLeft(c) {
case (c: RemovedNodePruning, (removed, _: PruningPerformed)) ⇒
case (c: RemovedNodePruning, (removed, _: PruningPerformed)) =>
if (c.needPruningFrom(removed)) c.pruningCleanup(removed) else c
case (c, _) ⇒ c
case (c, _) => c
}
def addSeen(node: Address): DataEnvelope = {
var changed = false
val newRemovedNodePruning = pruning.map {
case (removed, pruningState) ⇒
case (removed, pruningState) =>
val newPruningState = pruningState.addSeen(node)
changed = (newPruningState ne pruningState) || changed
(removed, newPruningState)
@@ -831,7 +831,7 @@ object Replicator {
final case class Status(digests: Map[KeyId, Digest], chunk: Int, totChunks: Int) extends ReplicatorMessage {
override def toString: String =
(digests.map {
case (key, bytes) ⇒ key + " -> " + bytes.map(byte ⇒ f"$byte%02x").mkString("")
case (key, bytes) => key + " -> " + bytes.map(byte => f"$byte%02x").mkString("")
}).mkString("Status(", ", ", ")")
}
final case class Gossip(updatedData: Map[KeyId, DataEnvelope], sendBack: Boolean) extends ReplicatorMessage
@ -1085,12 +1085,12 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val hasDurableKeys = settings.durableKeys.nonEmpty
val durable = settings.durableKeys.filterNot(_.endsWith("*"))
val durableWildcards = settings.durableKeys.collect { case k if k.endsWith("*") ⇒ k.dropRight(1) }
val durableWildcards = settings.durableKeys.collect { case k if k.endsWith("*") => k.dropRight(1) }
val durableStore: ActorRef =
if (hasDurableKeys) {
val props = settings.durableStoreProps match {
case Right(p) ⇒ p
case Left((s, c)) ⇒
case Right(p) => p
case Left((s, c)) =>
val clazz = context.system.asInstanceOf[ExtendedActorSystem].dynamicAccess.getClassFor[Actor](s).get
Props(clazz, c).withDispatcher(c.getString("use-dispatcher"))
}
@ -1111,10 +1111,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
// Important to include the pruning state in the deltas. For example if the delta is based
// on an entry that has been pruned but that has not yet been performed on the target node.
DeltaPropagation(selfUniqueAddress, reply = false, deltas.iterator.collect {
case (key, (d, fromSeqNr, toSeqNr)) if d != NoDeltaPlaceholder ⇒
case (key, (d, fromSeqNr, toSeqNr)) if d != NoDeltaPlaceholder =>
getData(key) match {
case Some(envelope) ⇒ key → Delta(envelope.copy(data = d), fromSeqNr, toSeqNr)
case None ⇒ key → Delta(DataEnvelope(d), fromSeqNr, toSeqNr)
case Some(envelope) => key -> Delta(envelope.copy(data = d), fromSeqNr, toSeqNr)
case None => key -> Delta(DataEnvelope(d), fromSeqNr, toSeqNr)
}
}.toMap)
}
@ -1200,7 +1200,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def fromDurableStore: Boolean = sender() == durableStore && sender() != context.system.deadLetters
OneForOneStrategy()(
({
case e @ (_: DurableStore.LoadFailed | _: ActorInitializationException) if fromDurableStore ⇒
case e @ (_: DurableStore.LoadFailed | _: ActorInitializationException) if fromDurableStore =>
log.error(e, "Stopping distributed-data Replicator due to load or startup failure in durable store, caused by: {}", if (e.getCause eq null) "" else e.getCause.getMessage)
context.stop(self)
SupervisorStrategy.Stop
@ -1220,7 +1220,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def unstashAll(): Unit = {
val originalReplyTo = replyTo
stash.foreach {
case (msg, snd) ⇒
case (msg, snd) =>
replyTo = snd
normalReceive.applyOrElse(msg, unhandled)
}
@ -1229,18 +1229,18 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
}
{
case LoadData(data) ⇒
case LoadData(data) =>
count += data.size
data.foreach {
case (key, d) ⇒
case (key, d) =>
write(key, d.dataEnvelope) match {
case Some(newEnvelope) ⇒
case Some(newEnvelope) =>
if (newEnvelope ne d.dataEnvelope)
durableStore ! Store(key, new DurableDataEnvelope(newEnvelope), None)
case None ⇒
case None =>
}
}
case LoadAllCompleted ⇒
case LoadAllCompleted =>
log.debug(
"Loading {} entries from durable store took {} ms, stashed {}",
count, TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime), stash.size)
@ -1248,51 +1248,51 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
unstashAll()
self ! FlushChanges
case GetReplicaCount ⇒
case GetReplicaCount =>
// 0 until durable data has been loaded, used by test
replyTo ! ReplicaCount(0)
case RemovedNodePruningTick | FlushChanges | GossipTick ⇒
case RemovedNodePruningTick | FlushChanges | GossipTick =>
// ignore scheduled ticks when loading durable data
case TestFullStateGossip(enabled) ⇒
case TestFullStateGossip(enabled) =>
fullStateGossipEnabled = enabled
case m @ (_: Read | _: Write | _: Status | _: Gossip) ⇒
case m @ (_: Read | _: Write | _: Status | _: Gossip) =>
// ignore gossip and replication when loading durable data
log.debug("ignoring message [{}] when loading durable data", m.getClass.getName)
case msg: ClusterDomainEvent ⇒ normalReceive.applyOrElse(msg, unhandled)
case msg ⇒
stash :+= (msg → replyTo)
case msg: ClusterDomainEvent => normalReceive.applyOrElse(msg, unhandled)
case msg =>
stash :+= (msg -> replyTo)
}
}
// MUST use replyTo instead of sender() and forward from normalReceive, because of the stash in load
val normalReceive: Receive = {
case Get(key, consistency, req) ⇒ receiveGet(key, consistency, req)
case u @ Update(key, writeC, req) ⇒ receiveUpdate(key, u.modify, writeC, req)
case Read(key) ⇒ receiveRead(key)
case Write(key, envelope) ⇒ receiveWrite(key, envelope)
case ReadRepair(key, envelope) ⇒ receiveReadRepair(key, envelope)
case DeltaPropagation(from, reply, deltas) ⇒ receiveDeltaPropagation(from, reply, deltas)
case FlushChanges ⇒ receiveFlushChanges()
case DeltaPropagationTick ⇒ receiveDeltaPropagationTick()
case GossipTick ⇒ receiveGossipTick()
case ClockTick ⇒ receiveClockTick()
case Status(otherDigests, chunk, totChunks) ⇒ receiveStatus(otherDigests, chunk, totChunks)
case Gossip(updatedData, sendBack) ⇒ receiveGossip(updatedData, sendBack)
case Subscribe(key, subscriber) ⇒ receiveSubscribe(key, subscriber)
case Unsubscribe(key, subscriber) ⇒ receiveUnsubscribe(key, subscriber)
case Terminated(ref) ⇒ receiveTerminated(ref)
case MemberWeaklyUp(m) ⇒ receiveWeaklyUpMemberUp(m)
case MemberUp(m) ⇒ receiveMemberUp(m)
case MemberRemoved(m, _) ⇒ receiveMemberRemoved(m)
case evt: MemberEvent ⇒ receiveOtherMemberEvent(evt.member)
case UnreachableMember(m) ⇒ receiveUnreachable(m)
case ReachableMember(m) ⇒ receiveReachable(m)
case GetKeyIds ⇒ receiveGetKeyIds()
case Delete(key, consistency, req) ⇒ receiveDelete(key, consistency, req)
case RemovedNodePruningTick ⇒ receiveRemovedNodePruningTick()
case GetReplicaCount ⇒ receiveGetReplicaCount()
case TestFullStateGossip(enabled) ⇒ fullStateGossipEnabled = enabled
case Get(key, consistency, req) => receiveGet(key, consistency, req)
case u @ Update(key, writeC, req) => receiveUpdate(key, u.modify, writeC, req)
case Read(key) => receiveRead(key)
case Write(key, envelope) => receiveWrite(key, envelope)
case ReadRepair(key, envelope) => receiveReadRepair(key, envelope)
case DeltaPropagation(from, reply, deltas) => receiveDeltaPropagation(from, reply, deltas)
case FlushChanges => receiveFlushChanges()
case DeltaPropagationTick => receiveDeltaPropagationTick()
case GossipTick => receiveGossipTick()
case ClockTick => receiveClockTick()
case Status(otherDigests, chunk, totChunks) => receiveStatus(otherDigests, chunk, totChunks)
case Gossip(updatedData, sendBack) => receiveGossip(updatedData, sendBack)
case Subscribe(key, subscriber) => receiveSubscribe(key, subscriber)
case Unsubscribe(key, subscriber) => receiveUnsubscribe(key, subscriber)
case Terminated(ref) => receiveTerminated(ref)
case MemberWeaklyUp(m) => receiveWeaklyUpMemberUp(m)
case MemberUp(m) => receiveMemberUp(m)
case MemberRemoved(m, _) => receiveMemberRemoved(m)
case evt: MemberEvent => receiveOtherMemberEvent(evt.member)
case UnreachableMember(m) => receiveUnreachable(m)
case ReachableMember(m) => receiveReachable(m)
case GetKeyIds => receiveGetKeyIds()
case Delete(key, consistency, req) => receiveDelete(key, consistency, req)
case RemovedNodePruningTick => receiveRemovedNodePruningTick()
case GetReplicaCount => receiveGetReplicaCount()
case TestFullStateGossip(enabled) => fullStateGossipEnabled = enabled
}
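All of the rewrites in this hunk are mechanical: in Scala 2, which this code targets, the Unicode arrows are alternative spellings of the ASCII tokens, so the replacement cannot change behaviour. A minimal, self-contained sketch of the equivalence (the names here are illustrative only, not from this file):

object ArrowSpellings {
  // `⇒` parses as `=>` and `←` as `<-`; `→` is Predef.ArrowAssoc's alias for `->`.
  val inc: Int ⇒ Int = n ⇒ n + 1
  val incAscii: Int => Int = n => n + 1
  val pair: (String, Int) = "a" → 1 // same value as "a" -> 1
  val evens = for (n ← 1 to 10 if n % 2 == 0) yield n

  def main(args: Array[String]): Unit = {
    assert(inc(41) == incAscii(41))
    assert(pair == ("a" -> 1))
    assert(evens == Vector(2, 4, 6, 8, 10))
  }
}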
def receiveGet(key: KeyR, consistency: ReadConsistency, req: Option[Any]): Unit = {
@ -1300,9 +1300,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
log.debug("Received Get for key [{}]", key)
if (isLocalGet(consistency)) {
val reply = localValue match {
case Some(DataEnvelope(DeletedData, _, _)) ⇒ DataDeleted(key, req)
case Some(DataEnvelope(data, _, _)) ⇒ GetSuccess(key, req)(data)
case None ⇒ NotFound(key, req)
case Some(DataEnvelope(DeletedData, _, _)) => DataDeleted(key, req)
case Some(DataEnvelope(data, _, _)) => GetSuccess(key, req)(data)
case None => NotFound(key, req)
}
replyTo ! reply
} else
@ -1312,9 +1312,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def isLocalGet(readConsistency: ReadConsistency): Boolean =
readConsistency match {
case ReadLocal ⇒ true
case _: ReadMajority | _: ReadAll ⇒ nodes.isEmpty
case _ ⇒ false
case ReadLocal => true
case _: ReadMajority | _: ReadAll => nodes.isEmpty
case _ => false
}
def receiveRead(key: KeyId): Unit = {
@ -1323,41 +1323,41 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def isLocalSender(): Boolean = !replyTo.path.address.hasGlobalScope
def receiveUpdate(key: KeyR, modify: Option[ReplicatedData] ⇒ ReplicatedData,
def receiveUpdate(key: KeyR, modify: Option[ReplicatedData] => ReplicatedData,
writeConsistency: WriteConsistency, req: Option[Any]): Unit = {
val localValue = getData(key.id)
def deltaOrPlaceholder(d: DeltaReplicatedData): Option[ReplicatedDelta] = {
d.delta match {
case s @ Some(_) ⇒ s
case None ⇒ Some(NoDeltaPlaceholder)
case s @ Some(_) => s
case None => Some(NoDeltaPlaceholder)
}
}
Try {
localValue match {
case Some(DataEnvelope(DeletedData, _, _)) ⇒ throw new DataDeleted(key, req)
case Some(envelope @ DataEnvelope(existing, _, _)) ⇒
case Some(DataEnvelope(DeletedData, _, _)) => throw new DataDeleted(key, req)
case Some(envelope @ DataEnvelope(existing, _, _)) =>
modify(Some(existing)) match {
case d: DeltaReplicatedData if deltaCrdtEnabled ⇒
case d: DeltaReplicatedData if deltaCrdtEnabled =>
(envelope.merge(d.resetDelta.asInstanceOf[existing.T]), deltaOrPlaceholder(d))
case d ⇒
case d =>
(envelope.merge(d.asInstanceOf[existing.T]), None)
}
case None ⇒ modify(None) match {
case d: DeltaReplicatedData if deltaCrdtEnabled ⇒
case None => modify(None) match {
case d: DeltaReplicatedData if deltaCrdtEnabled =>
(DataEnvelope(d.resetDelta), deltaOrPlaceholder(d))
case d ⇒ (DataEnvelope(d), None)
case d => (DataEnvelope(d), None)
}
}
} match {
case Success((envelope, delta)) ⇒
case Success((envelope, delta)) =>
log.debug("Received Update for key [{}]", key)
// handle the delta
delta match {
case Some(d) ⇒ deltaPropagationSelector.update(key.id, d)
case None ⇒ // not DeltaReplicatedData
case Some(d) => deltaPropagationSelector.update(key.id, d)
case None => // not DeltaReplicatedData
}
// note that it's important to do deltaPropagationSelector.update before setData,
@ -1373,12 +1373,12 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
replyTo ! UpdateSuccess(key, req)
} else {
val (writeEnvelope, writeDelta) = delta match {
case Some(NoDeltaPlaceholder) ⇒ (newEnvelope, None)
case Some(d: RequiresCausalDeliveryOfDeltas) ⇒
case Some(NoDeltaPlaceholder) => (newEnvelope, None)
case Some(d: RequiresCausalDeliveryOfDeltas) =>
val v = deltaPropagationSelector.currentVersion(key.id)
(newEnvelope, Some(Delta(newEnvelope.copy(data = d), v, v)))
case Some(d) ⇒ (newEnvelope.copy(data = d), None)
case None ⇒ (newEnvelope, None)
case Some(d) => (newEnvelope.copy(data = d), None)
case None => (newEnvelope, None)
}
val writeAggregator =
context.actorOf(WriteAggregator.props(key, writeEnvelope, writeDelta, writeConsistency,
@ -1389,10 +1389,10 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
Some(StoreReply(UpdateSuccess(key, req), StoreFailure(key, req), writeAggregator)))
}
}
case Failure(e: DataDeleted[_]) ⇒
case Failure(e: DataDeleted[_]) =>
log.debug("Received Update for deleted key [{}]", key)
replyTo ! e
case Failure(e) ⇒
case Failure(e) =>
log.debug("Received Update for key [{}], failed: {}", key, e.getMessage)
replyTo ! ModifyFailure(key, "Update failed: " + e.getMessage, e, req)
}
@ -1403,9 +1403,9 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def isLocalUpdate(writeConsistency: WriteConsistency): Boolean =
writeConsistency match {
case WriteLocal ⇒ true
case _: WriteMajority | _: WriteAll ⇒ nodes.isEmpty
case _ ⇒ false
case WriteLocal => true
case _: WriteMajority | _: WriteAll => nodes.isEmpty
case _ => false
}
def receiveWrite(key: KeyId, envelope: DataEnvelope): Unit =
@ -1413,13 +1413,13 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def writeAndStore(key: KeyId, writeEnvelope: DataEnvelope, reply: Boolean): Unit = {
write(key, writeEnvelope) match {
case Some(newEnvelope) ⇒
case Some(newEnvelope) =>
if (isDurable(key)) {
val storeReply = if (reply) Some(StoreReply(WriteAck, WriteNack, replyTo)) else None
durableStore ! Store(key, new DurableDataEnvelope(newEnvelope), storeReply)
} else if (reply)
replyTo ! WriteAck
case None ⇒
case None =>
if (reply)
replyTo ! WriteNack
}
@ -1427,27 +1427,27 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def write(key: KeyId, writeEnvelope: DataEnvelope): Option[DataEnvelope] = {
getData(key) match {
case someEnvelope @ Some(envelope) if envelope eq writeEnvelope ⇒ someEnvelope
case Some(DataEnvelope(DeletedData, _, _)) ⇒ Some(DeletedEnvelope) // already deleted
case Some(envelope @ DataEnvelope(existing, _, _)) ⇒
case someEnvelope @ Some(envelope) if envelope eq writeEnvelope => someEnvelope
case Some(DataEnvelope(DeletedData, _, _)) => Some(DeletedEnvelope) // already deleted
case Some(envelope @ DataEnvelope(existing, _, _)) =>
try {
// DataEnvelope will mergeDelta when needed
val merged = envelope.merge(writeEnvelope).addSeen(selfAddress)
Some(setData(key, merged))
} catch {
case e: IllegalArgumentException ⇒
case e: IllegalArgumentException =>
log.warning(
"Couldn't merge [{}], due to: {}", key, e.getMessage)
None
}
case None ⇒
case None =>
// no existing data for the key
val writeEnvelope2 =
writeEnvelope.data match {
case d: ReplicatedDelta ⇒
case d: ReplicatedDelta =>
val z = d.zero
writeEnvelope.copy(data = z.mergeDelta(d.asInstanceOf[z.D]))
case _ ⇒
case _ =>
writeEnvelope
}
@ -1463,17 +1463,17 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def receiveGetKeyIds(): Unit = {
val keys: Set[KeyId] = dataEntries.iterator.collect {
case (key, (DataEnvelope(data, _, _), _)) if data != DeletedData ⇒ key
case (key, (DataEnvelope(data, _, _), _)) if data != DeletedData => key
}.to(immutable.Set)
replyTo ! GetKeyIdsResult(keys)
}
def receiveDelete(key: KeyR, consistency: WriteConsistency, req: Option[Any]): Unit = {
getData(key.id) match {
case Some(DataEnvelope(DeletedData, _, _)) ⇒
case Some(DataEnvelope(DeletedData, _, _)) =>
// already deleted
replyTo ! DataDeleted(key, req)
case _ ⇒
case _ =>
setData(key.id, DeletedEnvelope)
val durable = isDurable(key.id)
if (isLocalUpdate(consistency)) {
@ -1524,12 +1524,12 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def getDigest(key: KeyId): Digest = {
dataEntries.get(key) match {
case Some((envelope, LazyDigest)) ⇒
case Some((envelope, LazyDigest)) =>
val d = digest(envelope)
dataEntries = dataEntries.updated(key, (envelope, d))
d
case Some((_, digest)) ⇒ digest
case None ⇒ NotFoundDigest
case Some((_, digest)) => digest
case None => NotFoundDigest
}
}
@ -1540,18 +1540,18 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
ByteString.fromArray(MessageDigest.getInstance("SHA-1").digest(bytes))
}
def getData(key: KeyId): Option[DataEnvelope] = dataEntries.get(key).map { case (envelope, _) ⇒ envelope }
def getData(key: KeyId): Option[DataEnvelope] = dataEntries.get(key).map { case (envelope, _) => envelope }
def getDeltaSeqNr(key: KeyId, fromNode: UniqueAddress): Long =
dataEntries.get(key) match {
case Some((DataEnvelope(_, _, deltaVersions), _)) ⇒ deltaVersions.versionAt(fromNode)
case None ⇒ 0L
case Some((DataEnvelope(_, _, deltaVersions), _)) => deltaVersions.versionAt(fromNode)
case None => 0L
}
def isNodeRemoved(node: UniqueAddress, keys: Iterable[KeyId]): Boolean = {
removedNodes.contains(node) || (keys.exists(key ⇒ dataEntries.get(key) match {
case Some((DataEnvelope(_, pruning, _), _)) ⇒ pruning.contains(node)
case None ⇒ false
removedNodes.contains(node) || (keys.exists(key => dataEntries.get(key) match {
case Some((DataEnvelope(_, pruning, _), _)) => pruning.contains(node)
case None => false
}))
}
@ -1559,22 +1559,22 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def notify(keyId: KeyId, subs: mutable.Set[ActorRef]): Unit = {
val key = subscriptionKeys(keyId)
getData(keyId) match {
case Some(envelope) ⇒
case Some(envelope) =>
val msg = if (envelope.data == DeletedData) Deleted(key) else Changed(key)(envelope.data)
subs.foreach { _ ! msg }
case None ⇒
case None =>
}
}
if (subscribers.nonEmpty) {
for (key ← changed; if subscribers.contains(key); subs ← subscribers.get(key))
for (key <- changed; if subscribers.contains(key); subs <- subscribers.get(key))
notify(key, subs)
}
// Changed event is sent to new subscribers even though the key has not changed,
// i.e. send current value
if (newSubscribers.nonEmpty) {
for ((key, subs) ← newSubscribers) {
for ((key, subs) <- newSubscribers) {
notify(key, subs)
subs.foreach { subscribers.addBinding(key, _) }
}
@ -1586,7 +1586,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def receiveDeltaPropagationTick(): Unit = {
deltaPropagationSelector.collectPropagations().foreach {
case (node, deltaPropagation) ⇒
case (node, deltaPropagation) =>
// TODO split it to several DeltaPropagation if too many entries
if (deltaPropagation.deltas.nonEmpty)
replica(node) ! deltaPropagation
@ -1602,7 +1602,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val isDebugEnabled = log.isDebugEnabled
if (isDebugEnabled)
log.debug("Received DeltaPropagation from [{}], containing [{}]", fromNode.address,
deltas.collect { case (key, Delta(_, fromSeqNr, toSeqNr)) ⇒ s"$key $fromSeqNr-$toSeqNr" }.mkString(", "))
deltas.collect { case (key, Delta(_, fromSeqNr, toSeqNr)) => s"$key $fromSeqNr-$toSeqNr" }.mkString(", "))
if (isNodeRemoved(fromNode, deltas.keys)) {
// Late message from a removed node.
@ -1611,7 +1611,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
"Skipping DeltaPropagation from [{}] because that node has been removed", fromNode.address)
} else {
deltas.foreach {
case (key, Delta(envelope @ DataEnvelope(_: RequiresCausalDeliveryOfDeltas, _, _), fromSeqNr, toSeqNr)) ⇒
case (key, Delta(envelope @ DataEnvelope(_: RequiresCausalDeliveryOfDeltas, _, _), fromSeqNr, toSeqNr)) =>
val currentSeqNr = getDeltaSeqNr(key, fromNode)
if (currentSeqNr >= toSeqNr) {
if (isDebugEnabled) log.debug(
@ -1630,13 +1630,13 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val newEnvelope = envelope.copy(deltaVersions = VersionVector(fromNode, toSeqNr))
writeAndStore(key, newEnvelope, reply)
}
case (key, Delta(envelope, _, _)) ⇒
case (key, Delta(envelope, _, _)) =>
// causal delivery of deltas not needed, just apply it
writeAndStore(key, envelope, reply)
}
}
} catch {
case NonFatal(e) ⇒
case NonFatal(e) =>
// catching in case we need to support rolling upgrades that are
// mixing nodes with incompatible delta-CRDT types
log.warning("Couldn't process DeltaPropagation from [{}] due to {}", fromNode, e)
@ -1654,11 +1654,11 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def gossipTo(address: Address): Unit = {
val to = replica(address)
if (dataEntries.size <= maxDeltaElements) {
val status = Status(dataEntries.map { case (key, (_, _)) ⇒ (key, getDigest(key)) }, chunk = 0, totChunks = 1)
val status = Status(dataEntries.map { case (key, (_, _)) => (key, getDigest(key)) }, chunk = 0, totChunks = 1)
to ! status
} else {
val totChunks = dataEntries.size / maxDeltaElements
for (_ ← 1 to math.min(totChunks, 10)) {
for (_ <- 1 to math.min(totChunks, 10)) {
if (totChunks == statusTotChunks)
statusCount += 1
else {
@ -1667,7 +1667,7 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
}
val chunk = (statusCount % totChunks).toInt
val status = Status(dataEntries.collect {
case (key, (_, _)) if math.abs(key.hashCode % totChunks) == chunk ⇒ (key, getDigest(key))
case (key, (_, _)) if math.abs(key.hashCode % totChunks) == chunk => (key, getDigest(key))
}, chunk, totChunks)
to ! status
}
@ -1690,25 +1690,25 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
d != NotFoundDigest && d != otherDigest
}
val otherDifferentKeys = otherDigests.collect {
case (key, otherDigest) if isOtherDifferent(key, otherDigest) ⇒ key
case (key, otherDigest) if isOtherDifferent(key, otherDigest) => key
}
val otherKeys = otherDigests.keySet
val myKeys =
if (totChunks == 1) dataEntries.keySet
else dataEntries.keysIterator.filter(key ⇒ math.abs(key.hashCode % totChunks) == chunk).toSet
else dataEntries.keysIterator.filter(key => math.abs(key.hashCode % totChunks) == chunk).toSet
val otherMissingKeys = myKeys diff otherKeys
val keys = (otherDifferentKeys ++ otherMissingKeys).take(maxDeltaElements)
if (keys.nonEmpty) {
if (log.isDebugEnabled)
log.debug("Sending gossip to [{}], containing [{}]", replyTo.path.address, keys.mkString(", "))
val g = Gossip(keys.iterator.map(k ⇒ k → getData(k).get).toMap, sendBack = otherDifferentKeys.nonEmpty)
val g = Gossip(keys.iterator.map(k => k -> getData(k).get).toMap, sendBack = otherDifferentKeys.nonEmpty)
replyTo ! g
}
val myMissingKeys = otherKeys diff myKeys
if (myMissingKeys.nonEmpty) {
if (log.isDebugEnabled)
log.debug("Sending gossip status to [{}], requesting missing [{}]", replyTo.path.address, myMissingKeys.mkString(", "))
val status = Status(myMissingKeys.iterator.map(k ⇒ k → NotFoundDigest).toMap, chunk, totChunks)
val status = Status(myMissingKeys.iterator.map(k => k -> NotFoundDigest).toMap, chunk, totChunks)
replyTo ! status
}
}
@ -1718,14 +1718,14 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
log.debug("Received gossip from [{}], containing [{}]", replyTo.path.address, updatedData.keys.mkString(", "))
var replyData = Map.empty[KeyId, DataEnvelope]
updatedData.foreach {
case (key, envelope) ⇒
case (key, envelope) =>
val hadData = dataEntries.contains(key)
writeAndStore(key, envelope, reply = false)
if (sendBack) getData(key) match {
case Some(d) ⇒
case Some(d) =>
if (hadData || d.pruning.nonEmpty)
replyData = replyData.updated(key, d)
case None ⇒
case None =>
}
}
if (sendBack && replyData.nonEmpty)
@ -1749,20 +1749,20 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
}
def hasSubscriber(subscriber: ActorRef): Boolean =
(subscribers.exists { case (k, s) ⇒ s.contains(subscriber) }) ||
(newSubscribers.exists { case (k, s) ⇒ s.contains(subscriber) })
(subscribers.exists { case (k, s) => s.contains(subscriber) }) ||
(newSubscribers.exists { case (k, s) => s.contains(subscriber) })
def receiveTerminated(ref: ActorRef): Unit = {
if (ref == durableStore) {
log.error("Stopping distributed-data Replicator because durable store terminated")
context.stop(self)
} else {
val keys1 = subscribers.collect { case (k, s) if s.contains(ref) ⇒ k }
keys1.foreach { key ⇒ subscribers.removeBinding(key, ref) }
val keys2 = newSubscribers.collect { case (k, s) if s.contains(ref) ⇒ k }
keys2.foreach { key ⇒ newSubscribers.removeBinding(key, ref) }
val keys1 = subscribers.collect { case (k, s) if s.contains(ref) => k }
keys1.foreach { key => subscribers.removeBinding(key, ref) }
val keys2 = newSubscribers.collect { case (k, s) if s.contains(ref) => k }
keys2.foreach { key => newSubscribers.removeBinding(key, ref) }
(keys1 ++ keys2).foreach { key ⇒
(keys1 ++ keys2).foreach { key =>
if (!subscribers.contains(key) && !newSubscribers.contains(key))
subscriptionKeys -= key
}
@ -1833,13 +1833,13 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val knownNodes = nodes union weaklyUpNodes union removedNodes.keySet.map(_.address)
val newRemovedNodes =
dataEntries.foldLeft(Set.empty[UniqueAddress]) {
case (acc, (_, (envelope @ DataEnvelope(data: RemovedNodePruning, _, _), _))) ⇒
acc union data.modifiedByNodes.filterNot(n ⇒ n == selfUniqueAddress || knownNodes(n.address))
case (acc, _) ⇒
case (acc, (_, (envelope @ DataEnvelope(data: RemovedNodePruning, _, _), _))) =>
acc union data.modifiedByNodes.filterNot(n => n == selfUniqueAddress || knownNodes(n.address))
case (acc, _) =>
acc
}
newRemovedNodes.foreach { n ⇒
newRemovedNodes.foreach { n =>
log.debug("Adding removed node [{}] from data", n)
removedNodes = removedNodes.updated(n, allReachableClockTime)
}
@ -1848,11 +1848,11 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
def initRemovedNodePruning(): Unit = {
// initiate pruning for removed nodes
val removedSet: Set[UniqueAddress] = removedNodes.iterator.collect {
case (r, t) if ((allReachableClockTime - t) > maxPruningDisseminationNanos) ⇒ r
case (r, t) if ((allReachableClockTime - t) > maxPruningDisseminationNanos) => r
}.to(immutable.Set)
if (removedSet.nonEmpty) {
for ((key, (envelope, _)) ← dataEntries; removed ← removedSet) {
for ((key, (envelope, _)) <- dataEntries; removed <- removedSet) {
def init(): Unit = {
val newEnvelope = envelope.initRemovedNodePruning(removed, selfUniqueAddress)
@ -1862,13 +1862,13 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
if (envelope.needPruningFrom(removed)) {
envelope.data match {
case dataWithRemovedNodePruning: RemovedNodePruning ⇒
case dataWithRemovedNodePruning: RemovedNodePruning =>
envelope.pruning.get(removed) match {
case None ⇒ init()
case Some(PruningInitialized(owner, _)) if owner != selfUniqueAddress ⇒ init()
case _ ⇒ // already in progress
case None => init()
case Some(PruningInitialized(owner, _)) if owner != selfUniqueAddress => init()
case _ => // already in progress
}
case _ ⇒
case _ =>
}
}
}
@ -1881,36 +1881,36 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val pruningPerformed = PruningPerformed(System.currentTimeMillis() + pruningMarkerTimeToLive.toMillis)
val durablePruningPerformed = PruningPerformed(System.currentTimeMillis() + durablePruningMarkerTimeToLive.toMillis)
dataEntries.foreach {
case (key, (envelope @ DataEnvelope(data: RemovedNodePruning, pruning, _), _)) ⇒
case (key, (envelope @ DataEnvelope(data: RemovedNodePruning, pruning, _), _)) =>
pruning.foreach {
case (removed, PruningInitialized(owner, seen)) if owner == selfUniqueAddress
&& (allNodes.isEmpty || allNodes.forall(seen)) ⇒
&& (allNodes.isEmpty || allNodes.forall(seen)) =>
val newEnvelope = envelope.prune(removed, if (isDurable(key)) durablePruningPerformed else pruningPerformed)
log.debug("Perform pruning of [{}] from [{}] to [{}]", key, removed, selfUniqueAddress)
setData(key, newEnvelope)
if ((newEnvelope.data ne data) && isDurable(key))
durableStore ! Store(key, new DurableDataEnvelope(newEnvelope), None)
case _ ⇒
case _ =>
}
case _ ⇒ // deleted, or pruning not needed
case _ => // deleted, or pruning not needed
}
}
def deleteObsoletePruningPerformed(): Unit = {
val currentTime = System.currentTimeMillis()
dataEntries.foreach {
case (key, (envelope @ DataEnvelope(_: RemovedNodePruning, pruning, _), _)) ⇒
case (key, (envelope @ DataEnvelope(_: RemovedNodePruning, pruning, _), _)) =>
val newEnvelope = pruning.foldLeft(envelope) {
case (acc, (removed, p: PruningPerformed)) if p.isObsolete(currentTime) ⇒
case (acc, (removed, p: PruningPerformed)) if p.isObsolete(currentTime) =>
log.debug("Removing obsolete pruning marker for [{}] in [{}]", removed, key)
removedNodes -= removed
acc.copy(pruning = acc.pruning - removed)
case (acc, _) ⇒ acc
case (acc, _) => acc
}
if (newEnvelope ne envelope)
setData(key, newEnvelope)
case _ ⇒ // deleted, or pruning not needed
case _ => // deleted, or pruning not needed
}
}
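Taken together, the hunks above cover the whole pruning lifecycle for a removed node: the marker starts as PruningInitialized, is flipped to PruningPerformed once every node has seen it, and is finally dropped when its time-to-live expires. A compressed, hypothetical sketch of that progression (types simplified; the real states carry more information):

sealed trait PruningState
final case class PruningInitialized(owner: String, seen: Set[String]) extends PruningState
final case class PruningPerformed(obsoleteTime: Long) extends PruningState {
  def isObsolete(currentTime: Long): Boolean = obsoleteTime <= currentTime
}

// One step of the lifecycle: perform the pruning when all nodes have seen the
// marker, drop the marker once it is obsolete, otherwise keep disseminating.
def step(state: PruningState, allNodes: Set[String], now: Long, ttl: Long): Option[PruningState] =
  state match {
    case PruningInitialized(_, seen) if allNodes.forall(seen) =>
      Some(PruningPerformed(now + ttl)) // data pruned, marker kept until TTL
    case p: PruningPerformed if p.isObsolete(now) =>
      None // marker removed entirely
    case other =>
      Some(other) // still disseminating
  }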
@ -2026,20 +2026,20 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
override def timeout: FiniteDuration = consistency.timeout
override val doneWhenRemainingSize = consistency match {
case WriteTo(n, _) ⇒ nodes.size - (n - 1)
case _: WriteAll ⇒ 0
case WriteMajority(_, minCap) ⇒
case WriteTo(n, _) => nodes.size - (n - 1)
case _: WriteAll => 0
case WriteMajority(_, minCap) =>
val N = nodes.size + 1
val w = calculateMajorityWithMinCap(minCap, N)
N - w
case WriteLocal ⇒
case WriteLocal =>
throw new IllegalArgumentException("WriteLocal not supported by WriteAggregator")
}
val writeMsg = Write(key.id, envelope)
val deltaMsg = delta match {
case None ⇒ None
case Some(d) ⇒ Some(DeltaPropagation(selfUniqueAddress, reply = true, Map(key.id → d)))
case None => None
case Some(d) => Some(DeltaPropagation(selfUniqueAddress, reply = true, Map(key.id -> d)))
}
var gotLocalStoreReply = !durable
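For concreteness, the WriteMajority branch above reads: with nodes.size remote replicas plus the local one there are N replicas in total, w of them must confirm, so the aggregator is done once the set of outstanding replies has shrunk to N - w. A worked sketch, under the assumption that calculateMajorityWithMinCap (not shown in this diff) is essentially max(minCap, N / 2 + 1):

// Hypothetical stand-in for the helper that this diff does not show.
def calculateMajorityWithMinCap(minCap: Int, numberOfNodes: Int): Int =
  math.max(minCap, numberOfNodes / 2 + 1)

val remoteNodes = 4                       // nodes.size
val N = remoteNodes + 1                   // include the local replica
val w = calculateMajorityWithMinCap(0, N) // 3 of 5 must ack
val doneWhenRemaining = N - w             // stop once only 2 replies are outstanding
assert(w == 3 && doneWhenRemaining == 2)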
@ -2047,16 +2047,16 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
private val (primaryNodes, secondaryNodes) = {
val requiresCausalDeliveryOfDeltas = delta match {
case None ⇒ false
case Some(d) ⇒ d.dataEnvelope.data.isInstanceOf[RequiresCausalDeliveryOfDeltas]
case None => false
case Some(d) => d.dataEnvelope.data.isInstanceOf[RequiresCausalDeliveryOfDeltas]
}
primaryAndSecondaryNodes(requiresCausalDeliveryOfDeltas)
}
override def preStart(): Unit = {
val msg = deltaMsg match {
case Some(d) ⇒ d
case None ⇒ writeMsg
case Some(d) => d
case None => writeMsg
}
primaryNodes.foreach { replica(_) ! msg }
@ -2064,37 +2064,37 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
}
def receive: Receive = {
case WriteAck ⇒
case WriteAck =>
remaining -= senderAddress()
if (isDone) reply(isTimeout = false)
case WriteNack ⇒
case WriteNack =>
gotWriteNackFrom += senderAddress()
if (isDone) reply(isTimeout = false)
case DeltaNack ⇒
case DeltaNack =>
// Deltas must be applied in order and we can't keep track of ordering of
// simultaneous updates so there is a chance that the delta could not be applied.
// Try again with the full state
sender() ! writeMsg
case _: Replicator.UpdateSuccess[_] ⇒
case _: Replicator.UpdateSuccess[_] =>
gotLocalStoreReply = true
if (isDone) reply(isTimeout = false)
case f: Replicator.StoreFailure[_] ⇒
case f: Replicator.StoreFailure[_] =>
gotLocalStoreReply = true
gotWriteNackFrom += selfUniqueAddress.address
if (isDone) reply(isTimeout = false)
case SendToSecondary ⇒
case SendToSecondary =>
deltaMsg match {
case None ⇒
case Some(d) ⇒
case None =>
case Some(d) =>
// Deltas must be applied in order and we can't keep track of ordering of
// simultaneous updates so there is a chance that the delta could not be applied.
// Try again with the full state to the primary nodes that have not acked.
primaryNodes.toSet.intersect(remaining).foreach { replica(_) ! writeMsg }
}
secondaryNodes.foreach { replica(_) ! writeMsg }
case ReceiveTimeout ⇒
case ReceiveTimeout =>
reply(isTimeout = true)
}
@ -2162,13 +2162,13 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
var result = localValue
override val doneWhenRemainingSize = consistency match {
case ReadFrom(n, _) ⇒ nodes.size - (n - 1)
case _: ReadAll ⇒ 0
case ReadMajority(_, minCap) ⇒
case ReadFrom(n, _) => nodes.size - (n - 1)
case _: ReadAll => 0
case ReadMajority(_, minCap) =>
val N = nodes.size + 1
val r = calculateMajorityWithMinCap(minCap, N)
N - r
case ReadLocal ⇒
case ReadLocal =>
throw new IllegalArgumentException("ReadLocal not supported by ReadAggregator")
}
@ -2188,47 +2188,47 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
}
def receive = {
case ReadResult(envelope) ⇒
case ReadResult(envelope) =>
result = (result, envelope) match {
case (Some(a), Some(b)) ⇒ Some(a.merge(b))
case (r @ Some(_), None) ⇒ r
case (None, r @ Some(_)) ⇒ r
case (None, None) ⇒ None
case (Some(a), Some(b)) => Some(a.merge(b))
case (r @ Some(_), None) => r
case (None, r @ Some(_)) => r
case (None, None) => None
}
remaining -= sender().path.address
if (remaining.size == doneWhenRemainingSize)
reply(ok = true)
case SendToSecondary ⇒
case SendToSecondary =>
secondaryNodes.foreach { replica(_) ! readMsg }
case ReceiveTimeout ⇒ reply(ok = false)
case ReceiveTimeout => reply(ok = false)
}
def reply(ok: Boolean): Unit =
(ok, result) match {
case (true, Some(envelope)) ⇒
case (true, Some(envelope)) =>
context.parent ! ReadRepair(key.id, envelope)
// read-repair happens before GetSuccess
context.become(waitReadRepairAck(envelope))
case (true, None) ⇒
case (true, None) =>
replyTo.tell(NotFound(key, req), context.parent)
context.stop(self)
case (false, _) ⇒
case (false, _) =>
replyTo.tell(GetFailure(key, req), context.parent)
context.stop(self)
}
def waitReadRepairAck(envelope: Replicator.Internal.DataEnvelope): Receive = {
case ReadRepairAck ⇒
case ReadRepairAck =>
val replyMsg =
if (envelope.data == DeletedData) DataDeleted(key, req)
else GetSuccess(key, req)(envelope.data)
replyTo.tell(replyMsg, context.parent)
context.stop(self)
case _: ReadResult ⇒
case _: ReadResult =>
//collect late replies
remaining -= sender().path.address
case SendToSecondary ⇒
case ReceiveTimeout ⇒
case SendToSecondary =>
case ReceiveTimeout =>
}
}
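The ReadResult fold in the aggregator above is what gives the subsequent read-repair its payload: every envelope that arrives is merged into the running result, so the value written back reflects all replicas that answered. A reduced sketch of the same fold, with a toy mergeable type standing in for DataEnvelope:

// Toy stand-in for DataEnvelope; merge keeps the per-key maximum.
final case class Env(versions: Map[String, Long]) {
  def merge(that: Env): Env =
    Env((versions.keySet ++ that.versions.keySet).iterator.map { k =>
      k -> math.max(versions.getOrElse(k, 0L), that.versions.getOrElse(k, 0L))
    }.toMap)
}

def combine(acc: Option[Env], reply: Option[Env]): Option[Env] =
  (acc, reply) match {
    case (Some(a), Some(b))  => Some(a.merge(b))
    case (r @ Some(_), None) => r
    case (None, r @ Some(_)) => r
    case (None, None)        => None
  }

assert(combine(Some(Env(Map("A" -> 1L))), Some(Env(Map("A" -> 2L)))) == Some(Env(Map("A" -> 2L))))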

View file

@ -269,7 +269,7 @@ final case class OneVersionVector private[akka] (node: UniqueAddress, version: L
@InternalApi private[akka] override def increment(n: UniqueAddress): VersionVector = {
val v = Timestamp.counter.getAndIncrement()
if (n == node) copy(version = v)
else ManyVersionVector(TreeMap(node → version, n → v))
else ManyVersionVector(TreeMap(node -> version, n -> v))
}
/** INTERNAL API */
@ -287,10 +287,10 @@ final case class OneVersionVector private[akka] (node: UniqueAddress, version: L
override def merge(that: VersionVector): VersionVector = {
that match {
case OneVersionVector(n2, v2) ⇒
case OneVersionVector(n2, v2) =>
if (node == n2) if (version >= v2) this else OneVersionVector(n2, v2)
else ManyVersionVector(TreeMap(node → version, n2 → v2))
case ManyVersionVector(vs2) ⇒
else ManyVersionVector(TreeMap(node -> version, n2 -> v2))
case ManyVersionVector(vs2) =>
val v2 = vs2.getOrElse(node, Timestamp.Zero)
val mergedVersions =
if (v2 >= version) vs2
@ -332,8 +332,8 @@ final case class ManyVersionVector(versions: TreeMap[UniqueAddress, Long]) exten
/** INTERNAL API */
@InternalApi private[akka] override def versionAt(node: UniqueAddress): Long = versions.get(node) match {
case Some(v) ⇒ v
case None ⇒ Timestamp.Zero
case Some(v) => v
case None => Timestamp.Zero
}
/** INTERNAL API */
@ -348,15 +348,15 @@ final case class ManyVersionVector(versions: TreeMap[UniqueAddress, Long]) exten
if (that.isEmpty) this
else if (this.isEmpty) that
else that match {
case ManyVersionVector(vs2) ⇒
case ManyVersionVector(vs2) =>
var mergedVersions = vs2
for ((node, time) ← versions) {
for ((node, time) <- versions) {
val mergedVersionsCurrentTime = mergedVersions.getOrElse(node, Timestamp.Zero)
if (time > mergedVersionsCurrentTime)
mergedVersions = mergedVersions.updated(node, time)
}
VersionVector(mergedVersions)
case OneVersionVector(n2, v2) ⇒
case OneVersionVector(n2, v2) =>
val v1 = versions.getOrElse(n2, Timestamp.Zero)
val mergedVersions =
if (v1 >= v2) versions
@ -379,5 +379,5 @@ final case class ManyVersionVector(versions: TreeMap[UniqueAddress, Long]) exten
else this
override def toString: String =
versions.map { case ((n, v)) ⇒ n + " -> " + v }.mkString("VersionVector(", ", ", ")")
versions.map { case ((n, v)) => n + " -> " + v }.mkString("VersionVector(", ", ", ")")
}
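Both merge branches above implement the same rule: the merged vector records, for every node, the larger of the two versions, which is what makes the merge commutative, associative and idempotent. A worked example with plain Maps standing in for the TreeMap-backed vectors:

// Pointwise maximum of two version maps — the essence of VersionVector.merge.
def mergeVersions(a: Map[String, Long], b: Map[String, Long]): Map[String, Long] =
  (a.keySet ++ b.keySet).iterator.map { n =>
    n -> math.max(a.getOrElse(n, 0L), b.getOrElse(n, 0L))
  }.toMap

val v1 = Map("A" -> 3L, "B" -> 1L)
val v2 = Map("A" -> 2L, "B" -> 4L)
assert(mergeVersions(v1, v2) == Map("A" -> 3L, "B" -> 4L))
assert(mergeVersions(v1, v2) == mergeVersions(v2, v1)) // commutative
assert(mergeVersions(v1, v1) == v1)                    // idempotent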

View file

@ -4,7 +4,7 @@
package akka.cluster.ddata.protobuf
import java.{ util, lang ⇒ jl }
import java.{ util, lang => jl }
import java.util.ArrayList
import java.util.Collections
import java.util.Comparator
@ -17,8 +17,8 @@ import scala.collection.immutable
import akka.actor.ExtendedActorSystem
import akka.cluster.ddata._
import akka.cluster.ddata.Replicator.Internal._
import akka.cluster.ddata.protobuf.msg.{ ReplicatedDataMessages ⇒ rd }
import akka.cluster.ddata.protobuf.msg.{ ReplicatorMessages ⇒ dm }
import akka.cluster.ddata.protobuf.msg.{ ReplicatedDataMessages => rd }
import akka.cluster.ddata.protobuf.msg.{ ReplicatorMessages => dm }
import akka.serialization.SerializerWithStringManifest
import akka.serialization.BaseSerializer
import akka.protobuf.{ ByteString, GeneratedMessage }
@ -43,16 +43,16 @@ private object ReplicatedDataSerializer {
def getKey(entry: A): Any
final def compare(x: A, y: A): Int = compareKeys(getKey(x), getKey(y))
private final def compareKeys(t1: Any, t2: Any): Int = (t1, t2) match {
case (k1: String, k2: String) ⇒ k1.compareTo(k2)
case (k1: String, k2) ⇒ -1
case (k1, k2: String) ⇒ 1
case (k1: Int, k2: Int) ⇒ k1.compareTo(k2)
case (k1: Int, k2) ⇒ -1
case (k1, k2: Int) ⇒ 1
case (k1: Long, k2: Long) ⇒ k1.compareTo(k2)
case (k1: Long, k2) ⇒ -1
case (k1, k2: Long) ⇒ 1
case (k1: OtherMessage, k2: OtherMessage) ⇒ OtherMessageComparator.compare(k1, k2)
case (k1: String, k2: String) => k1.compareTo(k2)
case (k1: String, k2) => -1
case (k1, k2: String) => 1
case (k1: Int, k2: Int) => k1.compareTo(k2)
case (k1: Int, k2) => -1
case (k1, k2: Int) => 1
case (k1: Long, k2: Long) => k1.compareTo(k2)
case (k1: Long, k2) => -1
case (k1, k2: Long) => 1
case (k1: OtherMessage, k2: OtherMessage) => OtherMessageComparator.compare(k1, k2)
}
}
@ -210,110 +210,110 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
private val ORMultiMapKeyManifest = "k"
private val VersionVectorManifest = "L"
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef](
GSetManifest → gsetFromBinary,
ORSetManifest → orsetFromBinary,
ORSetAddManifest → orsetAddFromBinary,
ORSetRemoveManifest → orsetRemoveFromBinary,
ORSetFullManifest → orsetFullFromBinary,
ORSetDeltaGroupManifest → orsetDeltaGroupFromBinary,
FlagManifest → flagFromBinary,
LWWRegisterManifest → lwwRegisterFromBinary,
GCounterManifest → gcounterFromBinary,
PNCounterManifest → pncounterFromBinary,
ORMapManifest → ormapFromBinary,
ORMapPutManifest → ormapPutFromBinary,
ORMapRemoveManifest → ormapRemoveFromBinary,
ORMapRemoveKeyManifest → ormapRemoveKeyFromBinary,
ORMapUpdateManifest → ormapUpdateFromBinary,
ORMapDeltaGroupManifest → ormapDeltaGroupFromBinary,
LWWMapManifest → lwwmapFromBinary,
PNCounterMapManifest → pncountermapFromBinary,
ORMultiMapManifest → multimapFromBinary,
DeletedDataManifest → (_ ⇒ DeletedData),
VersionVectorManifest → versionVectorFromBinary,
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef](
GSetManifest -> gsetFromBinary,
ORSetManifest -> orsetFromBinary,
ORSetAddManifest -> orsetAddFromBinary,
ORSetRemoveManifest -> orsetRemoveFromBinary,
ORSetFullManifest -> orsetFullFromBinary,
ORSetDeltaGroupManifest -> orsetDeltaGroupFromBinary,
FlagManifest -> flagFromBinary,
LWWRegisterManifest -> lwwRegisterFromBinary,
GCounterManifest -> gcounterFromBinary,
PNCounterManifest -> pncounterFromBinary,
ORMapManifest -> ormapFromBinary,
ORMapPutManifest -> ormapPutFromBinary,
ORMapRemoveManifest -> ormapRemoveFromBinary,
ORMapRemoveKeyManifest -> ormapRemoveKeyFromBinary,
ORMapUpdateManifest -> ormapUpdateFromBinary,
ORMapDeltaGroupManifest -> ormapDeltaGroupFromBinary,
LWWMapManifest -> lwwmapFromBinary,
PNCounterMapManifest -> pncountermapFromBinary,
ORMultiMapManifest -> multimapFromBinary,
DeletedDataManifest -> (_ => DeletedData),
VersionVectorManifest -> versionVectorFromBinary,
GSetKeyManifest → (bytes ⇒ GSetKey(keyIdFromBinary(bytes))),
ORSetKeyManifest → (bytes ⇒ ORSetKey(keyIdFromBinary(bytes))),
FlagKeyManifest → (bytes ⇒ FlagKey(keyIdFromBinary(bytes))),
LWWRegisterKeyManifest → (bytes ⇒ LWWRegisterKey(keyIdFromBinary(bytes))),
GCounterKeyManifest → (bytes ⇒ GCounterKey(keyIdFromBinary(bytes))),
PNCounterKeyManifest → (bytes ⇒ PNCounterKey(keyIdFromBinary(bytes))),
ORMapKeyManifest → (bytes ⇒ ORMapKey(keyIdFromBinary(bytes))),
LWWMapKeyManifest → (bytes ⇒ LWWMapKey(keyIdFromBinary(bytes))),
PNCounterMapKeyManifest → (bytes ⇒ PNCounterMapKey(keyIdFromBinary(bytes))),
ORMultiMapKeyManifest → (bytes ⇒ ORMultiMapKey(keyIdFromBinary(bytes))))
GSetKeyManifest -> (bytes => GSetKey(keyIdFromBinary(bytes))),
ORSetKeyManifest -> (bytes => ORSetKey(keyIdFromBinary(bytes))),
FlagKeyManifest -> (bytes => FlagKey(keyIdFromBinary(bytes))),
LWWRegisterKeyManifest -> (bytes => LWWRegisterKey(keyIdFromBinary(bytes))),
GCounterKeyManifest -> (bytes => GCounterKey(keyIdFromBinary(bytes))),
PNCounterKeyManifest -> (bytes => PNCounterKey(keyIdFromBinary(bytes))),
ORMapKeyManifest -> (bytes => ORMapKey(keyIdFromBinary(bytes))),
LWWMapKeyManifest -> (bytes => LWWMapKey(keyIdFromBinary(bytes))),
PNCounterMapKeyManifest -> (bytes => PNCounterMapKey(keyIdFromBinary(bytes))),
ORMultiMapKeyManifest -> (bytes => ORMultiMapKey(keyIdFromBinary(bytes))))
override def manifest(obj: AnyRef): String = obj match {
case _: ORSet[_] ⇒ ORSetManifest
case _: ORSet.AddDeltaOp[_] ⇒ ORSetAddManifest
case _: ORSet.RemoveDeltaOp[_] ⇒ ORSetRemoveManifest
case _: GSet[_] ⇒ GSetManifest
case _: GCounter ⇒ GCounterManifest
case _: PNCounter ⇒ PNCounterManifest
case _: Flag ⇒ FlagManifest
case _: LWWRegister[_] ⇒ LWWRegisterManifest
case _: ORMap[_, _] ⇒ ORMapManifest
case _: ORMap.PutDeltaOp[_, _] ⇒ ORMapPutManifest
case _: ORMap.RemoveDeltaOp[_, _] ⇒ ORMapRemoveManifest
case _: ORMap.RemoveKeyDeltaOp[_, _] ⇒ ORMapRemoveKeyManifest
case _: ORMap.UpdateDeltaOp[_, _] ⇒ ORMapUpdateManifest
case _: LWWMap[_, _] ⇒ LWWMapManifest
case _: PNCounterMap[_] ⇒ PNCounterMapManifest
case _: ORMultiMap[_, _] ⇒ ORMultiMapManifest
case DeletedData ⇒ DeletedDataManifest
case _: VersionVector ⇒ VersionVectorManifest
case _: ORSet[_] => ORSetManifest
case _: ORSet.AddDeltaOp[_] => ORSetAddManifest
case _: ORSet.RemoveDeltaOp[_] => ORSetRemoveManifest
case _: GSet[_] => GSetManifest
case _: GCounter => GCounterManifest
case _: PNCounter => PNCounterManifest
case _: Flag => FlagManifest
case _: LWWRegister[_] => LWWRegisterManifest
case _: ORMap[_, _] => ORMapManifest
case _: ORMap.PutDeltaOp[_, _] => ORMapPutManifest
case _: ORMap.RemoveDeltaOp[_, _] => ORMapRemoveManifest
case _: ORMap.RemoveKeyDeltaOp[_, _] => ORMapRemoveKeyManifest
case _: ORMap.UpdateDeltaOp[_, _] => ORMapUpdateManifest
case _: LWWMap[_, _] => LWWMapManifest
case _: PNCounterMap[_] => PNCounterMapManifest
case _: ORMultiMap[_, _] => ORMultiMapManifest
case DeletedData => DeletedDataManifest
case _: VersionVector => VersionVectorManifest
case _: ORSetKey[_] ⇒ ORSetKeyManifest
case _: GSetKey[_] ⇒ GSetKeyManifest
case _: GCounterKey ⇒ GCounterKeyManifest
case _: PNCounterKey ⇒ PNCounterKeyManifest
case _: FlagKey ⇒ FlagKeyManifest
case _: LWWRegisterKey[_] ⇒ LWWRegisterKeyManifest
case _: ORMapKey[_, _] ⇒ ORMapKeyManifest
case _: LWWMapKey[_, _] ⇒ LWWMapKeyManifest
case _: PNCounterMapKey[_] ⇒ PNCounterMapKeyManifest
case _: ORMultiMapKey[_, _] ⇒ ORMultiMapKeyManifest
case _: ORSetKey[_] => ORSetKeyManifest
case _: GSetKey[_] => GSetKeyManifest
case _: GCounterKey => GCounterKeyManifest
case _: PNCounterKey => PNCounterKeyManifest
case _: FlagKey => FlagKeyManifest
case _: LWWRegisterKey[_] => LWWRegisterKeyManifest
case _: ORMapKey[_, _] => ORMapKeyManifest
case _: LWWMapKey[_, _] => LWWMapKeyManifest
case _: PNCounterMapKey[_] => PNCounterMapKeyManifest
case _: ORMultiMapKey[_, _] => ORMultiMapKeyManifest
case _: ORSet.DeltaGroup[_] ⇒ ORSetDeltaGroupManifest
case _: ORMap.DeltaGroup[_, _] ⇒ ORMapDeltaGroupManifest
case _: ORSet.FullStateDeltaOp[_] ⇒ ORSetFullManifest
case _: ORSet.DeltaGroup[_] => ORSetDeltaGroupManifest
case _: ORMap.DeltaGroup[_, _] => ORMapDeltaGroupManifest
case _: ORSet.FullStateDeltaOp[_] => ORSetFullManifest
case _ ⇒
case _ =>
throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]")
}
def toBinary(obj: AnyRef): Array[Byte] = obj match {
case m: ORSet[_] ⇒ compress(orsetToProto(m))
case m: ORSet.AddDeltaOp[_] ⇒ orsetToProto(m.underlying).toByteArray
case m: ORSet.RemoveDeltaOp[_] ⇒ orsetToProto(m.underlying).toByteArray
case m: GSet[_] ⇒ gsetToProto(m).toByteArray
case m: GCounter ⇒ gcounterToProto(m).toByteArray
case m: PNCounter ⇒ pncounterToProto(m).toByteArray
case m: Flag ⇒ flagToProto(m).toByteArray
case m: LWWRegister[_] ⇒ lwwRegisterToProto(m).toByteArray
case m: ORMap[_, _] ⇒ compress(ormapToProto(m))
case m: ORMap.PutDeltaOp[_, _] ⇒ ormapPutToProto(m).toByteArray
case m: ORMap.RemoveDeltaOp[_, _] ⇒ ormapRemoveToProto(m).toByteArray
case m: ORMap.RemoveKeyDeltaOp[_, _] ⇒ ormapRemoveKeyToProto(m).toByteArray
case m: ORMap.UpdateDeltaOp[_, _] ⇒ ormapUpdateToProto(m).toByteArray
case m: LWWMap[_, _] ⇒ compress(lwwmapToProto(m))
case m: PNCounterMap[_] ⇒ compress(pncountermapToProto(m))
case m: ORMultiMap[_, _] ⇒ compress(multimapToProto(m))
case DeletedData ⇒ dm.Empty.getDefaultInstance.toByteArray
case m: VersionVector ⇒ versionVectorToProto(m).toByteArray
case Key(id) ⇒ keyIdToBinary(id)
case m: ORSet.DeltaGroup[_] ⇒ orsetDeltaGroupToProto(m).toByteArray
case m: ORMap.DeltaGroup[_, _] ⇒ ormapDeltaGroupToProto(m).toByteArray
case m: ORSet.FullStateDeltaOp[_] ⇒ orsetToProto(m.underlying).toByteArray
case _ ⇒
case m: ORSet[_] => compress(orsetToProto(m))
case m: ORSet.AddDeltaOp[_] => orsetToProto(m.underlying).toByteArray
case m: ORSet.RemoveDeltaOp[_] => orsetToProto(m.underlying).toByteArray
case m: GSet[_] => gsetToProto(m).toByteArray
case m: GCounter => gcounterToProto(m).toByteArray
case m: PNCounter => pncounterToProto(m).toByteArray
case m: Flag => flagToProto(m).toByteArray
case m: LWWRegister[_] => lwwRegisterToProto(m).toByteArray
case m: ORMap[_, _] => compress(ormapToProto(m))
case m: ORMap.PutDeltaOp[_, _] => ormapPutToProto(m).toByteArray
case m: ORMap.RemoveDeltaOp[_, _] => ormapRemoveToProto(m).toByteArray
case m: ORMap.RemoveKeyDeltaOp[_, _] => ormapRemoveKeyToProto(m).toByteArray
case m: ORMap.UpdateDeltaOp[_, _] => ormapUpdateToProto(m).toByteArray
case m: LWWMap[_, _] => compress(lwwmapToProto(m))
case m: PNCounterMap[_] => compress(pncountermapToProto(m))
case m: ORMultiMap[_, _] => compress(multimapToProto(m))
case DeletedData => dm.Empty.getDefaultInstance.toByteArray
case m: VersionVector => versionVectorToProto(m).toByteArray
case Key(id) => keyIdToBinary(id)
case m: ORSet.DeltaGroup[_] => orsetDeltaGroupToProto(m).toByteArray
case m: ORMap.DeltaGroup[_, _] => ormapDeltaGroupToProto(m).toByteArray
case m: ORSet.FullStateDeltaOp[_] => orsetToProto(m.underlying).toByteArray
case _ =>
throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]")
}
override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef =
fromBinaryMap.get(manifest) match {
case Some(f) ⇒ f(bytes)
case None ⇒ throw new NotSerializableException(
case Some(f) => f(bytes)
case None => throw new NotSerializableException(
s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]")
}
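The manifest and fromBinaryMap definitions above follow the standard SerializerWithStringManifest pattern: serialization tags every object with a short manifest string, and deserialization is a single map lookup keyed on that string, failing loudly for unknown manifests. A stripped-down sketch of the same dispatch, using toy manifests rather than the real ones:

import java.io.NotSerializableException

// Toy dispatch table keyed by manifest string, mirroring fromBinaryMap above.
val table: Map[String, Array[Byte] => AnyRef] = Map(
  "S" -> (bytes => new String(bytes, "UTF-8")),
  "I" -> (bytes => Integer.valueOf(new String(bytes, "UTF-8"))))

def fromBinary(bytes: Array[Byte], manifest: String): AnyRef =
  table.get(manifest) match {
    case Some(f) => f(bytes)
    case None =>
      throw new NotSerializableException(s"Unimplemented deserialization of manifest [$manifest]")
  }

assert(fromBinary("42".getBytes("UTF-8"), "I") == Integer.valueOf(42))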
@ -326,11 +326,11 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
val otherElements = new ArrayList[dm.OtherMessage]
val actorRefElements = new ArrayList[String]
gset.elements.foreach {
case s: String ⇒ stringElements.add(s)
case i: Int ⇒ intElements.add(i)
case l: Long ⇒ longElements.add(l)
case ref: ActorRef ⇒ actorRefElements.add(Serialization.serializedActorPath(ref))
case other ⇒ otherElements.add(otherMessageToProto(other))
case s: String => stringElements.add(s)
case i: Int => intElements.add(i)
case l: Long => longElements.add(l)
case ref: ActorRef => actorRefElements.add(Serialization.serializedActorPath(ref))
case other => otherElements.add(otherMessageToProto(other))
}
if (!stringElements.isEmpty) {
Collections.sort(stringElements)
@ -382,11 +382,11 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
var otherElementsMap = Map.empty[dm.OtherMessage, Any]
val actorRefElements = new ArrayList[ActorRef]
orset.elementsMap.keysIterator.foreach {
case s: String ⇒ stringElements.add(s)
case i: Int ⇒ intElements.add(i)
case l: Long ⇒ longElements.add(l)
case ref: ActorRef ⇒ actorRefElements.add(ref)
case other ⇒
case s: String => stringElements.add(s)
case i: Int => intElements.add(i)
case l: Long => longElements.add(l)
case ref: ActorRef => actorRefElements.add(ref)
case other =>
val enclosedMsg = otherMessageToProto(other)
otherElements.add(enclosedMsg)
// need the mapping back to the `other` when adding dots
@ -398,8 +398,8 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
val iter = elements.iterator
while (iter.hasNext) {
val element = iter.next() match {
case enclosedMsg: dm.OtherMessage ⇒ otherElementsMap(enclosedMsg)
case e ⇒ e
case enclosedMsg: dm.OtherMessage => otherElementsMap(enclosedMsg)
case e => e
}
b.addDots(versionVectorToProto(orset.elementsMap(element)))
}
@ -458,13 +458,13 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
val b = rd.ORSetDeltaGroup.newBuilder()
deltaGroup.ops.foreach {
case ORSet.AddDeltaOp(u) ⇒
case ORSet.AddDeltaOp(u) =>
b.addEntries(createEntry(rd.ORSetDeltaOp.Add, u))
case ORSet.RemoveDeltaOp(u) ⇒
case ORSet.RemoveDeltaOp(u) =>
b.addEntries(createEntry(rd.ORSetDeltaOp.Remove, u))
case ORSet.FullStateDeltaOp(u) ⇒
case ORSet.FullStateDeltaOp(u) =>
b.addEntries(createEntry(rd.ORSetDeltaOp.Full, u))
case ORSet.DeltaGroup(u) ⇒
case ORSet.DeltaGroup(u) =>
throw new IllegalArgumentException("ORSet.DeltaGroup should not be nested")
}
b.build()
@ -473,7 +473,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
private def orsetDeltaGroupFromBinary(bytes: Array[Byte]): ORSet.DeltaGroup[Any] = {
val deltaGroup = rd.ORSetDeltaGroup.parseFrom(bytes)
val ops: Vector[ORSet.DeltaOp] =
deltaGroup.getEntriesList.asScala.iterator.map { entry ⇒
deltaGroup.getEntriesList.asScala.iterator.map { entry =>
if (entry.getOperation == rd.ORSetDeltaOp.Add)
ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying))
else if (entry.getOperation == rd.ORSetDeltaOp.Remove)
@ -528,8 +528,8 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def gcounterToProto(gcounter: GCounter): rd.GCounter = {
val b = rd.GCounter.newBuilder()
gcounter.state.toVector.sortBy { case (address, _) ⇒ address }.foreach {
case (address, value) ⇒ b.addEntries(rd.GCounter.Entry.newBuilder().
gcounter.state.toVector.sortBy { case (address, _) => address }.foreach {
case (address, value) => b.addEntries(rd.GCounter.Entry.newBuilder().
setNode(uniqueAddressToProto(address)).setValue(ByteString.copyFrom(value.toByteArray)))
}
b.build()
@ -539,8 +539,8 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
gcounterFromProto(rd.GCounter.parseFrom(bytes))
def gcounterFromProto(gcounter: rd.GCounter): GCounter = {
new GCounter(state = gcounter.getEntriesList.asScala.iterator.map(entry ⇒
uniqueAddressFromProto(entry.getNode) → BigInt(entry.getValue.toByteArray)).toMap)
new GCounter(state = gcounter.getEntriesList.asScala.iterator.map(entry =>
uniqueAddressFromProto(entry.getNode) -> BigInt(entry.getValue.toByteArray)).toMap)
}
def pncounterToProto(pncounter: PNCounter): rd.PNCounter =
@ -561,14 +561,14 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
/*
* Convert a Map[A, B] to an Iterable[Entry] where Entry is the protobuf map entry.
*/
private def getEntries[IKey, IValue, EntryBuilder <: GeneratedMessage.Builder[EntryBuilder], PEntry <: GeneratedMessage, PValue <: GeneratedMessage](input: Map[IKey, IValue], createBuilder: () ⇒ EntryBuilder, valueConverter: IValue ⇒ PValue)(implicit comparator: Comparator[PEntry], eh: ProtoMapEntryWriter[PEntry, EntryBuilder, PValue]): java.lang.Iterable[PEntry] = {
private def getEntries[IKey, IValue, EntryBuilder <: GeneratedMessage.Builder[EntryBuilder], PEntry <: GeneratedMessage, PValue <: GeneratedMessage](input: Map[IKey, IValue], createBuilder: () => EntryBuilder, valueConverter: IValue => PValue)(implicit comparator: Comparator[PEntry], eh: ProtoMapEntryWriter[PEntry, EntryBuilder, PValue]): java.lang.Iterable[PEntry] = {
// The resulting Iterable needs to be ordered deterministically in order to create same signature upon serializing same data
val protoEntries = new TreeSet[PEntry](comparator)
input.foreach {
case (key: String, value) ⇒ protoEntries.add(eh.setStringKey(createBuilder(), key, valueConverter(value)))
case (key: Int, value) ⇒ protoEntries.add(eh.setIntKey(createBuilder(), key, valueConverter(value)))
case (key: Long, value) ⇒ protoEntries.add(eh.setLongKey(createBuilder(), key, valueConverter(value)))
case (key, value) ⇒ protoEntries.add(eh.setOtherKey(createBuilder(), otherMessageToProto(key), valueConverter(value)))
case (key: String, value) => protoEntries.add(eh.setStringKey(createBuilder(), key, valueConverter(value)))
case (key: Int, value) => protoEntries.add(eh.setIntKey(createBuilder(), key, valueConverter(value)))
case (key: Long, value) => protoEntries.add(eh.setLongKey(createBuilder(), key, valueConverter(value)))
case (key, value) => protoEntries.add(eh.setOtherKey(createBuilder(), otherMessageToProto(key), valueConverter(value)))
}
protoEntries
}
@ -582,25 +582,25 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def ormapFromBinary(bytes: Array[Byte]): ORMap[Any, ReplicatedData] =
ormapFromProto(rd.ORMap.parseFrom(decompress(bytes)))
def mapTypeFromProto[PEntry <: GeneratedMessage, A <: GeneratedMessage, B <: ReplicatedData](input: util.List[PEntry], valueCreator: A ⇒ B)(implicit eh: ProtoMapEntryReader[PEntry, A]): Map[Any, B] = {
input.asScala.map { entry ⇒
if (eh.hasStringKey(entry)) eh.getStringKey(entry) → valueCreator(eh.getValue(entry))
else if (eh.hasIntKey(entry)) eh.getIntKey(entry) → valueCreator(eh.getValue(entry))
else if (eh.hasLongKey(entry)) eh.getLongKey(entry) → valueCreator(eh.getValue(entry))
else if (eh.hasOtherKey(entry)) otherMessageFromProto(eh.getOtherKey(entry)) → valueCreator(eh.getValue(entry))
def mapTypeFromProto[PEntry <: GeneratedMessage, A <: GeneratedMessage, B <: ReplicatedData](input: util.List[PEntry], valueCreator: A => B)(implicit eh: ProtoMapEntryReader[PEntry, A]): Map[Any, B] = {
input.asScala.map { entry =>
if (eh.hasStringKey(entry)) eh.getStringKey(entry) -> valueCreator(eh.getValue(entry))
else if (eh.hasIntKey(entry)) eh.getIntKey(entry) -> valueCreator(eh.getValue(entry))
else if (eh.hasLongKey(entry)) eh.getLongKey(entry) -> valueCreator(eh.getValue(entry))
else if (eh.hasOtherKey(entry)) otherMessageFromProto(eh.getOtherKey(entry)) -> valueCreator(eh.getValue(entry))
else throw new IllegalArgumentException(s"Can't deserialize ${entry.getClass} because it does not have any key in the serialized message.")
}.toMap
}
def ormapFromProto(ormap: rd.ORMap): ORMap[Any, ReplicatedData] = {
val entries = mapTypeFromProto(ormap.getEntriesList, (v: dm.OtherMessage) ⇒ otherMessageFromProto(v).asInstanceOf[ReplicatedData])
val entries = mapTypeFromProto(ormap.getEntriesList, (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedData])
new ORMap(
keys = orsetFromProto(ormap.getKeys),
entries,
ORMap.VanillaORMapTag)
}
def singleMapEntryFromProto[PEntry <: GeneratedMessage, A <: GeneratedMessage, B <: ReplicatedData](input: util.List[PEntry], valueCreator: A ⇒ B)(implicit eh: ProtoMapEntryReader[PEntry, A]): Map[Any, B] = {
def singleMapEntryFromProto[PEntry <: GeneratedMessage, A <: GeneratedMessage, B <: ReplicatedData](input: util.List[PEntry], valueCreator: A => B)(implicit eh: ProtoMapEntryReader[PEntry, A]): Map[Any, B] = {
val map = mapTypeFromProto(input, valueCreator)
if (map.size > 1)
throw new IllegalArgumentException(s"Can't deserialize the key/value pair in the ORMap delta - too many pairs on the wire")
@ -610,12 +610,12 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def singleKeyEntryFromProto[PEntry <: GeneratedMessage, A <: GeneratedMessage](entryOption: Option[PEntry])(implicit eh: ProtoMapEntryReader[PEntry, A]): Any =
entryOption match {
case Some(entry) ⇒ if (eh.hasStringKey(entry)) eh.getStringKey(entry)
case Some(entry) => if (eh.hasStringKey(entry)) eh.getStringKey(entry)
else if (eh.hasIntKey(entry)) eh.getIntKey(entry)
else if (eh.hasLongKey(entry)) eh.getLongKey(entry)
else if (eh.hasOtherKey(entry)) otherMessageFromProto(eh.getOtherKey(entry))
else throw new IllegalArgumentException(s"Can't deserialize the key in the ORMap delta")
case _ ⇒ throw new IllegalArgumentException(s"Can't deserialize the key in the ORMap delta")
case _ => throw new IllegalArgumentException(s"Can't deserialize the key in the ORMap delta")
}
// wire protocol is always DeltaGroup
@ -656,12 +656,12 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
// this can be made client-extendable in the same way as Http codes in Spray are
private def zeroTagFromCode(code: Int) = code match {
case ORMap.VanillaORMapTag.value ⇒ ORMap.VanillaORMapTag
case PNCounterMap.PNCounterMapTag.value ⇒ PNCounterMap.PNCounterMapTag
case ORMultiMap.ORMultiMapTag.value ⇒ ORMultiMap.ORMultiMapTag
case ORMultiMap.ORMultiMapWithValueDeltasTag.value ⇒ ORMultiMap.ORMultiMapWithValueDeltasTag
case LWWMap.LWWMapTag.value ⇒ LWWMap.LWWMapTag
case _ ⇒ throw new IllegalArgumentException("Invalid ZeroTag code")
case ORMap.VanillaORMapTag.value => ORMap.VanillaORMapTag
case PNCounterMap.PNCounterMapTag.value => PNCounterMap.PNCounterMapTag
case ORMultiMap.ORMultiMapTag.value => ORMultiMap.ORMultiMapTag
case ORMultiMap.ORMultiMapWithValueDeltasTag.value => ORMultiMap.ORMultiMapWithValueDeltasTag
case LWWMap.LWWMapTag.value => LWWMap.LWWMapTag
case _ => throw new IllegalArgumentException("Invalid ZeroTag code")
}
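The comment above notes that this mapping could be made client-extendable in the way Spray's Http codes are. A hedged sketch of what that could look like; ZeroTagRegistry is invented for illustration and nothing like it exists in Akka, where the mapping stays the hard-coded match above:

// Hypothetical extendable registry: built-in codes are registered at startup,
// client code can add its own codes before serialization begins.
object ZeroTagRegistry {
  @volatile private var codes = Map.empty[Int, String]

  def register(code: Int, tagName: String): Unit = synchronized {
    require(!codes.contains(code), s"ZeroTag code $code already registered")
    codes += code -> tagName
  }

  def tagFor(code: Int): String =
    codes.getOrElse(code, throw new IllegalArgumentException("Invalid ZeroTag code"))
}

// e.g. ZeroTagRegistry.register(ORMap.VanillaORMapTag.value, "VanillaORMapTag")
// would replace one arm of the match, and user codes slot in the same way.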
private def ormapDeltaGroupFromBinary(bytes: Array[Byte]): ORMap.DeltaGroup[Any, ReplicatedData] = {
@@ -671,9 +671,9 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
private def ormapDeltaGroupOpsFromBinary(bytes: Array[Byte]): scala.collection.immutable.IndexedSeq[ORMap.DeltaOp] = {
val deltaGroup = rd.ORMapDeltaGroup.parseFrom(bytes)
val ops: Vector[ORMap.DeltaOp] =
deltaGroup.getEntriesList.asScala.iterator.map { entry ⇒
deltaGroup.getEntriesList.asScala.iterator.map { entry =>
if (entry.getOperation == rd.ORMapDeltaOp.ORMapPut) {
val map = singleMapEntryFromProto(entry.getEntryDataList, (v: dm.OtherMessage) ⇒ otherMessageFromProto(v).asInstanceOf[ReplicatedData])
val map = singleMapEntryFromProto(entry.getEntryDataList, (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedData])
ORMap.PutDeltaOp(ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)), map.head, zeroTagFromCode(entry.getZeroTag))
} else if (entry.getOperation == rd.ORMapDeltaOp.ORMapRemove) {
ORMap.RemoveDeltaOp(ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)), zeroTagFromCode(entry.getZeroTag))
@@ -681,7 +681,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
val elem = singleKeyEntryFromProto(entry.getEntryDataList.asScala.headOption)
ORMap.RemoveKeyDeltaOp(ORSet.RemoveDeltaOp(orsetFromProto(entry.getUnderlying)), elem, zeroTagFromCode(entry.getZeroTag))
} else if (entry.getOperation == rd.ORMapDeltaOp.ORMapUpdate) {
val map = mapTypeFromProto(entry.getEntryDataList, (v: dm.OtherMessage) ⇒ otherMessageFromProto(v).asInstanceOf[ReplicatedDelta])
val map = mapTypeFromProto(entry.getEntryDataList, (v: dm.OtherMessage) => otherMessageFromProto(v).asInstanceOf[ReplicatedDelta])
ORMap.UpdateDeltaOp(ORSet.AddDeltaOp(orsetFromProto(entry.getUnderlying)), map, zeroTagFromCode(entry.getZeroTag))
} else
throw new NotSerializableException(s"Unknown ORMap delta operation ${entry.getOperation}")
@@ -719,10 +719,10 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
.setUnderlying(orsetToProto(u))
.setZeroTag(zt)
m.foreach {
case (key: String, value) ⇒ builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setStringKey(key).setValue(otherMessageToProto(value)).build())
case (key: Int, value) ⇒ builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setIntKey(key).setValue(otherMessageToProto(value)).build())
case (key: Long, value) ⇒ builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setLongKey(key).setValue(otherMessageToProto(value)).build())
case (key, value) ⇒ builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setOtherKey(otherMessageToProto(key)).setValue(otherMessageToProto(value)).build())
case (key: String, value) => builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setStringKey(key).setValue(otherMessageToProto(value)).build())
case (key: Int, value) => builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setIntKey(key).setValue(otherMessageToProto(value)).build())
case (key: Long, value) => builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setLongKey(key).setValue(otherMessageToProto(value)).build())
case (key, value) => builder.addEntryData(rd.ORMapDeltaGroup.MapEntry.newBuilder().setOtherKey(otherMessageToProto(key)).setValue(otherMessageToProto(value)).build())
}
builder
}
@@ -731,10 +731,10 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def createEntryWithKey(opType: rd.ORMapDeltaOp, u: ORSet[_], k: Any, zt: Int) = {
val entryDataBuilder = rd.ORMapDeltaGroup.MapEntry.newBuilder()
k match {
case key: String ⇒ entryDataBuilder.setStringKey(key)
case key: Int ⇒ entryDataBuilder.setIntKey(key)
case key: Long ⇒ entryDataBuilder.setLongKey(key)
case key ⇒ entryDataBuilder.setOtherKey(otherMessageToProto(key))
case key: String => entryDataBuilder.setStringKey(key)
case key: Int => entryDataBuilder.setIntKey(key)
case key: Long => entryDataBuilder.setLongKey(key)
case key => entryDataBuilder.setOtherKey(otherMessageToProto(key))
}
val builder = rd.ORMapDeltaGroup.Entry.newBuilder()
.setOperation(opType)
@@ -746,15 +746,15 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
val b = rd.ORMapDeltaGroup.newBuilder()
deltaGroupOps.foreach {
case ORMap.PutDeltaOp(op, pair, zt) ⇒
case ORMap.PutDeltaOp(op, pair, zt) =>
b.addEntries(createEntry(rd.ORMapDeltaOp.ORMapPut, op.asInstanceOf[ORSet.AddDeltaOp[_]].underlying, Map(pair), zt.value))
case ORMap.RemoveDeltaOp(op, zt) ⇒
case ORMap.RemoveDeltaOp(op, zt) =>
b.addEntries(createEntry(rd.ORMapDeltaOp.ORMapRemove, op.asInstanceOf[ORSet.RemoveDeltaOp[_]].underlying, Map.empty, zt.value))
case ORMap.RemoveKeyDeltaOp(op, k, zt) ⇒
case ORMap.RemoveKeyDeltaOp(op, k, zt) =>
b.addEntries(createEntryWithKey(rd.ORMapDeltaOp.ORMapRemoveKey, op.asInstanceOf[ORSet.RemoveDeltaOp[_]].underlying, k, zt.value))
case ORMap.UpdateDeltaOp(op, m, zt) ⇒
case ORMap.UpdateDeltaOp(op, m, zt) =>
b.addEntries(createEntry(rd.ORMapDeltaOp.ORMapUpdate, op.asInstanceOf[ORSet.AddDeltaOp[_]].underlying, m, zt.value))
case ORMap.DeltaGroup(u) ⇒
case ORMap.DeltaGroup(u) =>
throw new IllegalArgumentException("ORMap.DeltaGroup should not be nested")
}
b.build()
View file
@@ -16,11 +16,11 @@ import akka.cluster.ddata.PruningState
import akka.cluster.ddata.ReplicatedData
import akka.cluster.ddata.Replicator._
import akka.cluster.ddata.Replicator.Internal._
import akka.cluster.ddata.protobuf.msg.{ ReplicatorMessages ⇒ dm }
import akka.cluster.ddata.protobuf.msg.{ ReplicatorMessages => dm }
import akka.serialization.Serialization
import akka.serialization.SerializerWithStringManifest
import akka.serialization.BaseSerializer
import akka.util.{ ByteString ⇒ AkkaByteString }
import akka.util.{ ByteString => AkkaByteString }
import akka.protobuf.ByteString
import akka.cluster.ddata.Key.KeyR
import java.util.concurrent.atomic.AtomicInteger
@@ -48,7 +48,7 @@ import akka.util.ccompat._
* `evict` must be called from the outside, i.e. the
* cache will not cleanup itself.
*/
final class SmallCache[A <: AnyRef, B <: AnyRef](size: Int, timeToLive: FiniteDuration, getOrAddFactory: A ⇒ B) {
final class SmallCache[A <: AnyRef, B <: AnyRef](size: Int, timeToLive: FiniteDuration, getOrAddFactory: A => B) {
require((size & (size - 1)) == 0, "size must be a power of 2")
require(size <= 32, "size must be <= 32")
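The first require uses the classic bitwise power-of-two test. A two-line illustration of why it works:

// A power of two has exactly one bit set; n - 1 clears that bit and sets all
// lower ones, so the AND is zero only for powers of two (n > 0 excludes zero).
def isPowerOfTwo(n: Int): Boolean = n > 0 && (n & (n - 1)) == 0
// isPowerOfTwo(4)  // true:  100 & 011 == 000
// isPowerOfTwo(6)  // false: 110 & 101 == 100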
@@ -152,8 +152,8 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private val cacheTimeToLive = system.settings.config.getDuration(
"akka.cluster.distributed-data.serializer-cache-time-to-live", TimeUnit.MILLISECONDS).millis
private val readCache = new SmallCache[Read, Array[Byte]](4, cacheTimeToLive, m ⇒ readToProto(m).toByteArray)
private val writeCache = new SmallCache[Write, Array[Byte]](4, cacheTimeToLive, m ⇒ writeToProto(m).toByteArray)
private val readCache = new SmallCache[Read, Array[Byte]](4, cacheTimeToLive, m => readToProto(m).toByteArray)
private val writeCache = new SmallCache[Write, Array[Byte]](4, cacheTimeToLive, m => writeToProto(m).toByteArray)
system.scheduler.schedule(cacheTimeToLive, cacheTimeToLive / 2) {
readCache.evict()
writeCache.evict()
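Per the SmallCache scaladoc, eviction only happens when the owner calls evict(), and the serializer schedules that call at half the time-to-live. The same pattern reduced to a self-contained sketch; TtlCache is invented here, it is not Akka's SmallCache:

import java.util.concurrent.{ Executors, TimeUnit }

// A TTL cache that only evicts when told to, plus an external scheduler that
// tells it to. Reads never check the deadline; only evict() drops entries.
final class TtlCache[K, V](ttlMillis: Long) {
  private var entries = Map.empty[K, (V, Long)]
  def getOrAdd(k: K)(mk: => V): V = synchronized {
    entries.get(k) match {
      case Some((v, _)) => v
      case None =>
        val v = mk
        entries += k -> ((v, System.currentTimeMillis() + ttlMillis))
        v
    }
  }
  def evict(): Unit = synchronized { // never called internally
    val now = System.currentTimeMillis()
    entries = entries.filter { case (_, (_, deadline)) => deadline > now }
  }
}

val cache = new TtlCache[String, Array[Byte]](ttlMillis = 1000)
val scheduler = Executors.newSingleThreadScheduledExecutor()
// evict at half the TTL, mirroring cacheTimeToLive / 2 above
scheduler.scheduleAtFixedRate(() => cache.evict(), 500, 500, TimeUnit.MILLISECONDS)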
@@ -181,76 +181,76 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
val DeltaPropagationManifest = "Q"
val DeltaNackManifest = "R"
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef](
GetManifest → getFromBinary,
GetSuccessManifest → getSuccessFromBinary,
NotFoundManifest → notFoundFromBinary,
GetFailureManifest → getFailureFromBinary,
SubscribeManifest → subscribeFromBinary,
UnsubscribeManifest → unsubscribeFromBinary,
ChangedManifest → changedFromBinary,
DataEnvelopeManifest → dataEnvelopeFromBinary,
WriteManifest → writeFromBinary,
WriteAckManifest → (_ ⇒ WriteAck),
ReadManifest → readFromBinary,
ReadResultManifest → readResultFromBinary,
StatusManifest → statusFromBinary,
GossipManifest → gossipFromBinary,
DeltaPropagationManifest → deltaPropagationFromBinary,
WriteNackManifest → (_ ⇒ WriteNack),
DeltaNackManifest → (_ ⇒ DeltaNack),
DurableDataEnvelopeManifest → durableDataEnvelopeFromBinary)
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] => AnyRef](
GetManifest -> getFromBinary,
GetSuccessManifest -> getSuccessFromBinary,
NotFoundManifest -> notFoundFromBinary,
GetFailureManifest -> getFailureFromBinary,
SubscribeManifest -> subscribeFromBinary,
UnsubscribeManifest -> unsubscribeFromBinary,
ChangedManifest -> changedFromBinary,
DataEnvelopeManifest -> dataEnvelopeFromBinary,
WriteManifest -> writeFromBinary,
WriteAckManifest -> (_ => WriteAck),
ReadManifest -> readFromBinary,
ReadResultManifest -> readResultFromBinary,
StatusManifest -> statusFromBinary,
GossipManifest -> gossipFromBinary,
DeltaPropagationManifest -> deltaPropagationFromBinary,
WriteNackManifest -> (_ => WriteNack),
DeltaNackManifest -> (_ => DeltaNack),
DurableDataEnvelopeManifest -> durableDataEnvelopeFromBinary)
override def manifest(obj: AnyRef): String = obj match {
case _: DataEnvelope ⇒ DataEnvelopeManifest
case _: Write ⇒ WriteManifest
case WriteAck ⇒ WriteAckManifest
case _: Read ⇒ ReadManifest
case _: ReadResult ⇒ ReadResultManifest
case _: DeltaPropagation ⇒ DeltaPropagationManifest
case _: Status ⇒ StatusManifest
case _: Get[_] ⇒ GetManifest
case _: GetSuccess[_] ⇒ GetSuccessManifest
case _: DurableDataEnvelope ⇒ DurableDataEnvelopeManifest
case _: Changed[_] ⇒ ChangedManifest
case _: NotFound[_] ⇒ NotFoundManifest
case _: GetFailure[_] ⇒ GetFailureManifest
case _: Subscribe[_] ⇒ SubscribeManifest
case _: Unsubscribe[_] ⇒ UnsubscribeManifest
case _: Gossip ⇒ GossipManifest
case WriteNack ⇒ WriteNackManifest
case DeltaNack ⇒ DeltaNackManifest
case _ ⇒
case _: DataEnvelope => DataEnvelopeManifest
case _: Write => WriteManifest
case WriteAck => WriteAckManifest
case _: Read => ReadManifest
case _: ReadResult => ReadResultManifest
case _: DeltaPropagation => DeltaPropagationManifest
case _: Status => StatusManifest
case _: Get[_] => GetManifest
case _: GetSuccess[_] => GetSuccessManifest
case _: DurableDataEnvelope => DurableDataEnvelopeManifest
case _: Changed[_] => ChangedManifest
case _: NotFound[_] => NotFoundManifest
case _: GetFailure[_] => GetFailureManifest
case _: Subscribe[_] => SubscribeManifest
case _: Unsubscribe[_] => UnsubscribeManifest
case _: Gossip => GossipManifest
case WriteNack => WriteNackManifest
case DeltaNack => DeltaNackManifest
case _ =>
throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]")
}
def toBinary(obj: AnyRef): Array[Byte] = obj match {
case m: DataEnvelope ⇒ dataEnvelopeToProto(m).toByteArray
case m: Write ⇒ writeCache.getOrAdd(m)
case WriteAck ⇒ writeAckBytes
case m: Read ⇒ readCache.getOrAdd(m)
case m: ReadResult ⇒ readResultToProto(m).toByteArray
case m: Status ⇒ statusToProto(m).toByteArray
case m: DeltaPropagation ⇒ deltaPropagationToProto(m).toByteArray
case m: Get[_] ⇒ getToProto(m).toByteArray
case m: GetSuccess[_] ⇒ getSuccessToProto(m).toByteArray
case m: DurableDataEnvelope ⇒ durableDataEnvelopeToProto(m).toByteArray
case m: Changed[_] ⇒ changedToProto(m).toByteArray
case m: NotFound[_] ⇒ notFoundToProto(m).toByteArray
case m: GetFailure[_] ⇒ getFailureToProto(m).toByteArray
case m: Subscribe[_] ⇒ subscribeToProto(m).toByteArray
case m: Unsubscribe[_] ⇒ unsubscribeToProto(m).toByteArray
case m: Gossip ⇒ compress(gossipToProto(m))
case WriteNack ⇒ dm.Empty.getDefaultInstance.toByteArray
case DeltaNack ⇒ dm.Empty.getDefaultInstance.toByteArray
case _ ⇒
case m: DataEnvelope => dataEnvelopeToProto(m).toByteArray
case m: Write => writeCache.getOrAdd(m)
case WriteAck => writeAckBytes
case m: Read => readCache.getOrAdd(m)
case m: ReadResult => readResultToProto(m).toByteArray
case m: Status => statusToProto(m).toByteArray
case m: DeltaPropagation => deltaPropagationToProto(m).toByteArray
case m: Get[_] => getToProto(m).toByteArray
case m: GetSuccess[_] => getSuccessToProto(m).toByteArray
case m: DurableDataEnvelope => durableDataEnvelopeToProto(m).toByteArray
case m: Changed[_] => changedToProto(m).toByteArray
case m: NotFound[_] => notFoundToProto(m).toByteArray
case m: GetFailure[_] => getFailureToProto(m).toByteArray
case m: Subscribe[_] => subscribeToProto(m).toByteArray
case m: Unsubscribe[_] => unsubscribeToProto(m).toByteArray
case m: Gossip => compress(gossipToProto(m))
case WriteNack => dm.Empty.getDefaultInstance.toByteArray
case DeltaNack => dm.Empty.getDefaultInstance.toByteArray
case _ =>
throw new IllegalArgumentException(s"Can't serialize object of type ${obj.getClass} in [${getClass.getName}]")
}
override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef =
fromBinaryMap.get(manifest) match {
case Some(f) ⇒ f(bytes)
case None ⇒ throw new NotSerializableException(
case Some(f) => f(bytes)
case None => throw new NotSerializableException(
s"Unimplemented deserialization of message with manifest [$manifest] in [${getClass.getName}]")
}
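The manifest/toBinary/fromBinary trio above follows Akka's SerializerWithStringManifest contract: a short string names the type on the wire, and deserialization is a single map lookup. The shape of the pattern, stripped of the Replicator specifics; Ping, Pong and MiniSerializer are made up for illustration:

// Hypothetical messages; the real serializer handles Replicator protocol messages.
final case class Ping(n: Int)
final case class Pong(n: Int)

object MiniSerializer {
  val PingManifest = "A"
  val PongManifest = "B"

  private val fromBinaryMap: Map[String, Array[Byte] => AnyRef] = Map(
    PingManifest -> ((bytes: Array[Byte]) => Ping(new String(bytes, "UTF-8").toInt)),
    PongManifest -> ((bytes: Array[Byte]) => Pong(new String(bytes, "UTF-8").toInt)))

  def manifest(obj: AnyRef): String = obj match {
    case _: Ping => PingManifest
    case _: Pong => PongManifest
    case _ => throw new IllegalArgumentException(s"Can't serialize ${obj.getClass}")
  }

  def toBinary(obj: AnyRef): Array[Byte] = obj match {
    case Ping(n) => n.toString.getBytes("UTF-8")
    case Pong(n) => n.toString.getBytes("UTF-8")
    case _ => throw new IllegalArgumentException(s"Can't serialize ${obj.getClass}")
  }

  def fromBinary(bytes: Array[Byte], manifest: String): AnyRef =
    fromBinaryMap.get(manifest) match {
      case Some(f) => f(bytes)
      case None => throw new java.io.NotSerializableException(s"Unknown manifest [$manifest]")
    }
}
// Round trip: fromBinary(toBinary(msg), manifest(msg)) == msg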
@@ -258,7 +258,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
val b = dm.Status.newBuilder()
b.setChunk(status.chunk).setTotChunks(status.totChunks)
val entries = status.digests.foreach {
case (key, digest) ⇒
case (key, digest) =>
b.addEntries(dm.Status.Entry.newBuilder().
setKey(key).
setDigest(ByteString.copyFrom(digest.toArray)))
@@ -269,15 +269,15 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def statusFromBinary(bytes: Array[Byte]): Status = {
val status = dm.Status.parseFrom(bytes)
Status(
status.getEntriesList.asScala.iterator.map(e ⇒
e.getKey → AkkaByteString(e.getDigest.toByteArray())).toMap,
status.getEntriesList.asScala.iterator.map(e =>
e.getKey -> AkkaByteString(e.getDigest.toByteArray())).toMap,
status.getChunk, status.getTotChunks)
}
private def gossipToProto(gossip: Gossip): dm.Gossip = {
val b = dm.Gossip.newBuilder().setSendBack(gossip.sendBack)
val entries = gossip.updatedData.foreach {
case (key, data) ⇒
case (key, data) =>
b.addEntries(dm.Gossip.Entry.newBuilder().
setKey(key).
setEnvelope(dataEnvelopeToProto(data)))
@@ -288,8 +288,8 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def gossipFromBinary(bytes: Array[Byte]): Gossip = {
val gossip = dm.Gossip.parseFrom(decompress(bytes))
Gossip(
gossip.getEntriesList.asScala.iterator.map(e ⇒
e.getKey → dataEnvelopeFromProto(e.getEnvelope)).toMap,
gossip.getEntriesList.asScala.iterator.map(e =>
e.getKey -> dataEnvelopeFromProto(e.getEnvelope)).toMap,
sendBack = gossip.getSendBack)
}
@@ -299,7 +299,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
if (deltaPropagation.reply)
b.setReply(deltaPropagation.reply)
val entries = deltaPropagation.deltas.foreach {
case (key, Delta(data, fromSeqNr, toSeqNr)) ⇒
case (key, Delta(data, fromSeqNr, toSeqNr)) =>
val b2 = dm.DeltaPropagation.Entry.newBuilder()
.setKey(key)
.setEnvelope(dataEnvelopeToProto(data))
@@ -317,19 +317,19 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
DeltaPropagation(
uniqueAddressFromProto(deltaPropagation.getFromNode),
reply,
deltaPropagation.getEntriesList.asScala.iterator.map { e ⇒
deltaPropagation.getEntriesList.asScala.iterator.map { e =>
val fromSeqNr = e.getFromSeqNr
val toSeqNr = if (e.hasToSeqNr) e.getToSeqNr else fromSeqNr
e.getKey → Delta(dataEnvelopeFromProto(e.getEnvelope), fromSeqNr, toSeqNr)
e.getKey -> Delta(dataEnvelopeFromProto(e.getEnvelope), fromSeqNr, toSeqNr)
}.toMap)
}
private def getToProto(get: Get[_]): dm.Get = {
val consistencyValue = get.consistency match {
case ReadLocal ⇒ 1
case ReadFrom(n, _) ⇒ n
case _: ReadMajority ⇒ 0
case _: ReadAll ⇒ -1
case ReadLocal => 1
case ReadFrom(n, _) => n
case _: ReadMajority => 0
case _: ReadAll => -1
}
val b = dm.Get.newBuilder().
@@ -337,7 +337,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
setConsistency(consistencyValue).
setTimeout(get.consistency.timeout.toMillis.toInt)
get.request.foreach(o ⇒ b.setRequest(otherMessageToProto(o)))
get.request.foreach(o => b.setRequest(otherMessageToProto(o)))
b.build()
}
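getToProto packs the read consistency into one int field using sentinel values: 1 is ReadLocal, 0 is ReadMajority, -1 is ReadAll, and any other value n is ReadFrom(n). A simplified round-trip sketch (timeout dropped); the scheme works because ReadFrom is only legal for n >= 2, so the sentinels cannot collide with it:

sealed trait Consistency
case object ReadLocal extends Consistency
case object ReadMajority extends Consistency
case object ReadAll extends Consistency
final case class ReadFrom(n: Int) extends Consistency { require(n >= 2) }

def encode(c: Consistency): Int = c match {
  case ReadLocal => 1
  case ReadFrom(n) => n
  case ReadMajority => 0
  case ReadAll => -1
}

def decode(i: Int): Consistency = i match {
  case 0 => ReadMajority
  case -1 => ReadAll
  case 1 => ReadLocal
  case n => ReadFrom(n)
}
// decode(encode(c)) == c for every legal Consistency value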
@@ -347,10 +347,10 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
val request = if (get.hasRequest()) Some(otherMessageFromProto(get.getRequest)) else None
val timeout = Duration(get.getTimeout, TimeUnit.MILLISECONDS)
val consistency = get.getConsistency match {
case 0 ⇒ ReadMajority(timeout)
case -1 ⇒ ReadAll(timeout)
case 1 ⇒ ReadLocal
case n ⇒ ReadFrom(n, timeout)
case 0 => ReadMajority(timeout)
case -1 => ReadAll(timeout)
case 1 => ReadLocal
case n => ReadFrom(n, timeout)
}
Get(key, consistency, request)
}
@@ -360,7 +360,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
setKey(otherMessageToProto(getSuccess.key)).
setData(otherMessageToProto(getSuccess.dataValue))
getSuccess.request.foreach(o ⇒ b.setRequest(otherMessageToProto(o)))
getSuccess.request.foreach(o => b.setRequest(otherMessageToProto(o)))
b.build()
}
@@ -374,7 +374,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def notFoundToProto(notFound: NotFound[_]): dm.NotFound = {
val b = dm.NotFound.newBuilder().setKey(otherMessageToProto(notFound.key))
notFound.request.foreach(o ⇒ b.setRequest(otherMessageToProto(o)))
notFound.request.foreach(o => b.setRequest(otherMessageToProto(o)))
b.build()
}
@@ -387,7 +387,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def getFailureToProto(getFailure: GetFailure[_]): dm.GetFailure = {
val b = dm.GetFailure.newBuilder().setKey(otherMessageToProto(getFailure.key))
getFailure.request.foreach(o ⇒ b.setRequest(otherMessageToProto(o)))
getFailure.request.foreach(o => b.setRequest(otherMessageToProto(o)))
b.build()
}
@@ -437,15 +437,15 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def pruningToProto(entries: Map[UniqueAddress, PruningState]): Iterable[dm.DataEnvelope.PruningEntry] = {
entries.map {
case (removedAddress, state) ⇒
case (removedAddress, state) =>
val b = dm.DataEnvelope.PruningEntry.newBuilder().
setRemovedAddress(uniqueAddressToProto(removedAddress))
state match {
case PruningState.PruningInitialized(owner, seen) ⇒
seen.toVector.sorted(Member.addressOrdering).map(addressToProto).foreach { a ⇒ b.addSeen(a) }
case PruningState.PruningInitialized(owner, seen) =>
seen.toVector.sorted(Member.addressOrdering).map(addressToProto).foreach { a => b.addSeen(a) }
b.setOwnerAddress(uniqueAddressToProto(owner))
b.setPerformed(false)
case PruningState.PruningPerformed(obsoleteTime) ⇒
case PruningState.PruningPerformed(obsoleteTime) =>
b.setPerformed(true).setObsoleteTime(obsoleteTime)
// TODO ownerAddress is only needed for PruningInitialized, but kept here for
// wire backwards compatibility with 2.4.16 (required field)
@@ -483,7 +483,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
if (pruningEntries.isEmpty)
Map.empty
else
pruningEntries.asScala.iterator.map { pruningEntry ⇒
pruningEntries.asScala.iterator.map { pruningEntry =>
val state =
if (pruningEntry.getPerformed) {
// for wire compatibility with Akka 2.4.x
@@ -494,7 +494,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
uniqueAddressFromProto(pruningEntry.getOwnerAddress),
pruningEntry.getSeenList.asScala.iterator.map(addressFromProto).to(immutable.Set))
val removed = uniqueAddressFromProto(pruningEntry.getRemovedAddress)
removed → state
removed -> state
}.toMap
}
@@ -518,8 +518,8 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def readResultToProto(readResult: ReadResult): dm.ReadResult = {
val b = dm.ReadResult.newBuilder()
readResult.envelope match {
case Some(d) ⇒ b.setEnvelope(dataEnvelopeToProto(d))
case None ⇒
case Some(d) => b.setEnvelope(dataEnvelopeToProto(d))
case None =>
}
b.build()
}
@@ -535,8 +535,8 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def durableDataEnvelopeToProto(durableDataEnvelope: DurableDataEnvelope): dm.DurableDataEnvelope = {
// only keep the PruningPerformed entries
val pruning = durableDataEnvelope.dataEnvelope.pruning.filter {
case (_, _: PruningPerformed) ⇒ true
case _ ⇒ false
case (_, _: PruningPerformed) => true
case _ => false
}
val builder = dm.DurableDataEnvelope.newBuilder()
View file
@@ -15,7 +15,7 @@ import akka.actor.ActorRef
import akka.actor.Address
import akka.actor.ExtendedActorSystem
import akka.cluster.UniqueAddress
import akka.cluster.ddata.protobuf.msg.{ ReplicatorMessages ⇒ dm }
import akka.cluster.ddata.protobuf.msg.{ ReplicatorMessages => dm }
import akka.serialization._
import akka.protobuf.ByteString
import akka.protobuf.MessageLite
@@ -69,8 +69,8 @@ trait SerializationSupport {
val buffer = new Array[Byte](BufferSize)
@tailrec def readChunk(): Unit = in.read(buffer) match {
case -1 ⇒ ()
case n ⇒
case -1 => ()
case n =>
out.write(buffer, 0, n)
readChunk()
}
@@ -81,9 +81,9 @@ trait SerializationSupport {
}
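The tail-recursive readChunk above is the usual fixed-buffer stream copy. A self-contained version of the whole decompress idea, assuming the gzip format (the helper name gunzip and the 4096-byte buffer are choices made here for the sketch):

import java.io.{ ByteArrayInputStream, ByteArrayOutputStream }
import java.util.zip.GZIPInputStream
import scala.annotation.tailrec

def gunzip(bytes: Array[Byte]): Array[Byte] = {
  val in = new GZIPInputStream(new ByteArrayInputStream(bytes))
  val out = new ByteArrayOutputStream()
  val buffer = new Array[Byte](4096)
  // read returns -1 at end of stream, otherwise the number of bytes read
  @tailrec def readChunk(): Unit = in.read(buffer) match {
    case -1 => ()
    case n =>
      out.write(buffer, 0, n)
      readChunk()
  }
  try readChunk()
  finally in.close()
  out.toByteArray
}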
def addressToProto(address: Address): dm.Address.Builder = address match {
case Address(_, _, Some(host), Some(port)) ⇒
case Address(_, _, Some(host), Some(port)) =>
dm.Address.newBuilder().setHostname(host).setPort(port)
case _ throw new IllegalArgumentException(s"Address [${address}] could not be serialized: host or port missing.")
case _ => throw new IllegalArgumentException(s"Address [${address}] could not be serialized: host or port missing.")
}
def addressFromProto(address: dm.Address): Address =
@@ -108,7 +108,7 @@ trait SerializationSupport {
def versionVectorToProto(versionVector: VersionVector): dm.VersionVector = {
val b = dm.VersionVector.newBuilder()
versionVector.versionsIterator.foreach {
case (node, value) ⇒ b.addEntries(dm.VersionVector.Entry.newBuilder().
case (node, value) => b.addEntries(dm.VersionVector.Entry.newBuilder().
setNode(uniqueAddressToProto(node)).setVersion(value))
}
b.build()
@@ -124,8 +124,8 @@ trait SerializationSupport {
else if (entries.size == 1)
VersionVector(uniqueAddressFromProto(entries.get(0).getNode), entries.get(0).getVersion)
else {
val versions: TreeMap[UniqueAddress, Long] = scala.collection.immutable.TreeMap.from(versionVector.getEntriesList.asScala.iterator.map(entry ⇒
uniqueAddressFromProto(entry.getNode) → entry.getVersion))
val versions: TreeMap[UniqueAddress, Long] = scala.collection.immutable.TreeMap.from(versionVector.getEntriesList.asScala.iterator.map(entry =>
uniqueAddressFromProto(entry.getNode) -> entry.getVersion))
VersionVector(versions)
}
}
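versionVectorFromProto special-cases the empty and single-entry vectors before falling back to a sorted TreeMap; Akka keeps dedicated one-node and many-node version vector representations for exactly this. The shape of that decision, reduced to plain collections; MiniVersionVector is a made-up stand-in with nodes simplified to strings:

import scala.collection.immutable.TreeMap

final case class MiniVersionVector(versions: Map[String, Long])

def fromEntries(entries: List[(String, Long)]): MiniVersionVector = entries match {
  case Nil => MiniVersionVector(Map.empty) // empty fast path
  case (node, version) :: Nil => MiniVersionVector(Map(node -> version)) // single-node fast path
  case many => MiniVersionVector(TreeMap.from(many)) // general, node-sorted form
}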