Update to a working version of Scalariform

This commit is contained in:
Björn Antonsson 2016-06-02 14:06:57 +02:00
parent cae070bd93
commit c66ce62d63
616 changed files with 5966 additions and 5436 deletions
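The changes below are mechanical reformatting from the updated Scalariform: ASCII arrows are rewritten to their Unicode aliases (-> becomes →, => becomes ⇒), constructor and method parameter lists are column-aligned under the longest parameter name (which is why the longest parameter in each list appears only as unchanged context), and multi-line calls are wrapped with the opening call on its own line. As rough orientation, an sbt-scalariform configuration producing this style could look like the sketch below; the preference names are real Scalariform options, but whether Akka's build enabled exactly this set is an assumption, not something shown by this commit.

// Hypothetical sketch: sbt-scalariform settings approximating the style in this diff.
import com.typesafe.sbt.SbtScalariform.ScalariformKeys
import scalariform.formatter.preferences._

ScalariformKeys.preferences := ScalariformKeys.preferences.value
  .setPreference(RewriteArrowSymbols, true) // -> becomes →, => becomes ⇒
  .setPreference(AlignParameters, true) // column-align parameter types
  .setPreference(AlignSingleLineCaseStatements, true)
  .setPreference(DoubleIndentClassDeclaration, true)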

View file

@@ -83,8 +83,8 @@ final class GCounter private[akka] (
else state.get(key) match {
case Some(v) ⇒
val tot = v + delta
assignAncestor(new GCounter(state + (key -> tot)))
case None ⇒ assignAncestor(new GCounter(state + (key -> delta)))
assignAncestor(new GCounter(state + (key → tot)))
case None ⇒ assignAncestor(new GCounter(state + (key → delta)))
}
}
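The arrow rewrite seen above is purely cosmetic: in Scala 2, ⇒ is a lexical alias for the => keyword, and → is defined on Predef.ArrowAssoc next to ->, so the reformatted sources parse to the same trees and produce the same bytecode. A self-contained sketch (plain Scala 2, as used here; nothing Akka-specific assumed):

// The Unicode arrows are aliases, not new syntax.
object ArrowAliases extends App {
  val inc: Int ⇒ Int = x ⇒ x + 1 // ⇒ is the same token as =>
  assert(inc(1) == 2)
  assert(("key" → 1) == ("key" -> 1)) // → comes from Predef.ArrowAssoc
}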

View file

@@ -47,7 +47,7 @@ final class LWWMap[A] private[akka] (
/**
* Scala API: All entries of the map.
*/
def entries: Map[String, A] = underlying.entries.map { case (k, r) ⇒ k -> r.value }
def entries: Map[String, A] = underlying.entries.map { case (k, r) ⇒ k → r.value }
/**
* Java API: All entries of the map.

View file

@@ -93,8 +93,8 @@ object LWWRegister {
@SerialVersionUID(1L)
final class LWWRegister[A] private[akka] (
private[akka] val node: UniqueAddress,
val value: A,
val timestamp: Long)
val value:              A,
val timestamp:          Long)
extends ReplicatedData with ReplicatedDataSerialization {
import LWWRegister.{ Clock, defaultClock }

View file

@@ -33,7 +33,7 @@ object ORMap {
*/
@SerialVersionUID(1L)
final class ORMap[A <: ReplicatedData] private[akka] (
private[akka] val keys: ORSet[String],
private[akka] val keys:   ORSet[String],
private[akka] val values: Map[String, A])
extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning {

View file

@@ -52,7 +52,7 @@ final class ORMultiMap[A] private[akka] (private[akka] val underlying: ORMap[ORS
* Scala API: All entries of a multimap where keys are strings and values are sets.
*/
def entries: Map[String, Set[A]] =
underlying.entries.map { case (k, v) ⇒ k -> v.elements }
underlying.entries.map { case (k, v) ⇒ k → v.elements }
/**
* Java API: All entries of a multimap where keys are strings and values are sets.

View file

@@ -201,7 +201,7 @@ object ORSet {
@SerialVersionUID(1L)
final class ORSet[A] private[akka] (
private[akka] val elementsMap: Map[A, ORSet.Dot],
private[akka] val vvector: VersionVector)
private[akka] val vvector:     VersionVector)
extends ReplicatedData with ReplicatedDataSerialization with RemovedNodePruning with FastMerge {
type T = ORSet[A]

View file

@@ -90,18 +90,21 @@ final class PNCounter private[akka] (
else this
override def merge(that: PNCounter): PNCounter =
copy(increments = that.increments.merge(this.increments),
copy(
increments = that.increments.merge(this.increments),
decrements = that.decrements.merge(this.decrements))
override def needPruningFrom(removedNode: UniqueAddress): Boolean =
increments.needPruningFrom(removedNode) || decrements.needPruningFrom(removedNode)
override def prune(removedNode: UniqueAddress, collapseInto: UniqueAddress): PNCounter =
copy(increments = increments.prune(removedNode, collapseInto),
copy(
increments = increments.prune(removedNode, collapseInto),
decrements = decrements.prune(removedNode, collapseInto))
override def pruningCleanup(removedNode: UniqueAddress): PNCounter =
copy(increments = increments.pruningCleanup(removedNode),
copy(
increments = increments.pruningCleanup(removedNode),
decrements = decrements.pruningCleanup(removedNode))
private def copy(increments: GCounter = this.increments, decrements: GCounter = this.decrements): PNCounter =

View file

@@ -34,12 +34,12 @@ final class PNCounterMap private[akka] (
type T = PNCounterMap
/** Scala API */
def entries: Map[String, BigInt] = underlying.entries.map { case (k, c) ⇒ k -> c.value }
def entries: Map[String, BigInt] = underlying.entries.map { case (k, c) ⇒ k → c.value }
/** Java API */
def getEntries: java.util.Map[String, BigInteger] = {
import scala.collection.JavaConverters._
underlying.entries.map { case (k, c) ⇒ k -> c.value.bigInteger }.asJava
underlying.entries.map { case (k, c) ⇒ k → c.value.bigInteger }.asJava
}
/**

View file

@@ -93,13 +93,13 @@ object ReplicatorSettings {
* be configured to worst case in a healthy cluster.
*/
final class ReplicatorSettings(
val role: Option[String],
val gossipInterval: FiniteDuration,
val role:                      Option[String],
val gossipInterval:            FiniteDuration,
val notifySubscribersInterval: FiniteDuration,
val maxDeltaElements: Int,
val dispatcher: String,
val pruningInterval: FiniteDuration,
val maxPruningDissemination: FiniteDuration) {
val maxDeltaElements:          Int,
val dispatcher:                String,
val pruningInterval:           FiniteDuration,
val maxPruningDissemination:   FiniteDuration) {
def withRole(role: String): ReplicatorSettings = copy(role = ReplicatorSettings.roleOption(role))
@@ -126,13 +126,13 @@ final class ReplicatorSettings(
copy(pruningInterval = pruningInterval, maxPruningDissemination = maxPruningDissemination)
private def copy(
role: Option[String] = role,
gossipInterval: FiniteDuration = gossipInterval,
role:                      Option[String] = role,
gossipInterval:            FiniteDuration = gossipInterval,
notifySubscribersInterval: FiniteDuration = notifySubscribersInterval,
maxDeltaElements: Int = maxDeltaElements,
dispatcher: String = dispatcher,
pruningInterval: FiniteDuration = pruningInterval,
maxPruningDissemination: FiniteDuration = maxPruningDissemination): ReplicatorSettings =
maxDeltaElements:          Int = maxDeltaElements,
dispatcher:                String = dispatcher,
pruningInterval:           FiniteDuration = pruningInterval,
maxPruningDissemination:   FiniteDuration = maxPruningDissemination): ReplicatorSettings =
new ReplicatorSettings(role, gossipInterval, notifySubscribersInterval, maxDeltaElements, dispatcher,
pruningInterval, maxPruningDissemination)
}
@@ -471,7 +471,7 @@ object Replicator {
val NotFoundDigest: Digest = ByteString(-1)
final case class DataEnvelope(
data: ReplicatedData,
data:    ReplicatedData,
pruning: Map[UniqueAddress, PruningState] = Map.empty)
extends ReplicatorMessage {
@@ -735,7 +735,8 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val selfUniqueAddress = cluster.selfUniqueAddress
require(!cluster.isTerminated, "Cluster node must not be terminated")
require(role.forall(cluster.selfRoles.contains),
require(
role.forall(cluster.selfRoles.contains),
s"This cluster member [${selfAddress}] doesn't have the role [$role]")
//Start periodic gossip to random nodes in cluster
@@ -899,7 +900,8 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
val merged = envelope.merge(pruningCleanupTombstoned(writeEnvelope)).addSeen(selfAddress)
setData(key, merged)
} else {
log.warning("Wrong type for writing [{}], existing type [{}], got [{}]",
log.warning(
"Wrong type for writing [{}], existing type [{}], got [{}]",
key, existing.getClass.getName, writeEnvelope.data.getClass.getName)
}
case None ⇒
@@ -1048,14 +1050,14 @@ final class Replicator(settings: ReplicatorSettings) extends Actor with ActorLog
if (keys.nonEmpty) {
if (log.isDebugEnabled)
log.debug("Sending gossip to [{}], containing [{}]", sender().path.address, keys.mkString(", "))
val g = Gossip(keys.map(k ⇒ k -> getData(k).get)(collection.breakOut), sendBack = otherDifferentKeys.nonEmpty)
val g = Gossip(keys.map(k ⇒ k → getData(k).get)(collection.breakOut), sendBack = otherDifferentKeys.nonEmpty)
sender() ! g
}
val myMissingKeys = otherKeys diff myKeys
if (myMissingKeys.nonEmpty) {
if (log.isDebugEnabled)
log.debug("Sending gossip status to [{}], requesting missing [{}]", sender().path.address, myMissingKeys.mkString(", "))
val status = Status(myMissingKeys.map(k ⇒ k -> NotFoundDigest)(collection.breakOut), chunk, totChunks)
val status = Status(myMissingKeys.map(k ⇒ k → NotFoundDigest)(collection.breakOut), chunk, totChunks)
sender() ! status
}
}
@@ -1305,12 +1307,12 @@ private[akka] abstract class ReadWriteAggregator extends Actor {
*/
private[akka] object WriteAggregator {
def props(
key: KeyR,
envelope: Replicator.Internal.DataEnvelope,
key:         KeyR,
envelope:    Replicator.Internal.DataEnvelope,
consistency: Replicator.WriteConsistency,
req: Option[Any],
nodes: Set[Address],
replyTo: ActorRef): Props =
req:         Option[Any],
nodes:       Set[Address],
replyTo:     ActorRef): Props =
Props(new WriteAggregator(key, envelope, consistency, req, nodes, replyTo))
.withDeploy(Deploy.local)
}
@@ -1319,12 +1321,12 @@ private[akka] object WriteAggregator {
* INTERNAL API
*/
private[akka] class WriteAggregator(
key: KeyR,
envelope: Replicator.Internal.DataEnvelope,
consistency: Replicator.WriteConsistency,
req: Option[Any],
key:                KeyR,
envelope:           Replicator.Internal.DataEnvelope,
consistency:        Replicator.WriteConsistency,
req:                Option[Any],
override val nodes: Set[Address],
replyTo: ActorRef) extends ReadWriteAggregator {
replyTo:            ActorRef) extends ReadWriteAggregator {
import Replicator._
import Replicator.Internal._
@@ -1384,12 +1386,12 @@ private[akka] class WriteAggregator(
*/
private[akka] object ReadAggregator {
def props(
key: KeyR,
key:         KeyR,
consistency: Replicator.ReadConsistency,
req: Option[Any],
nodes: Set[Address],
localValue: Option[Replicator.Internal.DataEnvelope],
replyTo: ActorRef): Props =
req:         Option[Any],
nodes:       Set[Address],
localValue:  Option[Replicator.Internal.DataEnvelope],
replyTo:     ActorRef): Props =
Props(new ReadAggregator(key, consistency, req, nodes, localValue, replyTo))
.withDeploy(Deploy.local)
@@ -1399,12 +1401,12 @@ private[akka] object ReadAggregator {
* INTERNAL API
*/
private[akka] class ReadAggregator(
key: KeyR,
consistency: Replicator.ReadConsistency,
req: Option[Any],
key:                KeyR,
consistency:        Replicator.ReadConsistency,
req:                Option[Any],
override val nodes: Set[Address],
localValue: Option[Replicator.Internal.DataEnvelope],
replyTo: ActorRef) extends ReadWriteAggregator {
localValue:         Option[Replicator.Internal.DataEnvelope],
replyTo:            ActorRef) extends ReadWriteAggregator {
import Replicator._
import Replicator.Internal._

View file

@@ -262,7 +262,7 @@ final case class OneVersionVector private[akka] (node: UniqueAddress, version: L
private[akka] override def increment(n: UniqueAddress): VersionVector = {
val v = Timestamp.counter.getAndIncrement()
if (n == node) copy(version = v)
else ManyVersionVector(TreeMap(node -> version, n -> v))
else ManyVersionVector(TreeMap(node → version, n → v))
}
/** INTERNAL API */
@@ -282,7 +282,7 @@ final case class OneVersionVector private[akka] (node: UniqueAddress, version: L
that match {
case OneVersionVector(n2, v2) ⇒
if (node == n2) if (version >= v2) this else OneVersionVector(n2, v2)
else ManyVersionVector(TreeMap(node -> version, n2 -> v2))
else ManyVersionVector(TreeMap(node → version, n2 → v2))
case ManyVersionVector(vs2) ⇒
val v2 = vs2.getOrElse(node, Timestamp.Zero)
val mergedVersions =

View file

@@ -52,29 +52,29 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
private val VersionVectorManifest = "L"
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef](
GSetManifest -> gsetFromBinary,
ORSetManifest -> orsetFromBinary,
FlagManifest -> flagFromBinary,
LWWRegisterManifest -> lwwRegisterFromBinary,
GCounterManifest -> gcounterFromBinary,
PNCounterManifest -> pncounterFromBinary,
ORMapManifest -> ormapFromBinary,
LWWMapManifest -> lwwmapFromBinary,
PNCounterMapManifest -> pncountermapFromBinary,
ORMultiMapManifest -> multimapFromBinary,
DeletedDataManifest -> (_ ⇒ DeletedData),
VersionVectorManifest -> versionVectorFromBinary,
GSetManifest → gsetFromBinary,
ORSetManifest → orsetFromBinary,
FlagManifest → flagFromBinary,
LWWRegisterManifest → lwwRegisterFromBinary,
GCounterManifest → gcounterFromBinary,
PNCounterManifest → pncounterFromBinary,
ORMapManifest → ormapFromBinary,
LWWMapManifest → lwwmapFromBinary,
PNCounterMapManifest → pncountermapFromBinary,
ORMultiMapManifest → multimapFromBinary,
DeletedDataManifest → (_ ⇒ DeletedData),
VersionVectorManifest → versionVectorFromBinary,
GSetKeyManifest -> (bytes ⇒ GSetKey(keyIdFromBinary(bytes))),
ORSetKeyManifest -> (bytes ⇒ ORSetKey(keyIdFromBinary(bytes))),
FlagKeyManifest -> (bytes ⇒ FlagKey(keyIdFromBinary(bytes))),
LWWRegisterKeyManifest -> (bytes ⇒ LWWRegisterKey(keyIdFromBinary(bytes))),
GCounterKeyManifest -> (bytes ⇒ GCounterKey(keyIdFromBinary(bytes))),
PNCounterKeyManifest -> (bytes ⇒ PNCounterKey(keyIdFromBinary(bytes))),
ORMapKeyManifest -> (bytes ⇒ ORMapKey(keyIdFromBinary(bytes))),
LWWMapKeyManifest -> (bytes ⇒ LWWMapKey(keyIdFromBinary(bytes))),
PNCounterMapKeyManifest -> (bytes ⇒ PNCounterMapKey(keyIdFromBinary(bytes))),
ORMultiMapKeyManifest -> (bytes ⇒ ORMultiMapKey(keyIdFromBinary(bytes))))
GSetKeyManifest → (bytes ⇒ GSetKey(keyIdFromBinary(bytes))),
ORSetKeyManifest → (bytes ⇒ ORSetKey(keyIdFromBinary(bytes))),
FlagKeyManifest → (bytes ⇒ FlagKey(keyIdFromBinary(bytes))),
LWWRegisterKeyManifest → (bytes ⇒ LWWRegisterKey(keyIdFromBinary(bytes))),
GCounterKeyManifest → (bytes ⇒ GCounterKey(keyIdFromBinary(bytes))),
PNCounterKeyManifest → (bytes ⇒ PNCounterKey(keyIdFromBinary(bytes))),
ORMapKeyManifest → (bytes ⇒ ORMapKey(keyIdFromBinary(bytes))),
LWWMapKeyManifest → (bytes ⇒ LWWMapKey(keyIdFromBinary(bytes))),
PNCounterMapKeyManifest → (bytes ⇒ PNCounterMapKey(keyIdFromBinary(bytes))),
ORMultiMapKeyManifest → (bytes ⇒ ORMultiMapKey(keyIdFromBinary(bytes))))
override def manifest(obj: AnyRef): String = obj match {
case _: ORSet[_] ⇒ ORSetManifest
@@ -284,7 +284,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def gcounterFromProto(gcounter: rd.GCounter): GCounter = {
new GCounter(state = gcounter.getEntriesList.asScala.map(entry ⇒
uniqueAddressFromProto(entry.getNode) -> BigInt(entry.getValue.toByteArray))(breakOut))
uniqueAddressFromProto(entry.getNode) → BigInt(entry.getValue.toByteArray))(breakOut))
}
def pncounterToProto(pncounter: PNCounter): rd.PNCounter =
@@ -322,7 +322,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
VersionVector(uniqueAddressFromProto(entries.get(0).getNode), entries.get(0).getVersion)
else {
val versions: TreeMap[UniqueAddress, Long] = versionVector.getEntriesList.asScala.map(entry ⇒
uniqueAddressFromProto(entry.getNode) -> entry.getVersion)(breakOut)
uniqueAddressFromProto(entry.getNode) → entry.getVersion)(breakOut)
VersionVector(versions)
}
}
@@ -341,7 +341,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def ormapFromProto(ormap: rd.ORMap): ORMap[ReplicatedData] = {
val entries = ormap.getEntriesList.asScala.map(entry ⇒
entry.getKey -> otherMessageFromProto(entry.getValue).asInstanceOf[ReplicatedData]).toMap
entry.getKey → otherMessageFromProto(entry.getValue).asInstanceOf[ReplicatedData]).toMap
new ORMap(
keys = orsetFromProto(ormap.getKeys).asInstanceOf[ORSet[String]],
entries)
@@ -361,7 +361,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def lwwmapFromProto(lwwmap: rd.LWWMap): LWWMap[Any] = {
val entries = lwwmap.getEntriesList.asScala.map(entry ⇒
entry.getKey -> lwwRegisterFromProto(entry.getValue)).toMap
entry.getKey → lwwRegisterFromProto(entry.getValue)).toMap
new LWWMap(new ORMap(
keys = orsetFromProto(lwwmap.getKeys).asInstanceOf[ORSet[String]],
entries))
@@ -381,7 +381,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def pncountermapFromProto(pncountermap: rd.PNCounterMap): PNCounterMap = {
val entries = pncountermap.getEntriesList.asScala.map(entry ⇒
entry.getKey -> pncounterFromProto(entry.getValue)).toMap
entry.getKey → pncounterFromProto(entry.getValue)).toMap
new PNCounterMap(new ORMap(
keys = orsetFromProto(pncountermap.getKeys).asInstanceOf[ORSet[String]],
entries))
@@ -401,7 +401,7 @@ class ReplicatedDataSerializer(val system: ExtendedActorSystem)
def multimapFromProto(multimap: rd.ORMultiMap): ORMultiMap[Any] = {
val entries = multimap.getEntriesList.asScala.map(entry ⇒
entry.getKey -> orsetFromProto(entry.getValue)).toMap
entry.getKey → orsetFromProto(entry.getValue)).toMap
new ORMultiMap(new ORMap(
keys = orsetFromProto(multimap.getKeys).asInstanceOf[ORSet[String]],
entries))

View file

@@ -169,20 +169,20 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
val GossipManifest = "N"
private val fromBinaryMap = collection.immutable.HashMap[String, Array[Byte] ⇒ AnyRef](
GetManifest -> getFromBinary,
GetSuccessManifest -> getSuccessFromBinary,
NotFoundManifest -> notFoundFromBinary,
GetFailureManifest -> getFailureFromBinary,
SubscribeManifest -> subscribeFromBinary,
UnsubscribeManifest -> unsubscribeFromBinary,
ChangedManifest -> changedFromBinary,
DataEnvelopeManifest -> dataEnvelopeFromBinary,
WriteManifest -> writeFromBinary,
WriteAckManifest -> (_ ⇒ WriteAck),
ReadManifest -> readFromBinary,
ReadResultManifest -> readResultFromBinary,
StatusManifest -> statusFromBinary,
GossipManifest -> gossipFromBinary)
GetManifest → getFromBinary,
GetSuccessManifest → getSuccessFromBinary,
NotFoundManifest → notFoundFromBinary,
GetFailureManifest → getFailureFromBinary,
SubscribeManifest → subscribeFromBinary,
UnsubscribeManifest → unsubscribeFromBinary,
ChangedManifest → changedFromBinary,
DataEnvelopeManifest → dataEnvelopeFromBinary,
WriteManifest → writeFromBinary,
WriteAckManifest → (_ ⇒ WriteAck),
ReadManifest → readFromBinary,
ReadResultManifest → readResultFromBinary,
StatusManifest → statusFromBinary,
GossipManifest → gossipFromBinary)
override def manifest(obj: AnyRef): String = obj match {
case _: DataEnvelope ⇒ DataEnvelopeManifest
@@ -243,8 +243,9 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def statusFromBinary(bytes: Array[Byte]): Status = {
val status = dm.Status.parseFrom(bytes)
Status(status.getEntriesList.asScala.map(e ⇒
e.getKey -> AkkaByteString(e.getDigest.toByteArray()))(breakOut),
Status(
status.getEntriesList.asScala.map(e ⇒
e.getKey → AkkaByteString(e.getDigest.toByteArray()))(breakOut),
status.getChunk, status.getTotChunks)
}
@@ -261,8 +262,9 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
private def gossipFromBinary(bytes: Array[Byte]): Gossip = {
val gossip = dm.Gossip.parseFrom(decompress(bytes))
Gossip(gossip.getEntriesList.asScala.map(e ⇒
e.getKey -> dataEnvelopeFromProto(e.getEnvelope))(breakOut),
Gossip(
gossip.getEntriesList.asScala.map(e ⇒
e.getKey → dataEnvelopeFromProto(e.getEnvelope))(breakOut),
sendBack = gossip.getSendBack)
}
@@ -408,7 +410,7 @@ class ReplicatorMessageSerializer(val system: ExtendedActorSystem)
else PruningState.PruningInitialized(pruningEntry.getSeenList.asScala.map(addressFromProto)(breakOut))
val state = PruningState(uniqueAddressFromProto(pruningEntry.getOwnerAddress), phase)
val removed = uniqueAddressFromProto(pruningEntry.getRemovedAddress)
removed -> state
removed → state
}(breakOut)
val data = otherMessageFromProto(dataEnvelope.getData).asInstanceOf[ReplicatedData]
DataEnvelope(data, pruning)

View file

@@ -59,7 +59,7 @@ class JepsenInspiredInsertSpec extends MultiNodeSpec(JepsenInspiredInsertSpec) w
// val totalCount = 2000
val expectedData = (0 until totalCount).toSet
val data: Map[RoleName, Seq[Int]] = {
val nodeIndex = nodes.zipWithIndex.map { case (n, i) ⇒ i -> n }.toMap
val nodeIndex = nodes.zipWithIndex.map { case (n, i) ⇒ i → n }.toMap
(0 until totalCount).groupBy(i ⇒ nodeIndex(i % nodeCount))
}
lazy val myData: Seq[Int] = data(myself)

View file

@@ -115,7 +115,8 @@ class ReplicatorChaosSpec extends MultiNodeSpec(ReplicatorChaosSpec) with STMult
replicator ! Update(KeyA, GCounter(), WriteLocal)(_ + 20)
replicator ! Update(KeyB, PNCounter(), WriteTo(2, timeout))(_ + 20)
replicator ! Update(KeyC, GCounter(), WriteAll(timeout))(_ + 20)
receiveN(3).toSet should be(Set(UpdateSuccess(KeyA, None),
receiveN(3).toSet should be(Set(
UpdateSuccess(KeyA, None),
UpdateSuccess(KeyB, None), UpdateSuccess(KeyC, None)))
replicator ! Update(KeyE, GSet(), WriteLocal)(_ + "e1" + "e2")

View file

@@ -146,7 +146,7 @@ class ReplicatorPruningSpec extends MultiNodeSpec(ReplicatorPruningSpec) with ST
replicator ! Get(KeyC, ReadLocal)
expectMsgPF() {
case g @ GetSuccess(KeyC, _) ⇒
g.get(KeyC).entries should be(Map("x" -> 3L, "y" -> 3L))
g.get(KeyC).entries should be(Map("x" → 3L, "y" → 3L))
g.get(KeyC).needPruningFrom(thirdUniqueAddress) should be(false)
}
}

View file

@@ -526,22 +526,22 @@ class ReplicatorSpec extends MultiNodeSpec(ReplicatorSpec) with STMultiNodeSpec
runOn(second) {
replicator ! Subscribe(KeyH, changedProbe.ref)
replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" -> Flag(enabled = false)))
changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" -> Flag(enabled = false)))
replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" → Flag(enabled = false)))
changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" → Flag(enabled = false)))
}
enterBarrier("update-h1")
runOn(first) {
replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" -> Flag(enabled = true)))
replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("a" → Flag(enabled = true)))
}
runOn(second) {
changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" -> Flag(enabled = true)))
changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(Map("a" → Flag(enabled = true)))
replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("b" -> Flag(enabled = true)))
replicator ! Update(KeyH, ORMap.empty[Flag], writeTwo)(_ + ("b" → Flag(enabled = true)))
changedProbe.expectMsgPF() { case c @ Changed(KeyH) ⇒ c.get(KeyH).entries } should be(
Map("a" -> Flag(enabled = true), "b" -> Flag(enabled = true)))
Map("a" → Flag(enabled = true), "b" → Flag(enabled = true)))
}
enterBarrierAfterTestStep()

View file

@@ -20,7 +20,7 @@ class LWWMapSpec extends WordSpec with Matchers {
"be able to set entries" in {
val m = LWWMap.empty[Int].put(node1, "a", 1, defaultClock[Int]).put(node2, "b", 2, defaultClock[Int])
m.entries should be(Map("a" -> 1, "b" -> 2))
m.entries should be(Map("a" → 1, "b" → 2))
}
"be able to have its entries correctly merged with another LWWMap with other entries" in {
@@ -28,7 +28,7 @@ class LWWMapSpec extends WordSpec with Matchers {
val m2 = LWWMap.empty.put(node2, "c", 3, defaultClock[Int])
// merge both ways
val expected = Map("a" -> 1, "b" -> 2, "c" -> 3)
val expected = Map("a" → 1, "b" → 2, "c" → 3)
(m1 merge m2).entries should be(expected)
(m2 merge m1).entries should be(expected)
}
@@ -40,11 +40,11 @@ class LWWMapSpec extends WordSpec with Matchers {
val merged1 = m1 merge m2
val m3 = merged1.remove(node1, "b")
(merged1 merge m3).entries should be(Map("a" -> 1, "c" -> 3))
(merged1 merge m3).entries should be(Map("a" → 1, "c" → 3))
// but if there is a conflicting update the entry is not removed
val m4 = merged1.put(node2, "b", 22, defaultClock[Int])
(m3 merge m4).entries should be(Map("a" -> 1, "b" -> 22, "c" -> 3))
(m3 merge m4).entries should be(Map("a" → 1, "b" → 22, "c" → 3))
}
"have unapply extractor" in {
@@ -55,7 +55,7 @@ class LWWMapSpec extends WordSpec with Matchers {
case c @ Changed(LWWMapKey("key")) ⇒
val LWWMap(entries3) = c.dataValue
val entries4: Map[String, Long] = entries3
entries4 should be(Map("a" -> 1L))
entries4 should be(Map("a" → 1L))
}
}

View file

@@ -41,7 +41,8 @@ class LocalConcurrencySpec(_system: ActorSystem) extends TestKit(_system)
import LocalConcurrencySpec._
def this() {
this(ActorSystem("LocalConcurrencySpec",
this(ActorSystem(
"LocalConcurrencySpec",
ConfigFactory.parseString("""
akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
akka.remote.netty.tcp.port=0

View file

@@ -197,7 +197,7 @@ class ORMapSpec extends WordSpec with Matchers {
case c @ Changed(ORMapKey("key")) ⇒
val ORMap(entries3) = c.dataValue
val entries4: Map[String, ReplicatedData] = entries3
entries4 should be(Map("a" -> Flag(true), "b" -> Flag(false)))
entries4 should be(Map("a" → Flag(true), "b" → Flag(false)))
}
}

View file

@@ -17,20 +17,20 @@ class ORMultiMapSpec extends WordSpec with Matchers {
"be able to add entries" in {
val m = ORMultiMap().addBinding(node1, "a", "A").addBinding(node1, "b", "B")
m.entries should be(Map("a" -> Set("A"), "b" -> Set("B")))
m.entries should be(Map("a" → Set("A"), "b" → Set("B")))
val m2 = m.addBinding(node1, "a", "C")
m2.entries should be(Map("a" -> Set("A", "C"), "b" -> Set("B")))
m2.entries should be(Map("a" → Set("A", "C"), "b" → Set("B")))
}
"be able to remove entry" in {
val m = ORMultiMap().addBinding(node1, "a", "A").addBinding(node1, "b", "B").removeBinding(node1, "a", "A")
m.entries should be(Map("b" -> Set("B")))
m.entries should be(Map("b" → Set("B")))
}
"be able to replace an entry" in {
val m = ORMultiMap().addBinding(node1, "a", "A").replaceBinding(node1, "a", "A", "B")
m.entries should be(Map("a" -> Set("B")))
m.entries should be(Map("a" → Set("B")))
}
"be able to have its entries correctly merged with another ORMultiMap with other entries" in {
@@ -40,9 +40,9 @@ class ORMultiMapSpec extends WordSpec with Matchers {
// merge both ways
val expectedMerge = Map(
"a" -> Set("A"),
"b" -> Set("B"),
"c" -> Set("C"))
"a" → Set("A"),
"b" → Set("B"),
"c" → Set("C"))
val merged1 = m1 merge m2
merged1.entries should be(expectedMerge)
@@ -67,10 +67,10 @@ class ORMultiMapSpec extends WordSpec with Matchers {
// merge both ways
val expectedMerged = Map(
"a" -> Set("A2"),
"b" -> Set("B1"),
"c" -> Set("C2"),
"d" -> Set("D1", "D2"))
"a" → Set("A2"),
"b" → Set("B1"),
"c" → Set("C2"),
"d" → Set("D1", "D2"))
val merged1 = m1 merge m2
merged1.entries should be(expectedMerged)
@@ -89,8 +89,8 @@ class ORMultiMapSpec extends WordSpec with Matchers {
val m2 = m.put(node1, "a", a - "A1")
val expectedMerged = Map(
"a" -> Set("A2"),
"b" -> Set("B1"))
"a" → Set("A2"),
"b" → Set("B1"))
m2.entries should be(expectedMerged)
}
@@ -104,7 +104,7 @@ class ORMultiMapSpec extends WordSpec with Matchers {
"remove all bindings for a given key" in {
val m = ORMultiMap().addBinding(node1, "a", "A1").addBinding(node1, "a", "A2").addBinding(node1, "b", "B1")
val m2 = m.remove(node1, "a")
m2.entries should be(Map("b" -> Set("B1")))
m2.entries should be(Map("b" → Set("B1")))
}
"have unapply extractor" in {
@@ -116,7 +116,7 @@ class ORMultiMapSpec extends WordSpec with Matchers {
case c @ Changed(ORMultiMapKey("key")) ⇒
val ORMultiMap(entries3) = c.dataValue
val entries4: Map[String, Set[Long]] = entries3
entries4 should be(Map("a" -> Set(1L, 2L), "b" -> Set(3L)))
entries4 should be(Map("a" → Set(1L, 2L), "b" → Set(3L)))
}
}
}

View file

@@ -228,30 +228,30 @@ class ORSetSpec extends WordSpec with Matchers {
"ORSet unit test" must {
"verify subtractDots" in {
val dot = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 2L, nodeD -> 14L, nodeG -> 22L))
val vvector = VersionVector(TreeMap(nodeA -> 4L, nodeB -> 1L, nodeC -> 1L, nodeD -> 14L, nodeE -> 5L, nodeF -> 2L))
val expected = VersionVector(TreeMap(nodeB -> 2L, nodeG -> 22L))
val dot = VersionVector(TreeMap(nodeA → 3L, nodeB → 2L, nodeD → 14L, nodeG → 22L))
val vvector = VersionVector(TreeMap(nodeA → 4L, nodeB → 1L, nodeC → 1L, nodeD → 14L, nodeE → 5L, nodeF → 2L))
val expected = VersionVector(TreeMap(nodeB → 2L, nodeG → 22L))
ORSet.subtractDots(dot, vvector) should be(expected)
}
"verify mergeCommonKeys" in {
val commonKeys: Set[String] = Set("K1", "K2")
val thisDot1 = VersionVector(TreeMap(nodeA -> 3L, nodeD -> 7L))
val thisDot2 = VersionVector(TreeMap(nodeB -> 5L, nodeC -> 2L))
val thisVvector = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 5L, nodeC -> 2L, nodeD -> 7L))
val thisDot1 = VersionVector(TreeMap(nodeA → 3L, nodeD → 7L))
val thisDot2 = VersionVector(TreeMap(nodeB → 5L, nodeC → 2L))
val thisVvector = VersionVector(TreeMap(nodeA → 3L, nodeB → 5L, nodeC → 2L, nodeD → 7L))
val thisSet = new ORSet(
elementsMap = Map("K1" -> thisDot1, "K2" -> thisDot2),
elementsMap = Map("K1" → thisDot1, "K2" → thisDot2),
vvector = thisVvector)
val thatDot1 = VersionVector(nodeA, 3L)
val thatDot2 = VersionVector(nodeB, 6L)
val thatVvector = VersionVector(TreeMap(nodeA -> 3L, nodeB -> 6L, nodeC -> 1L, nodeD -> 8L))
val thatVvector = VersionVector(TreeMap(nodeA → 3L, nodeB → 6L, nodeC → 1L, nodeD → 8L))
val thatSet = new ORSet(
elementsMap = Map("K1" -> thatDot1, "K2" -> thatDot2),
elementsMap = Map("K1" → thatDot1, "K2" → thatDot2),
vvector = thatVvector)
val expectedDots = Map(
"K1" -> VersionVector(nodeA, 3L),
"K2" -> VersionVector(TreeMap(nodeB -> 6L, nodeC -> 2L)))
"K1" → VersionVector(nodeA, 3L),
"K2" → VersionVector(TreeMap(nodeB → 6L, nodeC → 2L)))
ORSet.mergeCommonKeys(commonKeys, thisSet, thatSet) should be(expectedDots)
}
@@ -259,14 +259,14 @@ class ORSetSpec extends WordSpec with Matchers {
"verify mergeDisjointKeys" in {
val keys: Set[Any] = Set("K3", "K4", "K5")
val elements: Map[Any, VersionVector] = Map(
"K3" -> VersionVector(nodeA, 4L),
"K4" -> VersionVector(TreeMap(nodeA -> 3L, nodeD -> 8L)),
"K5" -> VersionVector(nodeA, 2L))
val vvector = VersionVector(TreeMap(nodeA -> 3L, nodeD -> 7L))
val acc: Map[Any, VersionVector] = Map("K1" -> VersionVector(nodeA, 3L))
"K3" → VersionVector(nodeA, 4L),
"K4" → VersionVector(TreeMap(nodeA → 3L, nodeD → 8L)),
"K5" → VersionVector(nodeA, 2L))
val vvector = VersionVector(TreeMap(nodeA → 3L, nodeD → 7L))
val acc: Map[Any, VersionVector] = Map("K1" → VersionVector(nodeA, 3L))
val expectedDots = acc ++ Map(
"K3" -> VersionVector(nodeA, 4L),
"K4" -> VersionVector(nodeD, 8L)) // "a" -> 3 removed, optimized to include only those unseen
"K3" → VersionVector(nodeA, 4L),
"K4" → VersionVector(nodeD, 8L)) // "a" -> 3 removed, optimized to include only those unseen
ORSet.mergeDisjointKeys(keys, elements, vvector, acc) should be(expectedDots)
}

View file

@@ -19,7 +19,7 @@ class PNCounterMapSpec extends WordSpec with Matchers {
"be able to increment and decrement entries" in {
val m = PNCounterMap().increment(node1, "a", 2).increment(node1, "b", 3).decrement(node2, "a", 1)
m.entries should be(Map("a" -> 1, "b" -> 3))
m.entries should be(Map("a" → 1, "b" → 3))
}
"be able to have its entries correctly merged with another ORMap with other entries" in {
@@ -27,7 +27,7 @@ class PNCounterMapSpec extends WordSpec with Matchers {
val m2 = PNCounterMap().increment(node2, "c", 5)
// merge both ways
val expected = Map("a" -> 1, "b" -> 3, "c" -> 7)
val expected = Map("a" → 1, "b" → 3, "c" → 7)
(m1 merge m2).entries should be(expected)
(m2 merge m1).entries should be(expected)
}
@@ -39,11 +39,11 @@ class PNCounterMapSpec extends WordSpec with Matchers {
val merged1 = m1 merge m2
val m3 = merged1.remove(node1, "b")
(merged1 merge m3).entries should be(Map("a" -> 1, "c" -> 7))
(merged1 merge m3).entries should be(Map("a" → 1, "c" → 7))
// but if there is a conflicting update the entry is not removed
val m4 = merged1.increment(node2, "b", 10)
(m3 merge m4).entries should be(Map("a" -> 1, "b" -> 13, "c" -> 7))
(m3 merge m4).entries should be(Map("a" → 1, "b" → 13, "c" → 7))
}
"have unapply extractor" in {
@@ -54,7 +54,7 @@ class PNCounterMapSpec extends WordSpec with Matchers {
case c @ Changed(PNCounterMapKey("key")) ⇒
val PNCounterMap(entries3) = c.dataValue
val entries4: Map[String, BigInt] = entries3
entries4 should be(Map("a" -> 1L, "b" -> 2L))
entries4 should be(Map("a" → 1L, "b" → 2L))
}
}

View file

@@ -68,7 +68,7 @@ class WriteAggregatorSpec extends AkkaSpec("""
val writeMajority = WriteMajority(timeout)
def probes(probe: ActorRef): Map[Address, ActorRef] =
nodes.toSeq.map(_ -> system.actorOf(WriteAggregatorSpec.writeAckAdapterProps(probe))).toMap
nodes.toSeq.map(_ → system.actorOf(WriteAggregatorSpec.writeAckAdapterProps(probe))).toMap
"WriteAggregator" must {
"send to at least N/2+1 replicas when WriteMajority" in {

View file

@@ -25,7 +25,8 @@ import akka.testkit.TestKit
import akka.cluster.UniqueAddress
import com.typesafe.config.ConfigFactory
class ReplicatedDataSerializerSpec extends TestKit(ActorSystem("ReplicatedDataSerializerSpec",
class ReplicatedDataSerializerSpec extends TestKit(ActorSystem(
"ReplicatedDataSerializerSpec",
ConfigFactory.parseString("""
akka.actor.provider=akka.cluster.ClusterActorRefProvider
akka.remote.netty.tcp.port=0

View file

@@ -23,7 +23,8 @@ import akka.util.ByteString
import akka.cluster.UniqueAddress
import com.typesafe.config.ConfigFactory
class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem("ReplicatorMessageSerializerSpec",
class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem(
"ReplicatorMessageSerializerSpec",
ConfigFactory.parseString("""
akka.actor.provider=akka.cluster.ClusterActorRefProvider
akka.remote.netty.tcp.port=0
@@ -64,17 +65,19 @@ class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem("ReplicatorMes
checkSerialization(Changed(keyA)(data1))
checkSerialization(DataEnvelope(data1))
checkSerialization(DataEnvelope(data1, pruning = Map(
address1 -> PruningState(address2, PruningPerformed),
address3 -> PruningState(address2, PruningInitialized(Set(address1.address))))))
address1 → PruningState(address2, PruningPerformed),
address3 → PruningState(address2, PruningInitialized(Set(address1.address))))))
checkSerialization(Write("A", DataEnvelope(data1)))
checkSerialization(WriteAck)
checkSerialization(Read("A"))
checkSerialization(ReadResult(Some(DataEnvelope(data1))))
checkSerialization(ReadResult(None))
checkSerialization(Status(Map("A" -> ByteString.fromString("a"),
"B" -> ByteString.fromString("b")), chunk = 3, totChunks = 10))
checkSerialization(Gossip(Map("A" -> DataEnvelope(data1),
"B" -> DataEnvelope(GSet() + "b" + "c")), sendBack = true))
checkSerialization(Status(Map(
"A" → ByteString.fromString("a"),
"B" → ByteString.fromString("b")), chunk = 3, totChunks = 10))
checkSerialization(Gossip(Map(
"A" → DataEnvelope(data1),
"B" → DataEnvelope(GSet() + "b" + "c")), sendBack = true))
}
}
@@ -141,7 +144,7 @@ class ReplicatorMessageSerializerSpec extends TestKit(ActorSystem("ReplicatorMes
"handle Int wrap around" ignore { // ignored because it takes 20 seconds (but it works)
val cache = new SmallCache[Read, String](2, 5.seconds, _ ⇒ null)
val a = Read("a")
val x = a -> "A"
val x = a → "A"
var n = 0
while (n <= Int.MaxValue - 3) {
cache.add(x)