+cdd #16799 Add Distributed Data module
Previously known as [patriknw/akka-data-replication](https://github.com/patriknw/akka-data-replication), which was originally inspired by [jboner/akka-crdt](https://github.com/jboner/akka-crdt). The functionality is very similar to akka-data-replication 0.11. The most important changes are:

* The package name changed to `akka.cluster.ddata`
* The extension was renamed to `DistributedData`
* The keys changed from strings to classes that carry a unique identifier and the type of the data values, e.g. `ORSetKey[Int]("set2")`
* The optional read consistency parameter was removed from the `Update` message. If you need to read from other replicas before performing the update, first send a `Get` message and then continue with the `Update` when the `GetSuccess` is received, as shown in the sketch below
* `BigInt` is used in `GCounter` and `PNCounter` instead of `Long`
* Improvements to the Java API
* Better documentation
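A minimal sketch of the typed keys and the new Get-then-Update pattern, assuming the 2.4-era `akka.cluster.ddata` API; the actor name, key name, read consistency, and element values are illustrative only:

```scala
import scala.concurrent.duration._
import akka.actor.Actor
import akka.cluster.Cluster
import akka.cluster.ddata._
import akka.cluster.ddata.Replicator._

// Hypothetical actor that reads from a majority of replicas before
// updating, replacing the removed read-consistency parameter of Update.
class SetWriter extends Actor {
  implicit val cluster = Cluster(context.system)
  val replicator = DistributedData(context.system).replicator
  val SetKey = ORSetKey[Int]("set2") // typed key: identifier plus element type

  // read from other replicas first ...
  replicator ! Get(SetKey, ReadMajority(3.seconds))

  def receive = {
    // ... then continue with the Update when the GetSuccess is received
    case g @ GetSuccess(SetKey, _) ⇒
      replicator ! Update(SetKey, g.get(SetKey), WriteLocal)(_ + 42)
    case NotFound(SetKey, _) ⇒
      replicator ! Update(SetKey, ORSet.empty[Int], WriteLocal)(_ + 42)
  }
}
```

Note also that, per the list above, the `value` of a `GCounter` or `PNCounter` is now backed by `BigInt` rather than `Long`.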
parent bf28260cd0
commit cbe5dd2cf5
69 changed files with 40036 additions and 3 deletions
@@ -0,0 +1,233 @@
/**
 * Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
 */
package akka.cluster.ddata

import scala.concurrent.Await
import scala.concurrent.duration._
import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.Deploy
import akka.actor.Props
import akka.cluster.Cluster
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeConfig
import akka.remote.testkit.MultiNodeSpec
import akka.testkit._
import com.typesafe.config.ConfigFactory

object PerformanceSpec extends MultiNodeConfig {
  val n1 = role("n1")
  val n2 = role("n2")
  val n3 = role("n3")
  val n4 = role("n4")
  val n5 = role("n5")

  commonConfig(ConfigFactory.parseString("""
    akka.loglevel = ERROR
    akka.stdout-loglevel = ERROR
    akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
    akka.log-dead-letters = off
    akka.log-dead-letters-during-shutdown = off
    akka.remote.log-remote-lifecycle-events = ERROR
    akka.remote.log-frame-size-exceeding = 1000b
    akka.testconductor.barrier-timeout = 60 s
    akka.cluster.distributed-data.gossip-interval = 1 s
    akka.actor.serialize-messages = off
    """))

  // Props for the reply-counting actor, forced to deploy locally.
  def countDownProps(latch: TestLatch): Props = Props(new CountDown(latch)).withDeploy(Deploy.local)

  // Counts down the latch for every reply it receives and stops itself
  // once the latch is open.
  class CountDown(latch: TestLatch) extends Actor {
    def receive = {
      case _ ⇒
        latch.countDown()
        if (latch.isOpen)
          context.stop(self)
    }
  }

}

// One class per node; the multi-node testkit runs each in a separate JVM.
class PerformanceSpecMultiJvmNode1 extends PerformanceSpec
class PerformanceSpecMultiJvmNode2 extends PerformanceSpec
class PerformanceSpecMultiJvmNode3 extends PerformanceSpec
class PerformanceSpecMultiJvmNode4 extends PerformanceSpec
class PerformanceSpecMultiJvmNode5 extends PerformanceSpec

class PerformanceSpec extends MultiNodeSpec(PerformanceSpec) with STMultiNodeSpec with ImplicitSender {
  import PerformanceSpec._
  import Replicator._

  override def initialParticipants = roles.size

  implicit val cluster = Cluster(system)
  val replicator = DistributedData(system).replicator
  val timeout = 3.seconds.dilated
  val factor = 1 // use 3 here for serious tuning
  val repeatCount = 3 // use at least 10 here for serious tuning

  // Joins `from` to the cluster node `to` and waits at a barrier.
  def join(from: RoleName, to: RoleName): Unit = {
    runOn(from) {
      cluster join node(to).address
    }
    enterBarrier(from.name + "-joined")
  }

  // Runs `block` n times per key on n1, waits until all replies have
  // arrived, optionally waits until the expected data has replicated to
  // all nodes, and prints the measured throughput.
  def repeat(description: String, keys: Iterable[ORSetKey[Int]], n: Int, expectedAfterReplication: Option[Set[Int]] = None)(
    block: (ORSetKey[Int], Int, ActorRef) ⇒ Unit, afterEachKey: ORSetKey[Int] ⇒ Unit = _ ⇒ ()): Unit = {

    keys.foreach { key ⇒
      val startTime = System.nanoTime()
      runOn(n1) {
        val latch = TestLatch(n)
        val replyTo = system.actorOf(countDownProps(latch))

        var i = 0
        while (i < n) {
          block(key, i, replyTo)
          i += 1
        }
        Await.ready(latch, 5.seconds + (1.second * factor))
      }
      expectedAfterReplication.foreach { expected ⇒
        enterBarrier("repeat-" + key + "-before-awaitReplicated")
        awaitReplicated(key, expected)
        enterBarrier("repeat-" + key + "-after-awaitReplicated")
      }
      runOn(n1) {
        val endTime = System.nanoTime()
        val durationMs = (endTime - startTime).nanos.toMillis
        val tps = (n * 1000.0 / durationMs).toInt
        println(s"## $n $description took $durationMs ms, $tps TPS")
      }

      afterEachKey(key)
      enterBarrier("repeat-" + key + "-done")
    }
  }

  def awaitReplicated(keys: Iterable[ORSetKey[Int]], expectedData: Set[Int]): Unit =
    keys.foreach { key ⇒ awaitReplicated(key, expectedData) }

  def awaitReplicated(key: ORSetKey[Int], expectedData: Set[Int]): Unit = {
    within(20.seconds) {
      awaitAssert {
        val readProbe = TestProbe()
        replicator.tell(Get(key, ReadLocal), readProbe.ref)
        val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) ⇒ g.get(key) }
        result.elements should be(expectedData)
      }
    }
  }
"Performance" must {
|
||||
|
||||
"setup cluster" in {
|
||||
roles.foreach { join(_, n1) }
|
||||
|
||||
within(10.seconds) {
|
||||
awaitAssert {
|
||||
replicator ! GetReplicaCount
|
||||
expectMsg(ReplicaCount(roles.size))
|
||||
}
|
||||
}
|
||||
|
||||
enterBarrier("after-setup")
|
||||
}
|
||||
|
||||
"be great for ORSet Update WriteLocal" in {
|
||||
val keys = (1 to repeatCount).map(n ⇒ ORSetKey[Int]("A" + n))
|
||||
val n = 1000 * factor
|
||||
val expectedData = (0 until n).toSet
|
||||
repeat("ORSet Update WriteLocal", keys, n)({ (key, i, replyTo) ⇒
|
||||
replicator.tell(Update(key, ORSet(), WriteLocal)(_ + i), replyTo)
|
||||
}, key ⇒ awaitReplicated(key, expectedData))
|
||||
|
||||
enterBarrier("after-1")
|
||||
}
|
||||
|
||||
"be blazingly fast for ORSet Get ReadLocal" in {
|
||||
val keys = (1 to repeatCount).map(n ⇒ ORSetKey[Int]("A" + n))
|
||||
repeat("Get ReadLocal", keys, 100000 * factor) { (key, i, replyTo) ⇒
|
||||
replicator.tell(Get(key, ReadLocal), replyTo)
|
||||
}
|
||||
enterBarrier("after-2")
|
||||
}
|
||||
|
||||
"be good for ORSet Update WriteLocal and gossip replication" in {
|
||||
val keys = (1 to repeatCount).map(n ⇒ ORSetKey[Int]("B" + n))
|
||||
val n = 200 * factor
|
||||
val expected = Some((0 until n).toSet)
|
||||
repeat("ORSet Update WriteLocal + gossip", keys, n, expected) { (key, i, replyTo) ⇒
|
||||
replicator.tell(Update(key, ORSet(), WriteLocal)(_ + i), replyTo)
|
||||
}
|
||||
enterBarrier("after-3")
|
||||
}
|
||||
|
||||
"be good for ORSet Update WriteLocal and gossip of existing keys" in {
|
||||
val keys = (1 to repeatCount).map(n ⇒ ORSetKey[Int]("B" + n))
|
||||
val n = 200 * factor
|
||||
val expected = Some((0 until n).toSet ++ (0 until n).map(-_).toSet)
|
||||
repeat("ORSet Update WriteLocal existing + gossip", keys, n, expected) { (key, i, replyTo) ⇒
|
||||
replicator.tell(Update(key, ORSet(), WriteLocal)(_ + (-i)), replyTo)
|
||||
}
|
||||
enterBarrier("after-4")
|
||||
}
|
||||
|
||||
"be good for ORSet Update WriteTwo and gossip replication" in {
|
||||
val keys = (1 to repeatCount).map(n ⇒ ORSetKey[Int]("C" + n))
|
||||
val n = 200 * factor
|
||||
val expected = Some((0 until n).toSet)
|
||||
val writeTwo = WriteTo(2, timeout)
|
||||
repeat("ORSet Update WriteTwo + gossip", keys, n, expected) { (key, i, replyTo) ⇒
|
||||
replicator.tell(Update(key, ORSet(), writeTwo)(_ + i), replyTo)
|
||||
}
|
||||
enterBarrier("after-5")
|
||||
}
|
||||
|
||||
"be awesome for GCounter Update WriteLocal" in {
|
||||
val startTime = System.nanoTime()
|
||||
val n = 1000 * factor
|
||||
val key = GCounterKey("D")
|
||||
runOn(n1, n2, n3) {
|
||||
val latch = TestLatch(n)
|
||||
val replyTo = system.actorOf(countDownProps(latch))
|
||||
for (_ ← 0 until n)
|
||||
replicator.tell(Update(key, GCounter(), WriteLocal)(_ + 1), replyTo)
|
||||
Await.ready(latch, 5.seconds + (1.second * factor))
|
||||
enterBarrier("update-done-6")
|
||||
runOn(n1) {
|
||||
val endTime = System.nanoTime()
|
||||
val durationMs = (endTime - startTime).nanos.toMillis
|
||||
val tps = (3 * n * 1000.0 / durationMs).toInt
|
||||
println(s"## ${3 * n} GCounter Update took $durationMs ms, $tps TPS")
|
||||
}
|
||||
}
|
||||
runOn(n4, n5) {
|
||||
enterBarrier("update-done-6")
|
||||
}
|
||||
|
||||
within(20.seconds) {
|
||||
awaitAssert {
|
||||
val readProbe = TestProbe()
|
||||
replicator.tell(Get(key, ReadLocal), readProbe.ref)
|
||||
val result = readProbe.expectMsgPF() { case g @ GetSuccess(`key`, _) ⇒ g.get(key) }
|
||||
result.value should be(3 * n)
|
||||
}
|
||||
}
|
||||
enterBarrier("replication-done-6")
|
||||
runOn(n1) {
|
||||
val endTime = System.nanoTime()
|
||||
val durationMs = (endTime - startTime).nanos.toMillis
|
||||
val tps = (n * 1000.0 / durationMs).toInt
|
||||
println(s"## $n GCounter Update + gossip took $durationMs ms, $tps TPS")
|
||||
}
|
||||
|
||||
enterBarrier("after-6")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
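The `STMultiNodeSpec` trait mixed in by `PerformanceSpec` is not part of this file. A minimal sketch of how such a trait is conventionally defined in Akka's multi-node test tree, assuming ScalaTest; everything beyond the `MultiNodeSpecCallbacks` hooks is convention, not taken from this diff:

```scala
import akka.remote.testkit.MultiNodeSpecCallbacks
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }

// Hooks the MultiNodeSpec lifecycle into ScalaTest so that
// WordSpec-style tests run on all participating JVMs.
trait STMultiNodeSpec extends MultiNodeSpecCallbacks
  with WordSpecLike with Matchers with BeforeAndAfterAll {

  override def beforeAll() = multiNodeSpecBeforeAll()

  override def afterAll() = multiNodeSpecAfterAll()
}
```

Such specs are typically run with the sbt-multi-jvm plugin, e.g. `multi-jvm:test-only akka.cluster.ddata.PerformanceSpec`, which starts one JVM per `PerformanceSpecMultiJvmNode*` class.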