/*
 * Copyright (C) 2015-2019 Lightbend Inc. <https://www.lightbend.com>
 */

package akka.cluster.ddata

import java.util.concurrent.ThreadLocalRandom

import scala.concurrent.duration._

import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorSystem
import akka.actor.Props
import com.typesafe.config.ConfigFactory

/**
 * This "sample" simulates lots of data entries, and can be used for
 * optimizing replication (e.g. catch-up when adding more nodes).
 */
object LotsOfDataBot {
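
  // Started without arguments, three nodes are launched in the same JVM,
  // using the port arguments 2551, 2552 and 0 (random port).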
  def main(args: Array[String]): Unit = {
    if (args.isEmpty)
      startup(Seq("2551", "2552", "0"))
    else
      startup(args.toIndexedSeq)
  }
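
  // Each port gets its own ActorSystem in this JVM, joining the cluster via the
  // seed nodes defined in the configuration below.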
  def startup(ports: Seq[String]): Unit = {
    ports.foreach { port =>
      // Override the configuration of the port
      val config = ConfigFactory
        .parseString("akka.remote.classic.netty.tcp.port=" + port)
        .withFallback(ConfigFactory.load(ConfigFactory.parseString("""
        passive = off
        max-entries = 100000
        akka.actor.provider = "cluster"
        akka.remote {
          artery.canonical {
            hostname = "127.0.0.1"
            port = 0
          }
        }
        akka.cluster {
          seed-nodes = [
            "akka://ClusterSystem@127.0.0.1:2551",
            "akka://ClusterSystem@127.0.0.1:2552"]

          auto-down-unreachable-after = 10s
        }
        """)))

      // Create an Akka system
      val system = ActorSystem("ClusterSystem", config)
      // Create an actor that handles cluster domain events
      system.actorOf(Props[LotsOfDataBot], name = "dataBot")
    }
  }

  private case object Tick

}

class LotsOfDataBot extends Actor with ActorLogging {
  import LotsOfDataBot._
  import Replicator._
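
  // The implicit SelfUniqueAddress is required by the ORSet add (:+) and remove operations below.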
  implicit val selfUniqueAddress = DistributedData(context.system).selfUniqueAddress
  val replicator = DistributedData(context.system).replicator

  import context.dispatcher
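
  // A passive node only observes replication progress and ticks once per second;
  // an active node generates updates and ticks every 20 ms.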
  val isPassive = context.system.settings.config.getBoolean("passive")

  var tickTask =
    if (isPassive)
      context.system.scheduler.scheduleWithFixedDelay(1.seconds, 1.seconds, self, Tick)
    else
      context.system.scheduler.scheduleWithFixedDelay(20.millis, 20.millis, self, Tick)

  val startTime = System.nanoTime()
  var count = 1L
  val maxEntries = context.system.settings.config.getInt("max-entries")

  def receive = if (isPassive) passive else active

  def active: Receive = {
    case Tick =>
      val loop = if (count >= maxEntries) 1 else 100
      for (_ <- 1 to loop) {
        count += 1
        if (count % 10000 == 0)
          log.info("Reached {} entries", count)
        if (count == maxEntries) {
          log.info("Reached {} entries", count)
          tickTask.cancel()
          tickTask = context.system.scheduler.scheduleWithFixedDelay(1.seconds, 1.seconds, self, Tick)
        }
        val key = ORSetKey[String]((count % maxEntries).toString)
        if (count <= 100)
          replicator ! Subscribe(key, self)
        val s = ThreadLocalRandom.current().nextInt(97, 123).toChar.toString
        if (count <= maxEntries || ThreadLocalRandom.current().nextBoolean()) {
          // add
          replicator ! Update(key, ORSet(), WriteLocal)(_ :+ s)
        } else {
          // remove
          replicator ! Update(key, ORSet(), WriteLocal)(_.remove(s))
        }
      }

    case _: UpdateResponse[_] => // ignore

    case c @ Changed(ORSetKey(id)) =>
      val ORSet(elements) = c.dataValue
      log.info("Current elements: {} -> {}", id, elements)
  }
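
  // In passive mode the bot only asks the Replicator for all key ids and logs
  // how long it took until max-entries entries had been replicated to this node.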
  def passive: Receive = {
    case Tick =>
      if (!tickTask.isCancelled)
        replicator ! GetKeyIds
    case GetKeyIdsResult(keys) =>
      if (keys.size >= maxEntries) {
        tickTask.cancel()
        val duration = (System.nanoTime() - startTime).nanos.toMillis
        log.info("It took {} ms to replicate {} entries", duration, keys.size)
      }
    case c @ Changed(ORSetKey(id)) =>
      val ORSet(elements) = c.dataValue
      log.info("Current elements: {} -> {}", id, elements)
  }

  override def postStop(): Unit = tickTask.cancel()

}