Merge branch 'master' into actor-ref-provider-logging

commit 7770ea7ec9
351 changed files with 11456 additions and 2421 deletions

@@ -1,3 +1,5 @@
pullRequests.frequency = "@monthly"

updates.ignore = [
  { groupId = "com.google.protobuf", artifactId = "protobuf-java" },
  { groupId = "org.scalameta", artifactId = "scalafmt-core" },

@@ -17,3 +19,4 @@ updates.ignore = [
  { groupId = "org.mockito", artifactId = "mockito-core" }
]

updatePullRequests = false
@@ -31,6 +31,7 @@ ignored-files = [

//ignored packages
ignored-packages = [
  "docs",
  "doc",
  "jdoc"
]

@@ -38,7 +39,7 @@ ignored-packages = [
//sort imports, see https://github.com/NeQuissimus/sort-imports
SortImports.asciiSort = false
SortImports.blocks = [
  "java.",
  "re:javax?\\.",
  "scala.",
  "*",
  "com.sun."
@@ -205,8 +205,8 @@ target PR branch you can do so by setting the PR_TARGET_BRANCH environment varia
PR_TARGET_BRANCH=origin/example sbt validatePullRequest
```

If you have already run all tests and now just need to check that everything is formatted and or mima passes there
are a set of `all*` commands aliases for running `test:compile` (also formats), `mimaReportBinaryIssues`, and `validateCompile`
If you already ran all tests and just need to check formatting and mima, there
is a set of `all*` command aliases that run `test:compile` (also formats), `mimaReportBinaryIssues`, and `validateCompile`
(compiles `multi-jvm` if enabled for that project). See `build.sbt` or use completion to find the most appropriate one
e.g. `allCluster`, `allTyped`.
@@ -33,7 +33,14 @@ private[akka] final class BehaviorTestKitImpl[T](_path: ActorPath, _initialBehav
  private[akka] def as[U]: BehaviorTestKitImpl[U] = this.asInstanceOf[BehaviorTestKitImpl[U]]

  private var currentUncanonical = _initialBehavior
  private var current = Behavior.validateAsInitial(Behavior.start(_initialBehavior, context))
  private var current = {
    try {
      context.setCurrentActorThread()
      Behavior.validateAsInitial(Behavior.start(_initialBehavior, context))
    } finally {
      context.clearCurrentActorThread()
    }
  }

  // execute any future tasks scheduled in Actor's constructor
  runAllTasks()
@@ -122,8 +129,13 @@ private[akka] final class BehaviorTestKitImpl[T](_path: ActorPath, _initialBehav

  override def run(message: T): Unit = {
    try {
      currentUncanonical = Behavior.interpretMessage(current, context, message)
      current = Behavior.canonicalize(currentUncanonical, current, context)
      context.setCurrentActorThread()
      try {
        currentUncanonical = Behavior.interpretMessage(current, context, message)
        current = Behavior.canonicalize(currentUncanonical, current, context)
      } finally {
        context.clearCurrentActorThread()
      }
      runAllTasks()
    } catch handleException
  }
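With the calling thread now marked around construction and `run()`, an `ActorContext` leaked out of the behavior can no longer be used between messages driven by the testkit. A minimal, hedged sketch of how that is expected to surface (the leaked variable and printed message are illustrative, not part of this commit):

```scala
import akka.actor.testkit.typed.scaladsl.BehaviorTestKit
import akka.actor.typed.scaladsl.{ ActorContext, Behaviors }

object ThreadCheckSketch extends App {
  @volatile var leaked: ActorContext[String] = _

  val kit = BehaviorTestKit(Behaviors.setup[String] { ctx =>
    leaked = ctx // deliberately leak the context out of the actor
    Behaviors.receiveMessage(_ => Behaviors.same)
  })

  kit.run("hello") // context access inside run() happens on the marked thread and is fine

  // between messages no thread is marked, so this should now fail fast
  try leaked.children
  catch { case e: UnsupportedOperationException => println(e.getMessage) }
}
```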
@@ -6,9 +6,10 @@ package akka.actor.testkit.typed.internal
import org.slf4j.LoggerFactory
import org.slf4j.event.Level

import akka.annotation.InternalApi

import scala.annotation.tailrec

/**
 * INTERNAL API
 */

@@ -16,9 +17,17 @@ import akka.annotation.InternalApi
  def loggerNameOrRoot(loggerName: String): String =
    if (loggerName == "") org.slf4j.Logger.ROOT_LOGGER_NAME else loggerName

  def getLogbackLogger(loggerName: String): ch.qos.logback.classic.Logger = {
  def getLogbackLogger(loggerName: String): ch.qos.logback.classic.Logger =
    getLogbackLoggerInternal(loggerName, 50)

  @tailrec
  private def getLogbackLoggerInternal(loggerName: String, count: Int): ch.qos.logback.classic.Logger = {
    LoggerFactory.getLogger(loggerNameOrRoot(loggerName)) match {
      case logger: ch.qos.logback.classic.Logger => logger
      case logger: ch.qos.logback.classic.Logger => logger
      case _: org.slf4j.helpers.SubstituteLogger if count > 0 =>
        // Wait for logging initialisation http://www.slf4j.org/codes.html#substituteLogger
        Thread.sleep(50)
        getLogbackLoggerInternal(loggerName, count - 1)
      case null =>
        throw new IllegalArgumentException(s"Couldn't find logger for [$loggerName].")
      case other =>
@ -87,17 +87,25 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T
|
|||
throw new UnsupportedOperationException(
|
||||
"No classic ActorContext available with the stubbed actor context, to spawn materializers and run streams you will need a real actor")
|
||||
|
||||
override def children: Iterable[ActorRef[Nothing]] = _children.values.map(_.context.self)
|
||||
override def children: Iterable[ActorRef[Nothing]] = {
|
||||
checkCurrentActorThread()
|
||||
_children.values.map(_.context.self)
|
||||
}
|
||||
def childrenNames: Iterable[String] = _children.keys
|
||||
|
||||
override def child(name: String): Option[ActorRef[Nothing]] = _children.get(name).map(_.context.self)
|
||||
override def child(name: String): Option[ActorRef[Nothing]] = {
|
||||
checkCurrentActorThread()
|
||||
_children.get(name).map(_.context.self)
|
||||
}
|
||||
|
||||
override def spawnAnonymous[U](behavior: Behavior[U], props: Props = Props.empty): ActorRef[U] = {
|
||||
checkCurrentActorThread()
|
||||
val btk = new BehaviorTestKitImpl[U]((path / childName.next()).withUid(rnd().nextInt()), behavior)
|
||||
_children += btk.context.self.path.name -> btk
|
||||
btk.context.self
|
||||
}
|
||||
override def spawn[U](behavior: Behavior[U], name: String, props: Props = Props.empty): ActorRef[U] =
|
||||
override def spawn[U](behavior: Behavior[U], name: String, props: Props = Props.empty): ActorRef[U] = {
|
||||
checkCurrentActorThread()
|
||||
_children.get(name) match {
|
||||
case Some(_) => throw classic.InvalidActorNameException(s"actor name $name is already taken")
|
||||
case None =>
|
||||
|
|
@ -105,12 +113,14 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T
|
|||
_children += name -> btk
|
||||
btk.context.self
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Do not actually stop the child inbox, only simulate the liveness check.
|
||||
* Removal is asynchronous, explicit removeInbox is needed from outside afterwards.
|
||||
*/
|
||||
override def stop[U](child: ActorRef[U]): Unit = {
|
||||
checkCurrentActorThread()
|
||||
if (child.path.parent != self.path)
|
||||
throw new IllegalArgumentException(
|
||||
"Only direct children of an actor can be stopped through the actor context, " +
|
||||
|
|
@ -120,11 +130,21 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T
|
|||
_children -= child.path.name
|
||||
}
|
||||
}
|
||||
override def watch[U](other: ActorRef[U]): Unit = ()
|
||||
override def watchWith[U](other: ActorRef[U], message: T): Unit = ()
|
||||
override def unwatch[U](other: ActorRef[U]): Unit = ()
|
||||
override def setReceiveTimeout(d: FiniteDuration, message: T): Unit = ()
|
||||
override def cancelReceiveTimeout(): Unit = ()
|
||||
override def watch[U](other: ActorRef[U]): Unit = {
|
||||
checkCurrentActorThread()
|
||||
}
|
||||
override def watchWith[U](other: ActorRef[U], message: T): Unit = {
|
||||
checkCurrentActorThread()
|
||||
}
|
||||
override def unwatch[U](other: ActorRef[U]): Unit = {
|
||||
checkCurrentActorThread()
|
||||
}
|
||||
override def setReceiveTimeout(d: FiniteDuration, message: T): Unit = {
|
||||
checkCurrentActorThread()
|
||||
}
|
||||
override def cancelReceiveTimeout(): Unit = {
|
||||
checkCurrentActorThread()
|
||||
}
|
||||
|
||||
override def scheduleOnce[U](delay: FiniteDuration, target: ActorRef[U], message: U): classic.Cancellable =
|
||||
new classic.Cancellable {
|
||||
|
|
@ -186,11 +206,20 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T
|
|||
|
||||
override def toString: String = s"Inbox($self)"
|
||||
|
||||
override def log: Logger = logger
|
||||
override def log: Logger = {
|
||||
checkCurrentActorThread()
|
||||
logger
|
||||
}
|
||||
|
||||
override def setLoggerName(name: String): Unit = () // nop as we don't track logger
|
||||
override def setLoggerName(name: String): Unit = {
|
||||
// nop as we don't track logger
|
||||
checkCurrentActorThread()
|
||||
}
|
||||
|
||||
override def setLoggerName(clazz: Class[_]): Unit = () // nop as we don't track logger
|
||||
override def setLoggerName(clazz: Class[_]): Unit = {
|
||||
// nop as we don't track logger
|
||||
checkCurrentActorThread()
|
||||
}
|
||||
|
||||
/**
|
||||
* The log entries logged through context.log.{debug, info, warn, error} are captured and can be inspected through
|
||||
@ -5,12 +5,11 @@
|
|||
package akka.actor
|
||||
|
||||
import java.util.UUID.{ randomUUID => newUuid }
|
||||
import java.util.concurrent.CountDownLatch
|
||||
import java.util.concurrent.atomic._
|
||||
|
||||
import scala.concurrent.Await
|
||||
|
||||
import scala.concurrent.{ Await, Future }
|
||||
import org.scalatest.BeforeAndAfterEach
|
||||
|
||||
import akka.actor.Actor._
|
||||
import akka.pattern.ask
|
||||
import akka.testkit._
|
||||
|
|
@ -19,7 +18,7 @@ object ActorLifeCycleSpec {
|
|||
|
||||
class LifeCycleTestActor(testActor: ActorRef, id: String, generationProvider: AtomicInteger) extends Actor {
|
||||
def report(msg: Any) = testActor ! message(msg)
|
||||
def message(msg: Any): Tuple3[Any, String, Int] = (msg, id, currentGen)
|
||||
def message(msg: Any): (Any, String, Int) = (msg, id, currentGen)
|
||||
val currentGen = generationProvider.getAndIncrement()
|
||||
override def preStart(): Unit = { report("preStart") }
|
||||
override def postStop(): Unit = { report("postStop") }
|
||||
|
|
@ -151,4 +150,41 @@ class ActorLifeCycleSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitS
|
|||
}
|
||||
}
|
||||
|
||||
"have a non null context after termination" in {
|
||||
class StopBeforeFutureFinishes(val latch: CountDownLatch) extends Actor {
|
||||
import context.dispatcher
|
||||
import akka.pattern._
|
||||
|
||||
override def receive: Receive = {
|
||||
case "ping" =>
|
||||
val replyTo = sender()
|
||||
|
||||
context.stop(self)
|
||||
|
||||
Future {
|
||||
latch.await()
|
||||
Thread.sleep(50)
|
||||
"po"
|
||||
}
|
||||
// Here, we implicitly close over the actor instance and access the context
|
||||
// when the flatMap thunk is run. Previously, the context was nulled when the actor
|
||||
// was terminated. This isn't done any more. Still, the pattern of `import context.dispatcher`
|
||||
// is discouraged as closing over `context` is unsafe in general.
|
||||
.flatMap(x => Future { x + "ng" } /* implicitly: (this.context.dispatcher) */ )
|
||||
.recover { case _: NullPointerException => "npe" }
|
||||
.pipeTo(replyTo)
|
||||
}
|
||||
}
|
||||
|
||||
val latch = new CountDownLatch(1)
|
||||
val actor = system.actorOf(Props(new StopBeforeFutureFinishes(latch)))
|
||||
watch(actor)
|
||||
|
||||
actor ! "ping"
|
||||
|
||||
expectTerminated(actor)
|
||||
latch.countDown()
|
||||
|
||||
expectMsg("pong")
|
||||
}
|
||||
}
|
||||
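The test's comment notes that `import context.dispatcher` closes over `context`, which is unsafe in general. A hedged sketch of the usually recommended alternative: capture everything the future needs into stable locals before going asynchronous and reply via `pipeTo` (actor and message names here are illustrative):

```scala
import scala.concurrent.{ ExecutionContext, Future }

import akka.actor.{ Actor, ActorRef }
import akka.pattern.pipe

class SaferAsyncActor extends Actor {
  // capture the dispatcher value once; the Future body then never touches `this.context`
  private implicit val ec: ExecutionContext = context.dispatcher

  def receive: Receive = {
    case "ping" =>
      val replyTo: ActorRef = sender() // also capture the sender before the async boundary
      Future("po").map(_ + "ng").pipeTo(replyTo)
  }
}
```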
@ -18,6 +18,10 @@ import akka.testkit._
|
|||
class LocalDeathWatchSpec extends AkkaSpec with ImplicitSender with DefaultTimeout with DeathWatchSpec
|
||||
|
||||
object DeathWatchSpec {
|
||||
object Watcher {
|
||||
def props(target: ActorRef, testActor: ActorRef) =
|
||||
Props(classOf[Watcher], target, testActor)
|
||||
}
|
||||
class Watcher(target: ActorRef, testActor: ActorRef) extends Actor {
|
||||
context.watch(target)
|
||||
def receive = {
|
||||
|
|
@ -26,9 +30,6 @@ object DeathWatchSpec {
|
|||
}
|
||||
}
|
||||
|
||||
def props(target: ActorRef, testActor: ActorRef) =
|
||||
Props(classOf[Watcher], target, testActor)
|
||||
|
||||
class EmptyWatcher(target: ActorRef) extends Actor {
|
||||
context.watch(target)
|
||||
def receive = Actor.emptyBehavior
|
||||
|
|
@ -70,6 +71,40 @@ object DeathWatchSpec {
|
|||
final case class FF(fail: Failed)
|
||||
|
||||
final case class Latches(t1: TestLatch, t2: TestLatch) extends NoSerializationVerificationNeeded
|
||||
|
||||
object WatchWithVerifier {
|
||||
case class WatchThis(ref: ActorRef)
|
||||
case object Watching
|
||||
case class CustomWatchMsg(ref: ActorRef)
|
||||
case class StartStashing(numberOfMessagesToStash: Int)
|
||||
case object StashingStarted
|
||||
|
||||
def props(probe: ActorRef) = Props(new WatchWithVerifier(probe))
|
||||
}
|
||||
class WatchWithVerifier(probe: ActorRef) extends Actor with Stash {
|
||||
import WatchWithVerifier._
|
||||
private var stashing = false
|
||||
private var stashNMessages = 0
|
||||
|
||||
override def receive: Receive = {
|
||||
case StartStashing(messagesToStash) =>
|
||||
stashing = true
|
||||
stashNMessages = messagesToStash
|
||||
sender() ! StashingStarted
|
||||
case WatchThis(ref) =>
|
||||
context.watchWith(ref, CustomWatchMsg(ref))
|
||||
sender() ! Watching
|
||||
case _ if stashing =>
|
||||
stash()
|
||||
stashNMessages -= 1
|
||||
if (stashNMessages == 0) {
|
||||
stashing = false
|
||||
unstashAll()
|
||||
}
|
||||
case msg: CustomWatchMsg =>
|
||||
probe ! msg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@silent
|
||||
|
|
@ -79,7 +114,8 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout =>
|
|||
|
||||
lazy val supervisor = system.actorOf(Props(classOf[Supervisor], SupervisorStrategy.defaultStrategy), "watchers")
|
||||
|
||||
def startWatching(target: ActorRef) = Await.result((supervisor ? props(target, testActor)).mapTo[ActorRef], 3 seconds)
|
||||
def startWatching(target: ActorRef) =
|
||||
Await.result((supervisor ? Watcher.props(target, testActor)).mapTo[ActorRef], 3 seconds)
|
||||
|
||||
"The Death Watch" must {
|
||||
def expectTerminationOf(actorRef: ActorRef) =
|
||||
|
|
@ -244,6 +280,31 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout =>
|
|||
w ! Identify(())
|
||||
expectMsg(ActorIdentity((), Some(w)))
|
||||
}
|
||||
|
||||
"watch with custom message" in {
|
||||
val verifierProbe = TestProbe()
|
||||
val verifier = system.actorOf(WatchWithVerifier.props(verifierProbe.ref))
|
||||
val subject = system.actorOf(Props[EmptyActor]())
|
||||
verifier ! WatchWithVerifier.WatchThis(subject)
|
||||
expectMsg(WatchWithVerifier.Watching)
|
||||
|
||||
subject ! PoisonPill
|
||||
verifierProbe.expectMsg(WatchWithVerifier.CustomWatchMsg(subject))
|
||||
}
|
||||
|
||||
// Coverage for #29101
|
||||
"stash watchWith termination message correctly" in {
|
||||
val verifierProbe = TestProbe()
|
||||
val verifier = system.actorOf(WatchWithVerifier.props(verifierProbe.ref))
|
||||
val subject = system.actorOf(Props[EmptyActor]())
|
||||
verifier ! WatchWithVerifier.WatchThis(subject)
|
||||
expectMsg(WatchWithVerifier.Watching)
|
||||
verifier ! WatchWithVerifier.StartStashing(numberOfMessagesToStash = 1)
|
||||
expectMsg(WatchWithVerifier.StashingStarted)
|
||||
|
||||
subject ! PoisonPill
|
||||
verifierProbe.expectMsg(WatchWithVerifier.CustomWatchMsg(subject))
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
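For readers unfamiliar with `watchWith`, the new `WatchWithVerifier` boils down to registering a domain-specific termination message instead of handling the default `Terminated`. A condensed, hedged sketch using the classic API (the `Guardian`/`ChildDied` names are illustrative):

```scala
import akka.actor.{ Actor, ActorRef, Props }

object Guardian {
  final case class ChildDied(ref: ActorRef)
  def props(child: ActorRef): Props = Props(new Guardian(child))
}

class Guardian(child: ActorRef) extends Actor {
  import Guardian._

  // instead of context.watch(child) + matching on Terminated(child),
  // deliver a custom message when the watched actor stops
  context.watchWith(child, ChildDied(child))

  def receive: Receive = {
    case ChildDied(ref) => context.system.log.info("child {} terminated", ref)
  }
}
```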
@ -34,16 +34,17 @@ object FailingTestExtension extends ExtensionId[FailingTestExtension] with Exten
|
|||
class TestException extends IllegalArgumentException("ERR") with NoStackTrace
|
||||
}
|
||||
|
||||
object InstanceCountingExtension extends ExtensionId[DummyExtensionImpl] with ExtensionIdProvider {
|
||||
object InstanceCountingExtension extends ExtensionId[InstanceCountingExtension] with ExtensionIdProvider {
|
||||
val createCount = new AtomicInteger(0)
|
||||
override def createExtension(system: ExtendedActorSystem): DummyExtensionImpl = {
|
||||
createCount.addAndGet(1)
|
||||
new DummyExtensionImpl
|
||||
override def createExtension(system: ExtendedActorSystem): InstanceCountingExtension = {
|
||||
new InstanceCountingExtension
|
||||
}
|
||||
override def lookup(): ExtensionId[_ <: Extension] = this
|
||||
}
|
||||
|
||||
class DummyExtensionImpl extends Extension
|
||||
class InstanceCountingExtension extends Extension {
|
||||
InstanceCountingExtension.createCount.incrementAndGet()
|
||||
}
|
||||
|
||||
// Don't place inside ActorSystemSpec object, since it will not be garbage collected and reference to system remains
|
||||
class FailingTestExtension(val system: ExtendedActorSystem) extends Extension {
|
||||
|
|
@ -111,12 +112,33 @@ class ExtensionSpec extends AnyWordSpec with Matchers {
|
|||
shutdownActorSystem(system)
|
||||
}
|
||||
|
||||
"allow for auto-loading of library-extensions" in {
|
||||
"allow for auto-loading of library-extensions from reference.conf" in {
|
||||
import akka.util.ccompat.JavaConverters._
|
||||
// could be initialized by other tests, but assuming tests are not running in parallel
|
||||
val countBefore = InstanceCountingExtension.createCount.get()
|
||||
val system = ActorSystem("extensions")
|
||||
val listedExtensions = system.settings.config.getStringList("akka.library-extensions")
|
||||
listedExtensions.size should be > 0
|
||||
// could be initialized by other tests, so at least once
|
||||
InstanceCountingExtension.createCount.get() should be > 0
|
||||
val listedExtensions = system.settings.config.getStringList("akka.library-extensions").asScala
|
||||
listedExtensions.count(_.contains("InstanceCountingExtension")) should ===(1)
|
||||
|
||||
InstanceCountingExtension.createCount.get() - countBefore should ===(1)
|
||||
|
||||
shutdownActorSystem(system)
|
||||
}
|
||||
|
||||
"not create duplicate instances when auto-loading of library-extensions" in {
|
||||
import akka.util.ccompat.JavaConverters._
|
||||
// could be initialized by other tests, but assuming tests are not running in parallel
|
||||
val countBefore = InstanceCountingExtension.createCount.get()
|
||||
val system = ActorSystem(
|
||||
"extensions",
|
||||
ConfigFactory.parseString(
|
||||
"""
|
||||
akka.library-extensions = ["akka.actor.InstanceCountingExtension", "akka.actor.InstanceCountingExtension", "akka.actor.InstanceCountingExtension$"]
|
||||
"""))
|
||||
val listedExtensions = system.settings.config.getStringList("akka.library-extensions").asScala
|
||||
listedExtensions.count(_.contains("InstanceCountingExtension")) should ===(3) // testing duplicate names
|
||||
|
||||
InstanceCountingExtension.createCount.get() - countBefore should ===(1)
|
||||
|
||||
shutdownActorSystem(system)
|
||||
}
|
||||
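The tests above rely on library extensions being instantiated exactly once even when the same class is listed several times in `akka.library-extensions`. A hedged sketch of how such an auto-loaded extension is wired up (the `Counters` name and config string are illustrative, not from the commit):

```scala
import java.util.concurrent.atomic.AtomicInteger

import akka.actor.{ ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }
import com.typesafe.config.ConfigFactory

class Counters extends Extension {
  val requests = new AtomicInteger(0)
}

object Counters extends ExtensionId[Counters] with ExtensionIdProvider {
  override def createExtension(system: ExtendedActorSystem): Counters = new Counters
  override def lookup(): ExtensionId[_ <: Extension] = this
}

object CountersMain extends App {
  // listing the ExtensionId object (note the trailing $) makes the system load it eagerly, once
  val system = ActorSystem(
    "example",
    ConfigFactory.parseString("""akka.library-extensions += "Counters$" """))
  println(Counters(system).requests.get())
  system.terminate()
}
```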
@ -31,7 +31,7 @@ class TestActor(probe: ActorRef) extends Actor {
|
|||
|
||||
probe ! "STARTED"
|
||||
|
||||
def receive = {
|
||||
def receive: Receive = {
|
||||
case "DIE" => context.stop(self)
|
||||
case "THROW" => throw new TestActor.NormalException
|
||||
case "THROW_STOPPING_EXCEPTION" => throw new TestActor.StoppingException
|
||||
|
|
@ -46,9 +46,9 @@ object TestParentActor {
|
|||
}
|
||||
|
||||
class TestParentActor(probe: ActorRef, supervisorProps: Props) extends Actor {
|
||||
val supervisor = context.actorOf(supervisorProps)
|
||||
val supervisor: ActorRef = context.actorOf(supervisorProps)
|
||||
|
||||
def receive = {
|
||||
def receive: Receive = {
|
||||
case other => probe.forward(other)
|
||||
}
|
||||
}
|
||||
|
|
@ -58,10 +58,10 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec("""
|
|||
akka.loggers = ["akka.testkit.SilenceAllTestEventListener"]
|
||||
""") with WithLogCapturing with ImplicitSender {
|
||||
|
||||
@silent
|
||||
def supervisorProps(probeRef: ActorRef) = {
|
||||
val options = Backoff
|
||||
.onFailure(TestActor.props(probeRef), "someChildName", 200 millis, 10 seconds, 0.0, maxNrOfRetries = -1)
|
||||
val options = BackoffOpts
|
||||
.onFailure(TestActor.props(probeRef), "someChildName", 200 millis, 10 seconds, 0.0)
|
||||
.withMaxNrOfRetries(-1)
|
||||
.withSupervisorStrategy(OneForOneStrategy(maxNrOfRetries = 5, withinTimeRange = 30 seconds) {
|
||||
case _: TestActor.StoppingException => SupervisorStrategy.Stop
|
||||
})
|
||||
|
|
@ -69,16 +69,16 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec("""
|
|||
}
|
||||
|
||||
trait Setup {
|
||||
val probe = TestProbe()
|
||||
val supervisor = system.actorOf(supervisorProps(probe.ref))
|
||||
val probe: TestProbe = TestProbe()
|
||||
val supervisor: ActorRef = system.actorOf(supervisorProps(probe.ref))
|
||||
probe.expectMsg("STARTED")
|
||||
}
|
||||
|
||||
trait Setup2 {
|
||||
val probe = TestProbe()
|
||||
val parent = system.actorOf(TestParentActor.props(probe.ref, supervisorProps(probe.ref)))
|
||||
val probe: TestProbe = TestProbe()
|
||||
val parent: ActorRef = system.actorOf(TestParentActor.props(probe.ref, supervisorProps(probe.ref)))
|
||||
probe.expectMsg("STARTED")
|
||||
val child = probe.lastSender
|
||||
val child: ActorRef = probe.lastSender
|
||||
}
|
||||
|
||||
"BackoffOnRestartSupervisor" must {
|
||||
|
|
@ -139,7 +139,7 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec("""
|
|||
}
|
||||
|
||||
class SlowlyFailingActor(latch: CountDownLatch) extends Actor {
|
||||
def receive = {
|
||||
def receive: Receive = {
|
||||
case "THROW" =>
|
||||
sender ! "THROWN"
|
||||
throw new NormalException
|
||||
|
|
@ -155,18 +155,12 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec("""
|
|||
"accept commands while child is terminating" in {
|
||||
val postStopLatch = new CountDownLatch(1)
|
||||
@silent
|
||||
val options = Backoff
|
||||
.onFailure(
|
||||
Props(new SlowlyFailingActor(postStopLatch)),
|
||||
"someChildName",
|
||||
1 nanos,
|
||||
1 nanos,
|
||||
0.0,
|
||||
maxNrOfRetries = -1)
|
||||
val options = BackoffOpts
|
||||
.onFailure(Props(new SlowlyFailingActor(postStopLatch)), "someChildName", 1 nanos, 1 nanos, 0.0)
|
||||
.withMaxNrOfRetries(-1)
|
||||
.withSupervisorStrategy(OneForOneStrategy(loggingEnabled = false) {
|
||||
case _: TestActor.StoppingException => SupervisorStrategy.Stop
|
||||
})
|
||||
@silent
|
||||
val supervisor = system.actorOf(BackoffSupervisor.props(options))
|
||||
|
||||
supervisor ! BackoffSupervisor.GetCurrentChild
|
||||
|
|
@ -221,13 +215,12 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec("""
|
|||
// withinTimeRange indicates the time range in which maxNrOfRetries will cause the child to
|
||||
// stop. IE: If we restart more than maxNrOfRetries in a time range longer than withinTimeRange
|
||||
// that is acceptable.
|
||||
@silent
|
||||
val options = Backoff
|
||||
.onFailure(TestActor.props(probe.ref), "someChildName", 300.millis, 10.seconds, 0.0, maxNrOfRetries = -1)
|
||||
val options = BackoffOpts
|
||||
.onFailure(TestActor.props(probe.ref), "someChildName", 300.millis, 10.seconds, 0.0)
|
||||
.withMaxNrOfRetries(-1)
|
||||
.withSupervisorStrategy(OneForOneStrategy(withinTimeRange = 1 seconds, maxNrOfRetries = 3) {
|
||||
case _: TestActor.StoppingException => SupervisorStrategy.Stop
|
||||
})
|
||||
@silent
|
||||
val supervisor = system.actorOf(BackoffSupervisor.props(options))
|
||||
probe.expectMsg("STARTED")
|
||||
filterException[TestActor.TestException] {
|
||||
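The recurring change in this spec is the migration from the deprecated `Backoff.onFailure(..., maxNrOfRetries = ...)` factory to `BackoffOpts` plus builder-style `with...` calls. A hedged, condensed before/after (child props, names, and the chosen strategy are illustrative):

```scala
import scala.concurrent.duration._

import akka.actor.{ ActorSystem, OneForOneStrategy, Props, SupervisorStrategy }
import akka.pattern.{ BackoffOpts, BackoffSupervisor }

object BackoffMigration extends App {
  val system = ActorSystem("example")
  val childProps = Props.empty // stand-in for the real child actor's Props

  // deprecated form: Backoff.onFailure(childProps, "child", 200.millis, 10.seconds, 0.0, maxNrOfRetries = -1)
  val options = BackoffOpts
    .onFailure(childProps, "child", minBackoff = 200.millis, maxBackoff = 10.seconds, randomFactor = 0.0)
    .withMaxNrOfRetries(-1)
    .withSupervisorStrategy(OneForOneStrategy() {
      case _: IllegalStateException => SupervisorStrategy.Stop
    })

  val supervisor = system.actorOf(BackoffSupervisor.props(options), "backoff-supervisor")
}
```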
@ -7,7 +7,6 @@ package akka.pattern
|
|||
import scala.concurrent.duration._
|
||||
import scala.util.control.NoStackTrace
|
||||
|
||||
import com.github.ghik.silencer.silent
|
||||
import org.scalatest.concurrent.Eventually
|
||||
import org.scalatest.prop.TableDrivenPropertyChecks._
|
||||
|
||||
|
|
@ -24,7 +23,7 @@ object BackoffSupervisorSpec {
|
|||
}
|
||||
|
||||
class Child(probe: ActorRef) extends Actor {
|
||||
def receive = {
|
||||
def receive: Receive = {
|
||||
case "boom" => throw new TestException
|
||||
case msg => probe ! msg
|
||||
}
|
||||
|
|
@ -36,7 +35,7 @@ object BackoffSupervisorSpec {
|
|||
}
|
||||
|
||||
class ManualChild(probe: ActorRef) extends Actor {
|
||||
def receive = {
|
||||
def receive: Receive = {
|
||||
case "boom" => throw new TestException
|
||||
case msg =>
|
||||
probe ! msg
|
||||
|
|
@ -48,14 +47,13 @@ object BackoffSupervisorSpec {
|
|||
class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually {
|
||||
import BackoffSupervisorSpec._
|
||||
|
||||
@silent("deprecated")
|
||||
def onStopOptions(props: Props = Child.props(testActor), maxNrOfRetries: Int = -1) =
|
||||
Backoff.onStop(props, "c1", 100.millis, 3.seconds, 0.2, maxNrOfRetries)
|
||||
@silent("deprecated")
|
||||
def onFailureOptions(props: Props = Child.props(testActor), maxNrOfRetries: Int = -1) =
|
||||
Backoff.onFailure(props, "c1", 100.millis, 3.seconds, 0.2, maxNrOfRetries)
|
||||
@silent("deprecated")
|
||||
def create(options: BackoffOptions) = system.actorOf(BackoffSupervisor.props(options))
|
||||
def onStopOptions(props: Props = Child.props(testActor), maxNrOfRetries: Int = -1): BackoffOnStopOptions =
|
||||
BackoffOpts.onStop(props, "c1", 100.millis, 3.seconds, 0.2).withMaxNrOfRetries(maxNrOfRetries)
|
||||
def onFailureOptions(props: Props = Child.props(testActor), maxNrOfRetries: Int = -1): BackoffOnFailureOptions =
|
||||
BackoffOpts.onFailure(props, "c1", 100.millis, 3.seconds, 0.2).withMaxNrOfRetries(maxNrOfRetries)
|
||||
|
||||
def create(options: BackoffOnStopOptions): ActorRef = system.actorOf(BackoffSupervisor.props(options))
|
||||
def create(options: BackoffOnFailureOptions): ActorRef = system.actorOf(BackoffSupervisor.props(options))
|
||||
|
||||
"BackoffSupervisor" must {
|
||||
"start child again when it stops when using `Backoff.onStop`" in {
|
||||
|
|
@ -179,10 +177,10 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually
|
|||
|
||||
"reply to sender if replyWhileStopped is specified" in {
|
||||
filterException[TestException] {
|
||||
@silent("deprecated")
|
||||
val supervisor = create(
|
||||
Backoff
|
||||
.onFailure(Child.props(testActor), "c1", 100.seconds, 300.seconds, 0.2, maxNrOfRetries = -1)
|
||||
BackoffOpts
|
||||
.onFailure(Child.props(testActor), "c1", 100.seconds, 300.seconds, 0.2)
|
||||
.withMaxNrOfRetries(-1)
|
||||
.withReplyWhileStopped("child was stopped"))
|
||||
supervisor ! BackoffSupervisor.GetCurrentChild
|
||||
val c1 = expectMsgType[BackoffSupervisor.CurrentChild].ref.get
|
||||
|
|
@ -203,11 +201,43 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually
|
|||
}
|
||||
}
|
||||
|
||||
"use provided actor while stopped and withHandlerWhileStopped is specified" in {
|
||||
val handler = system.actorOf(Props(new Actor {
|
||||
override def receive: Receive = {
|
||||
case "still there?" =>
|
||||
sender() ! "not here!"
|
||||
}
|
||||
}))
|
||||
filterException[TestException] {
|
||||
val supervisor = create(
|
||||
BackoffOpts
|
||||
.onFailure(Child.props(testActor), "c1", 100.seconds, 300.seconds, 0.2)
|
||||
.withMaxNrOfRetries(-1)
|
||||
.withHandlerWhileStopped(handler))
|
||||
supervisor ! BackoffSupervisor.GetCurrentChild
|
||||
val c1 = expectMsgType[BackoffSupervisor.CurrentChild].ref.get
|
||||
watch(c1)
|
||||
supervisor ! BackoffSupervisor.GetRestartCount
|
||||
expectMsg(BackoffSupervisor.RestartCount(0))
|
||||
|
||||
c1 ! "boom"
|
||||
expectTerminated(c1)
|
||||
|
||||
awaitAssert {
|
||||
supervisor ! BackoffSupervisor.GetRestartCount
|
||||
expectMsg(BackoffSupervisor.RestartCount(1))
|
||||
}
|
||||
|
||||
supervisor ! "still there?"
|
||||
expectMsg("not here!")
|
||||
}
|
||||
}
|
||||
|
||||
"not reply to sender if replyWhileStopped is NOT specified" in {
|
||||
filterException[TestException] {
|
||||
@silent("deprecated")
|
||||
val supervisor =
|
||||
create(Backoff.onFailure(Child.props(testActor), "c1", 100.seconds, 300.seconds, 0.2, maxNrOfRetries = -1))
|
||||
create(
|
||||
BackoffOpts.onFailure(Child.props(testActor), "c1", 100.seconds, 300.seconds, 0.2).withMaxNrOfRetries(-1))
|
||||
supervisor ! BackoffSupervisor.GetCurrentChild
|
||||
val c1 = expectMsgType[BackoffSupervisor.CurrentChild].ref.get
|
||||
watch(c1)
|
||||
|
|
@ -382,7 +412,7 @@ class BackoffSupervisorSpec extends AkkaSpec with ImplicitSender with Eventually
|
|||
c1 ! PoisonPill
|
||||
expectTerminated(c1)
|
||||
// since actor stopped we can expect the two messages to end up in dead letters
|
||||
EventFilter.warning(pattern = ".*(ping|stop).*", occurrences = 2).intercept {
|
||||
EventFilter.warning(pattern = ".*(ping|stop).*", occurrences = 1).intercept {
|
||||
supervisor ! "ping"
|
||||
supervisorWatcher.expectNoMessage(20.millis) // supervisor must not terminate
|
||||
|
||||
@ -124,6 +124,23 @@ class RetrySpec extends AkkaSpec with RetrySupport {
|
|||
elapse <= 100 shouldBe true
|
||||
}
|
||||
}
|
||||
|
||||
"handle thrown exceptions in same way as failed Future" in {
|
||||
@volatile var failCount = 0
|
||||
|
||||
def attempt() = {
|
||||
if (failCount < 5) {
|
||||
failCount += 1
|
||||
throw new IllegalStateException(failCount.toString)
|
||||
} else Future.successful(5)
|
||||
}
|
||||
|
||||
val retried = retry(() => attempt(), 10, 100 milliseconds)
|
||||
|
||||
within(3 seconds) {
|
||||
Await.result(retried, remaining) should ===(5)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
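The new test documents that `retry` treats an exception thrown synchronously by the attempt function the same as a failed `Future`. A hedged usage sketch outside the test, mixing in `RetrySupport` as the spec itself does (the `flakyCall` name is illustrative):

```scala
import scala.concurrent.Future
import scala.concurrent.duration._

import akka.actor.{ ActorSystem, Scheduler }
import akka.pattern.RetrySupport

object RetryExample extends App with RetrySupport {
  implicit val system: ActorSystem = ActorSystem("example")
  import system.dispatcher
  implicit val scheduler: Scheduler = system.scheduler

  def flakyCall(): Future[Int] = Future.successful(42) // may also throw or return a failed Future

  // up to 10 attempts, 100 millis apart; a thrown exception and a failed Future both trigger a retry
  val result: Future[Int] = retry(() => flakyCall(), 10, 100.millis)
  result.foreach(r => println(s"got $r"))
}
```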
@ -270,6 +270,29 @@ class SerializeSpec extends AkkaSpec(SerializationTests.serializeConf) {
|
|||
ser.serialize(new Other).get
|
||||
}
|
||||
}
|
||||
|
||||
"detect duplicate serializer ids" in {
|
||||
(intercept[IllegalArgumentException] {
|
||||
val sys = ActorSystem(
|
||||
"SerializeSpec",
|
||||
ConfigFactory.parseString(s"""
|
||||
akka {
|
||||
actor {
|
||||
serializers {
|
||||
test = "akka.serialization.NoopSerializer"
|
||||
test-same = "akka.serialization.NoopSerializerSameId"
|
||||
}
|
||||
|
||||
serialization-bindings {
|
||||
"akka.serialization.SerializationTests$$Person" = test
|
||||
"akka.serialization.SerializationTests$$Address" = test-same
|
||||
}
|
||||
}
|
||||
}
|
||||
"""))
|
||||
shutdown(sys)
|
||||
}.getMessage should include).regex("Serializer identifier \\[9999\\].*is not unique")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -578,6 +601,8 @@ protected[akka] class NoopSerializer2 extends Serializer {
|
|||
def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = null
|
||||
}
|
||||
|
||||
protected[akka] class NoopSerializerSameId extends NoopSerializer
|
||||
|
||||
@SerialVersionUID(1)
|
||||
protected[akka] final case class FakeThrowable(msg: String) extends Throwable(msg) with Serializable {
|
||||
override def fillInStackTrace = null
|
||||
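The new negative test hinges on serializer `identifier` values having to be unique per ActorSystem; `NoopSerializerSameId` deliberately reuses one. A hedged sketch of a minimal custom serializer (the id 4711, class name, and the binding shown in the comments are illustrative):

```scala
import akka.serialization.Serializer

// every serializer must advertise a unique identifier; reusing an id now fails the system at startup
class EchoSerializer extends Serializer {
  override def identifier: Int = 4711
  override def includeManifest: Boolean = false
  override def toBinary(o: AnyRef): Array[Byte] = o.toString.getBytes("UTF-8")
  override def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): AnyRef =
    new String(bytes, "UTF-8")
}

// hypothetical wiring, in application.conf:
//   akka.actor.serializers.echo = "com.example.EchoSerializer"
//   akka.actor.serialization-bindings { "java.lang.String" = echo }
```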
@ -0,0 +1,271 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.actor.typed.scaladsl
|
||||
|
||||
import java.util.concurrent.CountDownLatch
|
||||
import java.util.concurrent.TimeUnit
|
||||
|
||||
import scala.concurrent.Future
|
||||
import scala.util.Failure
|
||||
import scala.util.Success
|
||||
|
||||
import akka.actor.testkit.typed.scaladsl.LogCapturing
|
||||
import akka.actor.testkit.typed.scaladsl.LoggingTestKit
|
||||
import akka.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit
|
||||
import akka.actor.typed.ActorRef
|
||||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.scaladsl.ActorThreadSpec.Echo
|
||||
import org.scalatest.wordspec.AnyWordSpecLike
|
||||
|
||||
object ActorThreadSpec {
|
||||
object Echo {
|
||||
final case class Msg(i: Int, replyTo: ActorRef[Int])
|
||||
|
||||
def apply(): Behavior[Msg] =
|
||||
Behaviors.receiveMessage {
|
||||
case Msg(i, replyTo) =>
|
||||
replyTo ! i
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
class ActorThreadSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with LogCapturing {
|
||||
|
||||
"Actor thread-safety checks" must {
|
||||
|
||||
"detect illegal access to ActorContext from outside" in {
|
||||
@volatile var context: ActorContext[String] = null
|
||||
val probe = createTestProbe[String]()
|
||||
|
||||
spawn(Behaviors.setup[String] { ctx =>
|
||||
// here it's ok
|
||||
ctx.children
|
||||
context = ctx
|
||||
probe.ref ! "initialized"
|
||||
Behaviors.empty
|
||||
})
|
||||
|
||||
probe.expectMessage("initialized")
|
||||
intercept[UnsupportedOperationException] {
|
||||
context.children
|
||||
}.getMessage should include("Unsupported access to ActorContext")
|
||||
|
||||
}
|
||||
|
||||
"detect illegal access to ActorContext from other thread when processing message" in {
|
||||
val probe = createTestProbe[UnsupportedOperationException]()
|
||||
|
||||
val ref = spawn(Behaviors.receive[CountDownLatch] {
|
||||
case (context, latch) =>
|
||||
Future {
|
||||
try {
|
||||
context.children
|
||||
} catch {
|
||||
case e: UnsupportedOperationException =>
|
||||
probe.ref ! e
|
||||
}
|
||||
}(context.executionContext)
|
||||
latch.await(5, TimeUnit.SECONDS)
|
||||
Behaviors.same
|
||||
})
|
||||
|
||||
val l = new CountDownLatch(1)
|
||||
try {
|
||||
ref ! l
|
||||
probe.receiveMessage().getMessage should include("Unsupported access to ActorContext")
|
||||
} finally {
|
||||
l.countDown()
|
||||
}
|
||||
}
|
||||
|
||||
"detect illegal access to ActorContext from other thread after processing message" in {
|
||||
val probe = createTestProbe[UnsupportedOperationException]()
|
||||
|
||||
val ref = spawn(Behaviors.receive[CountDownLatch] {
|
||||
case (context, latch) =>
|
||||
Future {
|
||||
try {
|
||||
latch.await(5, TimeUnit.SECONDS)
|
||||
context.children
|
||||
} catch {
|
||||
case e: UnsupportedOperationException =>
|
||||
probe.ref ! e
|
||||
}
|
||||
}(context.executionContext)
|
||||
|
||||
Behaviors.stopped
|
||||
})
|
||||
|
||||
val l = new CountDownLatch(1)
|
||||
try {
|
||||
ref ! l
|
||||
probe.expectTerminated(ref)
|
||||
} finally {
|
||||
l.countDown()
|
||||
}
|
||||
probe.receiveMessage().getMessage should include("Unsupported access to ActorContext")
|
||||
}
|
||||
|
||||
"detect illegal access from child" in {
|
||||
val probe = createTestProbe[UnsupportedOperationException]()
|
||||
|
||||
val ref = spawn(Behaviors.receive[String] {
|
||||
case (context, _) =>
|
||||
// really bad idea to define a child actor like this
|
||||
context.spawnAnonymous(Behaviors.setup[String] { _ =>
|
||||
try {
|
||||
context.children
|
||||
} catch {
|
||||
case e: UnsupportedOperationException =>
|
||||
probe.ref ! e
|
||||
}
|
||||
Behaviors.empty
|
||||
})
|
||||
Behaviors.same
|
||||
})
|
||||
|
||||
ref ! "hello"
|
||||
probe.receiveMessage().getMessage should include("Unsupported access to ActorContext")
|
||||
}
|
||||
|
||||
"allow access from message adapter" in {
|
||||
val probe = createTestProbe[String]()
|
||||
val echo = spawn(Echo())
|
||||
|
||||
spawn(Behaviors.setup[String] { context =>
|
||||
val replyAdapter = context.messageAdapter[Int] { i =>
|
||||
// this is allowed because the mapping function is running in the target actor
|
||||
context.children
|
||||
i.toString
|
||||
}
|
||||
echo ! Echo.Msg(17, replyAdapter)
|
||||
|
||||
Behaviors.receiveMessage { msg =>
|
||||
probe.ref ! msg
|
||||
Behaviors.same
|
||||
}
|
||||
})
|
||||
|
||||
probe.expectMessage("17")
|
||||
}
|
||||
|
||||
"allow access from ask response mapper" in {
|
||||
val probe = createTestProbe[String]()
|
||||
val echo = spawn(Echo())
|
||||
|
||||
spawn(Behaviors.setup[String] { context =>
|
||||
context.ask[Echo.Msg, Int](echo, Echo.Msg(18, _)) {
|
||||
case Success(i) =>
|
||||
// this is allowed because the mapping function is running in the target actor
|
||||
context.children
|
||||
i.toString
|
||||
case Failure(e) => throw e
|
||||
}
|
||||
|
||||
Behaviors.receiveMessage { msg =>
|
||||
probe.ref ! msg
|
||||
Behaviors.same
|
||||
}
|
||||
})
|
||||
|
||||
probe.expectMessage("18")
|
||||
}
|
||||
|
||||
"detect wrong context in construction of AbstractBehavior" in {
|
||||
val probe = createTestProbe[String]()
|
||||
val ref = spawn(Behaviors.setup[String] { context =>
|
||||
// missing setup new AbstractBehavior and passing in parent's context
|
||||
val child = context.spawnAnonymous(new AbstractBehavior[String](context) {
|
||||
override def onMessage(msg: String): Behavior[String] = {
|
||||
probe.ref ! msg
|
||||
Behaviors.same
|
||||
}
|
||||
})
|
||||
|
||||
Behaviors.receiveMessage { msg =>
|
||||
child ! msg
|
||||
Behaviors.same
|
||||
}
|
||||
})
|
||||
|
||||
// 2 occurrences because one from PostStop also
|
||||
LoggingTestKit
|
||||
.error[IllegalStateException]
|
||||
.withMessageContains("was created with wrong ActorContext")
|
||||
.withOccurrences(2)
|
||||
.expect {
|
||||
// it's not detected when spawned, but when processing message
|
||||
ref ! "hello"
|
||||
probe.expectNoMessage()
|
||||
}
|
||||
}
|
||||
|
||||
"detect illegal access from AbstractBehavior constructor" in {
|
||||
val probe = createTestProbe[UnsupportedOperationException]()
|
||||
|
||||
spawn(Behaviors.setup[String] { context =>
|
||||
context.spawnAnonymous(
|
||||
Behaviors.setup[String](_ =>
|
||||
// wrongly using parent's context
|
||||
new AbstractBehavior[String](context) {
|
||||
try {
|
||||
this.context.children
|
||||
} catch {
|
||||
case e: UnsupportedOperationException =>
|
||||
probe.ref ! e
|
||||
}
|
||||
|
||||
override def onMessage(msg: String): Behavior[String] = {
|
||||
Behaviors.same
|
||||
}
|
||||
}))
|
||||
|
||||
Behaviors.empty
|
||||
})
|
||||
|
||||
probe.receiveMessage().getMessage should include("Unsupported access to ActorContext")
|
||||
}
|
||||
|
||||
"detect sharing of same AbstractBehavior instance" in {
|
||||
// extremely contrived example, but the creativity among users can be great
|
||||
@volatile var behv: Behavior[CountDownLatch] = null
|
||||
|
||||
val ref1 = spawn(Behaviors.setup[CountDownLatch] { context =>
|
||||
behv = new AbstractBehavior[CountDownLatch](context) {
|
||||
override def onMessage(latch: CountDownLatch): Behavior[CountDownLatch] = {
|
||||
latch.await(5, TimeUnit.SECONDS)
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
behv
|
||||
})
|
||||
|
||||
eventually(behv shouldNot equal(null))
|
||||
|
||||
// spawning same instance again
|
||||
val ref2 = spawn(behv)
|
||||
|
||||
val latch1 = new CountDownLatch(1)
|
||||
try {
|
||||
ref1 ! latch1
|
||||
|
||||
// 2 occurrences because one from PostStop also
|
||||
LoggingTestKit
|
||||
.error[IllegalStateException]
|
||||
.withMessageContains("was created with wrong ActorContext")
|
||||
.withOccurrences(2)
|
||||
.expect {
|
||||
ref2 ! new CountDownLatch(0)
|
||||
}
|
||||
} finally {
|
||||
latch1.countDown()
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
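The spec above enumerates the illegal-access cases; the supported way to bring the result of a `Future` back into the actor without touching `ActorContext` from a foreign thread is `context.pipeToSelf`. A hedged sketch (the message protocol is illustrative):

```scala
import scala.concurrent.Future
import scala.util.{ Failure, Success }

import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.Behaviors

object PipeToSelfSketch {
  sealed trait Command
  case object Load extends Command
  final case class Loaded(payload: String) extends Command
  final case class LoadFailed(reason: Throwable) extends Command

  def apply(): Behavior[Command] =
    Behaviors.setup { context =>
      Behaviors.receiveMessage {
        case Load =>
          val fut: Future[String] = Future.successful("data") // stand-in for a real async call
          // the callback only builds a message on the future's thread;
          // that message is then processed on the actor's own thread as usual
          context.pipeToSelf(fut) {
            case Success(value) => Loaded(value)
            case Failure(ex)    => LoadFailed(ex)
          }
          Behaviors.same
        case Loaded(payload) =>
          context.log.info("loaded {}", payload)
          Behaviors.same
        case LoadFailed(ex) =>
          context.log.error("load failed", ex)
          Behaviors.same
      }
    }
}
```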
|
|
@ -6,7 +6,6 @@ package akka.actor.typed.scaladsl
|
|||
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import org.scalatest.wordspec.AnyWordSpecLike
|
||||
import org.slf4j.event.Level
|
||||
|
||||
import akka.actor.testkit.typed.TestException
|
||||
import akka.actor.testkit.typed.scaladsl.LogCapturing
|
||||
|
|
@ -17,6 +16,7 @@ import akka.actor.typed.ActorRef
|
|||
import akka.actor.typed.Behavior
|
||||
import akka.actor.typed.PostStop
|
||||
import akka.actor.typed.Props
|
||||
import akka.actor.typed.internal.AdaptMessage
|
||||
|
||||
object MessageAdapterSpec {
|
||||
val config = ConfigFactory.parseString("""
|
||||
|
|
@ -271,13 +271,15 @@ class MessageAdapterSpec
|
|||
|
||||
}
|
||||
|
||||
"log wrapped message of DeadLetter" in {
|
||||
"redirect to DeadLetter after termination" in {
|
||||
case class Ping(sender: ActorRef[Pong])
|
||||
case class Pong(greeting: String)
|
||||
case class PingReply(response: Pong)
|
||||
|
||||
val pingProbe = createTestProbe[Ping]()
|
||||
|
||||
val deadLetterProbe = testKit.createDeadLetterProbe()
|
||||
|
||||
val snitch = Behaviors.setup[PingReply] { context =>
|
||||
val replyTo = context.messageAdapter[Pong](PingReply)
|
||||
pingProbe.ref ! Ping(replyTo)
|
||||
|
|
@ -287,13 +289,13 @@ class MessageAdapterSpec
|
|||
|
||||
createTestProbe().expectTerminated(ref)
|
||||
|
||||
LoggingTestKit.empty
|
||||
.withLogLevel(Level.INFO)
|
||||
.withMessageRegex("Pong.*wrapped in.*AdaptMessage.*dead letters encountered")
|
||||
.expect {
|
||||
pingProbe.receiveMessage().sender ! Pong("hi")
|
||||
}
|
||||
|
||||
pingProbe.receiveMessage().sender ! Pong("hi")
|
||||
val deadLetter = deadLetterProbe.receiveMessage()
|
||||
deadLetter.message match {
|
||||
case AdaptMessage(Pong("hi"), _) => // passed through the FunctionRef
|
||||
case Pong("hi") => // FunctionRef stopped
|
||||
case unexpected => fail(s"Unexpected message [$unexpected], expected Pong or AdaptMessage(Pong)")
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
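The rewritten test asserts on the dead-letter probe (`testKit.createDeadLetterProbe()` above) instead of a log line. A hedged minimal usage sketch of that probe outside of ScalaTestWithActorTestKit (the message text is illustrative):

```scala
import akka.actor.testkit.typed.scaladsl.ActorTestKit
import akka.actor.typed.scaladsl.Behaviors

object DeadLetterProbeSketch extends App {
  val testKit = ActorTestKit()
  val deadLetterProbe = testKit.createDeadLetterProbe()

  val ref = testKit.spawn(Behaviors.receiveMessage[String](_ => Behaviors.same))
  testKit.stop(ref)   // blocks until the actor is terminated
  ref ! "too late"    // rerouted to dead letters

  assert(deadLetterProbe.receiveMessage().message == "too late")
  testKit.shutdownTestKit()
}
```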
@ -0,0 +1,7 @@
|
|||
# add internal currentActorThread to ActorContext
|
||||
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.typed.scaladsl.ActorContext.setCurrentActorThread")
|
||||
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.typed.scaladsl.ActorContext.clearCurrentActorThread")
|
||||
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.typed.scaladsl.ActorContext.checkCurrentActorThread")
|
||||
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.typed.internal.ActorContextImpl.akka$actor$typed$internal$ActorContextImpl$$_currentActorThread_=")
|
||||
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.typed.internal.ActorContextImpl.akka$actor$typed$internal$ActorContextImpl$$_currentActorThread")
|
||||
|
||||
|
|
@ -16,7 +16,7 @@ akka.actor.typed {
|
|||
library-extensions = ${?akka.actor.typed.library-extensions} []
|
||||
|
||||
# Receptionist is started eagerly to allow clustered receptionist to gather remote registrations early on.
|
||||
library-extensions += "akka.actor.typed.receptionist.Receptionist"
|
||||
library-extensions += "akka.actor.typed.receptionist.Receptionist$"
|
||||
|
||||
# While an actor is restarted (waiting for backoff to expire and children to stop)
|
||||
# incoming messages and signals are stashed, and delivered later to the newly restarted
|
||||
@ -95,10 +95,16 @@ import akka.util.Timeout
|
|||
private var _messageAdapters: List[(Class[_], Any => T)] = Nil
|
||||
private var _timer: OptionVal[TimerSchedulerImpl[T]] = OptionVal.None
|
||||
|
||||
// _currentActorThread is on purpose not volatile. Used from `checkCurrentActorThread`.
|
||||
// It will always see the right value when accessed from the right thread.
|
||||
// Possible that it would NOT detect illegal access sometimes but that's ok.
|
||||
private var _currentActorThread: OptionVal[Thread] = OptionVal.None
|
||||
|
||||
// context-shared timer needed to allow for nested timer usage
|
||||
def timer: TimerSchedulerImpl[T] = _timer match {
|
||||
case OptionVal.Some(timer) => timer
|
||||
case OptionVal.None =>
|
||||
checkCurrentActorThread()
|
||||
val timer = new TimerSchedulerImpl[T](this)
|
||||
_timer = OptionVal.Some(timer)
|
||||
timer
|
||||
|
|
@ -152,6 +158,7 @@ import akka.util.Timeout
|
|||
}
|
||||
|
||||
override def log: Logger = {
|
||||
checkCurrentActorThread()
|
||||
val logging = loggingContext()
|
||||
ActorMdc.setMdc(logging)
|
||||
logging.logger
|
||||
|
|
@ -160,6 +167,7 @@ import akka.util.Timeout
|
|||
override def getLog: Logger = log
|
||||
|
||||
override def setLoggerName(name: String): Unit = {
|
||||
checkCurrentActorThread()
|
||||
_logging = OptionVal.Some(loggingContext().withLogger(LoggerFactory.getLogger(name)))
|
||||
}
|
||||
|
||||
|
|
@ -247,6 +255,7 @@ import akka.util.Timeout
|
|||
internalMessageAdapter(messageClass, f.apply)
|
||||
|
||||
private def internalMessageAdapter[U](messageClass: Class[U], f: U => T): ActorRef[U] = {
|
||||
checkCurrentActorThread()
|
||||
// replace existing adapter for same class, only one per class is supported to avoid unbounded growth
|
||||
// in case "same" adapter is added repeatedly
|
||||
val boxedMessageClass = BoxedType(messageClass).asInstanceOf[Class[U]]
|
||||
|
|
@ -268,4 +277,44 @@ import akka.util.Timeout
|
|||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] def messageAdapters: List[(Class[_], Any => T)] = _messageAdapters
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] def setCurrentActorThread(): Unit = {
|
||||
_currentActorThread match {
|
||||
case OptionVal.None =>
|
||||
_currentActorThread = OptionVal.Some(Thread.currentThread())
|
||||
case OptionVal.Some(t) =>
|
||||
throw new IllegalStateException(
|
||||
s"Invalid access by thread from the outside of $self. " +
|
||||
s"Current message is processed by $t, but also accessed from from ${Thread.currentThread()}.")
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] def clearCurrentActorThread(): Unit = {
|
||||
_currentActorThread = OptionVal.None
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] def checkCurrentActorThread(): Unit = {
|
||||
val callerThread = Thread.currentThread()
|
||||
_currentActorThread match {
|
||||
case OptionVal.Some(t) =>
|
||||
if (callerThread ne t) {
|
||||
throw new UnsupportedOperationException(
|
||||
s"Unsupported access to ActorContext operation from the outside of $self. " +
|
||||
s"Current message is processed by $t, but ActorContext was called from $callerThread.")
|
||||
}
|
||||
case OptionVal.None =>
|
||||
throw new UnsupportedOperationException(
|
||||
s"Unsupported access to ActorContext from the outside of $self. " +
|
||||
s"No message is currently processed by the actor, but ActorContext was called from $callerThread.")
|
||||
}
|
||||
}
|
||||
}
|
||||
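To make the comment about the non-volatile `_currentActorThread` concrete: the owning thread always observes its own write, so the check can never wrongly reject the owner; a stale read by a foreign thread can at worst miss a violation, which matches the "possible that it would NOT detect illegal access sometimes" note above. A standalone sketch of the same idea (not Akka API, names illustrative):

```scala
final class ThreadOwnershipGuard {
  private var owner: Thread = null // deliberately not volatile, like _currentActorThread

  def enter(): Unit = {
    if (owner ne null)
      throw new IllegalStateException(s"already owned by $owner")
    owner = Thread.currentThread()
  }

  def leave(): Unit =
    owner = null

  def check(): Unit = {
    val caller = Thread.currentThread()
    if (owner eq null)
      throw new UnsupportedOperationException(s"not inside a message, called from $caller")
    else if (owner ne caller)
      throw new UnsupportedOperationException(s"owned by $owner, called from $caller")
  }
}
```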
@ -75,6 +75,7 @@ import akka.util.OptionVal
|
|||
def receive: Receive = ActorAdapter.DummyReceive
|
||||
|
||||
override protected[akka] def aroundReceive(receive: Receive, msg: Any): Unit = {
|
||||
ctx.setCurrentActorThread()
|
||||
try {
|
||||
// as we know we never become in "normal" typed actors, it is just the current behavior that
|
||||
// changes, we can avoid some overhead with the partial function/behavior stack of untyped entirely
|
||||
|
|
@ -104,7 +105,10 @@ import akka.util.OptionVal
|
|||
case msg: T @unchecked =>
|
||||
handleMessage(msg)
|
||||
}
|
||||
} finally ctx.clearMdc()
|
||||
} finally {
|
||||
ctx.clearCurrentActorThread()
|
||||
ctx.clearMdc()
|
||||
}
|
||||
}
|
||||
|
||||
private def handleMessage(msg: T): Unit = {
|
||||
|
|
@ -206,32 +210,38 @@ import akka.util.OptionVal
|
|||
}
|
||||
|
||||
override val supervisorStrategy = classic.OneForOneStrategy(loggingEnabled = false) {
|
||||
case TypedActorFailedException(cause) =>
|
||||
// These have already been optionally logged by typed supervision
|
||||
recordChildFailure(cause)
|
||||
classic.SupervisorStrategy.Stop
|
||||
case ex =>
|
||||
val isTypedActor = sender() match {
|
||||
case afwc: ActorRefWithCell =>
|
||||
afwc.underlying.props.producer.actorClass == classOf[ActorAdapter[_]]
|
||||
ctx.setCurrentActorThread()
|
||||
try ex match {
|
||||
case TypedActorFailedException(cause) =>
|
||||
// These have already been optionally logged by typed supervision
|
||||
recordChildFailure(cause)
|
||||
classic.SupervisorStrategy.Stop
|
||||
case _ =>
|
||||
false
|
||||
}
|
||||
recordChildFailure(ex)
|
||||
val logMessage = ex match {
|
||||
case e: ActorInitializationException if e.getCause ne null =>
|
||||
e.getCause match {
|
||||
case ex: InvocationTargetException if ex.getCause ne null => ex.getCause.getMessage
|
||||
case ex => ex.getMessage
|
||||
val isTypedActor = sender() match {
|
||||
case afwc: ActorRefWithCell =>
|
||||
afwc.underlying.props.producer.actorClass == classOf[ActorAdapter[_]]
|
||||
case _ =>
|
||||
false
|
||||
}
|
||||
case e => e.getMessage
|
||||
recordChildFailure(ex)
|
||||
val logMessage = ex match {
|
||||
case e: ActorInitializationException if e.getCause ne null =>
|
||||
e.getCause match {
|
||||
case ex: InvocationTargetException if ex.getCause ne null => ex.getCause.getMessage
|
||||
case ex => ex.getMessage
|
||||
}
|
||||
case e => e.getMessage
|
||||
}
|
||||
// log at Error as that is what the supervision strategy would have done.
|
||||
ctx.log.error(logMessage, ex)
|
||||
if (isTypedActor)
|
||||
classic.SupervisorStrategy.Stop
|
||||
else
|
||||
ActorAdapter.classicSupervisorDecider(ex)
|
||||
} finally {
|
||||
ctx.clearCurrentActorThread()
|
||||
}
|
||||
// log at Error as that is what the supervision strategy would have done.
|
||||
ctx.log.error(logMessage, ex)
|
||||
if (isTypedActor)
|
||||
classic.SupervisorStrategy.Stop
|
||||
else
|
||||
ActorAdapter.classicSupervisorDecider(ex)
|
||||
}
|
||||
|
||||
private def recordChildFailure(ex: Throwable): Unit = {
|
||||
|
|
@ -241,6 +251,30 @@ import akka.util.OptionVal
|
|||
}
|
||||
}
|
||||
|
||||
override protected[akka] def aroundPreStart(): Unit = {
|
||||
ctx.setCurrentActorThread()
|
||||
try super.aroundPreStart()
|
||||
finally ctx.clearCurrentActorThread()
|
||||
}
|
||||
|
||||
override protected[akka] def aroundPreRestart(reason: Throwable, message: Option[Any]): Unit = {
|
||||
ctx.setCurrentActorThread()
|
||||
try super.aroundPreRestart(reason, message)
|
||||
finally ctx.clearCurrentActorThread()
|
||||
}
|
||||
|
||||
override protected[akka] def aroundPostRestart(reason: Throwable): Unit = {
|
||||
ctx.setCurrentActorThread()
|
||||
try super.aroundPostRestart(reason)
|
||||
finally ctx.clearCurrentActorThread()
|
||||
}
|
||||
|
||||
override protected[akka] def aroundPostStop(): Unit = {
|
||||
ctx.setCurrentActorThread()
|
||||
try super.aroundPostStop()
|
||||
finally ctx.clearCurrentActorThread()
|
||||
}
|
||||
|
||||
override def preStart(): Unit = {
|
||||
try {
|
||||
if (Behavior.isAlive(behavior)) {
|
||||
@ -58,13 +58,26 @@ private[akka] object ActorContextAdapter {
|
|||
final override val self = ActorRefAdapter(classicContext.self)
|
||||
final override val system = ActorSystemAdapter(classicContext.system)
|
||||
private[akka] def classicActorContext = classicContext
|
||||
override def children: Iterable[ActorRef[Nothing]] = classicContext.children.map(ActorRefAdapter(_))
|
||||
override def child(name: String): Option[ActorRef[Nothing]] = classicContext.child(name).map(ActorRefAdapter(_))
|
||||
override def spawnAnonymous[U](behavior: Behavior[U], props: Props = Props.empty): ActorRef[U] =
|
||||
override def children: Iterable[ActorRef[Nothing]] = {
|
||||
checkCurrentActorThread()
|
||||
classicContext.children.map(ActorRefAdapter(_))
|
||||
}
|
||||
override def child(name: String): Option[ActorRef[Nothing]] = {
|
||||
checkCurrentActorThread()
|
||||
classicContext.child(name).map(ActorRefAdapter(_))
|
||||
}
|
||||
override def spawnAnonymous[U](behavior: Behavior[U], props: Props = Props.empty): ActorRef[U] = {
|
||||
checkCurrentActorThread()
|
||||
ActorRefFactoryAdapter.spawnAnonymous(classicContext, behavior, props, rethrowTypedFailure = true)
|
||||
override def spawn[U](behavior: Behavior[U], name: String, props: Props = Props.empty): ActorRef[U] =
|
||||
}
|
||||
|
||||
override def spawn[U](behavior: Behavior[U], name: String, props: Props = Props.empty): ActorRef[U] = {
|
||||
checkCurrentActorThread()
|
||||
ActorRefFactoryAdapter.spawn(classicContext, behavior, name, props, rethrowTypedFailure = true)
|
||||
override def stop[U](child: ActorRef[U]): Unit =
|
||||
}
|
||||
|
||||
override def stop[U](child: ActorRef[U]): Unit = {
|
||||
checkCurrentActorThread()
|
||||
if (child.path.parent == self.path) { // only if a direct child
|
||||
toClassic(child) match {
|
||||
case f: akka.actor.FunctionRef =>
|
||||
|
|
@ -90,16 +103,29 @@ private[akka] object ActorContextAdapter {
|
|||
s"but [$child] is not a child of [$self]. Stopping other actors has to be expressed as " +
|
||||
"an explicit stop message that the actor accepts.")
|
||||
}
|
||||
}
|
||||
|
||||
override def watch[U](other: ActorRef[U]): Unit = { classicContext.watch(toClassic(other)) }
|
||||
override def watchWith[U](other: ActorRef[U], msg: T): Unit = { classicContext.watchWith(toClassic(other), msg) }
|
||||
override def unwatch[U](other: ActorRef[U]): Unit = { classicContext.unwatch(toClassic(other)) }
|
||||
override def watch[U](other: ActorRef[U]): Unit = {
|
||||
checkCurrentActorThread()
|
||||
classicContext.watch(toClassic(other))
|
||||
}
|
||||
override def watchWith[U](other: ActorRef[U], msg: T): Unit = {
|
||||
checkCurrentActorThread()
|
||||
classicContext.watchWith(toClassic(other), msg)
|
||||
}
|
||||
override def unwatch[U](other: ActorRef[U]): Unit = {
|
||||
checkCurrentActorThread()
|
||||
classicContext.unwatch(toClassic(other))
|
||||
}
|
||||
var receiveTimeoutMsg: T = null.asInstanceOf[T]
|
||||
override def setReceiveTimeout(d: FiniteDuration, msg: T): Unit = {
|
||||
checkCurrentActorThread()
|
||||
receiveTimeoutMsg = msg
|
||||
classicContext.setReceiveTimeout(d)
|
||||
}
|
||||
override def cancelReceiveTimeout(): Unit = {
|
||||
checkCurrentActorThread()
|
||||
|
||||
receiveTimeoutMsg = null.asInstanceOf[T]
|
||||
classicContext.setReceiveTimeout(Duration.Undefined)
|
||||
}
|
||||
@ -50,12 +50,13 @@ abstract class AbstractBehavior[T](context: ActorContext[T]) extends ExtensibleB
|
|||
|
||||
protected def getContext: ActorContext[T] = context
|
||||
|
||||
private def checkRightContext(ctx: TypedActorContext[T]): Unit =
|
||||
private def checkRightContext(ctx: TypedActorContext[T]): Unit = {
|
||||
if (ctx.asJava ne context)
|
||||
throw new IllegalStateException(
|
||||
s"Actor [${ctx.asJava.getSelf}] of AbstractBehavior class " +
|
||||
s"[${getClass.getName}] was created with wrong ActorContext [${context.asJava.getSelf}]. " +
|
||||
"Wrap in Behaviors.setup and pass the context to the constructor of AbstractBehavior.")
|
||||
}
|
||||
|
||||
@throws(classOf[Exception])
|
||||
override final def receive(ctx: TypedActorContext[T], msg: T): Behavior[T] = {
|
||||
@ -70,12 +70,13 @@ abstract class AbstractBehavior[T](protected val context: ActorContext[T]) exten
|
|||
@throws(classOf[Exception])
|
||||
def onSignal: PartialFunction[Signal, Behavior[T]] = PartialFunction.empty
|
||||
|
||||
private def checkRightContext(ctx: TypedActorContext[T]): Unit =
|
||||
private def checkRightContext(ctx: TypedActorContext[T]): Unit = {
|
||||
if (ctx.asJava ne context)
|
||||
throw new IllegalStateException(
|
||||
s"Actor [${ctx.asJava.getSelf}] of AbstractBehavior class " +
|
||||
s"[${getClass.getName}] was created with wrong ActorContext [${context.asJava.getSelf}]. " +
|
||||
"Wrap in Behaviors.setup and pass the context to the constructor of AbstractBehavior.")
|
||||
}
|
||||
|
||||
@throws(classOf[Exception])
|
||||
override final def receive(ctx: TypedActorContext[T], msg: T): Behavior[T] = {
|
||||
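The error message added here points at the supported construction pattern: create each `AbstractBehavior` instance inside `Behaviors.setup`, passing that instance's own context to the constructor. A hedged sketch of the right shape (the `Counter` protocol is illustrative):

```scala
import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.{ AbstractBehavior, ActorContext, Behaviors }

object Counter {
  sealed trait Command
  case object Increment extends Command

  // a fresh AbstractBehavior is built per spawned actor, with that actor's own context
  def apply(): Behavior[Command] =
    Behaviors.setup(context => new Counter(context))
}

final class Counter(context: ActorContext[Counter.Command])
    extends AbstractBehavior[Counter.Command](context) {
  import Counter._

  private var count = 0

  override def onMessage(msg: Command): Behavior[Command] = msg match {
    case Increment =>
      count += 1
      context.log.info("count is {}", count)
      this
  }
}
```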
@ -337,4 +337,19 @@ trait ActorContext[T] extends TypedActorContext[T] with ClassicActorContextProvi
|
|||
@InternalApi
|
||||
private[akka] def clearMdc(): Unit
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] def setCurrentActorThread(): Unit
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] def clearCurrentActorThread(): Unit
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] def checkCurrentActorThread(): Unit
|
||||
|
||||
}
|
||||
@ -17,9 +17,9 @@ import akka.actor.typed.RecipientRef
|
|||
import akka.actor.typed.Scheduler
|
||||
import akka.actor.typed.internal.{ adapter => adapt }
|
||||
import akka.actor.typed.internal.InternalRecipientRef
|
||||
import akka.annotation.InternalApi
|
||||
import akka.annotation.{ InternalApi, InternalStableApi }
|
||||
import akka.pattern.PromiseActorRef
|
||||
import akka.util.Timeout
|
||||
import akka.util.{ unused, Timeout }
|
||||
|
||||
/**
|
||||
* The ask-pattern implements the initiator side of a request–reply protocol.
|
||||
|
|
@ -146,14 +146,19 @@ object AskPattern {
|
|||
val ref: ActorRef[U] = _ref
|
||||
val future: Future[U] = _future
|
||||
val promiseRef: PromiseActorRef = _promiseRef
|
||||
|
||||
@InternalStableApi
|
||||
private[akka] def ask[T](target: InternalRecipientRef[T], message: T, @unused timeout: Timeout): Future[U] = {
|
||||
target ! message
|
||||
future
|
||||
}
|
||||
}
|
||||
|
||||
private def askClassic[T, U](target: InternalRecipientRef[T], timeout: Timeout, f: ActorRef[U] => T): Future[U] = {
|
||||
val p = new PromiseRef[U](target, timeout)
|
||||
val m = f(p.ref)
|
||||
if (p.promiseRef ne null) p.promiseRef.messageClassName = m.getClass.getName
|
||||
target ! m
|
||||
p.future
|
||||
p.ask(target, m, timeout)
|
||||
}
|
||||
|
||||
/**
|
||||
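For orientation, `askClassic` above backs the public ask pattern; from outside an actor it is typically used as in this hedged sketch (the `Ping` protocol and system name are illustrative):

```scala
import scala.concurrent.Future
import scala.concurrent.duration._

import akka.actor.typed.{ ActorRef, ActorSystem, Behavior }
import akka.actor.typed.scaladsl.AskPattern._
import akka.actor.typed.scaladsl.Behaviors
import akka.util.Timeout

object AskSketch extends App {
  final case class Ping(replyTo: ActorRef[String])

  val pinger: Behavior[Ping] = Behaviors.receiveMessage { msg =>
    msg.replyTo ! "pong"
    Behaviors.same
  }

  implicit val system: ActorSystem[Ping] = ActorSystem(pinger, "ask-example")
  implicit val timeout: Timeout = 3.seconds

  // a short-lived PromiseActorRef is created behind the scenes and completes the Future
  val answer: Future[String] = system.ask[String](replyTo => Ping(replyTo))
  answer.foreach(println)(system.executionContext)
}
```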
@ -0,0 +1,7 @@
|
|||
# #25040 changes to ActorCell internals
|
||||
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorCell.setActorFields")
|
||||
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorCell.clearActorCellFields")
|
||||
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.actor.ActorCell.actor_=")
|
||||
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.FaultHandling.akka$actor$dungeon$FaultHandling$$_failed")
|
||||
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.actor.dungeon.FaultHandling.akka$actor$dungeon$FaultHandling$$_failed_=")
|
||||
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.util.Reflect.lookupAndSetField")
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
# Internals changed
|
||||
ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.pattern.ExtendedBackoffOptions.withHandlerWhileStopped")
|
||||
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.pattern.BackoffOnFailureOptionsImpl.<init>$default$8")
|
||||
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.pattern.BackoffOnFailureOptionsImpl.apply")
|
||||
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.pattern.BackoffOnFailureOptionsImpl.apply$default$8")
|
||||
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.pattern.BackoffOnStopOptionsImpl.<init>$default$8")
|
||||
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.pattern.BackoffOnStopOptionsImpl.apply")
|
||||
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.pattern.BackoffOnStopOptionsImpl.apply$default$8")
|
||||
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.pattern.BackoffOnFailureOptionsImpl.<init>$default$8")
|
||||
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.pattern.BackoffOnFailureOptionsImpl.apply$default$8")
|
||||
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.pattern.BackoffOnFailureOptionsImpl.apply")
|
||||
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffOnFailureOptionsImpl.replyWhileStopped")
|
||||
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.pattern.BackoffOnFailureOptionsImpl.copy")
|
||||
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.pattern.BackoffOnFailureOptionsImpl.copy$default$8")
|
||||
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.pattern.BackoffOnFailureOptionsImpl.this")
|
||||
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.pattern.BackoffOnStopOptionsImpl.<init>$default$8")
|
||||
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.pattern.BackoffOnStopOptionsImpl.apply$default$8")
|
||||
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.pattern.BackoffOnStopOptionsImpl.apply")
|
||||
ProblemFilters.exclude[DirectMissingMethodProblem]("akka.pattern.BackoffOnStopOptionsImpl.replyWhileStopped")
|
||||
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.pattern.BackoffOnStopOptionsImpl.copy")
|
||||
ProblemFilters.exclude[IncompatibleResultTypeProblem]("akka.pattern.BackoffOnStopOptionsImpl.copy$default$8")
|
||||
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.pattern.BackoffOnStopOptionsImpl.this")
|
||||
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.pattern.internal.BackoffOnRestartSupervisor.this")
|
||||
ProblemFilters.exclude[IncompatibleMethTypeProblem]("akka.pattern.internal.BackoffOnStopSupervisor.this")
|
||||
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.pattern.BackoffOnFailureOptionsImpl.unapply")
|
||||
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.pattern.BackoffOnStopOptionsImpl.unapply")
|
||||
ProblemFilters.exclude[IncompatibleSignatureProblem]("akka.pattern.BackoffOnFailureOptionsImpl.unapply")
|
||||
|
|
@ -73,7 +73,7 @@ akka {
|
|||
#
|
||||
# Should not be set by end user applications in 'application.conf', use the extensions property for that
|
||||
#
|
||||
library-extensions = ${?akka.library-extensions} ["akka.serialization.SerializationExtension"]
|
||||
library-extensions = ${?akka.library-extensions} ["akka.serialization.SerializationExtension$"]
|
||||
|
||||
# List FQCN of extensions which shall be loaded at actor system startup.
|
||||
# Should be on the format: 'extensions = ["foo", "bar"]' etc.
|
||||
|
|
|
|||
|
|
@ -4,9 +4,10 @@
|
|||
|
||||
package akka.compat
|
||||
|
||||
import akka.annotation.InternalApi
|
||||
import scala.concurrent.{ ExecutionContext, Future => SFuture }
|
||||
import scala.collection.immutable
|
||||
import scala.concurrent.{ ExecutionContext, Future => SFuture }
|
||||
|
||||
import akka.annotation.InternalApi
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
|
|
|
|||
|
|
@ -4,10 +4,10 @@
|
|||
|
||||
package akka.dispatch.internal
|
||||
|
||||
import akka.annotation.InternalApi
|
||||
|
||||
import scala.concurrent.ExecutionContext
|
||||
|
||||
import akka.annotation.InternalApi
|
||||
|
||||
/**
|
||||
* Factory to create same thread ec. Not intended to be called from any other site than to create [[akka.dispatch.ExecutionContexts#parasitic]]
|
||||
*
|
||||
|
|
|
|||
|
|
@ -4,8 +4,6 @@
|
|||
|
||||
package akka.util
|
||||
|
||||
import akka.util.Collections.EmptyImmutableSeq
|
||||
|
||||
import java.nio.{ ByteBuffer, ByteOrder }
|
||||
|
||||
import scala.annotation.tailrec
|
||||
|
|
@ -14,6 +12,8 @@ import scala.collection.LinearSeq
|
|||
import scala.collection.mutable.ListBuffer
|
||||
import scala.reflect.ClassTag
|
||||
|
||||
import akka.util.Collections.EmptyImmutableSeq
|
||||
|
||||
object ByteIterator {
|
||||
object ByteArrayIterator {
|
||||
|
||||
|
|
|
|||
|
|
@ -5,16 +5,17 @@
|
|||
package akka.util
|
||||
|
||||
import java.io.{ ObjectInputStream, ObjectOutputStream }
|
||||
import java.nio.{ ByteBuffer, ByteOrder }
|
||||
import java.lang.{ Iterable => JIterable }
|
||||
import java.nio.{ ByteBuffer, ByteOrder }
|
||||
import java.nio.charset.{ Charset, StandardCharsets }
|
||||
import java.util.Base64
|
||||
|
||||
import scala.annotation.{ tailrec, varargs }
|
||||
import scala.collection.mutable.{ Builder, WrappedArray }
|
||||
import scala.collection.{ immutable, mutable }
|
||||
import scala.collection.immutable.{ IndexedSeq, IndexedSeqOps, StrictOptimizedSeqOps, VectorBuilder }
|
||||
import scala.collection.mutable.{ Builder, WrappedArray }
|
||||
import scala.reflect.ClassTag
|
||||
|
||||
import com.github.ghik.silencer.silent
|
||||
|
||||
object ByteString {
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ import akka.dispatch.{ Envelope, MessageDispatcher }
|
|||
import akka.dispatch.sysmsg._
|
||||
import akka.event.Logging.{ Debug, Error, LogEvent }
|
||||
import akka.japi.Procedure
|
||||
import akka.util.{ unused, Reflect }
|
||||
import akka.util.unused
|
||||
|
||||
/**
|
||||
* The actor context - the view of the actor cell from the actor.
|
||||
|
|
@ -410,7 +410,7 @@ private[akka] object ActorCell {
|
|||
private[akka] class ActorCell(
|
||||
val system: ActorSystemImpl,
|
||||
val self: InternalActorRef,
|
||||
final val props: Props, // Must be final so that it can be properly cleared in clearActorCellFields
|
||||
_initialProps: Props,
|
||||
val dispatcher: MessageDispatcher,
|
||||
val parent: InternalActorRef)
|
||||
extends AbstractActor.ActorContext
|
||||
|
|
@ -421,6 +421,9 @@ private[akka] class ActorCell(
|
|||
with dungeon.DeathWatch
|
||||
with dungeon.FaultHandling {
|
||||
|
||||
private[this] var _props = _initialProps
|
||||
def props: Props = _props
|
||||
|
||||
import ActorCell._
|
||||
|
||||
final def isLocal = true
|
||||
|
|
@ -435,7 +438,6 @@ private[akka] class ActorCell(
|
|||
protected def uid: Int = self.path.uid
|
||||
private[this] var _actor: Actor = _
|
||||
def actor: Actor = _actor
|
||||
protected def actor_=(a: Actor): Unit = _actor = a
|
||||
var currentMessage: Envelope = _
|
||||
private var behaviorStack: List[Actor.Receive] = emptyBehaviorStack
|
||||
private[this] var sysmsgStash: LatestFirstSystemMessageList = SystemMessageList.LNil
|
||||
|
|
@ -615,6 +617,7 @@ private[akka] class ActorCell(
|
|||
|
||||
// If no becomes were issued, the actors behavior is its receive method
|
||||
behaviorStack = if (behaviorStack.isEmpty) instance.receive :: behaviorStack else behaviorStack
|
||||
_actor = instance
|
||||
instance
|
||||
} finally {
|
||||
val stackAfter = contextStack.get
|
||||
|
|
@ -624,29 +627,28 @@ private[akka] class ActorCell(
|
|||
}
|
||||
|
||||
protected def create(failure: Option[ActorInitializationException]): Unit = {
|
||||
def clearOutActorIfNonNull(): Unit = {
|
||||
if (actor != null) {
|
||||
def failActor(): Unit =
|
||||
if (_actor != null) {
|
||||
clearActorFields(actor, recreate = false)
|
||||
actor = null // ensure that we know that we failed during creation
|
||||
setFailedFatally()
|
||||
_actor = null // ensure that we know that we failed during creation
|
||||
}
|
||||
}
|
||||
|
||||
failure.foreach { throw _ }
|
||||
|
||||
try {
|
||||
val created = newActor()
|
||||
actor = created
|
||||
created.aroundPreStart()
|
||||
checkReceiveTimeout(reschedule = true)
|
||||
if (system.settings.DebugLifecycle)
|
||||
publish(Debug(self.path.toString, clazz(created), "started (" + created + ")"))
|
||||
} catch {
|
||||
case e: InterruptedException =>
|
||||
clearOutActorIfNonNull()
|
||||
failActor()
|
||||
Thread.currentThread().interrupt()
|
||||
throw ActorInitializationException(self, "interruption during creation", e)
|
||||
case NonFatal(e) =>
|
||||
clearOutActorIfNonNull()
|
||||
failActor()
|
||||
e match {
|
||||
case i: InstantiationException =>
|
||||
throw ActorInitializationException(
|
||||
|
|
@ -684,25 +686,17 @@ private[akka] class ActorCell(
|
|||
case _ =>
|
||||
}
|
||||
|
||||
final protected def clearActorCellFields(cell: ActorCell): Unit = {
|
||||
cell.unstashAll()
|
||||
if (!Reflect.lookupAndSetField(classOf[ActorCell], cell, "props", ActorCell.terminatedProps))
|
||||
throw new IllegalArgumentException("ActorCell has no props field")
|
||||
}
|
||||
|
||||
@InternalStableApi
|
||||
@silent("never used")
|
||||
final protected def clearActorFields(actorInstance: Actor, recreate: Boolean): Unit = {
|
||||
setActorFields(actorInstance, context = null, self = if (recreate) self else system.deadLetters)
|
||||
currentMessage = null
|
||||
behaviorStack = emptyBehaviorStack
|
||||
}
|
||||
|
||||
final protected def setActorFields(actorInstance: Actor, context: ActorContext, self: ActorRef): Unit =
|
||||
if (actorInstance ne null) {
|
||||
if (!Reflect.lookupAndSetField(actorInstance.getClass, actorInstance, "context", context)
|
||||
|| !Reflect.lookupAndSetField(actorInstance.getClass, actorInstance, "self", self))
|
||||
throw IllegalActorStateException(
|
||||
s"${actorInstance.getClass} is not an Actor class. It doesn't extend the 'Actor' trait")
|
||||
}
|
||||
final protected def clearFieldsForTermination(): Unit = {
|
||||
unstashAll()
|
||||
_props = ActorCell.terminatedProps
|
||||
_actor = null
|
||||
}
|
||||
|
||||
// logging is not the main purpose, and if it fails there’s nothing we can do
|
||||
protected final def publish(e: LogEvent): Unit =
|
||||
|
|
|
|||
|
|
@ -809,6 +809,15 @@ private[akka] class VirtualPathContainer(
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] object FunctionRef {
|
||||
def deadLetterMessageHandler(system: ActorSystem): (ActorRef, Any) => Unit = { (sender, msg) =>
|
||||
system.deadLetters.tell(msg, sender)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*
|
||||
|
|
@ -826,17 +835,20 @@ private[akka] class VirtualPathContainer(
|
|||
* [[FunctionRef#unwatch]] must be called to avoid a resource leak, which is different
|
||||
* from an ordinary actor.
|
||||
*/
|
||||
private[akka] final class FunctionRef(
|
||||
@InternalApi private[akka] final class FunctionRef(
|
||||
override val path: ActorPath,
|
||||
override val provider: ActorRefProvider,
|
||||
system: ActorSystem,
|
||||
f: (ActorRef, Any) => Unit)
|
||||
extends MinimalActorRef {
|
||||
|
||||
// var because it's replaced in `stop`
|
||||
private var messageHandler: (ActorRef, Any) => Unit = f
|
||||
|
||||
override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = {
|
||||
message match {
|
||||
case AddressTerminated(address) => addressTerminated(address)
|
||||
case _ => f(sender, message)
|
||||
case _ => messageHandler(sender, message)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -922,7 +934,13 @@ private[akka] final class FunctionRef(
 }
 }
 
-override def stop(): Unit = sendTerminated()
+override def stop(): Unit = {
+sendTerminated()
+// The messageHandler function may close over a large object graph (such as an Akka Stream)
+// so we replace the messageHandler function to make that available for garbage collection.
+// Doesn't matter if the change isn't visible immediately, volatile not needed.
+messageHandler = FunctionRef.deadLetterMessageHandler(system)
+}
 
 private def addWatcher(watchee: ActorRef, watcher: ActorRef): Unit = {
 val selfTerminated = this.synchronized {
|
|
|
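Editor's note: the comment in the hunk above is about letting a captured object graph become garbage once the ref is stopped. A tiny illustrative sketch of the same pattern, deliberately not tied to Akka's actual classes:

```scala
// Sketch: a stoppable callback holder that drops its (possibly large) closure
// on stop so whatever it captured becomes unreachable and can be collected.
final class StoppableHandler[A](initial: A => Unit) {
  // Plain var: as in the comment above, strict visibility is not required here,
  // a message handled by the old function right around stop() is acceptable.
  private var handler: A => Unit = initial

  def handle(a: A): Unit = handler(a)

  def stop(): Unit = handler = (_: A) => () // discard messages after stop
}
```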
|||
|
|
@ -1178,9 +1178,11 @@ private[akka] class ActorSystemImpl(
|
|||
* when the extension cannot be found at all we throw regardless of this setting)
|
||||
*/
|
||||
def loadExtensions(key: String, throwOnLoadFail: Boolean): Unit = {
|
||||
|
||||
immutableSeq(settings.config.getStringList(key)).foreach { fqcn =>
|
||||
dynamicAccess.getObjectFor[AnyRef](fqcn).recoverWith {
|
||||
case _ => dynamicAccess.createInstanceFor[AnyRef](fqcn, Nil)
|
||||
case firstProblem =>
|
||||
dynamicAccess.createInstanceFor[AnyRef](fqcn, Nil).recoverWith { case _ => Failure(firstProblem) }
|
||||
} match {
|
||||
case Success(p: ExtensionIdProvider) =>
|
||||
registerExtension(p.lookup())
|
||||
|
|
|
|||
|
|
@ -120,13 +120,13 @@ final class Deploy(
|
|||
new Deploy(path, config, routerConfig, scope, dispatcher, mailbox, tags)
|
||||
|
||||
override def productElement(n: Int): Any = n match {
|
||||
case 1 => path
|
||||
case 2 => config
|
||||
case 3 => routerConfig
|
||||
case 4 => scope
|
||||
case 5 => dispatcher
|
||||
case 6 => mailbox
|
||||
case 7 => tags
|
||||
case 0 => path
|
||||
case 1 => config
|
||||
case 2 => routerConfig
|
||||
case 3 => scope
|
||||
case 4 => dispatcher
|
||||
case 5 => mailbox
|
||||
case 6 => tags
|
||||
}
|
||||
|
||||
override def productArity: Int = 7
|
||||
|
|
|
|||
|
|
@ -9,10 +9,9 @@ import java.util.Optional
|
|||
import scala.annotation.tailrec
|
||||
import scala.collection.immutable
|
||||
import scala.util.control.NonFatal
|
||||
|
||||
import com.github.ghik.silencer.silent
|
||||
|
||||
import akka.actor._
|
||||
import akka.annotation.InternalStableApi
|
||||
import akka.serialization.{ Serialization, SerializationExtension, Serializers }
|
||||
import akka.util.{ Helpers, Unsafe }
|
||||
|
||||
|
|
@ -182,6 +181,7 @@ private[akka] trait Children { this: ActorCell =>
|
|||
case _ => null
|
||||
}
|
||||
|
||||
@InternalStableApi
|
||||
protected def suspendChildren(exceptFor: Set[ActorRef] = Set.empty): Unit =
|
||||
childrenRefs.stats.foreach {
|
||||
case ChildRestartStats(child, _, _) if !(exceptFor contains child) =>
|
||||
|
|
|
|||
|
|
@ -63,7 +63,15 @@ private[akka] trait DeathWatch { this: ActorCell =>
|
|||
protected def receivedTerminated(t: Terminated): Unit =
|
||||
terminatedQueued.get(t.actor).foreach { optionalMessage =>
|
||||
terminatedQueued -= t.actor // here we know that it is the SAME ref which was put in
|
||||
receiveMessage(optionalMessage.getOrElse(t))
|
||||
optionalMessage match {
|
||||
case Some(customTermination) =>
|
||||
// needed for stashing of custom watch messages to work (or stash will stash the Terminated message instead)
|
||||
currentMessage = currentMessage.copy(message = customTermination)
|
||||
receiveMessage(customTermination)
|
||||
|
||||
case None =>
|
||||
receiveMessage(t)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -4,22 +4,35 @@
|
|||
|
||||
package akka.actor.dungeon
|
||||
|
||||
import scala.collection.immutable
|
||||
import scala.concurrent.duration.Duration
|
||||
import scala.util.control.Exception._
|
||||
import scala.util.control.NonFatal
|
||||
|
||||
import akka.actor.{ Actor, ActorCell, ActorInterruptedException, ActorRef, InternalActorRef }
|
||||
import akka.actor.{ ActorCell, ActorInterruptedException, ActorRef, InternalActorRef }
|
||||
import akka.actor.ActorRefScope
|
||||
import akka.actor.PostRestartException
|
||||
import akka.actor.PreRestartException
|
||||
import akka.annotation.InternalApi
|
||||
import akka.annotation.InternalStableApi
|
||||
import akka.dispatch._
|
||||
import akka.dispatch.sysmsg._
|
||||
import akka.event.Logging
|
||||
import akka.event.Logging.Debug
|
||||
import akka.event.Logging.Error
|
||||
|
||||
import scala.collection.immutable
|
||||
import scala.concurrent.duration.Duration
|
||||
import scala.util.control.Exception._
|
||||
import scala.util.control.NonFatal
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] object FaultHandling {
|
||||
sealed trait FailedInfo
|
||||
private case object NoFailedInfo extends FailedInfo
|
||||
private final case class FailedRef(ref: ActorRef) extends FailedInfo
|
||||
private case object FailedFatally extends FailedInfo
|
||||
}
|
||||
|
||||
private[akka] trait FaultHandling { this: ActorCell =>
|
||||
import FaultHandling._
|
||||
|
||||
/* =================
|
||||
* T H E R U L E S
|
||||
|
|
@@ -44,11 +57,22 @@ private[akka] trait FaultHandling { this: ActorCell =>
 * a restart with dying children)
 * might well be replaced by ref to a Cancellable in the future (see #2299)
 */
-private var _failed: ActorRef = null
-private def isFailed: Boolean = _failed != null
-private def setFailed(perpetrator: ActorRef): Unit = _failed = perpetrator
-private def clearFailed(): Unit = _failed = null
-private def perpetrator: ActorRef = _failed
+private var _failed: FailedInfo = NoFailedInfo
+private def isFailed: Boolean = _failed.isInstanceOf[FailedRef]
+private def isFailedFatally: Boolean = _failed eq FailedFatally
+private def perpetrator: ActorRef = _failed match {
+case FailedRef(ref) => ref
+case _ => null
+}
+private def setFailed(perpetrator: ActorRef): Unit = _failed = _failed match {
+case FailedFatally => FailedFatally
+case _ => FailedRef(perpetrator)
+}
+private def clearFailed(): Unit = _failed = _failed match {
+case FailedRef(_) => NoFailedInfo
+case other => other
+}
+protected def setFailedFatally(): Unit = _failed = FailedFatally
 
 /**
 * Do re-create the actor in response to a failure.
|
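Editor's note: the hunk above replaces a nullable `ActorRef` plus an implicit "failed fatally" convention with a small sealed ADT. A minimal, self-contained sketch of the same modelling idea (the names are illustrative, not the Akka internals):

```scala
// Sketch: encode a tri-state failure flag as a sealed ADT instead of a nullable
// reference, so every reader is forced by the compiler to handle all cases.
object FailureStateSketch {
  sealed trait FailedInfo
  case object NotFailed extends FailedInfo
  final case class FailedBy(perpetrator: String) extends FailedInfo
  case object FailedFatally extends FailedInfo

  def describe(state: FailedInfo): String = state match {
    case NotFailed             => "running"
    case FailedBy(perpetrator) => s"failed, caused by $perpetrator"
    case FailedFatally         => "failed fatally, cannot be restarted in place"
  }

  def main(args: Array[String]): Unit =
    List(NotFailed, FailedBy("child-1"), FailedFatally).map(describe).foreach(println)
}
```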
|
@ -65,7 +89,7 @@ private[akka] trait FaultHandling { this: ActorCell =>
|
|||
val optionalMessage = if (currentMessage ne null) Some(currentMessage.message) else None
|
||||
try {
|
||||
// if the actor fails in preRestart, we can do nothing but log it: it’s best-effort
|
||||
if (failedActor.context ne null) failedActor.aroundPreRestart(cause, optionalMessage)
|
||||
if (!isFailedFatally) failedActor.aroundPreRestart(cause, optionalMessage)
|
||||
} catch handleNonFatalOrInterruptedException { e =>
|
||||
val ex = PreRestartException(self, e, cause, optionalMessage)
|
||||
publish(Error(ex, self.path.toString, clazz(failedActor), e.getMessage))
|
||||
|
|
@ -74,7 +98,7 @@ private[akka] trait FaultHandling { this: ActorCell =>
|
|||
}
|
||||
}
|
||||
assert(mailbox.isSuspended, "mailbox must be suspended during restart, status=" + mailbox.currentStatus)
|
||||
if (!setChildrenTerminationReason(ChildrenContainer.Recreation(cause))) finishRecreate(cause, failedActor)
|
||||
if (!setChildrenTerminationReason(ChildrenContainer.Recreation(cause))) finishRecreate(cause)
|
||||
} else {
|
||||
// need to keep that suspend counter balanced
|
||||
faultResume(causedByFailure = null)
|
||||
|
|
@ -101,7 +125,7 @@ private[akka] trait FaultHandling { this: ActorCell =>
|
|||
system.eventStream.publish(
|
||||
Error(self.path.toString, clazz(actor), "changing Resume into Create after " + causedByFailure))
|
||||
faultCreate()
|
||||
} else if (actor.context == null && causedByFailure != null) {
|
||||
} else if (isFailedFatally && causedByFailure != null) {
|
||||
system.eventStream.publish(
|
||||
Error(self.path.toString, clazz(actor), "changing Resume into Restart after " + causedByFailure))
|
||||
faultRecreate(causedByFailure)
|
||||
|
|
@ -174,6 +198,7 @@ private[akka] trait FaultHandling { this: ActorCell =>
|
|||
}
|
||||
}
|
||||
|
||||
@InternalStableApi
|
||||
final def handleInvokeFailure(childrenNotToSuspend: immutable.Iterable[ActorRef], t: Throwable): Unit = {
|
||||
// prevent any further messages to be processed until the actor has been restarted
|
||||
if (!isFailed) try {
|
||||
|
|
@ -226,12 +251,11 @@ private[akka] trait FaultHandling { this: ActorCell =>
|
|||
publish(Debug(self.path.toString, clazz(a), "stopped"))
|
||||
|
||||
clearActorFields(a, recreate = false)
|
||||
clearActorCellFields(this)
|
||||
actor = null
|
||||
clearFieldsForTermination()
|
||||
}
|
||||
}
|
||||
|
||||
private def finishRecreate(cause: Throwable, failedActor: Actor): Unit = {
|
||||
private def finishRecreate(cause: Throwable): Unit = {
|
||||
// need to keep a snapshot of the surviving children before the new actor instance creates new ones
|
||||
val survivors = children
|
||||
|
||||
|
|
@ -240,8 +264,6 @@ private[akka] trait FaultHandling { this: ActorCell =>
|
|||
finally clearFailed() // must happen in any case, so that failure is propagated
|
||||
|
||||
val freshActor = newActor()
|
||||
actor = freshActor // this must happen before postRestart has a chance to fail
|
||||
if (freshActor eq failedActor) setActorFields(freshActor, this, self) // If the creator returns the same instance, we need to restore our nulled out fields.
|
||||
|
||||
freshActor.aroundPostRestart(cause)
|
||||
checkReceiveTimeout(reschedule = true) // user may have set a receive timeout in preStart which is called from postRestart
|
||||
|
|
@ -255,6 +277,7 @@ private[akka] trait FaultHandling { this: ActorCell =>
|
|||
publish(Error(e, self.path.toString, clazz(freshActor), "restarting " + child))
|
||||
})
|
||||
} catch handleNonFatalOrInterruptedException { e =>
|
||||
setFailedFatally()
|
||||
clearActorFields(actor, recreate = false) // in order to prevent preRestart() from happening again
|
||||
handleInvokeFailure(survivors, PostRestartException(self, e, cause))
|
||||
}
|
||||
|
|
@ -301,7 +324,7 @@ private[akka] trait FaultHandling { this: ActorCell =>
|
|||
* then we are continuing the previously suspended recreate/create/terminate action
|
||||
*/
|
||||
status match {
|
||||
case Some(ChildrenContainer.Recreation(cause)) => finishRecreate(cause, actor)
|
||||
case Some(ChildrenContainer.Recreation(cause)) => finishRecreate(cause)
|
||||
case Some(ChildrenContainer.Creation()) => finishCreate()
|
||||
case Some(ChildrenContainer.Termination) => finishTerminate()
|
||||
case _ =>
|
||||
|
|
|
|||
|
|
@@ -78,15 +78,16 @@ object ExecutionContexts {
 def global(): ExecutionContextExecutor = ExecutionContext.global
 
 /**
-* INTERNAL API
 *
 * WARNING: Not A General Purpose ExecutionContext!
 *
 * This is an execution context which runs everything on the calling thread.
 * It is very useful for actions which are known to be non-blocking and
 * non-throwing in order to save a round-trip to the thread pool.
 *
+* INTERNAL API
+* Once Scala 2.12 is no longer supported this can be dropped in favour of directly using `ExecutionContext.parasitic`
 */
-// Once Scala 2.12 is no longer supported this can be dropped in favour of directly using [[ExecutionContext.parasitic]]
 @InternalStableApi
 private[akka] val parasitic: ExecutionContext = SameThreadExecutionContext()
 
|
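Editor's note: as the comment above explains, a calling-thread execution context only makes sense for cheap, non-blocking, non-throwing callbacks. A usage sketch in plain Scala 2.13, using the public `ExecutionContext.parasitic` rather than the internal Akka value:

```scala
import scala.concurrent.{ ExecutionContext, Future }

object ParasiticEcExample extends App {
  // A future that is already completed (or will be completed elsewhere).
  val answer: Future[Int] = Future.successful(21)

  // The mapping function is trivial and cannot block, so running it on the
  // calling/completing thread avoids a round-trip to the thread pool.
  val doubled: Future[Int] = answer.map(_ * 2)(ExecutionContext.parasitic)

  doubled.foreach(println)(ExecutionContext.parasitic)
}
```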
|
|
|||
|
|
@ -22,9 +22,6 @@ import akka.event.Logging
|
|||
import akka.util.{ ImmutableIntMap, ReentrantGuard }
|
||||
import akka.util.Helpers.Requiring
|
||||
|
||||
import scala.annotation.{ switch, tailrec }
|
||||
import scala.collection.{ immutable, mutable }
|
||||
|
||||
@InternalApi
|
||||
@ApiMayChange
|
||||
private[affinity] object AffinityPool {
|
||||
|
|
|
|||
|
|
@ -165,7 +165,6 @@ object DnsSettings {
|
|||
|
||||
def getNameserversUsingJNDI: Try[List[InetSocketAddress]] = {
|
||||
import java.util
|
||||
|
||||
import javax.naming.Context
|
||||
import javax.naming.directory.InitialDirContext
|
||||
// Using jndi-dns to obtain the default name servers.
|
||||
|
|
|
|||
|
|
@ -14,10 +14,11 @@ import scala.util.{ Failure, Success }
|
|||
import com.github.ghik.silencer.silent
|
||||
|
||||
import akka.actor._
|
||||
import akka.annotation.InternalApi
|
||||
import akka.annotation.{ InternalApi, InternalStableApi }
|
||||
import akka.dispatch.ExecutionContexts
|
||||
import akka.dispatch.sysmsg._
|
||||
import akka.util.{ Timeout, Unsafe }
|
||||
import akka.util.unused
|
||||
|
||||
/**
|
||||
* This is what is used to complete a Future that is returned from an ask/? call,
|
||||
|
|
@ -339,9 +340,8 @@ final class AskableActorRef(val actorRef: ActorRef) extends AnyVal {
|
|||
if (timeout.duration.length <= 0)
|
||||
Future.failed[Any](AskableActorRef.negativeTimeoutException(actorRef, message, sender))
|
||||
else {
|
||||
val a = PromiseActorRef(ref.provider, timeout, targetName = actorRef, message.getClass.getName, sender)
|
||||
actorRef.tell(message, a)
|
||||
a.result.future
|
||||
PromiseActorRef(ref.provider, timeout, targetName = actorRef, message.getClass.getName, sender)
|
||||
.ask(actorRef, message, timeout)
|
||||
}
|
||||
case _ => Future.failed[Any](AskableActorRef.unsupportedRecipientType(actorRef, message, sender))
|
||||
}
|
||||
|
|
@ -376,8 +376,7 @@ final class ExplicitlyAskableActorRef(val actorRef: ActorRef) extends AnyVal {
|
|||
val a = PromiseActorRef(ref.provider, timeout, targetName = actorRef, "unknown", sender)
|
||||
val message = messageFactory(a)
|
||||
a.messageClassName = message.getClass.getName
|
||||
actorRef.tell(message, a)
|
||||
a.result.future
|
||||
a.ask(actorRef, message, timeout)
|
||||
}
|
||||
case _ if sender eq null =>
|
||||
Future.failed[Any](
|
||||
|
|
@ -423,9 +422,8 @@ final class AskableActorSelection(val actorSel: ActorSelection) extends AnyVal {
|
|||
if (timeout.duration.length <= 0)
|
||||
Future.failed[Any](AskableActorRef.negativeTimeoutException(actorSel, message, sender))
|
||||
else {
|
||||
val a = PromiseActorRef(ref.provider, timeout, targetName = actorSel, message.getClass.getName, sender)
|
||||
actorSel.tell(message, a)
|
||||
a.result.future
|
||||
PromiseActorRef(ref.provider, timeout, targetName = actorSel, message.getClass.getName, sender)
|
||||
.ask(actorSel, message, timeout)
|
||||
}
|
||||
case _ => Future.failed[Any](AskableActorRef.unsupportedRecipientType(actorSel, message, sender))
|
||||
}
|
||||
|
|
@ -455,8 +453,7 @@ final class ExplicitlyAskableActorSelection(val actorSel: ActorSelection) extend
|
|||
val a = PromiseActorRef(ref.provider, timeout, targetName = actorSel, "unknown", sender)
|
||||
val message = messageFactory(a)
|
||||
a.messageClassName = message.getClass.getName
|
||||
actorSel.tell(message, a)
|
||||
a.result.future
|
||||
a.ask(actorSel, message, timeout)
|
||||
}
|
||||
case _ if sender eq null =>
|
||||
Future.failed[Any](
|
||||
|
|
@ -573,7 +570,9 @@ private[akka] final class PromiseActorRef private (
|
|||
}
|
||||
|
||||
override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = state match {
|
||||
case Stopped | _: StoppedWithPath => provider.deadLetters ! message
|
||||
case Stopped | _: StoppedWithPath =>
|
||||
provider.deadLetters ! message
|
||||
onComplete(message, alreadyCompleted = true)
|
||||
case _ =>
|
||||
if (message == null) throw InvalidMessageException("Message is null")
|
||||
val promiseResult = message match {
|
||||
|
|
@ -581,8 +580,10 @@ private[akka] final class PromiseActorRef private (
|
|||
case Status.Failure(f) => Failure(f)
|
||||
case other => Success(other)
|
||||
}
|
||||
if (!result.tryComplete(promiseResult))
|
||||
val alreadyCompleted = !result.tryComplete(promiseResult)
|
||||
if (alreadyCompleted)
|
||||
provider.deadLetters ! message
|
||||
onComplete(message, alreadyCompleted)
|
||||
}
|
||||
|
||||
override def sendSystemMessage(message: SystemMessage): Unit = message match {
|
||||
|
|
@ -632,6 +633,24 @@ private[akka] final class PromiseActorRef private (
|
|||
case Registering => stop() // spin until registration is completed before stopping
|
||||
}
|
||||
}
|
||||
|
||||
@InternalStableApi
|
||||
private[akka] def ask(actorSel: ActorSelection, message: Any, @unused timeout: Timeout): Future[Any] = {
|
||||
actorSel.tell(message, this)
|
||||
result.future
|
||||
}
|
||||
|
||||
@InternalStableApi
|
||||
private[akka] def ask(actorRef: ActorRef, message: Any, @unused timeout: Timeout): Future[Any] = {
|
||||
actorRef.tell(message, this)
|
||||
result.future
|
||||
}
|
||||
|
||||
@InternalStableApi
|
||||
private[akka] def onComplete(@unused message: Any, @unused alreadyCompleted: Boolean): Unit = {}
|
||||
|
||||
@InternalStableApi
|
||||
private[akka] def onTimeout(@unused timeout: Timeout): Unit = {}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -658,7 +677,7 @@ private[akka] object PromiseActorRef {
|
|||
val a = new PromiseActorRef(provider, result, messageClassName)
|
||||
implicit val ec = ExecutionContexts.parasitic
|
||||
val f = scheduler.scheduleOnce(timeout.duration) {
|
||||
result.tryComplete {
|
||||
val timedOut = result.tryComplete {
|
||||
val wasSentBy = if (sender == ActorRef.noSender) "" else s" was sent by [$sender]"
|
||||
val messagePart = s"Message of type [${a.messageClassName}]$wasSentBy."
|
||||
Failure(
|
||||
|
|
@ -667,6 +686,9 @@ private[akka] object PromiseActorRef {
|
|||
messagePart +
|
||||
" A typical reason for `AskTimeoutException` is that the recipient actor didn't send a reply."))
|
||||
}
|
||||
if (timedOut) {
|
||||
a.onTimeout(timeout)
|
||||
}
|
||||
}
|
||||
result.future.onComplete { _ =>
|
||||
try a.stop()
|
||||
|
|
|
|||
|
|
@ -617,7 +617,7 @@ private final case class BackoffOptionsImpl(
|
|||
backoffReset,
|
||||
randomFactor,
|
||||
supervisorStrategy,
|
||||
replyWhileStopped))
|
||||
replyWhileStopped.map(msg => ReplyWith(msg)).getOrElse(ForwardDeathLetters)))
|
||||
//onStop method in companion object
|
||||
case StopImpliesFailure =>
|
||||
Props(
|
||||
|
|
@ -629,7 +629,7 @@ private final case class BackoffOptionsImpl(
|
|||
backoffReset,
|
||||
randomFactor,
|
||||
supervisorStrategy,
|
||||
replyWhileStopped,
|
||||
replyWhileStopped.map(msg => ReplyWith(msg)).getOrElse(ForwardDeathLetters),
|
||||
finalStopMessage))
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,9 +5,8 @@
|
|||
package akka.pattern
|
||||
|
||||
import scala.concurrent.duration.{ Duration, FiniteDuration }
|
||||
|
||||
import akka.actor.{ OneForOneStrategy, Props, SupervisorStrategy }
|
||||
import akka.annotation.DoNotInherit
|
||||
import akka.actor.{ ActorRef, OneForOneStrategy, Props, SupervisorStrategy }
|
||||
import akka.annotation.{ DoNotInherit, InternalApi }
|
||||
import akka.pattern.internal.{ BackoffOnRestartSupervisor, BackoffOnStopSupervisor }
|
||||
import akka.util.JavaDurationConverters._
|
||||
|
||||
|
|
@@ -299,6 +298,15 @@ private[akka] sealed trait ExtendedBackoffOptions[T <: ExtendedBackoffOptions[T]
 */
 def withReplyWhileStopped(replyWhileStopped: Any): T
 
+/**
+* Returns a new BackoffOptions with a custom handler for messages that the supervisor receives while its child is stopped.
+* By default, a message received while the child is stopped is forwarded to `deadLetters`.
+* Essentially, this handler replaces `deadLetters`, allowing custom handling instead of a static reply.
+*
+* @param handler the `ActorRef` to which messages are forwarded (keeping the original sender) while the child is stopped
+*/
+def withHandlerWhileStopped(handler: ActorRef): T
+
 /**
 * Returns the props to create the back-off supervisor.
 */
|
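Editor's note: a sketch of how the option added above might be wired up when building a back-off supervisor. `childProps` and `auditRef` are placeholders; `BackoffOpts` and `BackoffSupervisor` are the existing public entry points, `withHandlerWhileStopped` is the option introduced in this change.

```scala
import scala.concurrent.duration._
import akka.actor.{ ActorRef, Props }
import akka.pattern.{ BackoffOpts, BackoffSupervisor }

object HandlerWhileStoppedExample {
  // While the child is stopped (backing off), incoming messages are forwarded to
  // `auditRef` instead of going to deadLetters or getting a fixed reply.
  def supervisorProps(childProps: Props, auditRef: ActorRef): Props =
    BackoffSupervisor.props(
      BackoffOpts
        .onStop(childProps, "worker", 3.seconds, 30.seconds, 0.2) // childName, min/max backoff, randomFactor
        .withHandlerWhileStopped(auditRef))
}
```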
|
@ -334,7 +342,7 @@ private final case class BackoffOnStopOptionsImpl[T](
|
|||
randomFactor: Double,
|
||||
reset: Option[BackoffReset] = None,
|
||||
supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider),
|
||||
replyWhileStopped: Option[Any] = None,
|
||||
handlingWhileStopped: HandlingWhileStopped = ForwardDeathLetters,
|
||||
finalStopMessage: Option[Any => Boolean] = None)
|
||||
extends BackoffOnStopOptions {
|
||||
|
||||
|
|
@ -344,7 +352,9 @@ private final case class BackoffOnStopOptionsImpl[T](
|
|||
def withAutoReset(resetBackoff: FiniteDuration) = copy(reset = Some(AutoReset(resetBackoff)))
|
||||
def withManualReset = copy(reset = Some(ManualReset))
|
||||
def withSupervisorStrategy(supervisorStrategy: OneForOneStrategy) = copy(supervisorStrategy = supervisorStrategy)
|
||||
def withReplyWhileStopped(replyWhileStopped: Any) = copy(replyWhileStopped = Some(replyWhileStopped))
|
||||
def withReplyWhileStopped(replyWhileStopped: Any) = copy(handlingWhileStopped = ReplyWith(replyWhileStopped))
|
||||
def withHandlerWhileStopped(handlerWhileStopped: ActorRef) =
|
||||
copy(handlingWhileStopped = ForwardTo(handlerWhileStopped))
|
||||
def withMaxNrOfRetries(maxNrOfRetries: Int) =
|
||||
copy(supervisorStrategy = supervisorStrategy.withMaxNrOfRetries(maxNrOfRetries))
|
||||
|
||||
|
|
@ -374,7 +384,7 @@ private final case class BackoffOnStopOptionsImpl[T](
|
|||
backoffReset,
|
||||
randomFactor,
|
||||
supervisorStrategy,
|
||||
replyWhileStopped,
|
||||
handlingWhileStopped,
|
||||
finalStopMessage))
|
||||
}
|
||||
}
|
||||
|
|
@ -387,7 +397,7 @@ private final case class BackoffOnFailureOptionsImpl[T](
|
|||
randomFactor: Double,
|
||||
reset: Option[BackoffReset] = None,
|
||||
supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider),
|
||||
replyWhileStopped: Option[Any] = None)
|
||||
handlingWhileStopped: HandlingWhileStopped = ForwardDeathLetters)
|
||||
extends BackoffOnFailureOptions {
|
||||
|
||||
private val backoffReset = reset.getOrElse(AutoReset(minBackoff))
|
||||
|
|
@ -396,7 +406,9 @@ private final case class BackoffOnFailureOptionsImpl[T](
|
|||
def withAutoReset(resetBackoff: FiniteDuration) = copy(reset = Some(AutoReset(resetBackoff)))
|
||||
def withManualReset = copy(reset = Some(ManualReset))
|
||||
def withSupervisorStrategy(supervisorStrategy: OneForOneStrategy) = copy(supervisorStrategy = supervisorStrategy)
|
||||
def withReplyWhileStopped(replyWhileStopped: Any) = copy(replyWhileStopped = Some(replyWhileStopped))
|
||||
def withReplyWhileStopped(replyWhileStopped: Any) = copy(handlingWhileStopped = ReplyWith(replyWhileStopped))
|
||||
def withHandlerWhileStopped(handlerWhileStopped: ActorRef) =
|
||||
copy(handlingWhileStopped = ForwardTo(handlerWhileStopped))
|
||||
def withMaxNrOfRetries(maxNrOfRetries: Int) =
|
||||
copy(supervisorStrategy = supervisorStrategy.withMaxNrOfRetries(maxNrOfRetries))
|
||||
|
||||
|
|
@ -419,10 +431,17 @@ private final case class BackoffOnFailureOptionsImpl[T](
|
|||
backoffReset,
|
||||
randomFactor,
|
||||
supervisorStrategy,
|
||||
replyWhileStopped))
|
||||
handlingWhileStopped))
|
||||
}
|
||||
}
|
||||
|
||||
@InternalApi
|
||||
private[akka] sealed trait BackoffReset
|
||||
private[akka] case object ManualReset extends BackoffReset
|
||||
private[akka] final case class AutoReset(resetBackoff: FiniteDuration) extends BackoffReset
|
||||
|
||||
@InternalApi
|
||||
private[akka] sealed trait HandlingWhileStopped
|
||||
private[akka] case object ForwardDeathLetters extends HandlingWhileStopped
|
||||
private[akka] case class ForwardTo(handler: ActorRef) extends HandlingWhileStopped
|
||||
private[akka] case class ReplyWith(msg: Any) extends HandlingWhileStopped
|
||||
|
|
|
|||
|
|
@ -184,7 +184,7 @@ object BackoffSupervisor {
|
|||
AutoReset(minBackoff),
|
||||
randomFactor,
|
||||
strategy,
|
||||
None,
|
||||
ForwardDeathLetters,
|
||||
None))
|
||||
}
|
||||
|
||||
|
|
@ -341,7 +341,7 @@ final class BackoffSupervisor @deprecated("Use `BackoffSupervisor.props` method
|
|||
reset,
|
||||
randomFactor,
|
||||
strategy,
|
||||
replyWhileStopped,
|
||||
replyWhileStopped.map(msg => ReplyWith(msg)).getOrElse(ForwardDeathLetters),
|
||||
finalStopMessage) {
|
||||
|
||||
// for binary compatibility with 2.5.18
|
||||
|
|
|
|||
|
|
@ -153,39 +153,44 @@ object RetrySupport extends RetrySupport {
|
|||
maxAttempts: Int,
|
||||
delayFunction: Int => Option[FiniteDuration],
|
||||
attempted: Int)(implicit ec: ExecutionContext, scheduler: Scheduler): Future[T] = {
|
||||
try {
|
||||
require(maxAttempts >= 0, "Parameter maxAttempts must >= 0.")
|
||||
require(attempt != null, "Parameter attempt should not be null.")
|
||||
if (maxAttempts - attempted > 0) {
|
||||
val result = attempt()
|
||||
if (result eq null)
|
||||
result
|
||||
else {
|
||||
val nextAttempt = attempted + 1
|
||||
result.recoverWith {
|
||||
case NonFatal(_) =>
|
||||
delayFunction(nextAttempt) match {
|
||||
case Some(delay) =>
|
||||
if (delay.length < 1)
|
||||
retry(attempt, maxAttempts, delayFunction, nextAttempt)
|
||||
else
|
||||
after(delay, scheduler) {
|
||||
retry(attempt, maxAttempts, delayFunction, nextAttempt)
|
||||
}
|
||||
case None =>
|
||||
retry(attempt, maxAttempts, delayFunction, nextAttempt)
|
||||
case _ =>
|
||||
Future.failed(new IllegalArgumentException("The delayFunction of retry should not return null."))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
def tryAttempt(): Future[T] = {
|
||||
try {
|
||||
attempt()
|
||||
} catch {
|
||||
case NonFatal(exc) => Future.failed(exc) // in case the `attempt` function throws
|
||||
}
|
||||
} catch {
|
||||
case NonFatal(error) => Future.failed(error)
|
||||
}
|
||||
|
||||
require(maxAttempts >= 0, "Parameter maxAttempts must >= 0.")
|
||||
require(attempt != null, "Parameter attempt should not be null.")
|
||||
if (maxAttempts - attempted > 0) {
|
||||
val result = tryAttempt()
|
||||
if (result eq null)
|
||||
result
|
||||
else {
|
||||
val nextAttempt = attempted + 1
|
||||
result.recoverWith {
|
||||
case NonFatal(_) =>
|
||||
delayFunction(nextAttempt) match {
|
||||
case Some(delay) =>
|
||||
if (delay.length < 1)
|
||||
retry(attempt, maxAttempts, delayFunction, nextAttempt)
|
||||
else
|
||||
after(delay, scheduler) {
|
||||
retry(attempt, maxAttempts, delayFunction, nextAttempt)
|
||||
}
|
||||
case None =>
|
||||
retry(attempt, maxAttempts, delayFunction, nextAttempt)
|
||||
case _ =>
|
||||
Future.failed(new IllegalArgumentException("The delayFunction of retry should not return null."))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
tryAttempt()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
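Editor's note: the restructuring above wraps the user-supplied `attempt` function so that a synchronous throw becomes a failed `Future` instead of escaping `retry`. A hedged usage sketch of the public entry point; the flaky operation is a stand-in:

```scala
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.actor.{ ActorSystem, Scheduler }
import akka.pattern.RetrySupport

object RetryExample extends App {
  implicit val system: ActorSystem = ActorSystem("retry-example")
  import system.dispatcher
  implicit val scheduler: Scheduler = system.scheduler

  // Placeholder for an operation that sometimes fails, or even throws synchronously;
  // with the change above a synchronous throw is reported as a failed Future.
  def flakyCall(): Future[String] =
    if (math.random() < 0.5) throw new RuntimeException("boom")
    else Future.successful("ok")

  // Up to 5 attempts with a fixed 200 ms delay between them.
  RetrySupport
    .retry(() => flakyCall(), 5, 200.millis)
    .andThen { case result => println(result); system.terminate() }
}
```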
|
|
@ -4,12 +4,20 @@
|
|||
|
||||
package akka.pattern.internal
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.{ OneForOneStrategy, _ }
|
||||
import akka.actor.SupervisorStrategy._
|
||||
import akka.actor.{ OneForOneStrategy, _ }
|
||||
import akka.annotation.InternalApi
|
||||
import akka.pattern.{ BackoffReset, BackoffSupervisor, HandleBackoff }
|
||||
import akka.pattern.{
|
||||
BackoffReset,
|
||||
BackoffSupervisor,
|
||||
ForwardDeathLetters,
|
||||
ForwardTo,
|
||||
HandleBackoff,
|
||||
HandlingWhileStopped,
|
||||
ReplyWith
|
||||
}
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
|
|
@ -26,7 +34,7 @@ import akka.pattern.{ BackoffReset, BackoffSupervisor, HandleBackoff }
|
|||
val reset: BackoffReset,
|
||||
randomFactor: Double,
|
||||
strategy: OneForOneStrategy,
|
||||
replyWhileStopped: Option[Any])
|
||||
handlingWhileStopped: HandlingWhileStopped)
|
||||
extends Actor
|
||||
with HandleBackoff
|
||||
with ActorLogging {
|
||||
|
|
@ -34,7 +42,7 @@ import akka.pattern.{ BackoffReset, BackoffSupervisor, HandleBackoff }
|
|||
import BackoffSupervisor._
|
||||
import context._
|
||||
|
||||
override val supervisorStrategy =
|
||||
override val supervisorStrategy: OneForOneStrategy =
|
||||
OneForOneStrategy(strategy.maxNrOfRetries, strategy.withinTimeRange, strategy.loggingEnabled) {
|
||||
case ex =>
|
||||
val defaultDirective: Directive =
|
||||
|
|
@ -94,9 +102,10 @@ import akka.pattern.{ BackoffReset, BackoffSupervisor, HandleBackoff }
|
|||
case Some(c) =>
|
||||
c.forward(msg)
|
||||
case None =>
|
||||
replyWhileStopped match {
|
||||
case None => context.system.deadLetters.forward(msg)
|
||||
case Some(r) => sender() ! r
|
||||
handlingWhileStopped match {
|
||||
case ForwardDeathLetters => context.system.deadLetters.forward(msg)
|
||||
case ForwardTo(h) => h.forward(msg)
|
||||
case ReplyWith(r) => sender() ! r
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,12 +4,20 @@
|
|||
|
||||
package akka.pattern.internal
|
||||
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
|
||||
import akka.actor.{ Actor, ActorLogging, OneForOneStrategy, Props, SupervisorStrategy, Terminated }
|
||||
import akka.actor.SupervisorStrategy.{ Directive, Escalate }
|
||||
import akka.actor.{ Actor, ActorLogging, OneForOneStrategy, Props, SupervisorStrategy, Terminated }
|
||||
import akka.annotation.InternalApi
|
||||
import akka.pattern.{ BackoffReset, BackoffSupervisor, HandleBackoff }
|
||||
import akka.pattern.{
|
||||
BackoffReset,
|
||||
BackoffSupervisor,
|
||||
ForwardDeathLetters,
|
||||
ForwardTo,
|
||||
HandleBackoff,
|
||||
HandlingWhileStopped,
|
||||
ReplyWith
|
||||
}
|
||||
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
|
|
@ -26,7 +34,7 @@ import akka.pattern.{ BackoffReset, BackoffSupervisor, HandleBackoff }
|
|||
val reset: BackoffReset,
|
||||
randomFactor: Double,
|
||||
strategy: SupervisorStrategy,
|
||||
replyWhileStopped: Option[Any],
|
||||
handlingWhileStopped: HandlingWhileStopped,
|
||||
finalStopMessage: Option[Any => Boolean])
|
||||
extends Actor
|
||||
with HandleBackoff
|
||||
|
|
@ -35,7 +43,7 @@ import akka.pattern.{ BackoffReset, BackoffSupervisor, HandleBackoff }
|
|||
import BackoffSupervisor._
|
||||
import context.dispatcher
|
||||
|
||||
override val supervisorStrategy = strategy match {
|
||||
override val supervisorStrategy: SupervisorStrategy = strategy match {
|
||||
case oneForOne: OneForOneStrategy =>
|
||||
OneForOneStrategy(oneForOne.maxNrOfRetries, oneForOne.withinTimeRange, oneForOne.loggingEnabled) {
|
||||
case ex =>
|
||||
|
|
@ -84,13 +92,14 @@ import akka.pattern.{ BackoffReset, BackoffSupervisor, HandleBackoff }
|
|||
case None =>
|
||||
}
|
||||
case None =>
|
||||
replyWhileStopped match {
|
||||
case Some(r) => sender() ! r
|
||||
case None => context.system.deadLetters.forward(msg)
|
||||
}
|
||||
finalStopMessage match {
|
||||
case Some(fsm) if fsm(msg) => context.stop(self)
|
||||
case _ =>
|
||||
case _ =>
|
||||
handlingWhileStopped match {
|
||||
case ForwardDeathLetters => context.system.deadLetters.forward(msg)
|
||||
case ForwardTo(h) => h.forward(msg)
|
||||
case ReplyWith(r) => sender() ! r
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -503,8 +503,21 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
|
|||
/**
|
||||
* Maps from a Serializer Identity (Int) to a Serializer instance (optimization)
|
||||
*/
|
||||
val serializerByIdentity: Map[Int, Serializer] =
|
||||
Map(NullSerializer.identifier -> NullSerializer) ++ serializers.map { case (_, v) => (v.identifier, v) }
|
||||
val serializerByIdentity: Map[Int, Serializer] = {
|
||||
val zero: Map[Int, Serializer] = Map(NullSerializer.identifier -> NullSerializer)
|
||||
serializers.foldLeft(zero) {
|
||||
case (acc, (_, ser)) =>
|
||||
val id = ser.identifier
|
||||
acc.get(id) match {
|
||||
case Some(existing) if existing != ser =>
|
||||
throw new IllegalArgumentException(
|
||||
s"Serializer identifier [$id] of [${ser.getClass.getName}] " +
|
||||
s"is not unique. It is also used by [${acc(id).getClass.getName}].")
|
||||
case _ =>
|
||||
acc.updated(id, ser)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializers with id 0 - 1023 are stored in an array for quick allocation free access
|
||||
|
|
|
|||
|
|
@ -7,9 +7,12 @@ import java.time.{ Duration => JDuration }
|
|||
|
||||
import scala.concurrent.duration.{ Duration, FiniteDuration }
|
||||
|
||||
import akka.annotation.InternalStableApi
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalStableApi
|
||||
private[akka] object JavaDurationConverters {
|
||||
def asFiniteDuration(duration: JDuration): FiniteDuration = duration.asScala
|
||||
|
||||
|
|
|
|||
|
|
@ -7,6 +7,8 @@ import java.lang.reflect.Constructor
|
|||
import java.lang.reflect.ParameterizedType
|
||||
import java.lang.reflect.Type
|
||||
|
||||
import akka.annotation.InternalApi
|
||||
|
||||
import scala.annotation.tailrec
|
||||
import scala.collection.immutable
|
||||
import scala.util.Try
|
||||
|
|
@ -18,6 +20,7 @@ import scala.util.control.NonFatal
|
|||
*
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi
|
||||
private[akka] object Reflect {
|
||||
|
||||
/**
|
||||
|
|
@ -138,33 +141,6 @@ private[akka] object Reflect {
|
|||
rec(root)
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
* Set a val inside a class.
|
||||
*/
|
||||
@tailrec protected[akka] final def lookupAndSetField(
|
||||
clazz: Class[_],
|
||||
instance: AnyRef,
|
||||
name: String,
|
||||
value: Any): Boolean = {
|
||||
@tailrec def clearFirst(fields: Array[java.lang.reflect.Field], idx: Int): Boolean =
|
||||
if (idx < fields.length) {
|
||||
val field = fields(idx)
|
||||
if (field.getName == name) {
|
||||
field.setAccessible(true)
|
||||
field.set(instance, value)
|
||||
true
|
||||
} else clearFirst(fields, idx + 1)
|
||||
} else false
|
||||
|
||||
clearFirst(clazz.getDeclaredFields, 0) || {
|
||||
clazz.getSuperclass match {
|
||||
case null => false // clazz == classOf[AnyRef]
|
||||
case sc => lookupAndSetField(sc, instance, name, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -190,6 +190,10 @@ class JacksonSerializationBench {
|
|||
@Param(Array("jackson-json", "jackson-cbor")) // "java"
|
||||
private var serializerName: String = _
|
||||
|
||||
@silent("immutable val")
|
||||
@Param(Array("off", "gzip", "lz4"))
|
||||
private var compression: String = _
|
||||
|
||||
@Setup(Level.Trial)
|
||||
def setupTrial(): Unit = {
|
||||
val config = ConfigFactory.parseString(s"""
|
||||
|
|
@ -208,7 +212,7 @@ class JacksonSerializationBench {
|
|||
}
|
||||
}
|
||||
akka.serialization.jackson.jackson-json.compression {
|
||||
algorithm = off
|
||||
algorithm = $compression
|
||||
compress-larger-than = 100 b
|
||||
}
|
||||
""")
|
||||
|
|
@ -222,10 +226,18 @@ class JacksonSerializationBench {
|
|||
Await.result(system.terminate(), 5.seconds)
|
||||
}
|
||||
|
||||
private var size = 0L
|
||||
|
||||
private def serializeDeserialize[T <: AnyRef](msg: T): T = {
|
||||
serialization.findSerializerFor(msg) match {
|
||||
case serializer: SerializerWithStringManifest =>
|
||||
val blob = serializer.toBinary(msg)
|
||||
if (size != blob.length) {
|
||||
size = blob.length
|
||||
println(
|
||||
s"# Size is $size of ${msg.getClass.getName} with " +
|
||||
s"${system.settings.config.getString("akka.serialization.jackson.jackson-json.compression.algorithm")}")
|
||||
}
|
||||
serializer.fromBinary(blob, serializer.manifest(msg)).asInstanceOf[T]
|
||||
case serializer =>
|
||||
val blob = serializer.toBinary(msg)
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ import akka.actor.typed.internal.PoisonPillInterceptor
|
|||
import akka.actor.typed.internal.adapter.ActorRefAdapter
|
||||
import akka.actor.typed.internal.adapter.ActorSystemAdapter
|
||||
import akka.actor.typed.scaladsl.Behaviors
|
||||
import akka.annotation.InternalApi
|
||||
import akka.annotation.{ InternalApi, InternalStableApi }
|
||||
import akka.cluster.ClusterSettings.DataCenter
|
||||
import akka.cluster.sharding.ShardCoordinator.LeastShardAllocationStrategy
|
||||
import akka.cluster.sharding.ShardCoordinator.ShardAllocationStrategy
|
||||
|
|
@ -40,9 +40,8 @@ import akka.event.LoggingAdapter
|
|||
import akka.japi.function.{ Function => JFunction }
|
||||
import akka.pattern.AskTimeoutException
|
||||
import akka.pattern.PromiseActorRef
|
||||
import akka.util.ByteString
|
||||
import akka.util.{ unused, ByteString, Timeout }
|
||||
import akka.util.JavaDurationConverters._
|
||||
import akka.util.Timeout
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
|
|
@ -311,8 +310,7 @@ import akka.util.Timeout
|
|||
val replyTo = new EntityPromiseRef[U](shardRegion.asInstanceOf[InternalActorRef], timeout)
|
||||
val m = message(replyTo.ref)
|
||||
if (replyTo.promiseRef ne null) replyTo.promiseRef.messageClassName = m.getClass.getName
|
||||
shardRegion ! ShardingEnvelope(entityId, m)
|
||||
replyTo.future
|
||||
replyTo.ask(shardRegion, entityId, m, timeout)
|
||||
}
|
||||
|
||||
def ask[U](message: JFunction[ActorRef[U], M], timeout: Duration): CompletionStage[U] =
|
||||
|
|
@ -349,6 +347,16 @@ import akka.util.Timeout
|
|||
val ref: ActorRef[U] = _ref
|
||||
val future: Future[U] = _future
|
||||
val promiseRef: PromiseActorRef = _promiseRef
|
||||
|
||||
@InternalStableApi
|
||||
private[akka] def ask[T](
|
||||
shardRegion: akka.actor.ActorRef,
|
||||
entityId: String,
|
||||
message: T,
|
||||
@unused timeout: Timeout): Future[U] = {
|
||||
shardRegion ! ShardingEnvelope(entityId, message)
|
||||
future
|
||||
}
|
||||
}
|
||||
|
||||
// impl InternalRecipientRef
|
||||
|
|
|
|||
|
|
@ -37,6 +37,7 @@ import akka.remote.testkit.MultiNodeConfig
|
|||
import akka.remote.testkit.MultiNodeSpec
|
||||
import akka.remote.testkit.PerfFlamesSupport
|
||||
import akka.serialization.jackson.CborSerializable
|
||||
import akka.actor.typed.scaladsl.LoggerOps
|
||||
|
||||
object DeliveryThroughputSpec extends MultiNodeConfig {
|
||||
val first = role("first")
|
||||
|
|
@ -232,6 +233,7 @@ object DeliveryThroughputSpec extends MultiNodeConfig {
|
|||
|
||||
case object Run extends Command
|
||||
private case class WrappedRequestNext(r: ShardingProducerController.RequestNext[Consumer.Command]) extends Command
|
||||
private case object PrintStatus extends Command
|
||||
|
||||
def apply(
|
||||
producerController: ActorRef[ShardingProducerController.Command[Consumer.Command]],
|
||||
|
|
@ -240,35 +242,54 @@ object DeliveryThroughputSpec extends MultiNodeConfig {
|
|||
resultReporter: BenchmarkFileReporter): Behavior[Command] = {
|
||||
val numberOfMessages = testSettings.totalMessages
|
||||
|
||||
Behaviors.setup { context =>
|
||||
val requestNextAdapter =
|
||||
context.messageAdapter[ShardingProducerController.RequestNext[Consumer.Command]](WrappedRequestNext(_))
|
||||
var startTime = System.nanoTime()
|
||||
var remaining = numberOfMessages + context.system.settings.config
|
||||
.getInt("akka.reliable-delivery.sharding.consumer-controller.flow-control-window")
|
||||
Behaviors.withTimers { timers =>
|
||||
Behaviors.setup { context =>
|
||||
timers.startTimerWithFixedDelay(PrintStatus, 1.second)
|
||||
val requestNextAdapter =
|
||||
context.messageAdapter[ShardingProducerController.RequestNext[Consumer.Command]](WrappedRequestNext(_))
|
||||
var startTime = System.nanoTime()
|
||||
var remaining = numberOfMessages + context.system.settings.config
|
||||
.getInt("akka.reliable-delivery.sharding.consumer-controller.flow-control-window")
|
||||
var latestDemand: ShardingProducerController.RequestNext[Consumer.Command] = null
|
||||
var messagesSentToEachEntity: Map[String, Long] = Map.empty[String, Long].withDefaultValue(0L)
|
||||
|
||||
Behaviors.receiveMessage {
|
||||
case WrappedRequestNext(next) =>
|
||||
remaining -= 1
|
||||
if (remaining == 0) {
|
||||
context.log.info("Completed {} messages", numberOfMessages)
|
||||
Producer.reportEnd(startTime, testSettings, plotRef, resultReporter)
|
||||
Behaviors.stopped
|
||||
} else {
|
||||
val entityId = (remaining % testSettings.numberOfConsumers).toString
|
||||
if (next.entitiesWithDemand(entityId) || !next.bufferedForEntitiesWithoutDemand.contains(entityId))
|
||||
next.sendNextTo ! ShardingEnvelope(entityId, Consumer.TheMessage)
|
||||
Behaviors.receiveMessage {
|
||||
case WrappedRequestNext(next) =>
|
||||
latestDemand = next
|
||||
remaining -= 1
|
||||
if (remaining == 0) {
|
||||
context.log.info("Completed {} messages", numberOfMessages)
|
||||
Producer.reportEnd(startTime, testSettings, plotRef, resultReporter)
|
||||
Behaviors.stopped
|
||||
} else {
|
||||
val entityId = (remaining % testSettings.numberOfConsumers).toString
|
||||
if (next.entitiesWithDemand(entityId) || !next.bufferedForEntitiesWithoutDemand.contains(entityId)) {
|
||||
messagesSentToEachEntity =
|
||||
messagesSentToEachEntity.updated(entityId, messagesSentToEachEntity(entityId) + 1L)
|
||||
|
||||
next.sendNextTo ! ShardingEnvelope(entityId, Consumer.TheMessage)
|
||||
}
|
||||
Behaviors.same
|
||||
}
|
||||
case Run =>
|
||||
context.log.info("Starting {} messages", numberOfMessages)
|
||||
startTime = System.nanoTime()
|
||||
producerController ! ShardingProducerController.Start(requestNextAdapter)
|
||||
Behaviors.same
|
||||
}
|
||||
|
||||
case Run =>
|
||||
context.log.info("Starting {} messages", numberOfMessages)
|
||||
startTime = System.nanoTime()
|
||||
producerController ! ShardingProducerController.Start(requestNextAdapter)
|
||||
Behaviors.same
|
||||
case PrintStatus =>
|
||||
context.log.infoN(
|
||||
"Remaining {}. Latest demand {}. Messages sent {}. Expecting demand from {}",
|
||||
remaining,
|
||||
latestDemand,
|
||||
messagesSentToEachEntity,
|
||||
(remaining % testSettings.numberOfConsumers))
|
||||
Behaviors.same
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
final case class TestSettings(testName: String, totalMessages: Long, numberOfConsumers: Int)
|
||||
|
|
|
|||
|
|
@ -38,10 +38,10 @@ public interface AccountExampleWithEventHandlersInState {
|
|||
|
||||
// Command
|
||||
// #reply-command
|
||||
interface Command<Reply> extends CborSerializable {}
|
||||
interface Command extends CborSerializable {}
|
||||
// #reply-command
|
||||
|
||||
public static class CreateAccount implements Command<OperationResult> {
|
||||
public static class CreateAccount implements Command {
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
@JsonCreator
|
||||
|
|
@ -50,7 +50,7 @@ public interface AccountExampleWithEventHandlersInState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class Deposit implements Command<OperationResult> {
|
||||
public static class Deposit implements Command {
|
||||
public final BigDecimal amount;
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
|
|
@ -60,7 +60,7 @@ public interface AccountExampleWithEventHandlersInState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class Withdraw implements Command<OperationResult> {
|
||||
public static class Withdraw implements Command {
|
||||
public final BigDecimal amount;
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
|
|
@ -70,7 +70,7 @@ public interface AccountExampleWithEventHandlersInState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class GetBalance implements Command<CurrentBalance> {
|
||||
public static class GetBalance implements Command {
|
||||
public final ActorRef<CurrentBalance> replyTo;
|
||||
|
||||
@JsonCreator
|
||||
|
|
@ -79,7 +79,7 @@ public interface AccountExampleWithEventHandlersInState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class CloseAccount implements Command<OperationResult> {
|
||||
public static class CloseAccount implements Command {
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
@JsonCreator
|
||||
|
|
|
|||
|
|
@ -35,9 +35,9 @@ public interface AccountExampleWithMutableState {
|
|||
EntityTypeKey.create(Command.class, "Account");
|
||||
|
||||
// Command
|
||||
interface Command<Reply> extends CborSerializable {}
|
||||
interface Command extends CborSerializable {}
|
||||
|
||||
public static class CreateAccount implements Command<OperationResult> {
|
||||
public static class CreateAccount implements Command {
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
@JsonCreator
|
||||
|
|
@ -46,7 +46,7 @@ public interface AccountExampleWithMutableState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class Deposit implements Command<OperationResult> {
|
||||
public static class Deposit implements Command {
|
||||
public final BigDecimal amount;
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
|
|
@ -56,7 +56,7 @@ public interface AccountExampleWithMutableState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class Withdraw implements Command<OperationResult> {
|
||||
public static class Withdraw implements Command {
|
||||
public final BigDecimal amount;
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
|
|
@ -66,7 +66,7 @@ public interface AccountExampleWithMutableState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class GetBalance implements Command<CurrentBalance> {
|
||||
public static class GetBalance implements Command {
|
||||
public final ActorRef<CurrentBalance> replyTo;
|
||||
|
||||
@JsonCreator
|
||||
|
|
@ -75,7 +75,7 @@ public interface AccountExampleWithMutableState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class CloseAccount implements Command<OperationResult> {
|
||||
public static class CloseAccount implements Command {
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
@JsonCreator
|
||||
|
|
|
|||
|
|
@ -35,9 +35,9 @@ public interface AccountExampleWithNullState {
|
|||
EntityTypeKey.create(Command.class, "Account");
|
||||
|
||||
// Command
|
||||
interface Command<Reply> extends CborSerializable {}
|
||||
interface Command extends CborSerializable {}
|
||||
|
||||
public static class CreateAccount implements Command<OperationResult> {
|
||||
public static class CreateAccount implements Command {
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
@JsonCreator
|
||||
|
|
@ -46,7 +46,7 @@ public interface AccountExampleWithNullState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class Deposit implements Command<OperationResult> {
|
||||
public static class Deposit implements Command {
|
||||
public final BigDecimal amount;
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
|
|
@ -56,7 +56,7 @@ public interface AccountExampleWithNullState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class Withdraw implements Command<OperationResult> {
|
||||
public static class Withdraw implements Command {
|
||||
public final BigDecimal amount;
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
|
|
@ -66,7 +66,7 @@ public interface AccountExampleWithNullState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class GetBalance implements Command<CurrentBalance> {
|
||||
public static class GetBalance implements Command {
|
||||
public final ActorRef<CurrentBalance> replyTo;
|
||||
|
||||
@JsonCreator
|
||||
|
|
@ -75,7 +75,7 @@ public interface AccountExampleWithNullState {
|
|||
}
|
||||
}
|
||||
|
||||
public static class CloseAccount implements Command<OperationResult> {
|
||||
public static class CloseAccount implements Command {
|
||||
public final ActorRef<OperationResult> replyTo;
|
||||
|
||||
@JsonCreator
|
||||
|
|
|
|||
|
|
@@ -26,7 +26,7 @@ class AccountExampleDocSpec
     with LogCapturing {

   private val eventSourcedTestKit =
-    EventSourcedBehaviorTestKit[AccountEntity.Command[_], AccountEntity.Event, AccountEntity.Account](
+    EventSourcedBehaviorTestKit[AccountEntity.Command, AccountEntity.Event, AccountEntity.Account](
       system,
       AccountEntity("1", PersistenceId("Account", "1")))

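Not part of this diff: for orientation, the fragment below sketches how a spec like this typically drives the testkit once the Reply type parameter is gone from Command. It assumes the `eventSourcedTestKit` value from the hunk above, ScalaTest matchers from the surrounding spec, and the AccountEntity commands, events and replies that appear later in this commit.

    // hypothetical test fragment, relying on the spec's existing setup
    val created =
      eventSourcedTestKit.runCommand[AccountEntity.OperationResult](AccountEntity.CreateAccount(_))
    created.reply shouldBe AccountEntity.Confirmed
    created.event shouldBe AccountEntity.AccountCreated

    val deposited =
      eventSourcedTestKit.runCommand[AccountEntity.OperationResult](AccountEntity.Deposit(100, _))
    deposited.reply shouldBe AccountEntity.Confirmed
    deposited.event shouldBe AccountEntity.Deposited(100)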
@@ -79,10 +79,8 @@ class AccountExampleSpec
     }

     "reject Withdraw overdraft" in {
-      // AccountCommand[_] is the command type, but it should also be possible to narrow it to
-      // AccountCommand[OperationResult]
       val probe = createTestProbe[OperationResult]()
-      val ref = ClusterSharding(system).entityRefFor[Command[OperationResult]](AccountEntity.TypeKey, "3")
+      val ref = ClusterSharding(system).entityRefFor[Command](AccountEntity.TypeKey, "3")
       ref ! CreateAccount(probe.ref)
       probe.expectMessage(Confirmed)
       ref ! Deposit(100, probe.ref)
@@ -90,10 +88,12 @@ class AccountExampleSpec
       ref ! Withdraw(110, probe.ref)
       probe.expectMessageType[Rejected]

+      // Account.Command is the command type, but it should also be possible to narrow it
+      // ... thus restricting the entity ref from being sent other commands, e.g.:
       // val ref2 = ClusterSharding(system).entityRefFor[Deposit](AccountEntity.TypeKey, "3")
       // val probe2 = createTestProbe[CurrentBalance]()
       // val msg = GetBalance(probe2.ref)
-      // ref ! msg // type mismatch: GetBalance NOT =:= AccountCommand[OperationResult]
+      // ref2 ! msg // type mismatch: GetBalance NOT =:= Deposit
     }

     "handle GetBalance" in {
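Not part of this diff: with the Reply type no longer carried by `Command` itself, the reply type is still recovered per command from its `replyTo` field, so the ask pattern keeps working. A hedged sketch, assuming the spec's `system`, the `AccountEntity` protocol from this commit, and an illustrative 3-second timeout:

    import scala.concurrent.Future
    import scala.concurrent.duration._
    import akka.util.Timeout
    import akka.cluster.sharding.typed.scaladsl.ClusterSharding

    implicit val timeout: Timeout = Timeout(3.seconds)
    val entity = ClusterSharding(system).entityRefFor(AccountEntity.TypeKey, "4")
    // the reply type is inferred from GetBalance's replyTo: ActorRef[CurrentBalance]
    val balance: Future[AccountEntity.CurrentBalance] =
      entity.ask(replyTo => AccountEntity.GetBalance(replyTo))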
@ -24,14 +24,12 @@ object AccountExampleWithCommandHandlersInState {
|
|||
//#account-entity
|
||||
object AccountEntity {
|
||||
// Command
|
||||
sealed trait Command[Reply <: CommandReply] extends CborSerializable {
|
||||
def replyTo: ActorRef[Reply]
|
||||
}
|
||||
final case class CreateAccount(replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
final case class Deposit(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
final case class Withdraw(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
final case class GetBalance(replyTo: ActorRef[CurrentBalance]) extends Command[CurrentBalance]
|
||||
final case class CloseAccount(replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
sealed trait Command extends CborSerializable
|
||||
final case class CreateAccount(replyTo: ActorRef[OperationResult]) extends Command
|
||||
final case class Deposit(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command
|
||||
final case class Withdraw(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command
|
||||
final case class GetBalance(replyTo: ActorRef[CurrentBalance]) extends Command
|
||||
final case class CloseAccount(replyTo: ActorRef[OperationResult]) extends Command
|
||||
|
||||
// Reply
|
||||
sealed trait CommandReply extends CborSerializable
|
||||
|
|
@ -54,11 +52,11 @@ object AccountExampleWithCommandHandlersInState {
|
|||
|
||||
// State
|
||||
sealed trait Account extends CborSerializable {
|
||||
def applyCommand(cmd: Command[_]): ReplyEffect
|
||||
def applyCommand(cmd: Command): ReplyEffect
|
||||
def applyEvent(event: Event): Account
|
||||
}
|
||||
case object EmptyAccount extends Account {
|
||||
override def applyCommand(cmd: Command[_]): ReplyEffect =
|
||||
override def applyCommand(cmd: Command): ReplyEffect =
|
||||
cmd match {
|
||||
case CreateAccount(replyTo) =>
|
||||
Effect.persist(AccountCreated).thenReply(replyTo)(_ => Confirmed)
|
||||
|
|
@ -76,7 +74,7 @@ object AccountExampleWithCommandHandlersInState {
|
|||
case class OpenedAccount(balance: BigDecimal) extends Account {
|
||||
require(balance >= Zero, "Account balance can't be negative")
|
||||
|
||||
override def applyCommand(cmd: Command[_]): ReplyEffect =
|
||||
override def applyCommand(cmd: Command): ReplyEffect =
|
||||
cmd match {
|
||||
case Deposit(amount, replyTo) =>
|
||||
Effect.persist(Deposited(amount)).thenReply(replyTo)(_ => Confirmed)
|
||||
|
|
@ -115,28 +113,33 @@ object AccountExampleWithCommandHandlersInState {
|
|||
|
||||
}
|
||||
case object ClosedAccount extends Account {
|
||||
override def applyCommand(cmd: Command[_]): ReplyEffect =
|
||||
override def applyCommand(cmd: Command): ReplyEffect =
|
||||
cmd match {
|
||||
case c @ (_: Deposit | _: Withdraw) =>
|
||||
Effect.reply(c.replyTo)(Rejected("Account is closed"))
|
||||
case c: Deposit =>
|
||||
replyClosed(c.replyTo)
|
||||
case c: Withdraw =>
|
||||
replyClosed(c.replyTo)
|
||||
case GetBalance(replyTo) =>
|
||||
Effect.reply(replyTo)(CurrentBalance(Zero))
|
||||
case CloseAccount(replyTo) =>
|
||||
Effect.reply(replyTo)(Rejected("Account is already closed"))
|
||||
replyClosed(replyTo)
|
||||
case CreateAccount(replyTo) =>
|
||||
Effect.reply(replyTo)(Rejected("Account is already created"))
|
||||
replyClosed(replyTo)
|
||||
}
|
||||
|
||||
private def replyClosed(replyTo: ActorRef[AccountEntity.OperationResult]): ReplyEffect =
|
||||
Effect.reply(replyTo)(Rejected(s"Account is closed"))
|
||||
|
||||
override def applyEvent(event: Event): Account =
|
||||
throw new IllegalStateException(s"unexpected event [$event] in state [ClosedAccount]")
|
||||
}
|
||||
|
||||
// when used with sharding, this TypeKey can be used in `sharding.init` and `sharding.entityRefFor`:
|
||||
val TypeKey: EntityTypeKey[Command[_]] =
|
||||
EntityTypeKey[Command[_]]("Account")
|
||||
val TypeKey: EntityTypeKey[Command] =
|
||||
EntityTypeKey[Command]("Account")
|
||||
|
||||
def apply(persistenceId: PersistenceId): Behavior[Command[_]] = {
|
||||
EventSourcedBehavior.withEnforcedReplies[Command[_], Event, Account](
|
||||
def apply(persistenceId: PersistenceId): Behavior[Command] = {
|
||||
EventSourcedBehavior.withEnforcedReplies[Command, Event, Account](
|
||||
persistenceId,
|
||||
EmptyAccount,
|
||||
(state, cmd) => state.applyCommand(cmd),
|
||||
|
|
|
|||
|
|
@ -27,17 +27,15 @@ object AccountExampleWithEventHandlersInState {
|
|||
object AccountEntity {
|
||||
// Command
|
||||
//#reply-command
|
||||
sealed trait Command[Reply <: CommandReply] extends CborSerializable {
|
||||
def replyTo: ActorRef[Reply]
|
||||
}
|
||||
sealed trait Command extends CborSerializable
|
||||
//#reply-command
|
||||
final case class CreateAccount(replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
final case class Deposit(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
final case class CreateAccount(replyTo: ActorRef[OperationResult]) extends Command
|
||||
final case class Deposit(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command
|
||||
//#reply-command
|
||||
final case class Withdraw(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
final case class Withdraw(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command
|
||||
//#reply-command
|
||||
final case class GetBalance(replyTo: ActorRef[CurrentBalance]) extends Command[CurrentBalance]
|
||||
final case class CloseAccount(replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
final case class GetBalance(replyTo: ActorRef[CurrentBalance]) extends Command
|
||||
final case class CloseAccount(replyTo: ActorRef[OperationResult]) extends Command
|
||||
|
||||
// Reply
|
||||
//#reply-command
|
||||
|
|
@ -89,20 +87,20 @@ object AccountExampleWithEventHandlersInState {
|
|||
}
|
||||
|
||||
// when used with sharding, this TypeKey can be used in `sharding.init` and `sharding.entityRefFor`:
|
||||
val TypeKey: EntityTypeKey[Command[_]] =
|
||||
EntityTypeKey[Command[_]]("Account")
|
||||
val TypeKey: EntityTypeKey[Command] =
|
||||
EntityTypeKey[Command]("Account")
|
||||
|
||||
// Note that after defining command, event and state classes you would probably start here when writing this.
|
||||
// When filling in the parameters of EventSourcedBehavior.apply you can use IntelliJ alt+Enter > createValue
|
||||
// to generate the stub with types for the command and event handlers.
|
||||
|
||||
//#withEnforcedReplies
|
||||
def apply(accountNumber: String, persistenceId: PersistenceId): Behavior[Command[_]] = {
|
||||
def apply(accountNumber: String, persistenceId: PersistenceId): Behavior[Command] = {
|
||||
EventSourcedBehavior.withEnforcedReplies(persistenceId, EmptyAccount, commandHandler(accountNumber), eventHandler)
|
||||
}
|
||||
//#withEnforcedReplies
|
||||
|
||||
private def commandHandler(accountNumber: String): (Account, Command[_]) => ReplyEffect[Event, Account] = {
|
||||
private def commandHandler(accountNumber: String): (Account, Command) => ReplyEffect[Event, Account] = {
|
||||
(state, cmd) =>
|
||||
state match {
|
||||
case EmptyAccount =>
|
||||
|
|
@ -122,18 +120,26 @@ object AccountExampleWithEventHandlersInState {
|
|||
|
||||
case ClosedAccount =>
|
||||
cmd match {
|
||||
case c @ (_: Deposit | _: Withdraw) =>
|
||||
Effect.reply(c.replyTo)(Rejected(s"Account $accountNumber is closed"))
|
||||
case c: Deposit =>
|
||||
replyClosed(accountNumber, c.replyTo)
|
||||
case c: Withdraw =>
|
||||
replyClosed(accountNumber, c.replyTo)
|
||||
case GetBalance(replyTo) =>
|
||||
Effect.reply(replyTo)(CurrentBalance(Zero))
|
||||
case CloseAccount(replyTo) =>
|
||||
Effect.reply(replyTo)(Rejected(s"Account $accountNumber is already closed"))
|
||||
replyClosed(accountNumber, replyTo)
|
||||
case CreateAccount(replyTo) =>
|
||||
Effect.reply(replyTo)(Rejected(s"Account $accountNumber is already closed"))
|
||||
replyClosed(accountNumber, replyTo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private def replyClosed(
|
||||
accountNumber: String,
|
||||
replyTo: ActorRef[AccountEntity.OperationResult]): ReplyEffect[Event, Account] = {
|
||||
Effect.reply(replyTo)(Rejected(s"Account $accountNumber is closed"))
|
||||
}
|
||||
|
||||
private val eventHandler: (Account, Event) => Account = { (state, event) =>
|
||||
state.applyEvent(event)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,14 +24,12 @@ object AccountExampleWithOptionState {
|
|||
//#account-entity
|
||||
object AccountEntity {
|
||||
// Command
|
||||
sealed trait Command[Reply <: CommandReply] extends CborSerializable {
|
||||
def replyTo: ActorRef[Reply]
|
||||
}
|
||||
final case class CreateAccount(replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
final case class Deposit(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
final case class Withdraw(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
final case class GetBalance(replyTo: ActorRef[CurrentBalance]) extends Command[CurrentBalance]
|
||||
final case class CloseAccount(replyTo: ActorRef[OperationResult]) extends Command[OperationResult]
|
||||
sealed trait Command extends CborSerializable
|
||||
final case class CreateAccount(replyTo: ActorRef[OperationResult]) extends Command
|
||||
final case class Deposit(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command
|
||||
final case class Withdraw(amount: BigDecimal, replyTo: ActorRef[OperationResult]) extends Command
|
||||
final case class GetBalance(replyTo: ActorRef[CurrentBalance]) extends Command
|
||||
final case class CloseAccount(replyTo: ActorRef[OperationResult]) extends Command
|
||||
|
||||
// Reply
|
||||
sealed trait CommandReply extends CborSerializable
|
||||
|
|
@ -54,13 +52,13 @@ object AccountExampleWithOptionState {
|
|||
|
||||
// State
|
||||
sealed trait Account extends CborSerializable {
|
||||
def applyCommand(cmd: Command[_]): ReplyEffect
|
||||
def applyCommand(cmd: Command): ReplyEffect
|
||||
def applyEvent(event: Event): Account
|
||||
}
|
||||
case class OpenedAccount(balance: BigDecimal) extends Account {
|
||||
require(balance >= Zero, "Account balance can't be negative")
|
||||
|
||||
override def applyCommand(cmd: Command[_]): ReplyEffect =
|
||||
override def applyCommand(cmd: Command): ReplyEffect =
|
||||
cmd match {
|
||||
case Deposit(amount, replyTo) =>
|
||||
Effect.persist(Deposited(amount)).thenReply(replyTo)(_ => Confirmed)
|
||||
|
|
@ -99,28 +97,33 @@ object AccountExampleWithOptionState {
|
|||
|
||||
}
|
||||
case object ClosedAccount extends Account {
|
||||
override def applyCommand(cmd: Command[_]): ReplyEffect =
|
||||
override def applyCommand(cmd: Command): ReplyEffect =
|
||||
cmd match {
|
||||
case c @ (_: Deposit | _: Withdraw) =>
|
||||
Effect.reply(c.replyTo)(Rejected("Account is closed"))
|
||||
case c: Deposit =>
|
||||
replyClosed(c.replyTo)
|
||||
case c: Withdraw =>
|
||||
replyClosed(c.replyTo)
|
||||
case GetBalance(replyTo) =>
|
||||
Effect.reply(replyTo)(CurrentBalance(Zero))
|
||||
case CloseAccount(replyTo) =>
|
||||
Effect.reply(replyTo)(Rejected("Account is already closed"))
|
||||
replyClosed(replyTo)
|
||||
case CreateAccount(replyTo) =>
|
||||
Effect.reply(replyTo)(Rejected("Account is already created"))
|
||||
replyClosed(replyTo)
|
||||
}
|
||||
|
||||
private def replyClosed(replyTo: ActorRef[AccountEntity.OperationResult]): ReplyEffect =
|
||||
Effect.reply(replyTo)(Rejected(s"Account is closed"))
|
||||
|
||||
override def applyEvent(event: Event): Account =
|
||||
throw new IllegalStateException(s"unexpected event [$event] in state [ClosedAccount]")
|
||||
}
|
||||
|
||||
// when used with sharding, this TypeKey can be used in `sharding.init` and `sharding.entityRefFor`:
|
||||
val TypeKey: EntityTypeKey[Command[_]] =
|
||||
EntityTypeKey[Command[_]]("Account")
|
||||
val TypeKey: EntityTypeKey[Command] =
|
||||
EntityTypeKey[Command]("Account")
|
||||
|
||||
def apply(persistenceId: PersistenceId): Behavior[Command[_]] = {
|
||||
EventSourcedBehavior.withEnforcedReplies[Command[_], Event, Option[Account]](
|
||||
def apply(persistenceId: PersistenceId): Behavior[Command] = {
|
||||
EventSourcedBehavior.withEnforcedReplies[Command, Event, Option[Account]](
|
||||
persistenceId,
|
||||
None,
|
||||
(state, cmd) =>
|
||||
|
|
@ -135,7 +138,7 @@ object AccountExampleWithOptionState {
|
|||
})
|
||||
}
|
||||
|
||||
def onFirstCommand(cmd: Command[_]): ReplyEffect = {
|
||||
def onFirstCommand(cmd: Command): ReplyEffect = {
|
||||
cmd match {
|
||||
case CreateAccount(replyTo) =>
|
||||
Effect.persist(AccountCreated).thenReply(replyTo)(_ => Confirmed)
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,4 @@
+# Add methods to trait not for user extension
+ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.sharding.external.javadsl.ExternalShardAllocationClient.setShardLocations")
+ProblemFilters.exclude[ReversedMissingMethodProblem]("akka.cluster.sharding.external.scaladsl.ExternalShardAllocationClient.updateShardLocations")
+
@@ -614,7 +614,7 @@ abstract class ShardCoordinator(
     case GetShardHome(shard) =>
       if (!handleGetShardHome(shard)) {
         // location not known, yet
-        val activeRegions = state.regions -- gracefulShutdownInProgress
+        val activeRegions = (state.regions -- gracefulShutdownInProgress) -- regionTerminationInProgress
         if (activeRegions.nonEmpty) {
           val getShardHomeSender = sender()
           val regionFuture = allocationStrategy.allocateShard(getShardHomeSender, shard, activeRegions)
@@ -923,7 +923,8 @@ abstract class ShardCoordinator(
       state.shards.get(shard) match {
         case Some(ref) => getShardHomeSender ! ShardHome(shard, ref)
         case None =>
-          if (state.regions.contains(region) && !gracefulShutdownInProgress.contains(region)) {
+          if (state.regions.contains(region) && !gracefulShutdownInProgress.contains(region) && !regionTerminationInProgress
+                .contains(region)) {
             update(ShardHomeAllocated(shard, region)) { evt =>
               state = state.updated(evt)
               log.debug(
@@ -38,6 +38,7 @@ import akka.pattern.ask
 import akka.util.JavaDurationConverters._
+import akka.util.PrettyDuration._
 import akka.util.Timeout
 import akka.util.ccompat.JavaConverters._

 /**
  * INTERNAL API
@@ -92,4 +93,21 @@ final private[external] class ExternalShardAllocationClientImpl(system: ActorSys
   }

   override def getShardLocations(): CompletionStage[ShardLocations] = shardLocations().toJava
+
+  override def updateShardLocations(locations: Map[ShardId, Address]): Future[Done] = {
+    log.debug("updateShardLocations {} for {}", locations, Key)
+    (replicator ? Update(Key, LWWMap.empty[ShardId, String], WriteLocal, None) { existing =>
+      locations.foldLeft(existing) {
+        case (acc, (shardId, address)) => acc.put(self, shardId, address.toString)
+      }
+    }).flatMap {
+      case UpdateSuccess(_, _) => Future.successful(Done)
+      case UpdateTimeout =>
+        Future.failed(new ClientTimeoutException(s"Unable to update shard location after ${timeout.duration.pretty}"))
+    }
+  }
+
+  override def setShardLocations(locations: java.util.Map[ShardId, Address]): CompletionStage[Done] = {
+    updateShardLocations(locations.asScala.toMap).toJava
+  }
 }
@@ -28,10 +28,21 @@ trait ExternalShardAllocationClient {
    *
    * @param shard The shard identifier
    * @param location Location (akka node) to allocate the shard to
-   * @return Confirmation that the update has been propagated to a majority of cluster nodes
+   * @return Confirmation that the update has been written to the local node
    */
   def setShardLocation(shard: ShardId, location: Address): CompletionStage[Done]

+  /**
+   * Update all of the provided ShardLocations.
+   * The [[Address]] should match one of the nodes in the cluster. If the node has not joined
+   * the cluster yet it will be moved to that node after the first cluster
+   * sharding rebalance it does.
+   *
+   * @param locations to update
+   * @return Confirmation that the update has been written to the local node
+   */
+  def setShardLocations(locations: java.util.Map[ShardId, Address]): CompletionStage[Done]
+
   /**
    * Get all the current shard locations that have been set via setShardLocation
    */
@@ -24,7 +24,7 @@ trait ExternalShardAllocationClient {
    * Update the given shard's location. The [[Address]] should
    * match one of the nodes in the cluster. If the node has not joined
    * the cluster yet it will be moved to that node after the first cluster
-   * sharding rebalance.
+   * sharding rebalance it does.
    *
    * @param shard The shard identifier
    * @param location Location (akka node) to allocate the shard to
@@ -32,6 +32,17 @@ trait ExternalShardAllocationClient {
    */
   def updateShardLocation(shard: ShardId, location: Address): Future[Done]

+  /**
+   * Update all of the provided ShardLocations.
+   * The [[Address]] should match one of the nodes in the cluster. If the node has not joined
+   * the cluster yet it will be moved to that node after the first cluster
+   * sharding rebalance it does.
+   *
+   * @param locations to update
+   * @return Confirmation that the update has been propagated to a majority of cluster nodes
+   */
+  def updateShardLocations(locations: Map[ShardId, Address]): Future[Done]
+
   /**
    * Get all the current shard locations that have been set via updateShardLocation
    */
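Not part of this diff: a usage sketch of the batch API added above, next to the existing single-shard call. It assumes an implicit ActorSystem in scope as `system`; how the client is obtained (here via the `ExternalShardAllocation` extension and a hypothetical entity type name "device") is not shown in this commit, and the addresses are placeholders.

    import scala.concurrent.Future
    import akka.Done
    import akka.actor.Address
    import akka.cluster.sharding.external.ExternalShardAllocation

    val client = ExternalShardAllocation(system).clientFor("device") // assumed lookup, not in this diff

    // existing API: one shard at a time
    val one: Future[Done] =
      client.updateShardLocation("shard-1", Address("akka", "system", "host-1", 2552))

    // new API from this commit: update several shard locations in a single ddata update
    val all: Future[Done] = client.updateShardLocations(
      Map(
        "shard-1" -> Address("akka", "system", "host-1", 2552),
        "shard-2" -> Address("akka", "system", "host-2", 2552)))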
@ -0,0 +1,97 @@
|
|||
/*
|
||||
* Copyright (C) 2015-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sbr
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.Actor
|
||||
import akka.actor.ActorLogging
|
||||
import akka.actor.ActorRef
|
||||
import akka.actor.Address
|
||||
import akka.actor.Props
|
||||
import akka.cluster.Cluster
|
||||
import akka.cluster.sharding.ShardRegion
|
||||
import akka.serialization.jackson.CborSerializable
|
||||
|
||||
object GlobalRegistry {
|
||||
final case class Register(key: String, address: Address) extends CborSerializable
|
||||
final case class Unregister(key: String, address: Address) extends CborSerializable
|
||||
final case class DoubleRegister(key: String, msg: String) extends CborSerializable
|
||||
|
||||
def props(probe: ActorRef, onlyErrors: Boolean): Props =
|
||||
Props(new GlobalRegistry(probe, onlyErrors))
|
||||
|
||||
object SingletonActor {
|
||||
def props(registry: ActorRef): Props =
|
||||
Props(new SingletonActor(registry))
|
||||
|
||||
val extractEntityId: ShardRegion.ExtractEntityId = {
|
||||
case id: Int => (id.toString, id)
|
||||
}
|
||||
|
||||
val extractShardId: ShardRegion.ExtractShardId = msg =>
|
||||
msg match {
|
||||
case id: Int => (id % 10).toString
|
||||
}
|
||||
}
|
||||
|
||||
class SingletonActor(registry: ActorRef) extends Actor with ActorLogging {
|
||||
val key = self.path.toStringWithoutAddress + "-" + Cluster(context.system).selfDataCenter
|
||||
|
||||
override def preStart(): Unit = {
|
||||
log.info("Starting")
|
||||
registry ! Register(key, Cluster(context.system).selfAddress)
|
||||
}
|
||||
|
||||
override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
|
||||
// don't call postStop
|
||||
}
|
||||
|
||||
override def postStop(): Unit = {
|
||||
log.info("Stopping")
|
||||
registry ! Unregister(key, Cluster(context.system).selfAddress)
|
||||
}
|
||||
|
||||
override def receive = {
|
||||
case i: Int => sender() ! i
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class GlobalRegistry(probe: ActorRef, onlyErrors: Boolean) extends Actor with ActorLogging {
|
||||
import GlobalRegistry._
|
||||
|
||||
var registry = Map.empty[String, Address]
|
||||
var unregisterTimestamp = Map.empty[String, Long]
|
||||
|
||||
override def receive = {
|
||||
case r @ Register(key, address) =>
|
||||
log.info("{}", r)
|
||||
if (registry.contains(key)) {
|
||||
val errMsg = s"trying to register $address, but ${registry(key)} was already registered for $key"
|
||||
log.error(errMsg)
|
||||
probe ! DoubleRegister(key, errMsg)
|
||||
} else {
|
||||
unregisterTimestamp.get(key).foreach { t =>
|
||||
log.info("Unregister/register margin for [{}] was [{}] ms", key, (System.nanoTime() - t).nanos.toMillis)
|
||||
}
|
||||
registry += key -> address
|
||||
if (!onlyErrors) probe ! r
|
||||
}
|
||||
|
||||
case u @ Unregister(key, address) =>
|
||||
log.info("{}", u)
|
||||
if (!registry.contains(key))
|
||||
probe ! s"$key was not registered"
|
||||
else if (registry(key) != address)
|
||||
probe ! s"${registry(key)} instead of $address was registered for $key"
|
||||
else {
|
||||
registry -= key
|
||||
unregisterTimestamp += key -> System.nanoTime()
|
||||
if (!onlyErrors) probe ! u
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sbr
|
||||
|
||||
import akka.actor.Actor
|
||||
import akka.actor.ActorLogging
|
||||
import akka.actor.ActorRef
|
||||
import akka.actor.Address
|
||||
import akka.actor.ExtendedActorSystem
|
||||
import akka.actor.Props
|
||||
import akka.cluster.Cluster
|
||||
import akka.pattern.pipe
|
||||
import akka.remote.RemoteActorRefProvider
|
||||
import akka.remote.transport.ThrottlerTransportAdapter.Blackhole
|
||||
import akka.remote.transport.ThrottlerTransportAdapter.Direction
|
||||
import akka.remote.transport.ThrottlerTransportAdapter.SetThrottle
|
||||
import akka.remote.transport.ThrottlerTransportAdapter.Unthrottled
|
||||
import akka.serialization.jackson.CborSerializable
|
||||
|
||||
object GremlinController {
|
||||
final case class BlackholeNode(target: Address) extends CborSerializable
|
||||
final case class PassThroughNode(target: Address) extends CborSerializable
|
||||
case object GetAddress extends CborSerializable
|
||||
|
||||
def props: Props =
|
||||
Props(new GremlinController)
|
||||
}
|
||||
|
||||
class GremlinController extends Actor with ActorLogging {
|
||||
import context.dispatcher
|
||||
|
||||
import GremlinController._
|
||||
val transport =
|
||||
context.system.asInstanceOf[ExtendedActorSystem].provider.asInstanceOf[RemoteActorRefProvider].transport
|
||||
val selfAddress = Cluster(context.system).selfAddress
|
||||
|
||||
override def receive = {
|
||||
case GetAddress =>
|
||||
sender() ! selfAddress
|
||||
case BlackholeNode(target) =>
|
||||
log.debug("Blackhole {} <-> {}", selfAddress, target)
|
||||
transport.managementCommand(SetThrottle(target, Direction.Both, Blackhole)).pipeTo(sender())
|
||||
case PassThroughNode(target) =>
|
||||
log.debug("PassThrough {} <-> {}", selfAddress, target)
|
||||
transport.managementCommand(SetThrottle(target, Direction.Both, Unthrottled)).pipeTo(sender())
|
||||
}
|
||||
}
|
||||
|
||||
object GremlinControllerProxy {
|
||||
def props(target: ActorRef): Props =
|
||||
Props(new GremlinControllerProxy(target))
|
||||
}
|
||||
|
||||
class GremlinControllerProxy(target: ActorRef) extends Actor {
|
||||
override def receive = {
|
||||
case msg => target.forward(msg)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,409 @@
|
|||
/*
|
||||
* Copyright (C) 2015-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sbr
|
||||
|
||||
import scala.concurrent.Await
|
||||
import scala.concurrent.duration._
|
||||
import scala.util.Random
|
||||
|
||||
import com.typesafe.config.Config
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import org.scalatest.BeforeAndAfterEach
|
||||
|
||||
import akka.actor._
|
||||
import akka.cluster.Cluster
|
||||
import akka.cluster.MemberStatus
|
||||
import akka.cluster.MultiNodeClusterSpec
|
||||
import akka.cluster.sharding.ClusterSharding
|
||||
import akka.cluster.sharding.ClusterShardingSettings
|
||||
import akka.cluster.singleton.ClusterSingletonManager
|
||||
import akka.cluster.singleton.ClusterSingletonManagerSettings
|
||||
import akka.pattern.ask
|
||||
import akka.remote.testconductor.RoleName
|
||||
import akka.remote.testkit.MultiNodeConfig
|
||||
import akka.remote.testkit.MultiNodeSpec
|
||||
import akka.testkit.ImplicitSender
|
||||
import akka.testkit.LongRunningTest
|
||||
import akka.testkit.TestKit
|
||||
import akka.testkit.TestProbe
|
||||
import akka.util.Timeout
|
||||
|
||||
/*
|
||||
* Depends on akka private classes so needs to be in this package
|
||||
*/
|
||||
object RandomizedSplitBrainResolverIntegrationSpec extends MultiNodeConfig {
|
||||
val node1 = role("node1")
|
||||
val node2 = role("node2")
|
||||
val node3 = role("node3")
|
||||
val node4 = role("node4")
|
||||
val node5 = role("node5")
|
||||
val node6 = role("node6")
|
||||
val node7 = role("node7")
|
||||
val node8 = role("node8")
|
||||
val node9 = role("node9")
|
||||
|
||||
commonConfig(ConfigFactory.parseString(s"""
|
||||
akka {
|
||||
loglevel = INFO
|
||||
cluster {
|
||||
downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
|
||||
split-brain-resolver {
|
||||
stable-after = 10s
|
||||
|
||||
active-strategy = lease-majority
|
||||
lease-majority {
|
||||
lease-implementation = test-lease
|
||||
}
|
||||
}
|
||||
|
||||
#failure-detector.acceptable-heartbeat-pause = 10s
|
||||
|
||||
# speedup timeout
|
||||
sharding.handoff-timeout = 10 s
|
||||
|
||||
# this is starting singleton more aggressively than default (15)
|
||||
singleton.min-number-of-hand-over-retries = 10
|
||||
}
|
||||
actor.provider = cluster
|
||||
}
|
||||
|
||||
test-lease {
|
||||
lease-class = akka.cluster.sbr.SbrTestLeaseActorClient
|
||||
heartbeat-interval = 1s
|
||||
heartbeat-timeout = 120s
|
||||
lease-operation-timeout = 3s
|
||||
}
|
||||
|
||||
test.random-seed = ${System.currentTimeMillis()}
|
||||
|
||||
akka.testconductor.barrier-timeout = 120 s
|
||||
akka.cluster.run-coordinated-shutdown-when-down = off
|
||||
"""))
|
||||
|
||||
testTransport(on = true)
|
||||
|
||||
}
|
||||
|
||||
class RandomizedSplitBrainResolverIntegrationSpecMultiJvmNode1 extends RandomizedSplitBrainResolverIntegrationSpec
|
||||
class RandomizedSplitBrainResolverIntegrationSpecMultiJvmNode2 extends RandomizedSplitBrainResolverIntegrationSpec
|
||||
class RandomizedSplitBrainResolverIntegrationSpecMultiJvmNode3 extends RandomizedSplitBrainResolverIntegrationSpec
|
||||
class RandomizedSplitBrainResolverIntegrationSpecMultiJvmNode4 extends RandomizedSplitBrainResolverIntegrationSpec
|
||||
class RandomizedSplitBrainResolverIntegrationSpecMultiJvmNode5 extends RandomizedSplitBrainResolverIntegrationSpec
|
||||
class RandomizedSplitBrainResolverIntegrationSpecMultiJvmNode6 extends RandomizedSplitBrainResolverIntegrationSpec
|
||||
class RandomizedSplitBrainResolverIntegrationSpecMultiJvmNode7 extends RandomizedSplitBrainResolverIntegrationSpec
|
||||
class RandomizedSplitBrainResolverIntegrationSpecMultiJvmNode8 extends RandomizedSplitBrainResolverIntegrationSpec
|
||||
class RandomizedSplitBrainResolverIntegrationSpecMultiJvmNode9 extends RandomizedSplitBrainResolverIntegrationSpec
|
||||
|
||||
class RandomizedSplitBrainResolverIntegrationSpec
|
||||
extends MultiNodeSpec(RandomizedSplitBrainResolverIntegrationSpec)
|
||||
with MultiNodeClusterSpec
|
||||
with ImplicitSender
|
||||
with BeforeAndAfterEach {
|
||||
import GlobalRegistry._
|
||||
import GremlinController._
|
||||
import RandomizedSplitBrainResolverIntegrationSpec._
|
||||
|
||||
// counter for unique naming for each test
|
||||
var c = 0
|
||||
// to be shutdown in afterEach
|
||||
var disposableSys: DisposableSys = _
|
||||
|
||||
override def expectedTestDuration = 3.minutes
|
||||
|
||||
object DisposableSys {
|
||||
def apply(scenario: Scenario): DisposableSys = {
|
||||
disposableSys = new DisposableSys(scenario)
|
||||
disposableSys
|
||||
}
|
||||
}
|
||||
|
||||
override def afterEach(): Unit = {
|
||||
if (disposableSys ne null)
|
||||
disposableSys.shutdownSys()
|
||||
}
|
||||
|
||||
class DisposableSys(scenario: Scenario) {
|
||||
|
||||
c += 1
|
||||
|
||||
val sys: ActorSystem = {
|
||||
|
||||
val sys = ActorSystem(system.name + "-" + c, system.settings.config)
|
||||
val gremlinController = sys.actorOf(GremlinController.props, "gremlinController")
|
||||
system.actorOf(GremlinControllerProxy.props(gremlinController), s"gremlinControllerProxy-$c")
|
||||
sys
|
||||
}
|
||||
|
||||
val singletonProbe = TestProbe()
|
||||
val shardingProbe = TestProbe()
|
||||
runOn(node1) {
|
||||
system.actorOf(GlobalRegistry.props(singletonProbe.ref, true), s"singletonRegistry-$c")
|
||||
system.actorOf(GlobalRegistry.props(shardingProbe.ref, true), s"shardingRegistry-$c")
|
||||
if (scenario.usingLease)
|
||||
system.actorOf(SbrTestLeaseActor.props, s"lease-${sys.name}")
|
||||
}
|
||||
enterBarrier("registry-started")
|
||||
|
||||
system.actorSelection(node(node1) / "user" / s"singletonRegistry-$c") ! Identify(None)
|
||||
val singletonRegistry: ActorRef = expectMsgType[ActorIdentity].ref.get
|
||||
system.actorSelection(node(node1) / "user" / s"shardingRegistry-$c") ! Identify(None)
|
||||
val shardingRegistry: ActorRef = expectMsgType[ActorIdentity].ref.get
|
||||
|
||||
if (scenario.usingLease) {
|
||||
system.actorSelection(node(node1) / "user" / s"lease-${sys.name}") ! Identify(None)
|
||||
val leaseRef: ActorRef = expectMsgType[ActorIdentity].ref.get
|
||||
SbrTestLeaseActorClientExt(sys).getActorLeaseClient().setActorLeaseRef(leaseRef)
|
||||
}
|
||||
enterBarrier("registry-located")
|
||||
|
||||
lazy val region = ClusterSharding(sys).shardRegion(s"Entity-$c")
|
||||
|
||||
def shutdownSys(): Unit = {
|
||||
TestKit.shutdownActorSystem(sys, 10.seconds, verifySystemShutdown = true)
|
||||
}
|
||||
|
||||
def gremlinControllerProxy(at: RoleName): ActorRef = {
|
||||
system.actorSelection(node(at) / "user" / s"gremlinControllerProxy-$c") ! Identify(None)
|
||||
expectMsgType[ActorIdentity].ref.get
|
||||
}
|
||||
|
||||
def sysAddress(at: RoleName): Address = {
|
||||
implicit val timeout = Timeout(3.seconds)
|
||||
Await.result((gremlinControllerProxy(at) ? GetAddress).mapTo[Address], timeout.duration)
|
||||
}
|
||||
|
||||
def blackhole(from: RoleName, to: RoleName): Unit = {
|
||||
implicit val timeout = Timeout(3.seconds)
|
||||
import system.dispatcher
|
||||
val f = for {
|
||||
target <- (gremlinControllerProxy(to) ? GetAddress).mapTo[Address]
|
||||
done <- gremlinControllerProxy(from) ? BlackholeNode(target)
|
||||
} yield done
|
||||
Await.ready(f, timeout.duration * 2)
|
||||
log.info("Blackhole {} <-> {}", from.name, to.name)
|
||||
}
|
||||
|
||||
def passThrough(from: RoleName, to: RoleName): Unit = {
|
||||
implicit val timeout = Timeout(3.seconds)
|
||||
import system.dispatcher
|
||||
val f = for {
|
||||
target <- (gremlinControllerProxy(to) ? GetAddress).mapTo[Address]
|
||||
done <- gremlinControllerProxy(from) ? PassThroughNode(target)
|
||||
} yield done
|
||||
Await.ready(f, timeout.duration * 2)
|
||||
log.info("PassThrough {} <-> {}", from.name, to.name)
|
||||
}
|
||||
|
||||
def join(from: RoleName, to: RoleName, awaitUp: Boolean): Unit = {
|
||||
runOn(from) {
|
||||
Cluster(sys).join(sysAddress(to))
|
||||
createSingleton()
|
||||
startSharding()
|
||||
if (awaitUp)
|
||||
awaitMemberUp()
|
||||
}
|
||||
enterBarrier(from.name + s"-joined-$c")
|
||||
}
|
||||
|
||||
def awaitMemberUp(): Unit =
|
||||
within(10.seconds) {
|
||||
awaitAssert {
|
||||
Cluster(sys).state.members.exists { m =>
|
||||
m.address == Cluster(sys).selfAddress && m.status == MemberStatus.Up
|
||||
} should be(true)
|
||||
}
|
||||
}
|
||||
|
||||
def createSingleton(): ActorRef = {
|
||||
sys.actorOf(
|
||||
ClusterSingletonManager.props(
|
||||
singletonProps = SingletonActor.props(singletonRegistry),
|
||||
terminationMessage = PoisonPill,
|
||||
settings = ClusterSingletonManagerSettings(system)),
|
||||
name = "singletonRegistry")
|
||||
}
|
||||
|
||||
def startSharding(): Unit = {
|
||||
ClusterSharding(sys).start(
|
||||
typeName = s"Entity-$c",
|
||||
entityProps = SingletonActor.props(shardingRegistry),
|
||||
settings = ClusterShardingSettings(system),
|
||||
extractEntityId = SingletonActor.extractEntityId,
|
||||
extractShardId = SingletonActor.extractShardId)
|
||||
}
|
||||
|
||||
def verify(): Unit = {
|
||||
val nodes = roles.take(scenario.numberOfNodes)
|
||||
|
||||
def sendToSharding(expectReply: Boolean): Unit = {
|
||||
runOn(nodes: _*) {
|
||||
if (!Cluster(sys).isTerminated) {
|
||||
val probe = TestProbe()(sys)
|
||||
for (i <- 0 until 10) {
|
||||
region.tell(i, probe.ref)
|
||||
if (expectReply)
|
||||
probe.expectMsg(3.seconds, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
runOn(nodes: _*) {
|
||||
log.info("Running {} {} in round {}", myself.name, Cluster(sys).selfUniqueAddress, c)
|
||||
}
|
||||
val randomSeed = sys.settings.config.getLong("test.random-seed")
|
||||
val random = new Random(randomSeed)
|
||||
enterBarrier(s"log-startup-$c")
|
||||
|
||||
within(3.minutes) {
|
||||
|
||||
join(nodes.head, nodes.head, awaitUp = true) // oldest
|
||||
join(nodes.last, nodes.head, awaitUp = true) // next oldest
|
||||
for (n <- nodes.tail.dropRight(1))
|
||||
join(n, nodes.head, awaitUp = false)
|
||||
runOn(nodes: _*) {
|
||||
awaitMemberUp()
|
||||
}
|
||||
enterBarrier(s"all-up-$c")
|
||||
|
||||
singletonProbe.expectNoMessage(1.second)
|
||||
shardingProbe.expectNoMessage(10.millis)
|
||||
|
||||
sendToSharding(expectReply = true)
|
||||
|
||||
enterBarrier(s"initialized-$c")
|
||||
runOn(nodes: _*) {
|
||||
log.info("Initialized {} {} in round {}", myself.name, Cluster(sys).selfUniqueAddress, c)
|
||||
}
|
||||
|
||||
runOn(node1) {
|
||||
val cleanSplit = random.nextBoolean()
|
||||
val healCleanSplit = cleanSplit && random.nextBoolean()
|
||||
val side1 = nodes.take(1 + random.nextInt(nodes.size - 1))
|
||||
val side2 = nodes.drop(side1.size)
|
||||
|
||||
val numberOfFlaky = random.nextInt(5)
|
||||
val healLastFlay = numberOfFlaky > 0 && random.nextBoolean()
|
||||
val flaky: Map[Int, (RoleName, List[RoleName])] =
|
||||
(0 until numberOfFlaky).map { i =>
|
||||
val from = nodes(random.nextInt(nodes.size))
|
||||
val targets = nodes.filterNot(_ == from)
|
||||
val to = (0 to random.nextInt(math.min(5, targets.size))).map(j => targets(j)).toList
|
||||
i -> (from -> to)
|
||||
}.toMap
|
||||
|
||||
val delays = (0 until 10).map(_ => 2 + random.nextInt(13))
|
||||
|
||||
log.info(s"Generated $scenario with random seed [$randomSeed] in round [$c]: " +
|
||||
s"cleanSplit [$cleanSplit], healCleanSplit [$healCleanSplit] " +
|
||||
(if (cleanSplit) s"side1 [${side1.map(_.name).mkString(", ")}], side2 [${side2.map(_.name).mkString(", ")}] ") +
|
||||
s"flaky [${flaky.map { case (_, (from, to)) => from.name -> to.map(_.name).mkString("(", ", ", ")") }.mkString("; ")}] " +
|
||||
s"delays [${delays.mkString(", ")}]")
|
||||
|
||||
var delayIndex = 0
|
||||
def nextDelay(): Unit = {
|
||||
Thread.sleep(delays(delayIndex) * 1000)
|
||||
delayIndex += 1
|
||||
}
|
||||
|
||||
if (cleanSplit) {
|
||||
for (n1 <- side1; n2 <- side2)
|
||||
blackhole(n1, n2)
|
||||
|
||||
nextDelay()
|
||||
}
|
||||
|
||||
flaky.foreach {
|
||||
case (i, (from, to)) =>
|
||||
if (i != 0) {
|
||||
// heal previous flakiness
|
||||
val (prevFrom, prevTo) = flaky(i - 1)
|
||||
for (n <- prevTo)
|
||||
passThrough(prevFrom, n)
|
||||
}
|
||||
|
||||
for (n <- to)
|
||||
blackhole(from, n)
|
||||
|
||||
nextDelay()
|
||||
}
|
||||
|
||||
if (healLastFlay) {
|
||||
val (prevFrom, prevTo) = flaky(flaky.size - 1)
|
||||
for (n <- prevTo)
|
||||
passThrough(prevFrom, n)
|
||||
|
||||
nextDelay()
|
||||
}
|
||||
|
||||
if (healCleanSplit) {
|
||||
for (n1 <- side1; n2 <- side2)
|
||||
passThrough(n1, n2)
|
||||
}
|
||||
}
|
||||
enterBarrier(s"scenario-done-$c")
|
||||
|
||||
runOn(nodes: _*) {
|
||||
sendToSharding(expectReply = false)
|
||||
singletonProbe.expectNoMessage(10.seconds)
|
||||
shardingProbe.expectNoMessage(10.millis)
|
||||
|
||||
var loopLimit = 20
|
||||
while (loopLimit != 0 && !Cluster(sys).isTerminated && Cluster(sys).state.unreachable.nonEmpty) {
|
||||
sendToSharding(expectReply = false)
|
||||
singletonProbe.expectNoMessage(5.seconds)
|
||||
shardingProbe.expectNoMessage(10.millis)
|
||||
loopLimit -= 1
|
||||
}
|
||||
}
|
||||
enterBarrier(s"terminated-or-unreachable-removed-$c")
|
||||
|
||||
runOn(nodes: _*) {
|
||||
(Cluster(sys).isTerminated || Cluster(sys).state.unreachable.isEmpty) should ===(true)
|
||||
within(30.seconds) {
|
||||
awaitAssert {
|
||||
sendToSharding(expectReply = true)
|
||||
}
|
||||
}
|
||||
singletonProbe.expectNoMessage(5.seconds)
|
||||
shardingProbe.expectNoMessage(10.millis)
|
||||
if (!Cluster(sys).isTerminated)
|
||||
log.info(s"Survived ${Cluster(sys).state.members.size} members in round $c")
|
||||
}
|
||||
|
||||
enterBarrier(s"verified-$c")
|
||||
}
|
||||
enterBarrier(s"after-$c")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private val leaseMajorityConfig = ConfigFactory.parseString("""akka.cluster.split-brain-resolver {
|
||||
active-strategy = lease-majority
|
||||
}""")
|
||||
|
||||
case class Scenario(cfg: Config, numberOfNodes: Int) {
|
||||
|
||||
val activeStrategy: String = cfg.getString("akka.cluster.split-brain-resolver.active-strategy")
|
||||
|
||||
override def toString: String =
|
||||
s"Scenario($activeStrategy, $numberOfNodes)"
|
||||
|
||||
def usingLease: Boolean = activeStrategy.contains("lease")
|
||||
}
|
||||
|
||||
val scenarios =
|
||||
List(Scenario(leaseMajorityConfig, 3), Scenario(leaseMajorityConfig, 5), Scenario(leaseMajorityConfig, 9))
|
||||
|
||||
"SplitBrainResolver with lease" must {
|
||||
|
||||
for (scenario <- scenarios) {
|
||||
scenario.toString taggedAs LongRunningTest in {
|
||||
DisposableSys(scenario).verify()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,125 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sbr
|
||||
|
||||
import java.util.concurrent.atomic.AtomicReference
|
||||
|
||||
import scala.concurrent.Future
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.Actor
|
||||
import akka.actor.ActorLogging
|
||||
import akka.actor.ActorRef
|
||||
import akka.actor.ActorSystem
|
||||
import akka.actor.ExtendedActorSystem
|
||||
import akka.actor.Extension
|
||||
import akka.actor.ExtensionId
|
||||
import akka.actor.ExtensionIdProvider
|
||||
import akka.actor.Props
|
||||
import akka.coordination.lease.LeaseSettings
|
||||
import akka.coordination.lease.scaladsl.Lease
|
||||
import akka.pattern.ask
|
||||
import akka.serialization.jackson.CborSerializable
|
||||
import akka.util.Timeout
|
||||
|
||||
object SbrTestLeaseActor {
|
||||
def props: Props =
|
||||
Props(new SbrTestLeaseActor)
|
||||
|
||||
final case class Acquire(owner: String) extends CborSerializable
|
||||
final case class Release(owner: String) extends CborSerializable
|
||||
}
|
||||
|
||||
class SbrTestLeaseActor extends Actor with ActorLogging {
|
||||
import SbrTestLeaseActor._
|
||||
|
||||
var owner: Option[String] = None
|
||||
|
||||
override def receive = {
|
||||
case Acquire(o) =>
|
||||
owner match {
|
||||
case None =>
|
||||
log.info("ActorLease: acquired by [{}]", o)
|
||||
owner = Some(o)
|
||||
sender() ! true
|
||||
case Some(`o`) =>
|
||||
log.info("ActorLease: renewed by [{}]", o)
|
||||
sender() ! true
|
||||
case Some(existingOwner) =>
|
||||
log.info("ActorLease: requested by [{}], but already held by [{}]", o, existingOwner)
|
||||
sender() ! false
|
||||
}
|
||||
|
||||
case Release(o) =>
|
||||
owner match {
|
||||
case None =>
|
||||
log.info("ActorLease: released by [{}] but no owner", o)
|
||||
owner = Some(o)
|
||||
sender() ! true
|
||||
case Some(`o`) =>
|
||||
log.info("ActorLease: released by [{}]", o)
|
||||
sender() ! true
|
||||
case Some(existingOwner) =>
|
||||
log.info("ActorLease: release attempt by [{}], but held by [{}]", o, existingOwner)
|
||||
sender() ! false
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
object SbrTestLeaseActorClientExt extends ExtensionId[SbrTestLeaseActorClientExt] with ExtensionIdProvider {
|
||||
override def get(system: ActorSystem): SbrTestLeaseActorClientExt = super.get(system)
|
||||
override def lookup = SbrTestLeaseActorClientExt
|
||||
override def createExtension(system: ExtendedActorSystem): SbrTestLeaseActorClientExt =
|
||||
new SbrTestLeaseActorClientExt(system)
|
||||
}
|
||||
|
||||
class SbrTestLeaseActorClientExt(val system: ExtendedActorSystem) extends Extension {
|
||||
|
||||
private val leaseClient = new AtomicReference[SbrTestLeaseActorClient]()
|
||||
|
||||
def getActorLeaseClient(): SbrTestLeaseActorClient = {
|
||||
val lease = leaseClient.get
|
||||
if (lease == null) throw new IllegalStateException("ActorLeaseClient must be set first")
|
||||
lease
|
||||
}
|
||||
|
||||
def setActorLeaseClient(client: SbrTestLeaseActorClient): Unit =
|
||||
leaseClient.set(client)
|
||||
|
||||
}
|
||||
|
||||
class SbrTestLeaseActorClient(settings: LeaseSettings, system: ExtendedActorSystem) extends Lease(settings) {
|
||||
import SbrTestLeaseActor.Acquire
|
||||
import SbrTestLeaseActor.Release
|
||||
|
||||
SbrTestLeaseActorClientExt(system).setActorLeaseClient(this)
|
||||
|
||||
private implicit val timeout = Timeout(3.seconds)
|
||||
|
||||
private val _leaseRef = new AtomicReference[ActorRef]
|
||||
|
||||
private def leaseRef: ActorRef = {
|
||||
val ref = _leaseRef.get
|
||||
if (ref == null) throw new IllegalStateException("ActorLeaseRef must be set first")
|
||||
ref
|
||||
}
|
||||
|
||||
def setActorLeaseRef(ref: ActorRef): Unit =
|
||||
_leaseRef.set(ref)
|
||||
|
||||
override def acquire(): Future[Boolean] = {
|
||||
(leaseRef ? Acquire(settings.ownerName)).mapTo[Boolean]
|
||||
}
|
||||
|
||||
override def acquire(leaseLostCallback: Option[Throwable] => Unit): Future[Boolean] =
|
||||
acquire()
|
||||
|
||||
override def release(): Future[Boolean] = {
|
||||
(leaseRef ? Release(settings.ownerName)).mapTo[Boolean]
|
||||
}
|
||||
|
||||
override def checkLease(): Boolean = false
|
||||
}
|
||||
|
|
@ -0,0 +1,465 @@
|
|||
/*
|
||||
* Copyright (C) 2015-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sbr
|
||||
|
||||
import scala.concurrent.Await
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import com.typesafe.config.Config
|
||||
import com.typesafe.config.ConfigFactory
|
||||
import com.typesafe.config.ConfigValueFactory
|
||||
import org.scalatest.BeforeAndAfterEach
|
||||
|
||||
import akka.actor._
|
||||
import akka.cluster.Cluster
|
||||
import akka.cluster.ClusterSettings.DataCenter
|
||||
import akka.cluster.ClusterSettings.DefaultDataCenter
|
||||
import akka.cluster.Member
|
||||
import akka.cluster.MemberStatus
|
||||
import akka.cluster.MultiNodeClusterSpec
|
||||
import akka.cluster.sharding.ClusterSharding
|
||||
import akka.cluster.sharding.ClusterShardingSettings
|
||||
import akka.cluster.singleton.ClusterSingletonManager
|
||||
import akka.cluster.singleton.ClusterSingletonManagerSettings
|
||||
import akka.pattern.ask
|
||||
import akka.remote.testconductor.RoleName
|
||||
import akka.remote.testkit.MultiNodeConfig
|
||||
import akka.remote.testkit.MultiNodeSpec
|
||||
import akka.testkit.ImplicitSender
|
||||
import akka.testkit.LongRunningTest
|
||||
import akka.testkit.TestKit
|
||||
import akka.testkit.TestProbe
|
||||
import akka.util.Timeout
|
||||
|
||||
/*
|
||||
* Depends on akka private classes so needs to be in this package
|
||||
*/
|
||||
object SplitBrainResolverIntegrationSpec extends MultiNodeConfig {
|
||||
val node1 = role("node1")
|
||||
val node2 = role("node2")
|
||||
val node3 = role("node3")
|
||||
val node4 = role("node4")
|
||||
val node5 = role("node5")
|
||||
val node6 = role("node6")
|
||||
val node7 = role("node7")
|
||||
val node8 = role("node8")
|
||||
val node9 = role("node9")
|
||||
|
||||
commonConfig(ConfigFactory.parseString("""
|
||||
akka {
|
||||
loglevel = INFO
|
||||
cluster {
|
||||
downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
|
||||
split-brain-resolver.active-strategy = keep-majority
|
||||
split-brain-resolver.stable-after = 10s
|
||||
|
||||
sharding.handoff-timeout = 5s
|
||||
}
|
||||
|
||||
actor.provider = cluster
|
||||
remote.log-remote-lifecycle-events = off
|
||||
}
|
||||
|
||||
akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off
|
||||
akka.coordinated-shutdown.terminate-actor-system = off
|
||||
akka.cluster.run-coordinated-shutdown-when-down = off
|
||||
"""))
|
||||
|
||||
testTransport(on = true)
|
||||
|
||||
}
|
||||
|
||||
class SplitBrainResolverIntegrationSpecMultiJvmNode1 extends SplitBrainResolverIntegrationSpec
|
||||
class SplitBrainResolverIntegrationSpecMultiJvmNode2 extends SplitBrainResolverIntegrationSpec
|
||||
class SplitBrainResolverIntegrationSpecMultiJvmNode3 extends SplitBrainResolverIntegrationSpec
|
||||
class SplitBrainResolverIntegrationSpecMultiJvmNode4 extends SplitBrainResolverIntegrationSpec
|
||||
class SplitBrainResolverIntegrationSpecMultiJvmNode5 extends SplitBrainResolverIntegrationSpec
|
||||
class SplitBrainResolverIntegrationSpecMultiJvmNode6 extends SplitBrainResolverIntegrationSpec
|
||||
class SplitBrainResolverIntegrationSpecMultiJvmNode7 extends SplitBrainResolverIntegrationSpec
|
||||
class SplitBrainResolverIntegrationSpecMultiJvmNode8 extends SplitBrainResolverIntegrationSpec
|
||||
class SplitBrainResolverIntegrationSpecMultiJvmNode9 extends SplitBrainResolverIntegrationSpec
|
||||
|
||||
class SplitBrainResolverIntegrationSpec
|
||||
extends MultiNodeSpec(SplitBrainResolverIntegrationSpec)
|
||||
with MultiNodeClusterSpec
|
||||
with ImplicitSender
|
||||
with BeforeAndAfterEach {
|
||||
import GlobalRegistry._
|
||||
import GremlinController._
|
||||
import SplitBrainResolverIntegrationSpec._
|
||||
|
||||
override def initialParticipants = roles.size
|
||||
|
||||
override def afterEach(): Unit = {
|
||||
if (disposableSys ne null)
|
||||
disposableSys.shutdownSys()
|
||||
}
|
||||
|
||||
// counter for unique naming for each test
|
||||
var c = 0
|
||||
// to be shutdown in afterEach
|
||||
var disposableSys: DisposableSys = _
|
||||
|
||||
override def expectedTestDuration = 10.minutes
|
||||
|
||||
object DisposableSys {
|
||||
def apply(scenario: Scenario): DisposableSys = {
|
||||
disposableSys = new DisposableSys(scenario)
|
||||
disposableSys
|
||||
}
|
||||
}
|
||||
|
||||
class DisposableSys(scenario: Scenario) {
|
||||
|
||||
c += 1
|
||||
|
||||
val sys: ActorSystem = {
|
||||
val dcName = scenario.dcDecider(myself)
|
||||
|
||||
val sys = ActorSystem(
|
||||
system.name + "-" + c,
|
||||
scenario.cfg
|
||||
.withValue("akka.cluster.multi-data-center.self-data-center", ConfigValueFactory.fromAnyRef(dcName))
|
||||
.withFallback(system.settings.config))
|
||||
val gremlinController = sys.actorOf(GremlinController.props, "gremlinController")
|
||||
system.actorOf(GremlinControllerProxy.props(gremlinController), s"gremlinControllerProxy-$c")
|
||||
sys
|
||||
}
|
||||
|
||||
val singletonProbe = TestProbe()
|
||||
val shardingProbe = TestProbe()
|
||||
runOn(node1) {
|
||||
system.actorOf(GlobalRegistry.props(singletonProbe.ref, false), s"singletonRegistry-$c")
|
||||
system.actorOf(GlobalRegistry.props(shardingProbe.ref, true), s"shardingRegistry-$c")
|
||||
if (scenario.usingLease)
|
||||
system.actorOf(SbrTestLeaseActor.props, s"lease-${sys.name}")
|
||||
}
|
||||
enterBarrier("registry-started")
|
||||
|
||||
system.actorSelection(node(node1) / "user" / s"singletonRegistry-$c") ! Identify(None)
|
||||
val singletonRegistry: ActorRef = expectMsgType[ActorIdentity].ref.get
|
||||
system.actorSelection(node(node1) / "user" / s"shardingRegistry-$c") ! Identify(None)
|
||||
val shardingRegistry: ActorRef = expectMsgType[ActorIdentity].ref.get
|
||||
|
||||
if (scenario.usingLease) {
|
||||
system.actorSelection(node(node1) / "user" / s"lease-${sys.name}") ! Identify(None)
|
||||
val leaseRef: ActorRef = expectMsgType[ActorIdentity].ref.get
|
||||
SbrTestLeaseActorClientExt(sys).getActorLeaseClient().setActorLeaseRef(leaseRef)
|
||||
}
|
||||
|
||||
enterBarrier("registry-located")
|
||||
|
||||
lazy val region = ClusterSharding(sys).shardRegion(s"Entity-$c")
|
||||
|
||||
def shutdownSys(): Unit = {
|
||||
TestKit.shutdownActorSystem(sys, 10.seconds, verifySystemShutdown = true)
|
||||
}
|
||||
|
||||
def gremlinControllerProxy(at: RoleName): ActorRef = {
|
||||
system.actorSelection(node(at) / "user" / s"gremlinControllerProxy-$c") ! Identify(None)
|
||||
expectMsgType[ActorIdentity].ref.get
|
||||
}
|
||||
|
||||
def sysAddress(at: RoleName): Address = {
|
||||
implicit val timeout = Timeout(3.seconds)
|
||||
Await.result((gremlinControllerProxy(at) ? GetAddress).mapTo[Address], timeout.duration)
|
||||
}
|
||||
|
||||
def blackhole(from: RoleName, to: RoleName): Unit = {
|
||||
implicit val timeout = Timeout(3.seconds)
|
||||
import system.dispatcher
|
||||
val f = for {
|
||||
target <- (gremlinControllerProxy(to) ? GetAddress).mapTo[Address]
|
||||
done <- gremlinControllerProxy(from) ? BlackholeNode(target)
|
||||
} yield done
|
||||
Await.ready(f, timeout.duration * 2)
|
||||
log.info("Blackhole {} <-> {}", from.name, to.name)
|
||||
}
|
||||
|
||||
def join(from: RoleName, to: RoleName, awaitUp: Boolean): Unit = {
|
||||
runOn(from) {
|
||||
Cluster(sys).join(sysAddress(to))
|
||||
createSingleton()
|
||||
startSharding()
|
||||
if (awaitUp)
|
||||
awaitMemberUp()
|
||||
}
|
||||
enterBarrier(from.name + s"-joined-$c")
|
||||
}
|
||||
|
||||
def awaitMemberUp(): Unit =
|
||||
within(10.seconds) {
|
||||
awaitAssert {
|
||||
Cluster(sys).state.members.exists { m =>
|
||||
m.address == Cluster(sys).selfAddress && m.status == MemberStatus.Up
|
||||
} should be(true)
|
||||
}
|
||||
}
|
||||
|
||||
def awaitAllMembersUp(nodes: RoleName*): Unit = {
|
||||
val addresses = nodes.map(sysAddress).toSet
|
||||
within(15.seconds) {
|
||||
awaitAssert {
|
||||
Cluster(sys).state.members.map(_.address) should ===(addresses)
|
||||
Cluster(sys).state.members.foreach {
|
||||
_.status should ===(MemberStatus.Up)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def createSingleton(): ActorRef = {
|
||||
sys.actorOf(
|
||||
ClusterSingletonManager.props(
|
||||
singletonProps = SingletonActor.props(singletonRegistry),
|
||||
terminationMessage = PoisonPill,
|
||||
settings = ClusterSingletonManagerSettings(system)),
|
||||
name = "singletonRegistry")
|
||||
}
|
||||
|
||||
def startSharding(): Unit = {
|
||||
ClusterSharding(sys).start(
|
||||
typeName = s"Entity-$c",
|
||||
entityProps = SingletonActor.props(shardingRegistry),
|
||||
settings = ClusterShardingSettings(system),
|
||||
extractEntityId = SingletonActor.extractEntityId,
|
||||
extractShardId = SingletonActor.extractShardId)
|
||||
}
|
||||
|
||||
def verify(): Unit = {
|
||||
val side1 = roles.take(scenario.side1Size)
|
||||
val side2 = roles.drop(scenario.side1Size).take(scenario.side2Size)
|
||||
|
||||
def singletonRegisterKey(node: RoleName): String =
|
||||
"/user/singletonRegistry/singleton-" + scenario.dcDecider(node)
|
||||
|
||||
runOn(side1 ++ side2: _*) {
|
||||
log.info("Running {} {} in round {}", myself.name, Cluster(sys).selfUniqueAddress, c)
|
||||
}
|
||||
enterBarrier(s"log-startup-$c")
|
||||
|
||||
within(90.seconds) {
|
||||
|
||||
join(side1.head, side1.head, awaitUp = true) // oldest
|
||||
join(side2.head, side1.head, awaitUp = true) // next oldest
|
||||
for (n <- side1.tail ++ side2.tail)
|
||||
join(n, side1.head, awaitUp = false)
|
||||
runOn(side1 ++ side2: _*) {
|
||||
awaitAllMembersUp(side1 ++ side2: _*)
|
||||
}
|
||||
enterBarrier(s"all-up-$c")
|
||||
|
||||
runOn(node1) {
|
||||
singletonProbe.within(25.seconds) {
|
||||
singletonProbe.expectMsg(Register(singletonRegisterKey(node1), sysAddress(node1)))
|
||||
}
|
||||
shardingProbe.expectNoMessage(100.millis)
|
||||
}
|
||||
|
||||
runOn(side1 ++ side2: _*) {
|
||||
val probe = TestProbe()(sys)
|
||||
for (i <- 0 until 10) {
|
||||
region.tell(i, probe.ref)
|
||||
probe.expectMsg(5.seconds, i)
|
||||
}
|
||||
}
|
||||
|
||||
enterBarrier(s"initialized-$c")
|
||||
runOn(side1 ++ side2: _*) {
|
||||
log.info("Initialized {} {} in round {}", myself.name, Cluster(sys).selfUniqueAddress, c)
|
||||
}
|
||||
|
||||
runOn(node1) {
|
||||
for (n1 <- side1; n2 <- side2)
|
||||
blackhole(n1, n2)
|
||||
}
|
||||
enterBarrier(s"blackhole-$c")
|
||||
|
||||
val resolvedExpected = scenario.expected match {
|
||||
case KeepLeader =>
|
||||
import Member.addressOrdering
|
||||
val address = (side1 ++ side2).map(sysAddress).min
|
||||
if (side1.exists(sysAddress(_) == address)) KeepSide1
|
||||
else if (side2.exists(sysAddress(_) == address)) KeepSide2
|
||||
else ShutdownBoth
|
||||
case other => other
|
||||
}
|
||||
|
||||
resolvedExpected match {
|
||||
case ShutdownBoth =>
|
||||
runOn(side1 ++ side2: _*) {
|
||||
awaitCond(Cluster(sys).isTerminated, max = 30.seconds)
|
||||
}
|
||||
enterBarrier(s"sys-terminated-$c")
|
||||
runOn(node1) {
|
||||
singletonProbe.within(20.seconds) {
|
||||
singletonProbe.expectMsg(Unregister(singletonRegisterKey(side1.head), sysAddress(side1.head)))
|
||||
}
|
||||
shardingProbe.expectNoMessage(100.millis)
|
||||
}
|
||||
|
||||
case KeepSide1 =>
|
||||
runOn(side1: _*) {
|
||||
val expectedAddresses = side1.map(sysAddress).toSet
|
||||
within(remaining - 3.seconds) {
|
||||
awaitAssert {
|
||||
val probe = TestProbe()(sys)
|
||||
for (i <- 0 until 10) {
|
||||
region.tell(i, probe.ref)
|
||||
probe.expectMsg(2.seconds, i)
|
||||
}
|
||||
|
||||
Cluster(sys).state.members.map(_.address) should be(expectedAddresses)
|
||||
}
|
||||
}
|
||||
}
|
||||
runOn(side2: _*) {
|
||||
awaitCond(Cluster(sys).isTerminated, max = 30.seconds)
|
||||
}
|
||||
enterBarrier(s"cluster-shutdown-verified-$c")
|
||||
singletonProbe.expectNoMessage(1.second)
|
||||
shardingProbe.expectNoMessage(100.millis)
|
||||
|
||||
case KeepSide2 =>
|
||||
runOn(side1: _*) {
|
||||
awaitCond(Cluster(sys).isTerminated, max = 30.seconds)
|
||||
}
|
||||
enterBarrier(s"sys-terminated-$c")
|
||||
runOn(node1) {
|
||||
singletonProbe.within(30.seconds) {
|
||||
singletonProbe.expectMsg(Unregister(singletonRegisterKey(side1.head), sysAddress(side1.head)))
|
||||
singletonProbe.expectMsg(Register(singletonRegisterKey(side2.head), sysAddress(side2.head)))
|
||||
}
|
||||
shardingProbe.expectNoMessage(100.millis)
|
||||
}
|
||||
runOn(side2: _*) {
|
||||
val expectedAddresses = side2.map(sysAddress).toSet
|
||||
within(remaining - 3.seconds) {
|
||||
awaitAssert {
|
||||
val probe = TestProbe()(sys)
|
||||
for (i <- 0 until 10) {
|
||||
region.tell(i, probe.ref)
|
||||
probe.expectMsg(2.seconds, i)
|
||||
}
|
||||
|
||||
Cluster(sys).state.members.map(_.address) should be(expectedAddresses)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case KeepAll =>
|
||||
runOn((side1 ++ side2): _*) {
|
||||
val expectedAddresses = (side1 ++ side2).map(sysAddress).toSet
|
||||
within(remaining - 3.seconds) {
|
||||
awaitAssert {
|
||||
val probe = TestProbe()(sys)
|
||||
for (i <- 0 until 10) {
|
||||
region.tell(i, probe.ref)
|
||||
probe.expectMsg(2.seconds, i)
|
||||
}
|
||||
|
||||
Cluster(sys).state.members.map(_.address) should be(expectedAddresses)
|
||||
}
|
||||
}
|
||||
Cluster(sys).isTerminated should be(false)
|
||||
}
|
||||
enterBarrier(s"cluster-intact-verified-$c")
|
||||
|
||||
case KeepLeader => throw new IllegalStateException // already resolved to other case
|
||||
}
|
||||
|
||||
enterBarrier(s"verified-$c")
|
||||
}
|
||||
enterBarrier(s"after-$c")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private val staticQuorumConfig = ConfigFactory.parseString("""akka.cluster.split-brain-resolver {
|
||||
active-strategy = static-quorum
|
||||
static-quorum.quorum-size = 5
|
||||
}""")
|
||||
|
||||
private val keepMajorityConfig = ConfigFactory.parseString("""akka.cluster.split-brain-resolver {
|
||||
active-strategy = keep-majority
|
||||
}""")
|
||||
private val keepOldestConfig = ConfigFactory.parseString("""akka.cluster.split-brain-resolver {
|
||||
active-strategy = keep-oldest
|
||||
}""")
|
||||
private val downAllConfig = ConfigFactory.parseString("""akka.cluster.split-brain-resolver {
|
||||
active-strategy = down-all
|
||||
}""")
|
||||
private val leaseMajorityConfig = ConfigFactory.parseString("""akka.cluster.split-brain-resolver {
|
||||
active-strategy = lease-majority
|
||||
lease-majority {
|
||||
lease-implementation = test-lease
|
||||
acquire-lease-delay-for-minority = 3s
|
||||
}
|
||||
}
|
||||
test-lease {
|
||||
lease-class = akka.cluster.sbr.SbrTestLeaseActorClient
|
||||
heartbeat-interval = 1s
|
||||
heartbeat-timeout = 120s
|
||||
lease-operation-timeout = 3s
|
||||
}
|
||||
""")
|
||||
|
||||
sealed trait Expected
|
||||
case object KeepSide1 extends Expected
|
||||
case object KeepSide2 extends Expected
|
||||
case object ShutdownBoth extends Expected
|
||||
case object KeepLeader extends Expected
|
||||
case object KeepAll extends Expected
|
||||
|
||||
val defaultDcDecider: RoleName => DataCenter = _ => DefaultDataCenter
|
||||
|
||||
case class Scenario(
|
||||
cfg: Config,
|
||||
side1Size: Int,
|
||||
side2Size: Int,
|
||||
expected: Expected,
|
||||
dcDecider: RoleName => DataCenter = defaultDcDecider // allows setting the dc per indexed node
|
||||
) {
|
||||
|
||||
val activeStrategy: String = cfg.getString("akka.cluster.split-brain-resolver.active-strategy")
|
||||
|
||||
override def toString: String = {
|
||||
s"$expected when using $activeStrategy and side1=$side1Size and side2=$side2Size" +
|
||||
(if (dcDecider ne defaultDcDecider) " with multi-DC" else "")
|
||||
}
|
||||
|
||||
def usingLease: Boolean = activeStrategy.contains("lease")
|
||||
}
|
||||
|
||||
val scenarios = List(
|
||||
Scenario(staticQuorumConfig, 1, 2, ShutdownBoth),
|
||||
Scenario(staticQuorumConfig, 4, 4, ShutdownBoth),
|
||||
Scenario(staticQuorumConfig, 5, 4, KeepSide1),
|
||||
Scenario(staticQuorumConfig, 1, 5, KeepSide2),
|
||||
Scenario(staticQuorumConfig, 4, 5, KeepSide2),
|
||||
Scenario(keepMajorityConfig, 2, 1, KeepSide1),
|
||||
Scenario(keepMajorityConfig, 1, 2, KeepSide2),
|
||||
Scenario(keepMajorityConfig, 4, 5, KeepSide2),
|
||||
Scenario(keepMajorityConfig, 4, 4, KeepLeader),
|
||||
Scenario(keepOldestConfig, 3, 3, KeepSide1),
|
||||
Scenario(keepOldestConfig, 1, 1, KeepSide1),
|
||||
Scenario(keepOldestConfig, 1, 2, KeepSide2), // because down-if-alone
|
||||
Scenario(keepMajorityConfig, 3, 2, KeepAll, {
|
||||
case `node1` | `node2` | `node3` => "dcA"
|
||||
case _ => "dcB"
|
||||
}),
|
||||
Scenario(downAllConfig, 1, 2, ShutdownBoth),
|
||||
Scenario(leaseMajorityConfig, 4, 5, KeepSide2))
|
||||
|
||||
"Cluster SplitBrainResolver" must {
|
||||
|
||||
for (scenario <- scenarios) {
|
||||
scenario.toString taggedAs LongRunningTest in {
|
||||
DisposableSys(scenario).verify()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,181 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sharding
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.Actor
|
||||
import akka.actor.ActorRef
|
||||
import akka.actor.Props
|
||||
import akka.cluster.MemberStatus
|
||||
import akka.remote.transport.ThrottlerTransportAdapter.Direction
|
||||
import akka.serialization.jackson.CborSerializable
|
||||
import akka.testkit._
|
||||
import akka.util.ccompat._
|
||||
|
||||
@ccompatUsedUntil213
|
||||
object ClusterShardCoordinatorDowning2Spec {
|
||||
case class Ping(id: String) extends CborSerializable
|
||||
|
||||
class Entity extends Actor {
|
||||
def receive = {
|
||||
case Ping(_) => sender() ! self
|
||||
}
|
||||
}
|
||||
|
||||
case object GetLocations extends CborSerializable
|
||||
case class Locations(locations: Map[String, ActorRef]) extends CborSerializable
|
||||
|
||||
class ShardLocations extends Actor {
|
||||
var locations: Locations = _
|
||||
def receive = {
|
||||
case GetLocations => sender() ! locations
|
||||
case l: Locations => locations = l
|
||||
}
|
||||
}
|
||||
|
||||
val extractEntityId: ShardRegion.ExtractEntityId = {
|
||||
case m @ Ping(id) => (id, m)
|
||||
}
|
||||
|
||||
val extractShardId: ShardRegion.ExtractShardId = {
|
||||
case Ping(id: String) => id.charAt(0).toString
|
||||
}
|
||||
}
|
||||
|
||||
abstract class ClusterShardCoordinatorDowning2SpecConfig(mode: String)
|
||||
extends MultiNodeClusterShardingConfig(
|
||||
mode,
|
||||
loglevel = "INFO",
|
||||
additionalConfig = """
|
||||
akka.cluster.sharding.rebalance-interval = 120 s
|
||||
# setting down-removal-margin, for testing of issue #29131
|
||||
akka.cluster.down-removal-margin = 3 s
|
||||
akka.remote.watch-failure-detector.acceptable-heartbeat-pause = 3s
|
||||
""") {
|
||||
val first = role("first")
|
||||
val second = role("second")
|
||||
|
||||
testTransport(on = true)
|
||||
|
||||
}
|
||||
|
||||
object PersistentClusterShardCoordinatorDowning2SpecConfig
|
||||
extends ClusterShardCoordinatorDowning2SpecConfig(ClusterShardingSettings.StateStoreModePersistence)
|
||||
object DDataClusterShardCoordinatorDowning2SpecConfig
|
||||
extends ClusterShardCoordinatorDowning2SpecConfig(ClusterShardingSettings.StateStoreModeDData)
|
||||
|
||||
class PersistentClusterShardCoordinatorDowning2Spec
|
||||
extends ClusterShardCoordinatorDowning2Spec(PersistentClusterShardCoordinatorDowning2SpecConfig)
|
||||
class DDataClusterShardCoordinatorDowning2Spec
|
||||
extends ClusterShardCoordinatorDowning2Spec(DDataClusterShardCoordinatorDowning2SpecConfig)
|
||||
|
||||
class PersistentClusterShardCoordinatorDowning2MultiJvmNode1 extends PersistentClusterShardCoordinatorDowning2Spec
|
||||
class PersistentClusterShardCoordinatorDowning2MultiJvmNode2 extends PersistentClusterShardCoordinatorDowning2Spec
|
||||
|
||||
class DDataClusterShardCoordinatorDowning2MultiJvmNode1 extends DDataClusterShardCoordinatorDowning2Spec
|
||||
class DDataClusterShardCoordinatorDowning2MultiJvmNode2 extends DDataClusterShardCoordinatorDowning2Spec
|
||||
|
||||
abstract class ClusterShardCoordinatorDowning2Spec(multiNodeConfig: ClusterShardCoordinatorDowning2SpecConfig)
|
||||
extends MultiNodeClusterShardingSpec(multiNodeConfig)
|
||||
with ImplicitSender {
|
||||
import multiNodeConfig._
|
||||
|
||||
import ClusterShardCoordinatorDowning2Spec._
|
||||
|
||||
def startSharding(): Unit = {
|
||||
startSharding(
|
||||
system,
|
||||
typeName = "Entity",
|
||||
entityProps = Props[Entity](),
|
||||
extractEntityId = extractEntityId,
|
||||
extractShardId = extractShardId)
|
||||
}
|
||||
|
||||
lazy val region = ClusterSharding(system).shardRegion("Entity")
|
||||
|
||||
s"Cluster sharding ($mode) with down member, scenario 2" must {
|
||||
|
||||
"join cluster" in within(20.seconds) {
|
||||
startPersistenceIfNotDdataMode(startOn = first, setStoreOn = Seq(first, second))
|
||||
|
||||
join(first, first, onJoinedRunOnFrom = startSharding())
|
||||
join(second, first, onJoinedRunOnFrom = startSharding(), assertNodeUp = false)
|
||||
|
||||
// all Up, everywhere before continuing
|
||||
runOn(first, second) {
|
||||
awaitAssert {
|
||||
cluster.state.members.size should ===(2)
|
||||
cluster.state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))
|
||||
}
|
||||
}
|
||||
|
||||
enterBarrier("after-2")
|
||||
}
|
||||
|
||||
"initialize shards" in {
|
||||
runOn(first) {
|
||||
val shardLocations = system.actorOf(Props[ShardLocations](), "shardLocations")
|
||||
val locations = (for (n <- 1 to 4) yield {
|
||||
val id = n.toString
|
||||
region ! Ping(id)
|
||||
id -> expectMsgType[ActorRef]
|
||||
}).toMap
|
||||
shardLocations ! Locations(locations)
|
||||
system.log.debug("Original locations: {}", locations)
|
||||
}
|
||||
enterBarrier("after-3")
|
||||
}
|
||||
|
||||
"recover after downing other node (not coordinator)" in within(20.seconds) {
|
||||
val secondAddress = address(second)
|
||||
|
||||
runOn(first) {
|
||||
testConductor.blackhole(first, second, Direction.Both).await
|
||||
}
|
||||
|
||||
Thread.sleep(3000)
|
||||
|
||||
runOn(first) {
|
||||
cluster.down(second)
|
||||
awaitAssert {
|
||||
cluster.state.members.size should ===(1)
|
||||
}
|
||||
|
||||
// start a few more new shards; they could be allocated to second, but the coordinator should notice that it's terminated
|
||||
val additionalLocations =
|
||||
awaitAssert {
|
||||
val probe = TestProbe()
|
||||
(for (n <- 5 to 8) yield {
|
||||
val id = n.toString
|
||||
region.tell(Ping(id), probe.ref)
|
||||
id -> probe.expectMsgType[ActorRef](1.second)
|
||||
}).toMap
|
||||
}
|
||||
system.log.debug("Additional locations: {}", additionalLocations)
|
||||
|
||||
system.actorSelection(node(first) / "user" / "shardLocations") ! GetLocations
|
||||
val Locations(originalLocations) = expectMsgType[Locations]
|
||||
|
||||
awaitAssert {
|
||||
val probe = TestProbe()
|
||||
(originalLocations ++ additionalLocations).foreach {
|
||||
case (id, ref) =>
|
||||
region.tell(Ping(id), probe.ref)
|
||||
if (ref.path.address == secondAddress) {
|
||||
val newRef = probe.expectMsgType[ActorRef](1.second)
|
||||
newRef should not be (ref)
|
||||
system.log.debug("Moved [{}] from [{}] to [{}]", id, ref, newRef)
|
||||
} else
|
||||
probe.expectMsg(1.second, ref) // should not move
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enterBarrier("after-4")
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,183 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sharding
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.Actor
|
||||
import akka.actor.ActorRef
|
||||
import akka.actor.Props
|
||||
import akka.cluster.MemberStatus
|
||||
import akka.remote.transport.ThrottlerTransportAdapter.Direction
|
||||
import akka.serialization.jackson.CborSerializable
|
||||
import akka.testkit._
|
||||
import akka.util.ccompat._
|
||||
|
||||
@ccompatUsedUntil213
|
||||
object ClusterShardCoordinatorDowningSpec {
|
||||
case class Ping(id: String) extends CborSerializable
|
||||
|
||||
class Entity extends Actor {
|
||||
def receive = {
|
||||
case Ping(_) => sender() ! self
|
||||
}
|
||||
}
|
||||
|
||||
case object GetLocations extends CborSerializable
|
||||
case class Locations(locations: Map[String, ActorRef]) extends CborSerializable
|
||||
|
||||
class ShardLocations extends Actor {
|
||||
var locations: Locations = _
|
||||
def receive = {
|
||||
case GetLocations => sender() ! locations
|
||||
case l: Locations => locations = l
|
||||
}
|
||||
}
|
||||
|
||||
val extractEntityId: ShardRegion.ExtractEntityId = {
|
||||
case m @ Ping(id) => (id, m)
|
||||
}
|
||||
|
||||
val extractShardId: ShardRegion.ExtractShardId = {
|
||||
case Ping(id: String) => id.charAt(0).toString
|
||||
}
|
||||
}
|
||||
|
||||
abstract class ClusterShardCoordinatorDowningSpecConfig(mode: String)
|
||||
extends MultiNodeClusterShardingConfig(
|
||||
mode,
|
||||
loglevel = "INFO",
|
||||
additionalConfig = """
|
||||
akka.cluster.sharding.rebalance-interval = 120 s
|
||||
# setting down-removal-margin, for testing of issue #29131
|
||||
akka.cluster.down-removal-margin = 3 s
|
||||
akka.remote.watch-failure-detector.acceptable-heartbeat-pause = 3s
|
||||
""") {
|
||||
val controller = role("controller")
|
||||
val first = role("first")
|
||||
val second = role("second")
|
||||
|
||||
testTransport(on = true)
|
||||
|
||||
}
|
||||
|
||||
object PersistentClusterShardCoordinatorDowningSpecConfig
|
||||
extends ClusterShardCoordinatorDowningSpecConfig(ClusterShardingSettings.StateStoreModePersistence)
|
||||
object DDataClusterShardCoordinatorDowningSpecConfig
|
||||
extends ClusterShardCoordinatorDowningSpecConfig(ClusterShardingSettings.StateStoreModeDData)
|
||||
|
||||
class PersistentClusterShardCoordinatorDowningSpec
|
||||
extends ClusterShardCoordinatorDowningSpec(PersistentClusterShardCoordinatorDowningSpecConfig)
|
||||
class DDataClusterShardCoordinatorDowningSpec
|
||||
extends ClusterShardCoordinatorDowningSpec(DDataClusterShardCoordinatorDowningSpecConfig)
|
||||
|
||||
class PersistentClusterShardCoordinatorDowningMultiJvmNode1 extends PersistentClusterShardCoordinatorDowningSpec
|
||||
class PersistentClusterShardCoordinatorDowningMultiJvmNode2 extends PersistentClusterShardCoordinatorDowningSpec
|
||||
class PersistentClusterShardCoordinatorDowningMultiJvmNode3 extends PersistentClusterShardCoordinatorDowningSpec
|
||||
|
||||
class DDataClusterShardCoordinatorDowningMultiJvmNode1 extends DDataClusterShardCoordinatorDowningSpec
|
||||
class DDataClusterShardCoordinatorDowningMultiJvmNode2 extends DDataClusterShardCoordinatorDowningSpec
|
||||
class DDataClusterShardCoordinatorDowningMultiJvmNode3 extends DDataClusterShardCoordinatorDowningSpec
|
||||
|
||||
abstract class ClusterShardCoordinatorDowningSpec(multiNodeConfig: ClusterShardCoordinatorDowningSpecConfig)
|
||||
extends MultiNodeClusterShardingSpec(multiNodeConfig)
|
||||
with ImplicitSender {
|
||||
import multiNodeConfig._
|
||||
|
||||
import ClusterShardCoordinatorDowningSpec._
|
||||
|
||||
def startSharding(): Unit = {
|
||||
startSharding(
|
||||
system,
|
||||
typeName = "Entity",
|
||||
entityProps = Props[Entity](),
|
||||
extractEntityId = extractEntityId,
|
||||
extractShardId = extractShardId)
|
||||
}
|
||||
|
||||
lazy val region = ClusterSharding(system).shardRegion("Entity")
|
||||
|
||||
s"Cluster sharding ($mode) with down member, scenario 1" must {
|
||||
|
||||
"join cluster" in within(20.seconds) {
|
||||
startPersistenceIfNotDdataMode(startOn = controller, setStoreOn = Seq(first, second))
|
||||
|
||||
join(first, first, onJoinedRunOnFrom = startSharding())
|
||||
join(second, first, onJoinedRunOnFrom = startSharding(), assertNodeUp = false)
|
||||
|
||||
// all Up, everywhere before continuing
|
||||
runOn(first, second) {
|
||||
awaitAssert {
|
||||
cluster.state.members.size should ===(2)
|
||||
cluster.state.members.unsorted.map(_.status) should ===(Set(MemberStatus.Up))
|
||||
}
|
||||
}
|
||||
|
||||
enterBarrier("after-2")
|
||||
}
|
||||
|
||||
"initialize shards" in {
|
||||
runOn(first) {
|
||||
val shardLocations = system.actorOf(Props[ShardLocations](), "shardLocations")
|
||||
val locations = (for (n <- 1 to 4) yield {
|
||||
val id = n.toString
|
||||
region ! Ping(id)
|
||||
id -> expectMsgType[ActorRef]
|
||||
}).toMap
|
||||
shardLocations ! Locations(locations)
|
||||
system.log.debug("Original locations: {}", locations)
|
||||
}
|
||||
enterBarrier("after-3")
|
||||
}
|
||||
|
||||
"recover after downing coordinator node" in within(20.seconds) {
|
||||
val firstAddress = address(first)
|
||||
system.actorSelection(node(first) / "user" / "shardLocations") ! GetLocations
|
||||
val Locations(originalLocations) = expectMsgType[Locations]
|
||||
|
||||
runOn(controller) {
|
||||
testConductor.blackhole(first, second, Direction.Both).await
|
||||
}
|
||||
|
||||
Thread.sleep(3000)
|
||||
|
||||
runOn(second) {
|
||||
cluster.down(first)
|
||||
awaitAssert {
|
||||
cluster.state.members.size should ===(1)
|
||||
}
|
||||
|
||||
// start a few more new shards; they could be allocated to first, but the coordinator should notice that it's terminated
|
||||
val additionalLocations =
|
||||
awaitAssert {
|
||||
val probe = TestProbe()
|
||||
(for (n <- 5 to 8) yield {
|
||||
val id = n.toString
|
||||
region.tell(Ping(id), probe.ref)
|
||||
id -> probe.expectMsgType[ActorRef](1.second)
|
||||
}).toMap
|
||||
}
|
||||
system.log.debug("Additional locations: {}", additionalLocations)
|
||||
|
||||
awaitAssert {
|
||||
val probe = TestProbe()
|
||||
(originalLocations ++ additionalLocations).foreach {
|
||||
case (id, ref) =>
|
||||
region.tell(Ping(id), probe.ref)
|
||||
if (ref.path.address == firstAddress) {
|
||||
val newRef = probe.expectMsgType[ActorRef](1.second)
|
||||
newRef should not be (ref)
|
||||
system.log.debug("Moved [{}] from [{}] to [{}]", id, ref, newRef)
|
||||
} else
|
||||
probe.expectMsg(1.second, ref) // should not move
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enterBarrier("after-4")
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
|
@ -131,7 +131,7 @@ abstract class ExternalShardAllocationSpec
|
|||
val forthAddress = address(forth)
|
||||
runOn(second) {
|
||||
system.log.info("Allocating {} on {}", onForthShardId, forthAddress)
|
||||
ExternalShardAllocation(system).clientFor(typeName).updateShardLocation(onForthShardId, forthAddress)
|
||||
ExternalShardAllocation(system).clientFor(typeName).updateShardLocations(Map(onForthShardId -> forthAddress))
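// Note (added commentary, not part of the original test): updateShardLocations takes a Map of
// shard-id -> Address, so several shard assignments can be updated in a single call; here it
// replaces the previous single-shard updateShardLocation call with a one-entry Map.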
|
||||
}
|
||||
enterBarrier("allocated-to-new-node")
|
||||
runOn(forth) {
|
||||
|
|
|
|||
|
|
@ -11,7 +11,9 @@ import scala.util.control.NoStackTrace
|
|||
import com.typesafe.config.{ Config, ConfigFactory }
|
||||
|
||||
import akka.actor.Props
|
||||
import akka.cluster.{ Cluster, MemberStatus, TestLease, TestLeaseExt }
|
||||
import akka.cluster.{ Cluster, MemberStatus }
|
||||
import akka.coordination.lease.TestLease
|
||||
import akka.coordination.lease.TestLeaseExt
|
||||
import akka.testkit.{ AkkaSpec, ImplicitSender }
|
||||
import akka.testkit.TestActors.EchoActor
|
||||
|
||||
|
|
|
|||
|
|
@ -12,20 +12,21 @@ import scala.util.Success
|
|||
import scala.util.control.NoStackTrace
|
||||
|
||||
import akka.actor.{ Actor, ActorLogging, PoisonPill, Props }
|
||||
import akka.cluster.TestLeaseExt
|
||||
import akka.cluster.sharding.ShardRegion.ShardInitialized
|
||||
import akka.coordination.lease.LeaseUsageSettings
|
||||
import akka.coordination.lease.TestLease
|
||||
import akka.coordination.lease.TestLeaseExt
|
||||
import akka.testkit.{ AkkaSpec, ImplicitSender, TestProbe }
|
||||
|
||||
object ShardSpec {
|
||||
val config =
|
||||
"""
|
||||
s"""
|
||||
akka.loglevel = INFO
|
||||
akka.actor.provider = "cluster"
|
||||
akka.remote.classic.netty.tcp.port = 0
|
||||
akka.remote.artery.canonical.port = 0
|
||||
test-lease {
|
||||
lease-class = akka.cluster.TestLease
|
||||
lease-class = ${classOf[TestLease].getName}
|
||||
heartbeat-interval = 1s
|
||||
heartbeat-timeout = 120s
|
||||
lease-operation-timeout = 3s
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
|
|
|||
|
|
@ -197,8 +197,10 @@ akka.cluster.singleton-proxy {
|
|||
# The actor name of the singleton actor that is started by the ClusterSingletonManager
|
||||
singleton-name = ${akka.cluster.singleton.singleton-name}
|
||||
|
||||
# The role of the cluster nodes where the singleton can be deployed.
|
||||
# If the role is not specified then any node will do.
|
||||
# The role of the cluster nodes where the singleton can be deployed.
|
||||
# Corresponding to the role used by the `ClusterSingletonManager`. If the role is not
|
||||
# specified it's a singleton among all nodes in the cluster, and the `ClusterSingletonManager`
|
||||
# must then also be configured in the same way.
|
||||
role = ""
|
||||
|
||||
# Interval at which the proxy will try to resolve the singleton instance.
|
||||
|
|
|
|||
|
|
@ -626,6 +626,10 @@ class ClusterSingletonManager(singletonProps: Props, terminationMessage: Any, se
|
|||
goto(BecomingOldest).using(BecomingOldestData(oldest.filterNot(_ == cluster.selfUniqueAddress)))
|
||||
else
|
||||
goto(Younger).using(YoungerData(oldest.filterNot(_ == cluster.selfUniqueAddress)))
|
||||
|
||||
case Event(HandOverToMe, _) =>
|
||||
// nothing to hand over in start
|
||||
stay()
|
||||
}
|
||||
|
||||
when(Younger) {
|
||||
|
|
|
|||
|
|
@ -66,8 +66,11 @@ object ClusterSingletonProxySettings {
|
|||
|
||||
/**
|
||||
* @param singletonName The actor name of the singleton actor that is started by the [[ClusterSingletonManager]].
|
||||
* @param role The role of the cluster nodes where the singleton can be deployed. If None, then any node will do.
|
||||
* @param dataCenter The data center of the cluster nodes where the singleton is running. If None then the same data center as current node.
|
||||
* @param role The role of the cluster nodes where the singleton can be deployed. Corresponding to the `role`
|
||||
* used by the `ClusterSingletonManager`. If the role is not specified it's a singleton among all
|
||||
* nodes in the cluster, and the `ClusterSingletonManager` must then also be configured in
|
||||
* the same way.
|
||||
* @param dataCenter The data center of the cluster nodes where the singleton is running. If None then the same data center as current node.
|
||||
* @param singletonIdentificationInterval Interval at which the proxy will try to resolve the singleton instance.
|
||||
* @param bufferSize If the location of the singleton is unknown the proxy will buffer this number of messages
|
||||
* and deliver them when the singleton is identified. When the buffer is full old messages will be dropped
|
||||
|
|
|
|||
|
|
@ -11,8 +11,10 @@ import com.typesafe.config.ConfigFactory
|
|||
import akka.actor.{ Actor, ActorIdentity, ActorLogging, ActorRef, Address, Identify, PoisonPill, Props }
|
||||
import akka.cluster._
|
||||
import akka.cluster.MemberStatus.Up
|
||||
import akka.cluster.TestLeaseActor._
|
||||
import akka.cluster.singleton.ClusterSingletonManagerLeaseSpec.ImportantSingleton.Response
|
||||
import akka.coordination.lease.TestLeaseActor
|
||||
import akka.coordination.lease.TestLeaseActorClient
|
||||
import akka.coordination.lease.TestLeaseActorClientExt
|
||||
import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec, STMultiNodeSpec }
|
||||
import akka.testkit._
|
||||
|
||||
|
|
@ -25,14 +27,14 @@ object ClusterSingletonManagerLeaseSpec extends MultiNodeConfig {
|
|||
|
||||
testTransport(true)
|
||||
|
||||
commonConfig(ConfigFactory.parseString("""
|
||||
commonConfig(ConfigFactory.parseString(s"""
|
||||
akka.loglevel = INFO
|
||||
akka.actor.provider = "cluster"
|
||||
akka.remote.log-remote-lifecycle-events = off
|
||||
akka.cluster.downing-provider-class = akka.cluster.testkit.AutoDowning
|
||||
akka.cluster.testkit.auto-down-unreachable-after = 0s
|
||||
test-lease {
|
||||
lease-class = akka.cluster.TestLeaseActorClient
|
||||
lease-class = ${classOf[TestLeaseActorClient].getName}
|
||||
heartbeat-interval = 1s
|
||||
heartbeat-timeout = 120s
|
||||
lease-operation-timeout = 3s
|
||||
|
|
@ -79,6 +81,7 @@ class ClusterSingletonManagerLeaseSpec
|
|||
|
||||
import ClusterSingletonManagerLeaseSpec._
|
||||
import ClusterSingletonManagerLeaseSpec.ImportantSingleton._
|
||||
import TestLeaseActor._
|
||||
|
||||
override def initialParticipants = roles.size
|
||||
|
||||
|
|
@ -128,10 +131,11 @@ class ClusterSingletonManagerLeaseSpec
|
|||
}
|
||||
|
||||
"Start singleton and ping from all nodes" in {
|
||||
runOn(first, second, third, fourth) {
|
||||
// fourth doesn't have the worker role
|
||||
runOn(first, second, third) {
|
||||
system.actorOf(
|
||||
ClusterSingletonManager
|
||||
.props(props(), PoisonPill, ClusterSingletonManagerSettings(system).withRole("worker")),
|
||||
.props(ImportantSingleton.props(), PoisonPill, ClusterSingletonManagerSettings(system).withRole("worker")),
|
||||
"important")
|
||||
}
|
||||
enterBarrier("singleton-started")
|
||||
|
|
|
|||
|
|
@ -20,10 +20,8 @@ import akka.actor.PoisonPill
|
|||
import akka.actor.Props
|
||||
import akka.cluster.Cluster
|
||||
import akka.cluster.MemberStatus
|
||||
import akka.cluster.TestLease
|
||||
import akka.cluster.TestLease.AcquireReq
|
||||
import akka.cluster.TestLease.ReleaseReq
|
||||
import akka.cluster.TestLeaseExt
|
||||
import akka.coordination.lease.TestLease
|
||||
import akka.coordination.lease.TestLeaseExt
|
||||
import akka.testkit.AkkaSpec
|
||||
import akka.testkit.TestException
|
||||
import akka.testkit.TestProbe
|
||||
|
|
@ -55,6 +53,7 @@ class ClusterSingletonLeaseSpec extends AkkaSpec(ConfigFactory.parseString("""
|
|||
lease-retry-interval = 2000ms
|
||||
}
|
||||
""").withFallback(TestLease.config)) {
|
||||
import TestLease.{ AcquireReq, ReleaseReq }
|
||||
|
||||
val cluster = Cluster(system)
|
||||
val testLeaseExt = TestLeaseExt(system)
|
||||
|
|
|
|||
|
|
@ -41,6 +41,8 @@ akka {
|
|||
seed-nodes = [
|
||||
"akka://ClusterSystem@127.0.0.1:2551",
|
||||
"akka://ClusterSystem@127.0.0.1:2552"]
|
||||
|
||||
downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
|
||||
}
|
||||
}
|
||||
#config-seeds
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (C) 2019-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
|
|
|
|||
|
|
@ -42,6 +42,10 @@ akka {
|
|||
# This is useful if you implement downing strategies that handle network partitions,
|
||||
# e.g. by keeping the larger side of the partition and shutting down the smaller side.
|
||||
# Disable with "off" or specify a duration to enable.
|
||||
#
|
||||
# When using the `akka.cluster.sbr.SplitBrainResolver` as downing provider it will use
|
||||
# the akka.cluster.split-brain-resolver.stable-after as the default down-removal-margin
|
||||
# if this down-removal-margin is undefined.
|
||||
down-removal-margin = off
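# Illustration of the note above (a reading of that note, not an additional default): with the
# akka.cluster.sbr.SplitBrainResolverProvider enabled and
# akka.cluster.split-brain-resolver.stable-after = 20s, leaving down-removal-margin at its
# default means the margin is derived from stable-after (20s), while an explicit duration
# configured here takes precedence over the derived value.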
|
||||
|
||||
# Pluggable support for downing of nodes in the cluster.
|
||||
|
|
@ -364,3 +368,113 @@ akka {
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
#//#split-brain-resolver
|
||||
|
||||
# To enable the split brain resolver you first need to enable the provider in your application.conf:
|
||||
# akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
|
||||
|
||||
akka.cluster.split-brain-resolver {
|
||||
# Select one of the available strategies (see descriptions below):
|
||||
# static-quorum, keep-majority, keep-oldest, down-all, lease-majority
|
||||
active-strategy = keep-majority
|
||||
|
||||
#//#stable-after
|
||||
# Time margin after which shards or singletons that belonged to a downed/removed
|
||||
# partition are created in the surviving partition. The purpose of this margin is that
|
||||
# in case of a network partition the persistent actors in the non-surviving partitions
|
||||
# must be stopped before corresponding persistent actors are started somewhere else.
|
||||
# This is useful if you implement downing strategies that handle network partitions,
|
||||
# e.g. by keeping the larger side of the partition and shutting down the smaller side.
|
||||
# The decision is taken by the strategy when there have been no membership or
|
||||
# reachability changes for this duration, i.e. the cluster state is stable.
|
||||
stable-after = 20s
|
||||
#//#stable-after
|
||||
|
||||
# When reachability observations by the failure detector are changed the SBR decisions
|
||||
# are deferred until there are no changes within the 'stable-after' duration.
|
||||
# If this continues for too long it might be an indication of an unstable system/network
|
||||
# and it could result in delayed or conflicting decisions on separate sides of a network
|
||||
# partition.
|
||||
# As a precaution for that scenario all nodes are downed if no decision is made within
|
||||
# `stable-after + down-all-when-unstable` from the first unreachability event.
|
||||
# The measurement is reset if all unreachable have been healed, downed or removed, or
|
||||
# if there are no changes within `stable-after * 2`.
|
||||
# The value can be on, off, or a duration.
|
||||
# By default it is 'on' and then it is derived to be 3/4 of stable-after.
|
||||
down-all-when-unstable = on
|
||||
|
||||
}
|
||||
#//#split-brain-resolver
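# Worked example of the timing above (illustrative only): with the default
# stable-after = 20s and down-all-when-unstable = on, the derived threshold is
# 3/4 * 20s = 15s, so all nodes are downed if no decision has been made within
# 20s + 15s = 35s from the first unreachability event.
#
# Minimal application.conf sketch for enabling the resolver (values are examples only):
#
#   akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
#   akka.cluster.split-brain-resolver {
#     active-strategy = keep-majority
#     stable-after = 20s
#   }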
|
||||
|
||||
# Down the unreachable nodes if the number of remaining nodes is greater than or equal to
|
||||
# the given 'quorum-size'. Otherwise down the reachable nodes, i.e. it will shut down that
|
||||
# side of the partition. In other words, 'quorum-size' defines the minimum number of nodes
|
||||
# that the cluster must have to be operational. If there are unreachable nodes when starting
|
||||
# up the cluster, before reaching this limit, the cluster may shut itself down immediately.
|
||||
# This is not an issue if you start all nodes at approximately the same time.
|
||||
#
|
||||
# Note that you must not add more members to the cluster than 'quorum-size * 2 - 1', because
|
||||
# then both sides may down each other and thereby form two separate clusters. For example,
|
||||
# quorum-size configured to 3 in a 6 node cluster may result in a split where each side
|
||||
# consists of 3 nodes each, i.e. each side thinks it has enough nodes to continue by
|
||||
# itself. A warning is logged if this recommendation is violated.
|
||||
#//#static-quorum
|
||||
akka.cluster.split-brain-resolver.static-quorum {
|
||||
# minimum number of nodes that the cluster must have
|
||||
quorum-size = undefined
|
||||
|
||||
# if the 'role' is defined the decision is based only on members with that 'role'
|
||||
role = ""
|
||||
}
|
||||
#//#static-quorum
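# Worked example of the sizing rule above (illustrative only): the multi-node test in this
# change uses quorum-size = 5, which supports at most 5 * 2 - 1 = 9 members. In a 9-node
# cluster a 5|4 split keeps the 5-node side and downs the other, while a 4|4 split in an
# 8-node cluster shuts down both sides because neither reaches the quorum.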
|
||||
|
||||
# Down the unreachable nodes if the current node is in the majority part based on the last known
|
||||
# membership information. Otherwise down the reachable nodes, i.e. the own part. If the
|
||||
# parts are of equal size the part containing the node with the lowest address is kept.
|
||||
# Note that if there are more than two partitions and none is in majority each part
|
||||
# will shut itself down, terminating the whole cluster.
|
||||
#//#keep-majority
|
||||
akka.cluster.split-brain-resolver.keep-majority {
|
||||
# if the 'role' is defined the decision is based only on members with that 'role'
|
||||
role = ""
|
||||
}
|
||||
#//#keep-majority
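# Worked example (illustrative only, matching the multi-node test scenarios in this change):
# a 4|5 split keeps the 5-node majority side and the 4-node side downs itself
# (Scenario(keepMajorityConfig, 4, 5, KeepSide2)), while a 4|4 split has no majority and
# keeps the side containing the member with the lowest address
# (Scenario(keepMajorityConfig, 4, 4, KeepLeader)).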
|
||||
|
||||
# Down the part that does not contain the oldest member (current singleton).
|
||||
#
|
||||
# There is one exception to this rule if 'down-if-alone' is defined to 'on'.
|
||||
# Then, if the oldest node has partitioned from all other nodes the oldest
|
||||
# will down itself and keep all other nodes running. The strategy will not
|
||||
# down the single oldest node when it is the only remaining node in the cluster.
|
||||
#
|
||||
# Note that if the oldest node crashes the others will remove it from the cluster
|
||||
# when 'down-if-alone' is 'on', otherwise they will down themselves if the
|
||||
# oldest node crashes, i.e. shut down the whole cluster together with the oldest node.
|
||||
#//#keep-oldest
|
||||
akka.cluster.split-brain-resolver.keep-oldest {
|
||||
# Enable downing of the oldest node when it is partitioned from all other nodes
|
||||
down-if-alone = on
|
||||
|
||||
# if the 'role' is defined the decision is based only on members with that 'role',
|
||||
# i.e. using the oldest member (singleton) within the nodes with that role
|
||||
role = ""
|
||||
}
|
||||
#//#keep-oldest
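# Worked example of down-if-alone (illustrative only): in a 1|2 split where the oldest
# member ends up alone, the oldest downs itself and the other two members keep running;
# this is the Scenario(keepOldestConfig, 1, 2, KeepSide2) case in the multi-node test in
# this change. With down-if-alone = off the two other members would down themselves
# instead, keeping only the oldest.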
|
||||
|
||||
# Keep the part that can acquire the lease, and down the other part.
|
||||
# The best effort is to keep the side that has the most nodes, i.e. the majority side.
|
||||
# This is achieved by adding a delay before trying to acquire the lease on the
|
||||
# minority side.
|
||||
#//#lease-majority
|
||||
akka.cluster.split-brain-resolver.lease-majority {
|
||||
lease-implementation = ""
|
||||
|
||||
# This delay is used on the minority side before trying to acquire the lease,
|
||||
# as a best effort to try to keep the majority side.
|
||||
acquire-lease-delay-for-minority = 2s
|
||||
|
||||
# If the 'role' is defined the majority/minority is based only on members with that 'role'.
|
||||
role = ""
|
||||
}
|
||||
#//#lease-majority
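# Example configuration (taken from the multi-node test in this change; the test-lease
# block and its lease-class are test-only placeholders, a real deployment points
# lease-implementation at a production lease backend):
#
#   akka.cluster.split-brain-resolver {
#     active-strategy = lease-majority
#     lease-majority {
#       lease-implementation = test-lease
#       acquire-lease-delay-for-minority = 3s
#     }
#   }
#   test-lease {
#     lease-class = akka.cluster.sbr.SbrTestLeaseActorClient
#     lease-operation-timeout = 3s
#   }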
|
||||
|
|
|
|||
|
|
@ -5,7 +5,6 @@
|
|||
package akka.cluster
|
||||
|
||||
import java.lang.management.ManagementFactory
|
||||
|
||||
import javax.management.InstanceAlreadyExistsException
|
||||
import javax.management.InstanceNotFoundException
|
||||
import javax.management.ObjectName
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ package akka.cluster
|
|||
import akka.actor.Address
|
||||
import akka.annotation.ApiMayChange
|
||||
import akka.annotation.InternalApi
|
||||
import akka.cluster.sbr.DowningStrategy
|
||||
import akka.event.LogMarker
|
||||
|
||||
/**
|
||||
|
|
@ -22,6 +23,7 @@ object ClusterLogMarker {
|
|||
*/
|
||||
@InternalApi private[akka] object Properties {
|
||||
val MemberStatus = "akkaMemberStatus"
|
||||
val SbrDecision = "akkaSbrDecision"
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -91,4 +93,53 @@ object ClusterLogMarker {
|
|||
val singletonTerminated: LogMarker =
|
||||
LogMarker("akkaClusterSingletonTerminated")
|
||||
|
||||
/**
|
||||
* Marker "akkaSbrDowning" of log event when Split Brain Resolver has made a downing decision. Followed
|
||||
* by [[ClusterLogMarker.sbrDowningNode]] for each node that is downed.
|
||||
* @param decision The downing decision. Included as property "akkaSbrDecision".
|
||||
*/
|
||||
def sbrDowning(decision: DowningStrategy.Decision): LogMarker =
|
||||
LogMarker("akkaSbrDowning", Map(Properties.SbrDecision -> decision))
|
||||
|
||||
/**
|
||||
* Marker "akkaSbrDowningNode" of log event when a member is downed by Split Brain Resolver.
|
||||
* @param node The address of the node that is downed. Included as property "akkaRemoteAddress"
|
||||
* and "akkaRemoteAddressUid".
|
||||
* @param decision The downing decision. Included as property "akkaSbrDecision".
|
||||
*/
|
||||
def sbrDowningNode(node: UniqueAddress, decision: DowningStrategy.Decision): LogMarker =
|
||||
LogMarker(
|
||||
"akkaSbrDowningNode",
|
||||
Map(
|
||||
LogMarker.Properties.RemoteAddress -> node.address,
|
||||
LogMarker.Properties.RemoteAddressUid -> node.longUid,
|
||||
Properties.SbrDecision -> decision))
|
||||
|
||||
/**
|
||||
* Marker "akkaSbrInstability" of log event when Split Brain Resolver has detected too much instability
|
||||
* and will down all nodes.
|
||||
*/
|
||||
val sbrInstability: LogMarker =
|
||||
LogMarker("akkaSbrInstability")
|
||||
|
||||
/**
|
||||
* Marker "akkaSbrLeaseAcquired" of log event when Split Brain Resolver has acquired the lease.
|
||||
* @param decision The downing decision. Included as property "akkaSbrDecision".
|
||||
*/
|
||||
def sbrLeaseAcquired(decision: DowningStrategy.Decision): LogMarker =
|
||||
LogMarker("akkaSbrLeaseAcquired", Map(Properties.SbrDecision -> decision))
|
||||
|
||||
/**
|
||||
* Marker "akkaSbrLeaseDenied" of log event when Split Brain Resolver has acquired the lease.
|
||||
* @param reverseDecision The (reverse) downing decision. Included as property "akkaSbrDecision".
|
||||
*/
|
||||
def sbrLeaseDenied(reverseDecision: DowningStrategy.Decision): LogMarker =
|
||||
LogMarker("akkaSbrLeaseDenied", Map(Properties.SbrDecision -> reverseDecision))
|
||||
|
||||
/**
|
||||
* Marker "akkaSbrLeaseReleased" of log event when Split Brain Resolver has released the lease.
|
||||
*/
|
||||
val sbrLeaseReleased: LogMarker =
|
||||
LogMarker("akkaSbrLeaseReleased")
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -70,6 +70,9 @@ private[cluster] class ClusterRemoteWatcher(
|
|||
|
||||
override val log = Logging(context.system, ActorWithLogClass(this, ClusterLogClass.ClusterCore))
|
||||
|
||||
// allowed to watch even though address not in cluster membership, i.e. remote watch
|
||||
private val watchPathWhitelist = Set("/system/sharding/")
|
||||
|
||||
private var pendingDelayedQuarantine: Set[UniqueAddress] = Set.empty
|
||||
|
||||
var clusterNodes: Set[Address] = Set.empty
|
||||
|
|
@ -164,7 +167,19 @@ private[cluster] class ClusterRemoteWatcher(
|
|||
if (!clusterNodes(watchee.path.address)) super.watchNode(watchee)
|
||||
|
||||
override protected def shouldWatch(watchee: InternalActorRef): Boolean =
|
||||
clusterNodes(watchee.path.address) || super.shouldWatch(watchee)
|
||||
clusterNodes(watchee.path.address) || super.shouldWatch(watchee) || isWatchOutsideClusterAllowed(watchee)
|
||||
|
||||
/**
|
||||
* Allowed to watch some paths even though address not in cluster membership, i.e. remote watch.
|
||||
* Needed for ShardCoordinator that has to watch old incarnations of region ActorRef from the
|
||||
* recovered state.
|
||||
*/
|
||||
private def isWatchOutsideClusterAllowed(watchee: InternalActorRef): Boolean = {
|
||||
context.system.name == watchee.path.address.system && {
|
||||
val pathPrefix = watchee.path.elements.take(2).mkString("/", "/", "/")
|
||||
watchPathWhitelist.contains(pathPrefix)
|
||||
}
|
||||
}
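// Worked example of the prefix check above (added commentary): for a watchee whose path is
// under /system/sharding/ (such as a shard region), elements.take(2) is
// List("system", "sharding") and mkString("/", "/", "/") yields "/system/sharding/", which
// is in watchPathWhitelist and therefore allowed to be watched outside cluster membership.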
|
||||
|
||||
/**
|
||||
* When a cluster node is added this class takes over the
|
||||
|
|
|
|||
|
|
@ -40,9 +40,7 @@ private[cluster] object DowningProvider {
|
|||
* When implementing a downing provider you should make sure that it will not split the cluster into
|
||||
* several separate clusters in case of network problems or system overload (long GC pauses). This
|
||||
* is much more difficult than it might be perceived at first, so carefully read the concerns and scenarios
|
||||
* described in
|
||||
* https://doc.akka.io/docs/akka/current/typed/cluster.html#downing and
|
||||
* https://doc.akka.io/docs/akka-enhancements/current/split-brain-resolver.html
|
||||
* described in https://doc.akka.io/docs/akka/current/split-brain-resolver.html
|
||||
*/
|
||||
abstract class DowningProvider {
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,625 @@
|
|||
/*
|
||||
* Copyright (C) 2009-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sbr
|
||||
|
||||
import scala.collection.immutable
|
||||
import scala.concurrent.duration.Duration
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
|
||||
import akka.actor.Address
|
||||
import akka.annotation.InternalApi
|
||||
import akka.cluster.ClusterSettings.DataCenter
|
||||
import akka.cluster.Member
|
||||
import akka.cluster.MemberStatus
|
||||
import akka.cluster.Reachability
|
||||
import akka.cluster.UniqueAddress
|
||||
import akka.coordination.lease.scaladsl.Lease
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] object DowningStrategy {
|
||||
sealed trait Decision {
|
||||
def isIndirectlyConnected: Boolean
|
||||
}
|
||||
case object DownReachable extends Decision {
|
||||
override def isIndirectlyConnected = false
|
||||
}
|
||||
case object DownUnreachable extends Decision {
|
||||
override def isIndirectlyConnected = false
|
||||
}
|
||||
case object DownAll extends Decision {
|
||||
override def isIndirectlyConnected = false
|
||||
}
|
||||
case object DownIndirectlyConnected extends Decision {
|
||||
override def isIndirectlyConnected = true
|
||||
}
|
||||
sealed trait AcquireLeaseDecision extends Decision {
|
||||
def acquireDelay: FiniteDuration
|
||||
}
|
||||
final case class AcquireLeaseAndDownUnreachable(acquireDelay: FiniteDuration) extends AcquireLeaseDecision {
|
||||
override def isIndirectlyConnected = false
|
||||
}
|
||||
final case class AcquireLeaseAndDownIndirectlyConnected(acquireDelay: FiniteDuration) extends AcquireLeaseDecision {
|
||||
override def isIndirectlyConnected = true
|
||||
}
|
||||
case object ReverseDownIndirectlyConnected extends Decision {
|
||||
override def isIndirectlyConnected = true
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[akka] abstract class DowningStrategy(val selfDc: DataCenter) {
|
||||
import DowningStrategy._
|
||||
|
||||
// may contain Joining and WeaklyUp
|
||||
private var _unreachable: Set[UniqueAddress] = Set.empty[UniqueAddress]
|
||||
|
||||
def unreachable: Set[UniqueAddress] = _unreachable
|
||||
|
||||
def unreachable(m: Member): Boolean = _unreachable(m.uniqueAddress)
|
||||
|
||||
private var _reachability: Reachability = Reachability.empty
|
||||
|
||||
private var _seenBy: Set[Address] = Set.empty
|
||||
|
||||
protected def ordering: Ordering[Member] = Member.ordering
|
||||
|
||||
// all members in self DC, both joining and up.
|
||||
private var _allMembers: immutable.SortedSet[Member] = immutable.SortedSet.empty(ordering)
|
||||
|
||||
def role: Option[String]
|
||||
|
||||
// all Joining and WeaklyUp members in self DC
|
||||
def joining: immutable.SortedSet[Member] =
|
||||
_allMembers.filter(m => m.status == MemberStatus.Joining || m.status == MemberStatus.WeaklyUp)
|
||||
|
||||
// all members in self DC, both joining and up.
|
||||
def allMembersInDC: immutable.SortedSet[Member] = _allMembers
|
||||
|
||||
/**
|
||||
* All members in self DC, but doesn't contain Joining, WeaklyUp, Down and Exiting.
|
||||
*/
|
||||
def members: immutable.SortedSet[Member] =
|
||||
members(includingPossiblyUp = false, excludingPossiblyExiting = false)
|
||||
|
||||
/**
|
||||
* All members in self DC, but doesn't contain Joining, WeaklyUp, Down and Exiting.
|
||||
*
|
||||
* When `includingPossiblyUp=true` it also includes Joining and WeaklyUp members that could have been
|
||||
* changed to Up on the other side of a partition.
|
||||
*
|
||||
* When `excludingPossiblyExiting=true` it doesn't include Leaving members that could have been
|
||||
* changed to Exiting on the other side of the partition.
|
||||
*/
|
||||
def members(includingPossiblyUp: Boolean, excludingPossiblyExiting: Boolean): immutable.SortedSet[Member] =
|
||||
_allMembers.filterNot(
|
||||
m =>
|
||||
(!includingPossiblyUp && m.status == MemberStatus.Joining) ||
|
||||
(!includingPossiblyUp && m.status == MemberStatus.WeaklyUp) ||
|
||||
(excludingPossiblyExiting && m.status == MemberStatus.Leaving) ||
|
||||
m.status == MemberStatus.Down ||
|
||||
m.status == MemberStatus.Exiting)
|
||||
|
||||
def membersWithRole: immutable.SortedSet[Member] =
|
||||
membersWithRole(includingPossiblyUp = false, excludingPossiblyExiting = false)
|
||||
|
||||
def membersWithRole(includingPossiblyUp: Boolean, excludingPossiblyExiting: Boolean): immutable.SortedSet[Member] =
|
||||
role match {
|
||||
case None => members(includingPossiblyUp, excludingPossiblyExiting)
|
||||
case Some(r) => members(includingPossiblyUp, excludingPossiblyExiting).filter(_.hasRole(r))
|
||||
}
|
||||
|
||||
def reachableMembers: immutable.SortedSet[Member] =
|
||||
reachableMembers(includingPossiblyUp = false, excludingPossiblyExiting = false)
|
||||
|
||||
def reachableMembers(includingPossiblyUp: Boolean, excludingPossiblyExiting: Boolean): immutable.SortedSet[Member] = {
|
||||
val mbrs = members(includingPossiblyUp, excludingPossiblyExiting)
|
||||
if (unreachable.isEmpty) mbrs
|
||||
else mbrs.filter(m => !unreachable(m))
|
||||
}
|
||||
|
||||
def reachableMembersWithRole: immutable.SortedSet[Member] =
|
||||
reachableMembersWithRole(includingPossiblyUp = false, excludingPossiblyExiting = false)
|
||||
|
||||
def reachableMembersWithRole(
|
||||
includingPossiblyUp: Boolean,
|
||||
excludingPossiblyExiting: Boolean): immutable.SortedSet[Member] =
|
||||
role match {
|
||||
case None => reachableMembers(includingPossiblyUp, excludingPossiblyExiting)
|
||||
case Some(r) => reachableMembers(includingPossiblyUp, excludingPossiblyExiting).filter(_.hasRole(r))
|
||||
}
|
||||
|
||||
def unreachableMembers: immutable.SortedSet[Member] =
|
||||
unreachableMembers(includingPossiblyUp = false, excludingPossiblyExiting = false)
|
||||
|
||||
def unreachableMembers(
|
||||
includingPossiblyUp: Boolean,
|
||||
excludingPossiblyExiting: Boolean): immutable.SortedSet[Member] = {
|
||||
if (unreachable.isEmpty) immutable.SortedSet.empty
|
||||
else members(includingPossiblyUp, excludingPossiblyExiting).filter(unreachable)
|
||||
}
|
||||
|
||||
def unreachableMembersWithRole: immutable.SortedSet[Member] =
|
||||
unreachableMembersWithRole(includingPossiblyUp = false, excludingPossiblyExiting = false)
|
||||
|
||||
def unreachableMembersWithRole(
|
||||
includingPossiblyUp: Boolean,
|
||||
excludingPossiblyExiting: Boolean): immutable.SortedSet[Member] =
|
||||
role match {
|
||||
case None => unreachableMembers(includingPossiblyUp, excludingPossiblyExiting)
|
||||
case Some(r) => unreachableMembers(includingPossiblyUp, excludingPossiblyExiting).filter(_.hasRole(r))
|
||||
}
|
||||
|
||||
def addUnreachable(m: Member): Unit = {
|
||||
require(m.dataCenter == selfDc)
|
||||
|
||||
add(m)
|
||||
_unreachable = _unreachable + m.uniqueAddress
|
||||
}
|
||||
|
||||
def addReachable(m: Member): Unit = {
|
||||
require(m.dataCenter == selfDc)
|
||||
|
||||
add(m)
|
||||
_unreachable = _unreachable - m.uniqueAddress
|
||||
}
|
||||
|
||||
def add(m: Member): Unit = {
|
||||
require(m.dataCenter == selfDc)
|
||||
|
||||
removeFromAllMembers(m)
|
||||
_allMembers += m
|
||||
}
|
||||
|
||||
def remove(m: Member): Unit = {
|
||||
require(m.dataCenter == selfDc)
|
||||
|
||||
removeFromAllMembers(m)
|
||||
_unreachable -= m.uniqueAddress
|
||||
}
|
||||
|
||||
private def removeFromAllMembers(m: Member): Unit = {
|
||||
if (ordering eq Member.ordering) {
|
||||
_allMembers -= m
|
||||
} else {
|
||||
// must use filterNot for removals/replace in the SortedSet when
|
||||
// ageOrdering is using upNumber and that will change when Joining -> Up
|
||||
_allMembers = _allMembers.filterNot(_.uniqueAddress == m.uniqueAddress)
|
||||
}
|
||||
}
|
||||
|
||||
def reachability: Reachability =
|
||||
_reachability
|
||||
|
||||
private def isInSelfDc(node: UniqueAddress): Boolean = {
|
||||
_allMembers.exists(m => m.uniqueAddress == node && m.dataCenter == selfDc)
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true if it was changed
|
||||
*/
|
||||
private[sbr] def setReachability(r: Reachability): Boolean = {
|
||||
// skip records with Reachability.Reachable, and skip records related to other DC
|
||||
val newReachability = r.filterRecords(
|
||||
record =>
|
||||
(record.status == Reachability.Unreachable || record.status == Reachability.Terminated) &&
|
||||
isInSelfDc(record.observer) && isInSelfDc(record.subject))
|
||||
val oldReachability = _reachability
|
||||
|
||||
val changed =
|
||||
if (oldReachability.records.size != newReachability.records.size)
|
||||
true
|
||||
else
|
||||
oldReachability.records.map(r => r.observer -> r.subject).toSet !=
|
||||
newReachability.records.map(r => r.observer -> r.subject).toSet
|
||||
|
||||
_reachability = newReachability
|
||||
changed
|
||||
}
|
||||
|
||||
def seenBy: Set[Address] =
|
||||
_seenBy
|
||||
|
||||
def setSeenBy(s: Set[Address]): Unit =
|
||||
_seenBy = s
|
||||
|
||||
/**
|
||||
* Nodes that are marked as unreachable but can still exchange gossip via a 3rd party.
|
||||
*
|
||||
* A cycle in the unreachability graph means that some node is both
|
||||
* observing another node as unreachable, and is also observed as unreachable by someone
|
||||
* else.
|
||||
*
|
||||
* Another indication of indirectly connected nodes is if a node is marked as unreachable,
|
||||
* but it has still marked current gossip state as seen.
|
||||
*
|
||||
* Those cases will not happen for clean splits and crashed nodes.
|
||||
*/
|
||||
def indirectlyConnected: Set[UniqueAddress] = {
|
||||
indirectlyConnectedFromIntersectionOfObserversAndSubjects.union(indirectlyConnectedFromSeenCurrentGossip)
|
||||
}
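// Worked example (added commentary): if node A marks B as unreachable while C marks A as
// unreachable, A appears both as an observer and as an unreachable subject in the
// reachability records, so the intersection check below classifies A as indirectly connected.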
|
||||
|
||||
private def indirectlyConnectedFromIntersectionOfObserversAndSubjects: Set[UniqueAddress] = {
|
||||
// cycle in unreachability graph
|
||||
val observers = reachability.allObservers
|
||||
observers.intersect(reachability.allUnreachableOrTerminated)
|
||||
}
|
||||
|
||||
private def indirectlyConnectedFromSeenCurrentGossip: Set[UniqueAddress] = {
|
||||
reachability.records.flatMap { r =>
|
||||
if (seenBy(r.subject.address)) r.observer :: r.subject :: Nil
|
||||
else Nil
|
||||
}.toSet
|
||||
}
|
||||
|
||||
def hasIndirectlyConnected: Boolean = indirectlyConnected.nonEmpty
|
||||
|
||||
def unreachableButNotIndirectlyConnected: Set[UniqueAddress] = unreachable.diff(indirectlyConnected)
|
||||
|
||||
def nodesToDown(decision: Decision = decide()): Set[UniqueAddress] = {
|
||||
val downable = members
|
||||
.union(joining)
|
||||
.filterNot(m => m.status == MemberStatus.Down || m.status == MemberStatus.Exiting)
|
||||
.map(_.uniqueAddress)
|
||||
decision match {
|
||||
case DownUnreachable | AcquireLeaseAndDownUnreachable(_) => downable.intersect(unreachable)
|
||||
case DownReachable => downable.diff(unreachable)
|
||||
case DownAll => downable
|
||||
case DownIndirectlyConnected | AcquireLeaseAndDownIndirectlyConnected(_) =>
|
||||
// Down nodes that have been marked as unreachable via some network links but they are still indirectly
|
||||
// connected via other links. It will keep other "normal" nodes.
|
||||
// If there is a combination of indirectly connected nodes and a clean network partition (or node crashes)
|
||||
// it will combine the above decision with the ordinary decision, e.g. keep majority, after excluding
|
||||
// failure detection observations between the indirectly connected nodes.
|
||||
// Also include nodes that correspond to the decision without the unreachability observations from
|
||||
// the indirectly connected nodes
|
||||
downable.intersect(indirectlyConnected.union(additionalNodesToDownWhenIndirectlyConnected))
|
||||
case ReverseDownIndirectlyConnected =>
|
||||
// indirectly connected + all reachable
|
||||
downable.intersect(indirectlyConnected).union(downable.diff(unreachable))
|
||||
}
|
||||
}
|
||||
|
||||
private def additionalNodesToDownWhenIndirectlyConnected: Set[UniqueAddress] = {
|
||||
if (unreachableButNotIndirectlyConnected.isEmpty)
|
||||
Set.empty
|
||||
else {
|
||||
val originalUnreachable = _unreachable
|
||||
val originalReachability = _reachability
|
||||
try {
|
||||
val intersectionOfObserversAndSubjects = indirectlyConnectedFromIntersectionOfObserversAndSubjects
|
||||
val haveSeenCurrentGossip = indirectlyConnectedFromSeenCurrentGossip
|
||||
// remove records between the indirectly connected
|
||||
_reachability = reachability.filterRecords(
|
||||
r =>
|
||||
!((intersectionOfObserversAndSubjects(r.observer) && intersectionOfObserversAndSubjects(r.subject)) ||
|
||||
(haveSeenCurrentGossip(r.observer) && haveSeenCurrentGossip(r.subject))))
|
||||
_unreachable = reachability.allUnreachableOrTerminated
|
||||
val additionalDecision = decide()
|
||||
|
||||
if (additionalDecision.isIndirectlyConnected)
|
||||
throw new IllegalStateException(
|
||||
s"SBR double $additionalDecision decision, downing all instead. " +
|
||||
s"originalReachability: [$originalReachability], filtered reachability [$reachability], " +
|
||||
s"still indirectlyConnected: [$indirectlyConnected], seenBy: [$seenBy]")
|
||||
|
||||
nodesToDown(additionalDecision)
|
||||
} finally {
|
||||
_unreachable = originalUnreachable
|
||||
_reachability = originalReachability
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def isAllUnreachableDownOrExiting: Boolean = {
|
||||
_unreachable.isEmpty ||
|
||||
unreachableMembers.forall(m => m.status == MemberStatus.Down || m.status == MemberStatus.Exiting)
|
||||
}
|
||||
|
||||
def reverseDecision(decision: Decision): Decision = {
|
||||
decision match {
|
||||
case DownUnreachable => DownReachable
|
||||
case AcquireLeaseAndDownUnreachable(_) => DownReachable
|
||||
case DownReachable => DownUnreachable
|
||||
case DownAll => DownAll
|
||||
case DownIndirectlyConnected => ReverseDownIndirectlyConnected
|
||||
case AcquireLeaseAndDownIndirectlyConnected(_) => ReverseDownIndirectlyConnected
|
||||
case ReverseDownIndirectlyConnected => DownIndirectlyConnected
|
||||
}
|
||||
}
|
||||
|
||||
def decide(): Decision
|
||||
|
||||
def lease: Option[Lease] = None
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*
|
||||
* Down the unreachable nodes if the number of remaining nodes is greater than or equal to the
|
||||
* given `quorumSize`. Otherwise down the reachable nodes, i.e. it will shut down that side of the partition.
|
||||
* In other words, the `quorumSize` defines the minimum number of nodes that the cluster must have to be operational.
|
||||
* If there are unreachable nodes when starting up the cluster, before reaching this limit,
|
||||
* the cluster may shutdown itself immediately. This is not an issue if you start all nodes at
|
||||
* approximately the same time.
|
||||
*
|
||||
* Note that you must not add more members to the cluster than `quorumSize * 2 - 1`, because then
|
||||
* both sides may down each other and thereby form two separate clusters. For example,
|
||||
* `quorumSize` configured to 3 in a 6-node cluster may result in a split where each side
* consists of 3 nodes, i.e. each side thinks it has enough nodes to continue by
|
||||
* itself. A warning is logged if this recommendation is violated.
|
||||
*
|
||||
* If the `role` is defined the decision is based only on members with that `role`.
|
||||
*
|
||||
* It is only counting members within the own data center.
|
||||
*/
|
||||
@InternalApi private[sbr] final class StaticQuorum(
|
||||
selfDc: DataCenter,
|
||||
val quorumSize: Int,
|
||||
override val role: Option[String])
|
||||
extends DowningStrategy(selfDc) {
|
||||
import DowningStrategy._
|
||||
|
||||
  override def decide(): Decision = {
    if (isTooManyMembers)
      DownAll
    else if (hasIndirectlyConnected)
      DownIndirectlyConnected
    else if (membersWithRole.size - unreachableMembersWithRole.size >= quorumSize)
      DownUnreachable
    else
      DownReachable
  }

  def isTooManyMembers: Boolean =
    membersWithRole.size > (quorumSize * 2 - 1)
}
|
||||
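The static-quorum rule above can be illustrated with a small, self-contained sketch. It mirrors only the arithmetic of `decide()` and `isTooManyMembers` (roles and indirectly connected nodes left out); the object and method names are illustrative and not part of the real `DowningStrategy` API.

```scala
// Standalone illustration of the static-quorum arithmetic (not the real strategy API).
object StaticQuorumSketch extends App {
  sealed trait Decision
  case object DownAll extends Decision
  case object DownUnreachable extends Decision
  case object DownReachable extends Decision

  def staticQuorumDecision(members: Int, unreachable: Int, quorumSize: Int): Decision =
    if (members > quorumSize * 2 - 1) DownAll // too many members: both sides could reach the quorum
    else if (members - unreachable >= quorumSize) DownUnreachable // this side still holds the quorum
    else DownReachable // this side lost the quorum and shuts itself down

  // quorum-size = 3 in a 5-node cluster, clean partition 3 | 2:
  println(staticQuorumDecision(members = 5, unreachable = 2, quorumSize = 3)) // DownUnreachable
  println(staticQuorumDecision(members = 5, unreachable = 3, quorumSize = 3)) // DownReachable
  // 6 members with quorum-size = 3 violates the `quorumSize * 2 - 1` recommendation:
  println(staticQuorumDecision(members = 6, unreachable = 3, quorumSize = 3)) // DownAll
}
```

Both sides of a partition evaluate the same rule, so at most one side can satisfy the quorum as long as the cluster size stays within `quorumSize * 2 - 1`.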
|
||||
/**
|
||||
* INTERNAL API
|
||||
*
|
||||
* Down the unreachable nodes if the current node is in the majority part based on the last known
* membership information. Otherwise down the reachable nodes, i.e. the own part. If the
* parts are of equal size the part containing the node with the lowest address is kept.
|
||||
*
|
||||
* If the `role` is defined the decision is based only on members with that `role`.
|
||||
*
|
||||
* Note that if there are more than two partitions and none is in majority each part
|
||||
* will shutdown itself, terminating the whole cluster.
|
||||
*
|
||||
* It is only counting members within the own data center.
|
||||
*/
|
||||
@InternalApi private[sbr] final class KeepMajority(selfDc: DataCenter, override val role: Option[String])
|
||||
extends DowningStrategy(selfDc) {
|
||||
import DowningStrategy._
|
||||
|
||||
override def decide(): Decision = {
|
||||
if (hasIndirectlyConnected)
|
||||
DownIndirectlyConnected
|
||||
else {
|
||||
val ms = membersWithRole
|
||||
if (ms.isEmpty)
|
||||
DownAll // no node with matching role
|
||||
else {
|
||||
val reachableSize = reachableMembersWithRole.size
|
||||
val unreachableSize = unreachableMembersWithRole.size
|
||||
|
||||
majorityDecision(reachableSize, unreachableSize, ms.head) match {
|
||||
case DownUnreachable =>
|
||||
majorityDecisionWhenIncludingMembershipChangesEdgeCase() match {
|
||||
case DownUnreachable => DownUnreachable // same conclusion
|
||||
case _ => DownAll // different conclusion, safest to DownAll
|
||||
}
|
||||
case decision => decision
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
  private def majorityDecision(thisSide: Int, otherSide: Int, lowest: Member): Decision = {
    if (thisSide == otherSide) {
      // equal size, keep the side with the lowest address (first in members)
      if (unreachable(lowest)) DownReachable else DownUnreachable
    } else if (thisSide > otherSide) {
      // we are in majority
      DownUnreachable
    } else {
      // we are in minority
      DownReachable
    }
  }
|
||||
|
||||
/**
|
||||
* Check for edge case when membership change happens at the same time as partition.
|
||||
* Count Joining and WeaklyUp on other side since those might be Up on other side.
|
||||
* Don't count Leaving on this side since those might be Exiting on other side.
|
||||
* Note that the membership changes we are looking for will only be done when all
|
||||
* members have seen previous state, i.e. when a member is moved to Up everybody
|
||||
* has seen it joining.
|
||||
*/
|
||||
private def majorityDecisionWhenIncludingMembershipChangesEdgeCase(): Decision = {
|
||||
// for this side we count as few as could be possible (excluding joining, excluding leaving)
|
||||
val ms = membersWithRole(includingPossiblyUp = false, excludingPossiblyExiting = true)
|
||||
if (ms.isEmpty) {
|
||||
DownAll
|
||||
} else {
|
||||
val thisSideReachableSize =
|
||||
reachableMembersWithRole(includingPossiblyUp = false, excludingPossiblyExiting = true).size
|
||||
// for other side we count as many as could be possible (including joining, including leaving)
|
||||
val otherSideUnreachableSize =
|
||||
unreachableMembersWithRole(includingPossiblyUp = true, excludingPossiblyExiting = false).size
|
||||
majorityDecision(thisSideReachableSize, otherSideUnreachableSize, ms.head)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
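For reference, the core of `majorityDecision` can be exercised in isolation. This is a simplified sketch (names are illustrative) that leaves out roles, indirectly connected nodes, and the membership-change edge case handled by `majorityDecisionWhenIncludingMembershipChangesEdgeCase`.

```scala
// Simplified keep-majority decision (illustrative only, not the real strategy API).
object KeepMajoritySketch extends App {
  sealed trait Decision
  case object DownUnreachable extends Decision
  case object DownReachable extends Decision

  // On an equal split the side containing the member with the lowest address is kept.
  def keepMajority(thisSide: Int, otherSide: Int, lowestAddressIsUnreachable: Boolean): Decision =
    if (thisSide == otherSide) {
      if (lowestAddressIsUnreachable) DownReachable else DownUnreachable
    } else if (thisSide > otherSide) DownUnreachable
    else DownReachable

  // 5-node cluster partitioned 3 | 2: the majority side downs the others, the minority downs itself.
  println(keepMajority(thisSide = 3, otherSide = 2, lowestAddressIsUnreachable = false)) // DownUnreachable
  println(keepMajority(thisSide = 2, otherSide = 3, lowestAddressIsUnreachable = true)) // DownReachable
}
```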
|
||||
/**
|
||||
* INTERNAL API
|
||||
*
|
||||
* Down the part that does not contain the oldest member (current singleton).
|
||||
*
|
||||
* There is one exception to this rule if `downIfAlone` is defined to `true`.
|
||||
* Then, if the oldest node has partitioned from all other nodes the oldest will
|
||||
* down itself and keep all other nodes running. The strategy will not down the
|
||||
* single oldest node when it is the only remaining node in the cluster.
|
||||
*
|
||||
* Note that if the oldest node crashes the others will remove it from the cluster
|
||||
* when `downIfAlone` is `true`, otherwise they will down themselves if the
|
||||
* oldest node crashes, i.e. shutdown the whole cluster together with the oldest node.
|
||||
*
|
||||
* If the `role` is defined the decision is based only on members with that `role`,
|
||||
* i.e. using the oldest member (singleton) within the nodes with that role.
|
||||
*
|
||||
* It is only using members within the own data center, i.e. oldest within the
|
||||
* data center.
|
||||
*/
|
||||
@InternalApi private[sbr] final class KeepOldest(
|
||||
selfDc: DataCenter,
|
||||
val downIfAlone: Boolean,
|
||||
override val role: Option[String])
|
||||
extends DowningStrategy(selfDc) {
|
||||
import DowningStrategy._
|
||||
|
||||
// sort by age, oldest first
|
||||
override def ordering: Ordering[Member] = Member.ageOrdering
|
||||
|
||||
override def decide(): Decision = {
|
||||
if (hasIndirectlyConnected)
|
||||
DownIndirectlyConnected
|
||||
else {
|
||||
val ms = membersWithRole
|
||||
if (ms.isEmpty)
|
||||
DownAll // no node with matching role
|
||||
else {
|
||||
val oldest = ms.head
|
||||
val oldestIsReachable = !unreachable(oldest)
|
||||
val reachableCount = reachableMembersWithRole.size
|
||||
val unreachableCount = unreachableMembersWithRole.size
|
||||
|
||||
oldestDecision(oldestIsReachable, reachableCount, unreachableCount) match {
|
||||
case DownUnreachable =>
|
||||
oldestDecisionWhenIncludingMembershipChangesEdgeCase() match {
|
||||
case DownUnreachable => DownUnreachable // same conclusion
|
||||
case _ => DownAll // different conclusion, safest to DownAll
|
||||
}
|
||||
case decision => decision
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
  private def oldestDecision(oldestIsOnThisSide: Boolean, thisSide: Int, otherSide: Int): Decision = {
    if (oldestIsOnThisSide) {
      // if there are only 2 nodes in the cluster it is better to keep the oldest, even though it is alone
      // E.g. 2 nodes: thisSide=1, otherSide=1 => DownUnreachable, i.e. keep the oldest
      // even though it is alone (because the node on the other side is no better)
      // E.g. 3 nodes: thisSide=1, otherSide=2 => DownReachable, i.e. shut down the
      // oldest because it is alone
      if (downIfAlone && thisSide == 1 && otherSide >= 2) DownReachable
      else DownUnreachable
    } else {
      if (downIfAlone && otherSide == 1 && thisSide >= 2) DownUnreachable
      else DownReachable
    }
  }
|
||||
|
||||
/**
|
||||
* Check for edge case when membership change happens at the same time as partition.
|
||||
* Exclude Leaving on this side because those could be Exiting on other side.
|
||||
*
|
||||
* When `downIfAlone` also consider Joining and WeaklyUp since those might be Up on other side,
|
||||
* and thereby flip the alone test.
|
||||
*/
|
||||
private def oldestDecisionWhenIncludingMembershipChangesEdgeCase(): Decision = {
|
||||
val ms = membersWithRole(includingPossiblyUp = false, excludingPossiblyExiting = true)
|
||||
if (ms.isEmpty) {
|
||||
DownAll
|
||||
} else {
|
||||
val oldest = ms.head
|
||||
val oldestIsReachable = !unreachable(oldest)
|
||||
// Joining and WeaklyUp are only relevant when downIfAlone = true
|
||||
val includingPossiblyUp = downIfAlone
|
||||
val reachableCount = reachableMembersWithRole(includingPossiblyUp, excludingPossiblyExiting = true).size
|
||||
val unreachableCount = unreachableMembersWithRole(includingPossiblyUp, excludingPossiblyExiting = true).size
|
||||
|
||||
oldestDecision(oldestIsReachable, reachableCount, unreachableCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
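The `downIfAlone` special case above is easiest to see with concrete numbers. A minimal sketch of the `oldestDecision` logic (illustrative names; roles and membership-change edge cases omitted):

```scala
// Simplified keep-oldest decision with down-if-alone (illustrative only).
object KeepOldestSketch extends App {
  sealed trait Decision
  case object DownUnreachable extends Decision
  case object DownReachable extends Decision

  def keepOldest(oldestOnThisSide: Boolean, thisSide: Int, otherSide: Int, downIfAlone: Boolean): Decision =
    if (oldestOnThisSide) {
      // give up the oldest if it has been isolated from everybody else
      if (downIfAlone && thisSide == 1 && otherSide >= 2) DownReachable
      else DownUnreachable
    } else {
      if (downIfAlone && otherSide == 1 && thisSide >= 2) DownUnreachable
      else DownReachable
    }

  // 3 nodes, oldest isolated on its own, down-if-alone = on: the two other nodes survive.
  println(keepOldest(oldestOnThisSide = false, thisSide = 2, otherSide = 1, downIfAlone = true)) // DownUnreachable
  // 2 nodes: keep the oldest even though it is alone, the other side is no better off.
  println(keepOldest(oldestOnThisSide = true, thisSide = 1, otherSide = 1, downIfAlone = true)) // DownUnreachable
}
```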
|
||||
/**
|
||||
* INTERNAL API
|
||||
*
|
||||
* Down all nodes unconditionally.
|
||||
*/
|
||||
@InternalApi private[sbr] final class DownAllNodes(selfDc: DataCenter) extends DowningStrategy(selfDc) {
|
||||
import DowningStrategy._
|
||||
|
||||
override def decide(): Decision =
|
||||
DownAll
|
||||
|
||||
override def role: Option[String] = None
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*
|
||||
* Keep the part that can acquire the lease, and down the other part.
|
||||
*
|
||||
* Best effort is to keep the side that has most nodes, i.e. the majority side.
|
||||
* This is achieved by adding a delay before trying to acquire the lease on the
|
||||
* minority side.
|
||||
*
|
||||
* If the `role` is defined the majority/minority is based only on members with that `role`.
|
||||
* It is only counting members within the own data center.
|
||||
*/
|
||||
@InternalApi private[sbr] final class LeaseMajority(
|
||||
selfDc: DataCenter,
|
||||
override val role: Option[String],
|
||||
_lease: Lease,
|
||||
acquireLeaseDelayForMinority: FiniteDuration)
|
||||
extends DowningStrategy(selfDc) {
|
||||
import DowningStrategy._
|
||||
|
||||
override val lease: Option[Lease] = Some(_lease)
|
||||
|
||||
override def decide(): Decision = {
|
||||
if (hasIndirectlyConnected)
|
||||
AcquireLeaseAndDownIndirectlyConnected(Duration.Zero)
|
||||
else
|
||||
AcquireLeaseAndDownUnreachable(acquireLeaseDelay)
|
||||
}
|
||||
|
||||
private def acquireLeaseDelay: FiniteDuration =
|
||||
if (isInMinority) acquireLeaseDelayForMinority else Duration.Zero
|
||||
|
||||
private def isInMinority: Boolean = {
|
||||
val ms = membersWithRole
|
||||
if (ms.isEmpty)
|
||||
false // no node with matching role
|
||||
else {
|
||||
val unreachableSize = unreachableMembersWithRole.size
|
||||
val membersSize = ms.size
|
||||
|
||||
if (unreachableSize * 2 == membersSize) {
|
||||
// equal size, try to keep the side with the lowest address (first in members)
|
||||
unreachable(ms.head)
|
||||
} else if (unreachableSize * 2 < membersSize) {
|
||||
// we are in majority
|
||||
false
|
||||
} else {
|
||||
// we are in minority
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
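LeaseMajority only decides to acquire the lease; which lease implementation is used comes from configuration. A hedged configuration sketch matching the keys read by `SplitBrainResolverSettings` further down in this diff; the `lease-implementation` value is a placeholder that must point to a configured lease provider:

```scala
import com.typesafe.config.ConfigFactory

// Configuration sketch for the lease-majority strategy (values are placeholders).
object LeaseMajorityConfigSketch extends App {
  val config = ConfigFactory.parseString("""
    akka.cluster {
      downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
      split-brain-resolver {
        active-strategy = lease-majority
        lease-majority {
          # placeholder: must reference a real lease implementation config path
          lease-implementation = "my-lease"
          acquire-lease-delay-for-minority = 2s
          role = ""
        }
      }
    }
    """)
  println(config.getString("akka.cluster.split-brain-resolver.lease-majority.lease-implementation"))
}
```

The minority side delays its acquire attempt by `acquire-lease-delay-for-minority`, which is what gives the majority side a head start at the lease.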
|
|
@@ -0,0 +1,595 @@
|
|||
/*
|
||||
* Copyright (C) 2009-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sbr
|
||||
|
||||
import java.time.Instant
|
||||
import java.time.temporal.ChronoUnit
|
||||
|
||||
import scala.concurrent.ExecutionContext
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import akka.actor.Actor
|
||||
import akka.actor.Address
|
||||
import akka.actor.ExtendedActorSystem
|
||||
import akka.actor.Props
|
||||
import akka.actor.Stash
|
||||
import akka.actor.Timers
|
||||
import akka.annotation.InternalApi
|
||||
import akka.cluster.Cluster
|
||||
import akka.cluster.ClusterEvent
|
||||
import akka.cluster.ClusterEvent._
|
||||
import akka.cluster.ClusterLogMarker
|
||||
import akka.cluster.ClusterSettings.DataCenter
|
||||
import akka.cluster.Member
|
||||
import akka.cluster.Reachability
|
||||
import akka.cluster.UniqueAddress
|
||||
import akka.cluster.sbr.DowningStrategy.Decision
|
||||
import akka.event.DiagnosticMarkerBusLoggingAdapter
|
||||
import akka.event.Logging
|
||||
import akka.pattern.pipe
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[sbr] object SplitBrainResolver {
|
||||
|
||||
def props(stableAfter: FiniteDuration, strategy: DowningStrategy): Props =
|
||||
Props(new SplitBrainResolver(stableAfter, strategy))
|
||||
|
||||
case object Tick
|
||||
|
||||
/**
|
||||
* Response (result) of the acquire lease request.
|
||||
*/
|
||||
final case class AcquireLeaseResult(holdingLease: Boolean)
|
||||
|
||||
/**
|
||||
* Response (result) of the release lease request.
|
||||
*/
|
||||
final case class ReleaseLeaseResult(released: Boolean)
|
||||
|
||||
/**
|
||||
* For delayed acquire of the lease.
|
||||
*/
|
||||
case object AcquireLease
|
||||
|
||||
sealed trait ReleaseLeaseCondition
|
||||
object ReleaseLeaseCondition {
|
||||
case object NoLease extends ReleaseLeaseCondition
|
||||
final case class WhenMembersRemoved(nodes: Set[UniqueAddress]) extends ReleaseLeaseCondition
|
||||
final case class WhenTimeElapsed(deadline: Deadline) extends ReleaseLeaseCondition
|
||||
}
|
||||
|
||||
final case class ReachabilityChangedStats(
|
||||
firstChangeTimestamp: Long,
|
||||
latestChangeTimestamp: Long,
|
||||
changeCount: Long) {
|
||||
|
||||
def isEmpty: Boolean =
|
||||
changeCount == 0
|
||||
|
||||
override def toString: String = {
|
||||
if (isEmpty)
|
||||
"reachability unchanged"
|
||||
else {
|
||||
val now = System.nanoTime()
|
||||
s"reachability changed $changeCount times since ${(now - firstChangeTimestamp).nanos.toMillis} ms ago, " +
|
||||
s"latest change was ${(now - latestChangeTimestamp).nanos.toMillis} ms ago"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*
|
||||
* Unreachable members will be downed by this actor according to the given strategy.
|
||||
* It is active on the leader node in the cluster.
|
||||
*
|
||||
* The implementation is split into two classes SplitBrainResolver and SplitBrainResolverBase to be
|
||||
* able to unit test the logic without running cluster.
|
||||
*/
|
||||
@InternalApi private[sbr] final class SplitBrainResolver(stableAfter: FiniteDuration, strategy: DowningStrategy)
|
||||
extends SplitBrainResolverBase(stableAfter, strategy) {
|
||||
|
||||
private val cluster = Cluster(context.system)
|
||||
|
||||
log.info(
|
||||
"SBR started. Config: stableAfter: {} ms, strategy: {}, selfUniqueAddress: {}, selfDc: {}",
|
||||
stableAfter.toMillis,
|
||||
Logging.simpleName(strategy.getClass),
|
||||
selfUniqueAddress,
|
||||
selfDc)
|
||||
|
||||
override def selfUniqueAddress: UniqueAddress = cluster.selfUniqueAddress
|
||||
override def selfDc: DataCenter = cluster.selfDataCenter
|
||||
|
||||
// re-subscribe when restart
|
||||
override def preStart(): Unit = {
|
||||
cluster.subscribe(self, ClusterEvent.InitialStateAsEvents, classOf[ClusterDomainEvent])
|
||||
super.preStart()
|
||||
}
|
||||
override def postStop(): Unit = {
|
||||
cluster.unsubscribe(self)
|
||||
super.postStop()
|
||||
}
|
||||
|
||||
override def down(node: UniqueAddress, decision: Decision): Unit = {
|
||||
log.info(ClusterLogMarker.sbrDowningNode(node, decision), "SBR is downing [{}]", node)
|
||||
cluster.down(node.address)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*
|
||||
* The implementation is split into two classes SplitBrainResolver and SplitBrainResolverBase to be
|
||||
* able to unit test the logic without running cluster.
|
||||
*/
|
||||
@InternalApi private[sbr] abstract class SplitBrainResolverBase(stableAfter: FiniteDuration, strategy: DowningStrategy)
|
||||
extends Actor
|
||||
with Stash
|
||||
with Timers {
|
||||
|
||||
import DowningStrategy._
|
||||
import SplitBrainResolver.ReleaseLeaseCondition.NoLease
|
||||
import SplitBrainResolver._
|
||||
|
||||
val log: DiagnosticMarkerBusLoggingAdapter = Logging.withMarker(this)
|
||||
|
||||
def selfUniqueAddress: UniqueAddress
|
||||
|
||||
def selfDc: DataCenter
|
||||
|
||||
def down(node: UniqueAddress, decision: Decision): Unit
|
||||
|
||||
// would be better as constructor parameter, but don't want to break Cinnamon instrumentation
|
||||
private val settings = new SplitBrainResolverSettings(context.system.settings.config)
|
||||
|
||||
def downAllWhenUnstable: FiniteDuration =
|
||||
settings.DownAllWhenUnstable
|
||||
|
||||
private val releaseLeaseAfter = stableAfter * 2
|
||||
|
||||
def tickInterval: FiniteDuration = 1.second
|
||||
|
||||
timers.startTimerWithFixedDelay(Tick, Tick, tickInterval)
|
||||
|
||||
var leader = false
|
||||
var selfMemberAdded = false
|
||||
|
||||
private def internalDispatcher: ExecutionContext =
|
||||
context.system.asInstanceOf[ExtendedActorSystem].dispatchers.internalDispatcher
|
||||
|
||||
// overridden in tests
|
||||
protected def newStableDeadline(): Deadline = Deadline.now + stableAfter
|
||||
var stableDeadline: Deadline = _
|
||||
def resetStableDeadline(): Unit = {
|
||||
stableDeadline = newStableDeadline()
|
||||
}
|
||||
|
||||
resetStableDeadline()
|
||||
|
||||
private var reachabilityChangedStats: ReachabilityChangedStats =
|
||||
ReachabilityChangedStats(System.nanoTime(), System.nanoTime(), 0)
|
||||
|
||||
private def resetReachabilityChangedStats(): Unit = {
|
||||
val now = System.nanoTime()
|
||||
reachabilityChangedStats = ReachabilityChangedStats(now, now, 0)
|
||||
}
|
||||
|
||||
private def resetReachabilityChangedStatsIfAllUnreachableDowned(): Unit = {
|
||||
if (!reachabilityChangedStats.isEmpty && strategy.isAllUnreachableDownOrExiting) {
|
||||
log.debug("SBR resetting reachability stats, after all unreachable healed, downed or removed")
|
||||
resetReachabilityChangedStats()
|
||||
}
|
||||
}
|
||||
|
||||
private var releaseLeaseCondition: ReleaseLeaseCondition = NoLease
|
||||
|
||||
/** Helper to wrap updates to strategy info with, so that stable-after timer is reset and information is logged about state change */
|
||||
def mutateMemberInfo(resetStable: Boolean)(f: () => Unit): Unit = {
|
||||
val unreachableBefore = strategy.unreachable.size
|
||||
f()
|
||||
val unreachableAfter = strategy.unreachable.size
|
||||
|
||||
def earliestTimeOfDecision: String =
|
||||
Instant.now().plus(stableAfter.toMillis, ChronoUnit.MILLIS).toString
|
||||
|
||||
if (resetStable) {
|
||||
if (isResponsible) {
|
||||
if (unreachableBefore == 0 && unreachableAfter > 0) {
|
||||
log.info(
|
||||
"SBR found unreachable members, waiting for stable-after = {} ms before taking downing decision. " +
|
||||
"Now {} unreachable members found. Downing decision will not be made before {}.",
|
||||
stableAfter.toMillis,
|
||||
unreachableAfter,
|
||||
earliestTimeOfDecision)
|
||||
} else if (unreachableBefore > 0 && unreachableAfter == 0) {
|
||||
log.info(
|
||||
"SBR found all unreachable members healed during stable-after period, no downing decision necessary for now.")
|
||||
} else if (unreachableAfter > 0) {
|
||||
log.info(
|
||||
"SBR found unreachable members changed during stable-after period. Resetting timer. " +
|
||||
"Now {} unreachable members found. Downing decision will not be made before {}.",
|
||||
unreachableAfter,
|
||||
earliestTimeOfDecision)
|
||||
}
|
||||
// else no unreachable members found but set of members changed
|
||||
}
|
||||
|
||||
log.debug("SBR reset stable deadline when members/unreachable changed")
|
||||
resetStableDeadline()
|
||||
}
|
||||
}
|
||||
|
||||
/** Helper to wrap updates to `leader` and `selfMemberAdded` to log changes in responsibility status */
|
||||
def mutateResponsibilityInfo(f: () => Unit): Unit = {
|
||||
val responsibleBefore = isResponsible
|
||||
f()
|
||||
val responsibleAfter = isResponsible
|
||||
|
||||
if (!responsibleBefore && responsibleAfter)
|
||||
log.info(
|
||||
"This node is now the leader responsible for taking SBR decisions among the reachable nodes " +
|
||||
"(more leaders may exist).")
|
||||
else if (responsibleBefore && !responsibleAfter)
|
||||
log.info("This node is not the leader any more and not responsible for taking SBR decisions.")
|
||||
|
||||
if (leader && !selfMemberAdded)
|
||||
log.debug("This node is leader but !selfMemberAdded.")
|
||||
}
|
||||
|
||||
private var unreachableDataCenters = Set.empty[DataCenter]
|
||||
|
||||
override def postStop(): Unit = {
|
||||
if (releaseLeaseCondition != NoLease) {
|
||||
log.info(
|
||||
"SBR is stopped and owns the lease. The lease will not be released until after the " +
|
||||
"lease heartbeat-timeout.")
|
||||
}
|
||||
super.postStop()
|
||||
}
|
||||
|
||||
def receive: Receive = {
|
||||
case SeenChanged(_, seenBy) => seenChanged(seenBy)
|
||||
case MemberJoined(m) => addJoining(m)
|
||||
case MemberWeaklyUp(m) => addWeaklyUp(m)
|
||||
case MemberUp(m) => addUp(m)
|
||||
case MemberLeft(m) => leaving(m)
|
||||
case UnreachableMember(m) => unreachableMember(m)
|
||||
case MemberDowned(m) => unreachableMember(m)
|
||||
case MemberExited(m) => unreachableMember(m)
|
||||
case ReachableMember(m) => reachableMember(m)
|
||||
case ReachabilityChanged(r) => reachabilityChanged(r)
|
||||
case MemberRemoved(m, _) => remove(m)
|
||||
case UnreachableDataCenter(dc) => unreachableDataCenter(dc)
|
||||
case ReachableDataCenter(dc) => reachableDataCenter(dc)
|
||||
case LeaderChanged(leaderOption) => leaderChanged(leaderOption)
|
||||
case ReleaseLeaseResult(released) => releaseLeaseResult(released)
|
||||
case Tick => tick()
|
||||
case _: ClusterDomainEvent => // not interested in other events
|
||||
}
|
||||
|
||||
private def leaderChanged(leaderOption: Option[Address]): Unit = {
|
||||
mutateResponsibilityInfo { () =>
|
||||
leader = leaderOption.contains(selfUniqueAddress.address)
|
||||
}
|
||||
}
|
||||
|
||||
private def tick(): Unit = {
|
||||
// note the DownAll due to instability is running on all nodes to make that decision as quickly and
|
||||
// aggressively as possible if time is out
|
||||
if (reachabilityChangedStats.changeCount > 0) {
|
||||
val now = System.nanoTime()
|
||||
val durationSinceLatestChange = (now - reachabilityChangedStats.latestChangeTimestamp).nanos
|
||||
val durationSinceFirstChange = (now - reachabilityChangedStats.firstChangeTimestamp).nanos
|
||||
|
||||
if (durationSinceLatestChange > (stableAfter * 2)) {
|
||||
log.debug("SBR no reachability changes within {} ms, resetting stats", (stableAfter * 2).toMillis)
|
||||
resetReachabilityChangedStats()
|
||||
} else if (downAllWhenUnstable > Duration.Zero &&
|
||||
durationSinceFirstChange > (stableAfter + downAllWhenUnstable)) {
|
||||
log.warning(
|
||||
ClusterLogMarker.sbrInstability,
|
||||
"SBR detected instability and will down all nodes: {}",
|
||||
reachabilityChangedStats)
|
||||
actOnDecision(DownAll)
|
||||
}
|
||||
}
|
||||
|
||||
if (isResponsible && strategy.unreachable.nonEmpty && stableDeadline.isOverdue()) {
|
||||
strategy.decide() match {
|
||||
case decision: AcquireLeaseDecision =>
|
||||
strategy.lease match {
|
||||
case Some(lease) =>
|
||||
if (lease.checkLease()) {
|
||||
log.info(
|
||||
ClusterLogMarker.sbrLeaseAcquired(decision),
|
||||
"SBR has acquired lease for decision [{}]",
|
||||
decision)
|
||||
actOnDecision(decision)
|
||||
} else {
|
||||
if (decision.acquireDelay == Duration.Zero)
|
||||
acquireLease() // reply message is AcquireLeaseResult
|
||||
else {
|
||||
log.debug("SBR delayed attempt to acquire lease for [{} ms]", decision.acquireDelay.toMillis)
|
||||
timers.startSingleTimer(AcquireLease, AcquireLease, decision.acquireDelay)
|
||||
}
|
||||
context.become(waitingForLease(decision))
|
||||
}
|
||||
case None =>
|
||||
throw new IllegalStateException("Unexpected lease decision although lease is not configured")
|
||||
}
|
||||
|
||||
case decision =>
|
||||
actOnDecision(decision)
|
||||
}
|
||||
}
|
||||
|
||||
releaseLeaseCondition match {
|
||||
case ReleaseLeaseCondition.WhenTimeElapsed(deadline) =>
|
||||
if (deadline.isOverdue())
|
||||
releaseLease() // reply message is ReleaseLeaseResult, which will update the releaseLeaseCondition
|
||||
case _ =>
|
||||
// no lease or first waiting for downed nodes to be removed
|
||||
}
|
||||
}
|
||||
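The instability handling in `tick()` is easiest to follow with concrete durations. With `stable-after = 10s` and `down-all-when-unstable = 7s` (the values used by `DownAllUnstable5NodeSpec` later in this diff), DownAll is triggered once reachability has kept changing for more than 17 seconds since the first change, while a quiet period of `2 * stable-after` resets the statistics. A simplified sketch of just that timing predicate (names are illustrative):

```scala
import scala.concurrent.duration._

// Simplified reproduction of the instability checks in tick() (illustrative only).
object InstabilityTimingSketch extends App {
  val stableAfter: FiniteDuration = 10.seconds
  val downAllWhenUnstable: FiniteDuration = 7.seconds

  def shouldDownAll(sinceFirstChange: FiniteDuration): Boolean =
    downAllWhenUnstable > Duration.Zero && sinceFirstChange > (stableAfter + downAllWhenUnstable)

  def shouldResetStats(sinceLatestChange: FiniteDuration): Boolean =
    sinceLatestChange > stableAfter * 2

  println(shouldDownAll(15.seconds)) // false: still within stable-after + down-all-when-unstable
  println(shouldDownAll(18.seconds)) // true: unstable for more than 17 seconds
  println(shouldResetStats(21.seconds)) // true: no reachability changes for 2 * stable-after
}
```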
|
||||
private def acquireLease(): Unit = {
|
||||
log.debug("SBR trying to acquire lease")
|
||||
implicit val ec: ExecutionContext = internalDispatcher
|
||||
strategy.lease.foreach(
|
||||
_.acquire()
|
||||
.recover {
|
||||
case t =>
|
||||
log.error(t, "SBR acquire of lease failed")
|
||||
false
|
||||
}
|
||||
.map(AcquireLeaseResult)
|
||||
.pipeTo(self))
|
||||
}
|
||||
|
||||
def waitingForLease(decision: Decision): Receive = {
|
||||
case AcquireLease =>
|
||||
acquireLease() // reply message is LeaseResult
|
||||
|
||||
case AcquireLeaseResult(holdingLease) =>
|
||||
if (holdingLease) {
|
||||
log.info(ClusterLogMarker.sbrLeaseAcquired(decision), "SBR acquired lease for decision [{}]", decision)
|
||||
val downedNodes = actOnDecision(decision)
|
||||
releaseLeaseCondition = releaseLeaseCondition match {
|
||||
case ReleaseLeaseCondition.WhenMembersRemoved(nodes) =>
|
||||
ReleaseLeaseCondition.WhenMembersRemoved(nodes.union(downedNodes))
|
||||
case _ =>
|
||||
if (downedNodes.isEmpty)
|
||||
ReleaseLeaseCondition.WhenTimeElapsed(Deadline.now + releaseLeaseAfter)
|
||||
else
|
||||
ReleaseLeaseCondition.WhenMembersRemoved(downedNodes)
|
||||
}
|
||||
} else {
|
||||
val reverseDecision = strategy.reverseDecision(decision)
|
||||
log.info(
|
||||
ClusterLogMarker.sbrLeaseDenied(reverseDecision),
|
||||
"SBR couldn't acquire lease, reverse decision [{}] to [{}]",
|
||||
decision,
|
||||
reverseDecision)
|
||||
actOnDecision(reverseDecision)
|
||||
releaseLeaseCondition = NoLease
|
||||
}
|
||||
|
||||
unstashAll()
|
||||
context.become(receive)
|
||||
|
||||
case ReleaseLeaseResult(_) => // superseded by new acquire release request
|
||||
case Tick => // ignore ticks while waiting
|
||||
case _ =>
|
||||
stash()
|
||||
}
|
||||
|
||||
private def releaseLeaseResult(released: Boolean): Unit = {
|
||||
releaseLeaseCondition match {
|
||||
case ReleaseLeaseCondition.WhenTimeElapsed(deadline) =>
|
||||
if (released && deadline.isOverdue()) {
|
||||
log.info(ClusterLogMarker.sbrLeaseReleased, "SBR released lease.")
|
||||
releaseLeaseCondition = NoLease // released successfully
|
||||
}
|
||||
case _ =>
|
||||
// no lease or first waiting for downed nodes to be removed
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the nodes that were downed
|
||||
*/
|
||||
def actOnDecision(decision: Decision): Set[UniqueAddress] = {
|
||||
val nodesToDown =
|
||||
try {
|
||||
strategy.nodesToDown(decision)
|
||||
} catch {
|
||||
case e: IllegalStateException =>
|
||||
log.warning(e.getMessage)
|
||||
strategy.nodesToDown(DownAll)
|
||||
}
|
||||
|
||||
val downMyself = nodesToDown.contains(selfUniqueAddress)
|
||||
|
||||
val indirectlyConnectedLogMessage =
|
||||
if (decision.isIndirectlyConnected)
|
||||
s", indirectly connected [${strategy.indirectlyConnected.mkString(", ")}]"
|
||||
else ""
|
||||
val unreachableDataCentersLogMessage =
|
||||
if (unreachableDataCenters.nonEmpty)
|
||||
s", unreachable DCs [${unreachableDataCenters.mkString(", ")}]"
|
||||
else ""
|
||||
|
||||
log.warning(
|
||||
ClusterLogMarker.sbrDowning(decision),
|
||||
s"SBR took decision $decision and is downing [${nodesToDown.map(_.address).mkString(", ")}]${if (downMyself) " including myself,"
|
||||
else ""}, " +
|
||||
s"[${strategy.unreachable.size}] unreachable of [${strategy.members.size}] members" +
|
||||
indirectlyConnectedLogMessage +
|
||||
s", all members in DC [${strategy.allMembersInDC.mkString(", ")}], full reachability status: ${strategy.reachability}" +
|
||||
unreachableDataCentersLogMessage)
|
||||
|
||||
if (nodesToDown.nonEmpty) {
|
||||
// downing is idempotent, and we also avoid calling down on nodes with status Down
|
||||
// down selfAddress last, since it may shutdown itself if down alone
|
||||
nodesToDown.foreach(uniqueAddress => if (uniqueAddress != selfUniqueAddress) down(uniqueAddress, decision))
|
||||
if (downMyself)
|
||||
down(selfUniqueAddress, decision)
|
||||
|
||||
resetReachabilityChangedStats()
|
||||
resetStableDeadline()
|
||||
}
|
||||
nodesToDown
|
||||
}
|
||||
|
||||
def isResponsible: Boolean = leader && selfMemberAdded
|
||||
|
||||
def unreachableMember(m: Member): Unit = {
|
||||
if (m.uniqueAddress != selfUniqueAddress && m.dataCenter == selfDc) {
|
||||
log.debug("SBR unreachableMember [{}]", m)
|
||||
mutateMemberInfo(resetStable = true) { () =>
|
||||
strategy.addUnreachable(m)
|
||||
resetReachabilityChangedStatsIfAllUnreachableDowned()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def reachableMember(m: Member): Unit = {
|
||||
if (m.uniqueAddress != selfUniqueAddress && m.dataCenter == selfDc) {
|
||||
log.debug("SBR reachableMember [{}]", m)
|
||||
mutateMemberInfo(resetStable = true) { () =>
|
||||
strategy.addReachable(m)
|
||||
resetReachabilityChangedStatsIfAllUnreachableDowned()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private[sbr] def reachabilityChanged(r: Reachability): Unit = {
|
||||
if (strategy.setReachability(r)) {
|
||||
// resetStableDeadline is done from unreachableMember/reachableMember
|
||||
updateReachabilityChangedStats()
|
||||
// it may also change when members are removed and therefore the reset may be needed
|
||||
resetReachabilityChangedStatsIfAllUnreachableDowned()
|
||||
log.debug("SBR noticed {}", reachabilityChangedStats)
|
||||
}
|
||||
}
|
||||
|
||||
private def updateReachabilityChangedStats(): Unit = {
|
||||
val now = System.nanoTime()
|
||||
if (reachabilityChangedStats.changeCount == 0)
|
||||
reachabilityChangedStats = ReachabilityChangedStats(now, now, 1)
|
||||
else
|
||||
reachabilityChangedStats = reachabilityChangedStats.copy(
|
||||
latestChangeTimestamp = now,
|
||||
changeCount = reachabilityChangedStats.changeCount + 1)
|
||||
}
|
||||
|
||||
def unreachableDataCenter(dc: DataCenter): Unit = {
|
||||
unreachableDataCenters += dc
|
||||
log.warning(
|
||||
"Data center [{}] observed as unreachable. " +
|
||||
"Note that nodes in other data center will not be downed by SBR in this data center [{}]",
|
||||
dc,
|
||||
selfDc)
|
||||
}
|
||||
|
||||
def reachableDataCenter(dc: DataCenter): Unit = {
|
||||
unreachableDataCenters -= dc
|
||||
log.info("Data center [{}] observed as reachable again", dc)
|
||||
}
|
||||
|
||||
def seenChanged(seenBy: Set[Address]): Unit = {
|
||||
strategy.setSeenBy(seenBy)
|
||||
}
|
||||
|
||||
def addUp(m: Member): Unit = {
|
||||
if (selfDc == m.dataCenter) {
|
||||
log.debug("SBR add Up [{}]", m)
|
||||
mutateMemberInfo(resetStable = true) { () =>
|
||||
strategy.add(m)
|
||||
if (m.uniqueAddress == selfUniqueAddress) mutateResponsibilityInfo { () =>
|
||||
selfMemberAdded = true
|
||||
}
|
||||
}
|
||||
strategy match {
|
||||
case s: StaticQuorum =>
|
||||
if (s.isTooManyMembers)
|
||||
log.warning(
|
||||
"The cluster size is [{}] and static-quorum.quorum-size is [{}]. You should not add " +
|
||||
"more than [{}] (static-quorum.size * 2 - 1) members to the cluster. If the exceeded cluster size " +
|
||||
"remains when a SBR decision is needed it will down all nodes.",
|
||||
s.membersWithRole.size,
|
||||
s.quorumSize,
|
||||
s.quorumSize * 2 - 1)
|
||||
case _ => // ok
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def leaving(m: Member): Unit = {
|
||||
if (selfDc == m.dataCenter) {
|
||||
log.debug("SBR leaving [{}]", m)
|
||||
mutateMemberInfo(resetStable = false) { () =>
|
||||
strategy.add(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
def addJoining(m: Member): Unit = {
|
||||
if (selfDc == m.dataCenter) {
|
||||
log.debug("SBR add Joining/WeaklyUp [{}]", m)
|
||||
strategy.add(m)
|
||||
}
|
||||
}
|
||||
|
||||
def addWeaklyUp(m: Member): Unit = {
|
||||
if (m.uniqueAddress == selfUniqueAddress) mutateResponsibilityInfo { () =>
|
||||
selfMemberAdded = true
|
||||
}
|
||||
// treat WeaklyUp in same way as joining
|
||||
addJoining(m)
|
||||
}
|
||||
|
||||
def remove(m: Member): Unit = {
|
||||
if (selfDc == m.dataCenter) {
|
||||
if (m.uniqueAddress == selfUniqueAddress)
|
||||
context.stop(self)
|
||||
else
|
||||
mutateMemberInfo(resetStable = false) { () =>
|
||||
log.debug("SBR remove [{}]", m)
|
||||
strategy.remove(m)
|
||||
|
||||
resetReachabilityChangedStatsIfAllUnreachableDowned()
|
||||
|
||||
releaseLeaseCondition = releaseLeaseCondition match {
|
||||
case ReleaseLeaseCondition.WhenMembersRemoved(downedNodes) =>
|
||||
val remainingDownedNodes = downedNodes - m.uniqueAddress
|
||||
if (remainingDownedNodes.isEmpty)
|
||||
ReleaseLeaseCondition.WhenTimeElapsed(Deadline.now + releaseLeaseAfter)
|
||||
else
|
||||
ReleaseLeaseCondition.WhenMembersRemoved(remainingDownedNodes)
|
||||
case other =>
|
||||
// no lease or not holding lease
|
||||
other
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private def releaseLease(): Unit = {
|
||||
implicit val ec: ExecutionContext = internalDispatcher
|
||||
strategy.lease.foreach { l =>
|
||||
if (releaseLeaseCondition != NoLease) {
|
||||
log.debug("SBR releasing lease")
|
||||
l.release().recover { case _ => false }.map(ReleaseLeaseResult.apply).pipeTo(self)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,63 @@
|
|||
/*
|
||||
* Copyright (C) 2009-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sbr
|
||||
|
||||
import scala.concurrent.duration.Duration
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
|
||||
import akka.actor.ActorSystem
|
||||
import akka.actor.Props
|
||||
import akka.cluster.Cluster
|
||||
import akka.cluster.DowningProvider
|
||||
import akka.coordination.lease.scaladsl.LeaseProvider
|
||||
|
||||
/**
|
||||
* See reference documentation: https://doc.akka.io/docs/akka/current/split-brain-resolver.html
|
||||
*
|
||||
* Enabled with configuration:
|
||||
* {{{
|
||||
* akka.cluster.downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
|
||||
* }}}
|
||||
*/
|
||||
final class SplitBrainResolverProvider(system: ActorSystem) extends DowningProvider {
|
||||
|
||||
private val settings = new SplitBrainResolverSettings(system.settings.config)
|
||||
|
||||
override def downRemovalMargin: FiniteDuration = {
|
||||
// if down-removal-margin is defined we let it trump stable-after to allow
|
||||
// for two different values for SBR downing and cluster tool stop/start after downing
|
||||
val drm = Cluster(system).settings.DownRemovalMargin
|
||||
if (drm != Duration.Zero) drm
|
||||
else settings.DowningStableAfter
|
||||
}
|
||||
|
||||
override def downingActorProps: Option[Props] = {
|
||||
import SplitBrainResolverSettings._
|
||||
|
||||
val cluster = Cluster(system)
|
||||
val selfDc = cluster.selfDataCenter
|
||||
val strategy =
|
||||
settings.DowningStrategy match {
|
||||
case KeepMajorityName =>
|
||||
new KeepMajority(selfDc, settings.keepMajorityRole)
|
||||
case StaticQuorumName =>
|
||||
val s = settings.staticQuorumSettings
|
||||
new StaticQuorum(selfDc, s.size, s.role)
|
||||
case KeepOldestName =>
|
||||
val s = settings.keepOldestSettings
|
||||
new KeepOldest(selfDc, s.downIfAlone, s.role)
|
||||
case DownAllName =>
|
||||
new DownAllNodes(selfDc)
|
||||
case LeaseMajorityName =>
|
||||
val s = settings.leaseMajoritySettings
|
||||
val leaseOwnerName = cluster.selfUniqueAddress.address.hostPort
|
||||
val lease = LeaseProvider(system).getLease(s"${system.name}-akka-sbr", s.leaseImplementation, leaseOwnerName)
|
||||
new LeaseMajority(selfDc, s.role, lease, s.acquireLeaseDelayForMinority)
|
||||
}
|
||||
|
||||
Some(SplitBrainResolver.props(settings.DowningStableAfter, strategy))
|
||||
}
|
||||
|
||||
}
|
||||
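Putting the provider into use is purely a configuration concern. A minimal, hedged sketch of an `ActorSystem` with SBR enabled and keep-majority selected; the system name and the `stable-after` value are arbitrary examples:

```scala
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

// Sketch: enable the SBR downing provider via configuration (values are examples).
object SbrEnabledSystem extends App {
  val config = ConfigFactory.parseString("""
    akka.actor.provider = cluster
    akka.cluster {
      downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
      split-brain-resolver {
        active-strategy = keep-majority
        stable-after = 20s
      }
    }
    """).withFallback(ConfigFactory.load())

  val system = ActorSystem("example", config)
  // join seed nodes as usual; the leader applies the strategy once unreachability
  // has been stable for stable-after
  system.terminate()
}
```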
|
|
@@ -0,0 +1,128 @@
|
|||
/*
|
||||
* Copyright (C) 2009-2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sbr
|
||||
|
||||
import java.util.Locale
|
||||
import java.util.concurrent.TimeUnit
|
||||
|
||||
import scala.concurrent.duration.Duration
|
||||
import scala.concurrent.duration.FiniteDuration
|
||||
|
||||
import com.typesafe.config.Config
|
||||
|
||||
import akka.ConfigurationException
|
||||
import akka.annotation.InternalApi
|
||||
import akka.util.Helpers
|
||||
import akka.util.Helpers.Requiring
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[sbr] object SplitBrainResolverSettings {
|
||||
final val KeepMajorityName = "keep-majority"
|
||||
final val LeaseMajorityName = "lease-majority"
|
||||
final val StaticQuorumName = "static-quorum"
|
||||
final val KeepOldestName = "keep-oldest"
|
||||
final val DownAllName = "down-all"
|
||||
|
||||
def allStrategyNames =
|
||||
Set(KeepMajorityName, LeaseMajorityName, StaticQuorumName, KeepOldestName, DownAllName)
|
||||
}
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[sbr] final class SplitBrainResolverSettings(config: Config) {
|
||||
|
||||
import SplitBrainResolverSettings._
|
||||
|
||||
private val cc = config.getConfig("akka.cluster.split-brain-resolver")
|
||||
|
||||
val DowningStableAfter: FiniteDuration = {
|
||||
val key = "stable-after"
|
||||
FiniteDuration(cc.getDuration(key).toMillis, TimeUnit.MILLISECONDS).requiring(_ >= Duration.Zero, key + " >= 0s")
|
||||
}
|
||||
|
||||
val DowningStrategy: String =
|
||||
cc.getString("active-strategy").toLowerCase(Locale.ROOT) match {
|
||||
case strategyName if allStrategyNames(strategyName) => strategyName
|
||||
case unknown =>
|
||||
throw new ConfigurationException(
|
||||
s"Unknown downing strategy [$unknown]. Select one of [${allStrategyNames.mkString(",")}]")
|
||||
}
|
||||
|
||||
val DownAllWhenUnstable: FiniteDuration = {
|
||||
val key = "down-all-when-unstable"
|
||||
Helpers.toRootLowerCase(cc.getString("down-all-when-unstable")) match {
|
||||
case "on" =>
|
||||
// based on stable-after
|
||||
DowningStableAfter * 3 / 4
|
||||
case "off" =>
|
||||
// disabled
|
||||
Duration.Zero
|
||||
case _ =>
|
||||
FiniteDuration(cc.getDuration(key).toMillis, TimeUnit.MILLISECONDS)
|
||||
.requiring(_ > Duration.Zero, key + " > 0s, or 'off' to disable")
|
||||
}
|
||||
}
|
||||
|
||||
// the individual sub-configs below should only be called when the strategy has been selected
|
||||
|
||||
def keepMajorityRole: Option[String] = role(strategyConfig(KeepMajorityName))
|
||||
|
||||
def staticQuorumSettings: StaticQuorumSettings = {
|
||||
val c = strategyConfig(StaticQuorumName)
|
||||
val size = c
|
||||
.getInt("quorum-size")
|
||||
.requiring(_ >= 1, s"akka.cluster.split-brain-resolver.$StaticQuorumName.quorum-size must be >= 1")
|
||||
StaticQuorumSettings(size, role(c))
|
||||
}
|
||||
|
||||
def keepOldestSettings: KeepOldestSettings = {
|
||||
val c = strategyConfig(KeepOldestName)
|
||||
val downIfAlone = c.getBoolean("down-if-alone")
|
||||
KeepOldestSettings(downIfAlone, role(c))
|
||||
}
|
||||
|
||||
def leaseMajoritySettings: LeaseMajoritySettings = {
|
||||
val c = strategyConfig(LeaseMajorityName)
|
||||
|
||||
val leaseImplementation = c.getString("lease-implementation")
|
||||
require(
|
||||
leaseImplementation != "",
|
||||
s"akka.cluster.split-brain-resolver.$LeaseMajorityName.lease-implementation must be defined")
|
||||
|
||||
val acquireLeaseDelayForMinority =
|
||||
FiniteDuration(c.getDuration("acquire-lease-delay-for-minority").toMillis, TimeUnit.MILLISECONDS)
|
||||
|
||||
LeaseMajoritySettings(leaseImplementation, acquireLeaseDelayForMinority, role(c))
|
||||
}
|
||||
|
||||
private def strategyConfig(strategyName: String): Config = cc.getConfig(strategyName)
|
||||
|
||||
private def role(c: Config): Option[String] = c.getString("role") match {
|
||||
case "" => None
|
||||
case r => Some(r)
|
||||
}
|
||||
|
||||
}
|
||||
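Because `SplitBrainResolverSettings` is internal API, the resolution of `down-all-when-unstable` is shown here as a standalone sketch that mirrors the parsing above: `on` derives the duration as 3/4 of `stable-after`, `off` disables it, and anything else must be a positive duration.

```scala
import java.util.concurrent.TimeUnit

import scala.concurrent.duration._

import com.typesafe.config.ConfigFactory

// Mirrors the down-all-when-unstable resolution above (illustrative, not the internal settings class).
object DownAllWhenUnstableSketch extends App {
  val cc = ConfigFactory.parseString("""
    akka.cluster.split-brain-resolver {
      stable-after = 20s
      down-all-when-unstable = on
    }
    """).getConfig("akka.cluster.split-brain-resolver")

  val stableAfter = FiniteDuration(cc.getDuration("stable-after").toMillis, TimeUnit.MILLISECONDS)

  val downAllWhenUnstable = cc.getString("down-all-when-unstable").toLowerCase match {
    case "on"  => stableAfter * 3 / 4 // derived from stable-after
    case "off" => Duration.Zero // disabled
    case _     => FiniteDuration(cc.getDuration("down-all-when-unstable").toMillis, TimeUnit.MILLISECONDS)
  }

  println(downAllWhenUnstable) // 15000 milliseconds when stable-after = 20s
}
```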
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[sbr] final case class StaticQuorumSettings(size: Int, role: Option[String])
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[sbr] final case class KeepOldestSettings(downIfAlone: Boolean, role: Option[String])
|
||||
|
||||
/**
|
||||
* INTERNAL API
|
||||
*/
|
||||
@InternalApi private[sbr] final case class LeaseMajoritySettings(
|
||||
leaseImplementation: String,
|
||||
acquireLeaseDelayForMinority: FiniteDuration,
|
||||
role: Option[String])
|
||||
|
|
@@ -31,7 +31,11 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig {
|
|||
loggers = ["akka.testkit.TestEventListener"]
|
||||
loglevel = INFO
|
||||
remote.log-remote-lifecycle-events = off
|
||||
cluster.failure-detector.monitored-by-nr-of-members = 3
|
||||
cluster {
|
||||
failure-detector.monitored-by-nr-of-members = 3
|
||||
downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
|
||||
split-brain-resolver.active-strategy = keep-majority
|
||||
}
|
||||
}
|
||||
"""))
|
||||
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,128 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sbr
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import com.typesafe.config.ConfigFactory
|
||||
|
||||
import akka.cluster.Cluster
|
||||
import akka.cluster.MemberStatus
|
||||
import akka.cluster.MultiNodeClusterSpec
|
||||
import akka.remote.testkit.MultiNodeConfig
|
||||
import akka.remote.testkit.MultiNodeSpec
|
||||
import akka.remote.transport.ThrottlerTransportAdapter
|
||||
|
||||
object DownAllIndirectlyConnected5NodeSpec extends MultiNodeConfig {
|
||||
val node1 = role("node1")
|
||||
val node2 = role("node2")
|
||||
val node3 = role("node3")
|
||||
val node4 = role("node4")
|
||||
val node5 = role("node5")
|
||||
|
||||
commonConfig(ConfigFactory.parseString("""
|
||||
akka {
|
||||
loglevel = INFO
|
||||
cluster {
|
||||
downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
|
||||
split-brain-resolver.active-strategy = keep-majority
|
||||
split-brain-resolver.stable-after = 6s
|
||||
|
||||
run-coordinated-shutdown-when-down = off
|
||||
}
|
||||
|
||||
actor.provider = cluster
|
||||
|
||||
test.filter-leeway = 10s
|
||||
}
|
||||
"""))
|
||||
|
||||
testTransport(on = true)
|
||||
}
|
||||
|
||||
class DownAllIndirectlyConnected5NodeSpecMultiJvmNode1 extends DownAllIndirectlyConnected5NodeSpec
|
||||
class DownAllIndirectlyConnected5NodeSpecMultiJvmNode2 extends DownAllIndirectlyConnected5NodeSpec
|
||||
class DownAllIndirectlyConnected5NodeSpecMultiJvmNode3 extends DownAllIndirectlyConnected5NodeSpec
|
||||
class DownAllIndirectlyConnected5NodeSpecMultiJvmNode4 extends DownAllIndirectlyConnected5NodeSpec
|
||||
class DownAllIndirectlyConnected5NodeSpecMultiJvmNode5 extends DownAllIndirectlyConnected5NodeSpec
|
||||
|
||||
class DownAllIndirectlyConnected5NodeSpec
|
||||
extends MultiNodeSpec(DownAllIndirectlyConnected5NodeSpec)
|
||||
with MultiNodeClusterSpec {
|
||||
import DownAllIndirectlyConnected5NodeSpec._
|
||||
|
||||
"A 5-node cluster with keep-one-indirectly-connected = off" should {
|
||||
"down all when indirectly connected combined with clean partition" in {
|
||||
val cluster = Cluster(system)
|
||||
|
||||
runOn(node1) {
|
||||
cluster.join(cluster.selfAddress)
|
||||
}
|
||||
enterBarrier("node1 joined")
|
||||
runOn(node2, node3, node4, node5) {
|
||||
cluster.join(node(node1).address)
|
||||
}
|
||||
within(10.seconds) {
|
||||
awaitAssert {
|
||||
cluster.state.members.size should ===(5)
|
||||
cluster.state.members.foreach {
|
||||
_.status should ===(MemberStatus.Up)
|
||||
}
|
||||
}
|
||||
}
|
||||
enterBarrier("Cluster formed")
|
||||
|
||||
runOn(node1) {
|
||||
for (x <- List(node1, node2, node3); y <- List(node4, node5)) {
|
||||
testConductor.blackhole(x, y, ThrottlerTransportAdapter.Direction.Both).await
|
||||
}
|
||||
}
|
||||
enterBarrier("blackholed-clean-partition")
|
||||
|
||||
runOn(node1) {
|
||||
testConductor.blackhole(node2, node3, ThrottlerTransportAdapter.Direction.Both).await
|
||||
}
|
||||
enterBarrier("blackholed-indirectly-connected")
|
||||
|
||||
within(10.seconds) {
|
||||
awaitAssert {
|
||||
runOn(node1) {
|
||||
cluster.state.unreachable.map(_.address) should ===(Set(node2, node3, node4, node5).map(node(_).address))
|
||||
}
|
||||
runOn(node2) {
|
||||
cluster.state.unreachable.map(_.address) should ===(Set(node3, node4, node5).map(node(_).address))
|
||||
}
|
||||
runOn(node3) {
|
||||
cluster.state.unreachable.map(_.address) should ===(Set(node2, node4, node5).map(node(_).address))
|
||||
}
|
||||
runOn(node4, node5) {
|
||||
cluster.state.unreachable.map(_.address) should ===(Set(node1, node2, node3).map(node(_).address))
|
||||
}
|
||||
}
|
||||
}
|
||||
enterBarrier("unreachable")
|
||||
|
||||
runOn(node1) {
|
||||
within(15.seconds) {
|
||||
awaitAssert {
|
||||
cluster.state.members.map(_.address) should ===(Set(node(node1).address))
|
||||
cluster.state.members.foreach {
|
||||
_.status should ===(MemberStatus.Up)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
runOn(node2, node3, node4, node5) {
|
||||
// downed
|
||||
awaitCond(cluster.isTerminated, max = 15.seconds)
|
||||
}
|
||||
|
||||
enterBarrier("done")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
@@ -0,0 +1,133 @@
|
|||
/*
|
||||
* Copyright (C) 2020 Lightbend Inc. <https://www.lightbend.com>
|
||||
*/
|
||||
|
||||
package akka.cluster.sbr
|
||||
|
||||
import scala.concurrent.duration._
|
||||
|
||||
import com.typesafe.config.ConfigFactory
|
||||
|
||||
import akka.cluster.Cluster
|
||||
import akka.cluster.MemberStatus
|
||||
import akka.cluster.MultiNodeClusterSpec
|
||||
import akka.remote.testkit.MultiNodeConfig
|
||||
import akka.remote.testkit.MultiNodeSpec
|
||||
import akka.remote.transport.ThrottlerTransportAdapter
|
||||
|
||||
object DownAllUnstable5NodeSpec extends MultiNodeConfig {
|
||||
val node1 = role("node1")
|
||||
val node2 = role("node2")
|
||||
val node3 = role("node3")
|
||||
val node4 = role("node4")
|
||||
val node5 = role("node5")
|
||||
|
||||
commonConfig(ConfigFactory.parseString("""
|
||||
akka {
|
||||
loglevel = INFO
|
||||
cluster {
|
||||
downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
|
||||
failure-detector.acceptable-heartbeat-pause = 3s
|
||||
split-brain-resolver.active-strategy = keep-majority
|
||||
split-brain-resolver.stable-after = 10s
|
||||
split-brain-resolver.down-all-when-unstable = 7s
|
||||
|
||||
run-coordinated-shutdown-when-down = off
|
||||
}
|
||||
|
||||
# quicker reconnect
|
||||
remote.retry-gate-closed-for = 1s
|
||||
remote.netty.tcp.connection-timeout = 3 s
|
||||
|
||||
actor.provider = cluster
|
||||
|
||||
test.filter-leeway = 10s
|
||||
}
|
||||
"""))
|
||||
|
||||
testTransport(on = true)
|
||||
}
|
||||
|
||||
class DownAllUnstable5NodeSpecMultiJvmNode1 extends DownAllUnstable5NodeSpec
|
||||
class DownAllUnstable5NodeSpecMultiJvmNode2 extends DownAllUnstable5NodeSpec
|
||||
class DownAllUnstable5NodeSpecMultiJvmNode3 extends DownAllUnstable5NodeSpec
|
||||
class DownAllUnstable5NodeSpecMultiJvmNode4 extends DownAllUnstable5NodeSpec
|
||||
class DownAllUnstable5NodeSpecMultiJvmNode5 extends DownAllUnstable5NodeSpec
|
||||
|
||||
class DownAllUnstable5NodeSpec extends MultiNodeSpec(DownAllUnstable5NodeSpec) with MultiNodeClusterSpec {
|
||||
import DownAllUnstable5NodeSpec._
|
||||
|
||||
"A 5-node cluster with down-all-when-unstable" should {
|
||||
"down all when instability continues" in {
|
||||
val cluster = Cluster(system)
|
||||
|
||||
runOn(node1) {
|
||||
cluster.join(cluster.selfAddress)
|
||||
}
|
||||
enterBarrier("node1 joined")
|
||||
runOn(node2, node3, node4, node5) {
|
||||
cluster.join(node(node1).address)
|
||||
}
|
||||
within(10.seconds) {
|
||||
awaitAssert {
|
||||
cluster.state.members.size should ===(5)
|
||||
cluster.state.members.foreach {
|
||||
_.status should ===(MemberStatus.Up)
|
||||
}
|
||||
}
|
||||
}
|
||||
enterBarrier("Cluster formed")
|
||||
|
||||
// acceptable-heartbeat-pause = 3s
|
||||
// stable-after = 10s
|
||||
// down-all-when-unstable = 7s
|
||||
|
||||
runOn(node1) {
|
||||
for (x <- List(node1, node2, node3); y <- List(node4, node5)) {
|
||||
testConductor.blackhole(x, y, ThrottlerTransportAdapter.Direction.Both).await
|
||||
}
|
||||
}
|
||||
enterBarrier("blackholed-clean-partition")
|
||||
|
||||
within(10.seconds) {
|
||||
awaitAssert {
|
||||
runOn(node1, node2, node3) {
|
||||
cluster.state.unreachable.map(_.address) should ===(Set(node4, node5).map(node(_).address))
|
||||
}
|
||||
runOn(node4, node5) {
|
||||
cluster.state.unreachable.map(_.address) should ===(Set(node1, node2, node3).map(node(_).address))
|
||||
}
|
||||
}
|
||||
}
|
||||
enterBarrier("unreachable-clean-partition")
|
||||
|
||||
// no decision yet
|
||||
Thread.sleep(2000)
|
||||
cluster.state.members.size should ===(5)
|
||||
cluster.state.members.foreach {
|
||||
_.status should ===(MemberStatus.Up)
|
||||
}
|
||||
|
||||
runOn(node1) {
|
||||
testConductor.blackhole(node2, node3, ThrottlerTransportAdapter.Direction.Both).await
|
||||
}
|
||||
enterBarrier("blackhole-2")
|
||||
// then it takes about 5 seconds for failure detector to observe that
|
||||
Thread.sleep(7000)
|
||||
|
||||
runOn(node1) {
|
||||
testConductor.passThrough(node2, node3, ThrottlerTransportAdapter.Direction.Both).await
|
||||
}
|
||||
enterBarrier("passThrough-2")
|
||||
|
||||
// now it should have been unstable for more than 17 seconds
|
||||
|
||||
// all downed
|
||||
awaitCond(cluster.isTerminated, max = 15.seconds)
|
||||
|
||||
enterBarrier("done")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.