* ClusterReceptionist using own ddata Replicator #26936 * otherwise application configuration, such as role may break it * creating it as a child actor, path will be same on all nodes * rolling update from 2.5 not supported * mention in migration guide * update migration note about receptionist rolling update
This commit is contained in:
parent
686b36962e
commit
8d1dcea5d2
4 changed files with 31 additions and 8 deletions
|
|
@ -19,6 +19,10 @@ akka.cluster.typed.receptionist {
|
|||
# etc. instead the keys are sharded across this number of keys. This must be the same on all nodes
|
||||
# in a cluster, changing it requires a full cluster restart (stopping all nodes before starting them again)
|
||||
distributed-key-count = 5
|
||||
|
||||
# Settings for the Distributed Data replicator used by Receptionist.
|
||||
# Same layout as akka.cluster.distributed-data.
|
||||
distributed-data = ${akka.cluster.distributed-data}
|
||||
}
|
||||
|
||||
akka {
|
||||
|
|
|
|||
|
|
@ -13,7 +13,6 @@ import akka.actor.typed.{ ActorRef, Behavior, Terminated }
|
|||
import akka.annotation.InternalApi
|
||||
import akka.cluster.ClusterEvent.MemberRemoved
|
||||
import akka.cluster.ddata.typed.scaladsl.DistributedData
|
||||
import akka.cluster.{ ddata => dd }
|
||||
import akka.cluster.ddata.{ ORMultiMap, ORMultiMapKey, Replicator }
|
||||
import akka.cluster.{ Cluster, ClusterEvent, UniqueAddress }
|
||||
import akka.remote.AddressUidExtension
|
||||
|
|
@ -70,7 +69,6 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider {
|
|||
final class Setup(ctx: ActorContext[Command]) {
|
||||
val untypedSystem = ctx.system.toUntyped
|
||||
val settings = ClusterReceptionistSettings(ctx.system)
|
||||
val replicator = dd.DistributedData(untypedSystem).replicator
|
||||
val selfSystemUid = AddressUidExtension(untypedSystem).longAddressUid
|
||||
lazy val keepTombstonesFor = cluster.settings.PruneGossipTombstonesAfter match {
|
||||
case f: FiniteDuration => f
|
||||
|
|
@ -78,6 +76,9 @@ private[typed] object ClusterReceptionist extends ReceptionistBehaviorProvider {
|
|||
}
|
||||
val cluster = Cluster(untypedSystem)
|
||||
implicit val selfNodeAddress = DistributedData(ctx.system).selfUniqueAddress
|
||||
|
||||
val replicator = ctx.actorOf(Replicator.props(settings.replicatorSettings), "replicator")
|
||||
|
||||
def newTombstoneDeadline() = Deadline(keepTombstonesFor)
|
||||
def selfUniqueAddress: UniqueAddress = cluster.selfUniqueAddress
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,10 +10,11 @@ import akka.cluster.ddata.Replicator
|
|||
import akka.cluster.ddata.Replicator.WriteConsistency
|
||||
import akka.util.Helpers.toRootLowerCase
|
||||
import com.typesafe.config.Config
|
||||
|
||||
import scala.concurrent.duration._
|
||||
import scala.concurrent.duration.{ FiniteDuration, MILLISECONDS }
|
||||
|
||||
import akka.cluster.ddata.ReplicatorSettings
|
||||
|
||||
/**
|
||||
* Internal API
|
||||
*/
|
||||
|
|
@ -33,10 +34,14 @@ private[akka] object ClusterReceptionistSettings {
|
|||
case _ => Replicator.WriteTo(config.getInt(key), writeTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
val replicatorSettings = ReplicatorSettings(config.getConfig("distributed-data"))
|
||||
|
||||
ClusterReceptionistSettings(
|
||||
writeConsistency,
|
||||
pruningInterval = config.getDuration("pruning-interval", MILLISECONDS).millis,
|
||||
config.getInt("distributed-key-count"))
|
||||
config.getInt("distributed-key-count"),
|
||||
replicatorSettings)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -47,4 +52,5 @@ private[akka] object ClusterReceptionistSettings {
|
|||
private[akka] case class ClusterReceptionistSettings(
|
||||
writeConsistency: WriteConsistency,
|
||||
pruningInterval: FiniteDuration,
|
||||
distributedKeyCount: Int)
|
||||
distributedKeyCount: Int,
|
||||
replicatorSettings: ReplicatorSettings)
|
||||
|
|
|
|||
|
|
@ -197,11 +197,23 @@ akka.coordinated-shutdown.run-by-actor-system-terminate = off
|
|||
### Receptionist has moved
|
||||
|
||||
The receptionist had a name clash with the default Cluster Client Receptionist at `/system/receptionist` and will now
|
||||
instead either run under `/system/localReceptionist` or `/system/clusterReceptionist`.
|
||||
instead either run under `/system/localReceptionist` or `/system/clusterReceptionist`.
|
||||
|
||||
The path change makes it impossible to do a rolling upgrade from 2.5 to 2.6 if you use Akka Typed and the receptionist
|
||||
as the old and the new nodes receptionists will not be able to communicate.
|
||||
The path change means that the receptionist information will not be disseminated between 2.5 and 2.6 nodes during a
|
||||
rolling update from 2.5 to 2.6 if you use Akka Typed. When all old nodes have been shut down
|
||||
it will work properly again.
|
||||
|
||||
### Cluster Receptionist using own Distributed Data
|
||||
|
||||
In 2.5 the Cluster Receptionist was using the shared Distributed Data extension but that could result in
|
||||
undesired configuration changes if the application was also using that and changed for example the `role`
|
||||
configuration.
|
||||
|
||||
In 2.6 the Cluster Receptionist is using its own independent instance of Distributed Data.
|
||||
|
||||
This means that the receptionist information will not be disseminated between 2.5 and 2.6 nodes during a
|
||||
rolling update from 2.5 to 2.6 if you use Akka Typed. When all old nodes have been shut down
|
||||
it will work properly again.
|
||||
|
||||
### Akka Typed API changes
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue