###############################################
# Akka Cluster Sharding Reference Config File #
###############################################

# This is the reference config file that contains all the default settings.
# Make your edits/overrides in your application.conf.

# //#sharding-ext-config
# Settings for the ClusterShardingExtension
akka.cluster.sharding {

  # The extension creates a top level actor with this name in top level system scope,
  # e.g. '/system/sharding'
  guardian-name = sharding

  # Specifies that entities run on cluster nodes with a specific role.
  # If the role is not specified (or empty) all nodes in the cluster are used.
  role = ""

  # When this is set to 'on' the active entity actors will automatically be
  # restarted upon Shard restart, i.e. if the Shard is started on a different
  # ShardRegion due to rebalance or crash.
  remember-entities = off

  # Set this to a time duration to have sharding passivate entities when they have not
  # received any message in this length of time. Set to 'off' to disable.
  passivate-idle-entity-after = 120s
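
  # As an illustration, automatic passivation could be tuned or disabled from
  # application.conf with overrides like these (example values, not defaults):
  #   akka.cluster.sharding.passivate-idle-entity-after = 10 minutes
  #   akka.cluster.sharding.passivate-idle-entity-after = off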

  # If the coordinator can't store state changes it will be stopped
  # and started again after this duration, with an exponential back-off
  # of up to 5 times this duration.
  coordinator-failure-backoff = 5 s

  # The ShardRegion retries registration and shard location requests to the
  # ShardCoordinator with this interval if it does not reply.
  retry-interval = 2 s

  # Maximum number of messages that are buffered by a ShardRegion actor.
  buffer-size = 100000

  # Timeout of the shard rebalancing process.
  # Additionally, if an entity doesn't handle the stopMessage
  # after (handoff-timeout - 5.seconds).max(1.second), i.e. 55 s with the
  # default, it will be stopped forcefully.
  handoff-timeout = 60 s

  # Time given to a region to acknowledge it's hosting a shard.
  shard-start-timeout = 10 s

  # If the shard is remembering entities and can't store state changes, it
  # will be stopped and then started again after this duration. Any messages
  # sent to an affected entity may be lost in this process.
  shard-failure-backoff = 10 s

  # If the shard is remembering entities and an entity stops itself without
  # using passivate, the entity will be restarted after this duration or when
  # the next message for it is received, whichever occurs first.
  entity-restart-backoff = 10 s

  # Rebalance check is performed periodically with this interval.
  rebalance-interval = 10 s

  # Absolute path to the journal plugin configuration entity that is to be
  # used for the internal persistence of ClusterSharding. If not defined
  # the default journal plugin is used. Note that this is not related to
  # persistence used by the entity actors.
  # Only used when state-store-mode=persistence
  journal-plugin-id = ""

  # Absolute path to the snapshot plugin configuration entity that is to be
  # used for the internal persistence of ClusterSharding. If not defined
  # the default snapshot plugin is used. Note that this is not related to
  # persistence used by the entity actors.
  # Only used when state-store-mode=persistence
  snapshot-plugin-id = ""

  # Defines how the coordinator stores its state. Same is also used by the
  # shards for rememberEntities.
  # Valid values are "ddata" or "persistence".
  state-store-mode = "ddata"
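
  # For example, switching coordinator state storage to persistence could look
  # like this in application.conf (a sketch; the plugin id "my-journal-plugin"
  # is hypothetical and must refer to a journal plugin defined in your config):
  #   akka.cluster.sharding.state-store-mode = "persistence"
  #   akka.cluster.sharding.journal-plugin-id = "my-journal-plugin"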

  # The shard saves persistent snapshots after this number of persistent
  # events. Snapshots are used to reduce recovery times.
  # Only used when state-store-mode=persistence
  snapshot-after = 1000

  # The shard deletes persistent events (messages and snapshots) after doing a
  # snapshot, keeping this number of old persistent batches.
  # A batch is of size `snapshot-after`.
  # When set to 0, all events with an equal or lower sequence number are
  # deleted as soon as a snapshot has completed successfully.
  # The default value of 2 keeps at most the last 2 * `snapshot-after` events
  # and 3 snapshots (2 old ones + the latest snapshot).
  keep-nr-of-batches = 2
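
  # Worked example with the defaults: snapshot-after = 1000 and
  # keep-nr-of-batches = 2 means that after a snapshot at sequence number 3000,
  # events back to sequence number 1001 (2 * 1000 before the snapshot) are
  # kept, while older events and snapshots are deleted.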

  # Settings for the default shard allocation strategy
  least-shard-allocation-strategy {
    # Threshold of how large the difference between most and least number of
    # allocated shards must be to begin the rebalancing.
    # The difference between number of shards in the region with most shards and
    # the region with least shards must be greater than (>) the `rebalanceThreshold`
    # for the rebalance to occur.
    # 1 gives the best distribution and therefore typically the best choice.
    # Increasing the threshold can result in quicker rebalance but has the
    # drawback of increased difference between number of shards (and therefore load)
    # on different nodes before rebalance will occur.
    rebalance-threshold = 1
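
    # Worked example: with rebalance-threshold = 1, a region hosting 3 shards
    # and a region hosting 1 shard differ by 2 (> 1), so a rebalance is
    # triggered; with a difference of exactly 1 nothing happens.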

    # The number of ongoing rebalancing processes is limited to this number.
    max-simultaneous-rebalance = 3
  }

  # Timeout for waiting for the initial distributed state (the initial state
  # will be queried again if the timeout is reached)
  # Only used when state-store-mode=ddata
  waiting-for-state-timeout = 5 s

  # Timeout for waiting for an update of the distributed state (the update
  # will be retried if the timeout is reached)
  # Only used when state-store-mode=ddata
  updating-state-timeout = 5 s

  # The shard uses this strategy to determine how to recover the underlying
  # entity actors. The strategy is only used by the persistent shard when
  # rebalancing or restarting. The value can either be "all" or "constant".
  # The "all" strategy starts all the underlying entity actors at the same
  # time. The "constant" strategy starts the underlying entity actors at a
  # fixed rate. The default strategy is "all".
  entity-recovery-strategy = "all"

  # Default settings for the constant rate entity recovery strategy
  entity-recovery-constant-rate-strategy {
    # Sets the frequency at which a batch of entity actors is started.
    frequency = 100 ms
    # Sets the number of entity actors to be started in each batch.
    number-of-entities = 5
  }
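
  # For example, recovering entities at a constant rate instead of all at once
  # could be configured like this in application.conf (illustrative values;
  # 5 entities per 100 ms batch means at most 50 entity actors started per second):
  #   akka.cluster.sharding.entity-recovery-strategy = "constant"
  #   akka.cluster.sharding.entity-recovery-constant-rate-strategy {
  #     frequency = 100 ms
  #     number-of-entities = 5
  #   }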

  # Settings for the coordinator singleton. Same layout as akka.cluster.singleton.
  # The "role" of the singleton configuration is not used. The singleton role will
  # be the same as "akka.cluster.sharding.role".
  # A lease can be configured in these settings for the coordinator singleton.
  coordinator-singleton = ${akka.cluster.singleton}

  # Settings for the Distributed Data replicator.
  # Same layout as akka.cluster.distributed-data.
  # The "role" of the distributed-data configuration is not used. The distributed-data
  # role will be the same as "akka.cluster.sharding.role".
  # Note that there is one Replicator per role and it's not possible
  # to have different distributed-data settings for different sharding entity types.
  # Only used when state-store-mode=ddata
  distributed-data = ${akka.cluster.distributed-data}
  distributed-data {
    # minCap parameter to MajorityWrite and MajorityRead consistency level.
    majority-min-cap = 5
    durable.keys = ["shard-*"]

    # When using many entities with "remember entities" the Gossip message
    # can become too large if including too many in the same message. Limit to
    # the same number as the number of ORSets per shard.
    max-delta-elements = 5
  }

  # The id of the dispatcher to use for ClusterSharding actors.
  # If specified, you need to define the settings of the actual dispatcher.
  # The dispatcher for the entity actors is defined by the user provided
  # Props, i.e. this dispatcher is not used for the entity actors.
  use-dispatcher = "akka.actor.internal-dispatcher"
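
  # For example, to run the sharding actors on a dedicated dispatcher instead,
  # an application.conf could contain something like the following sketch (the
  # name 'my-sharding-dispatcher' is hypothetical and must be defined by you):
  #   akka.cluster.sharding.use-dispatcher = "my-sharding-dispatcher"
  #   my-sharding-dispatcher {
  #     type = Dispatcher
  #     executor = "thread-pool-executor"
  #     thread-pool-executor.fixed-pool-size = 4
  #   }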

  # Config path of the lease that each shard must acquire before starting
  # entity actors. The default is no lease.
  # A lease can also be used for the singleton coordinator by setting it
  # in the coordinator-singleton properties.
  use-lease = ""
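
  # A sketch of enabling a lease, assuming the akka-coordination Kubernetes
  # lease module is on the classpath (the config path below is that module's;
  # any other lease implementation's config path works the same way):
  #   akka.cluster.sharding.use-lease = "akka.coordination.lease.kubernetes"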

  # The interval between retries for acquiring the lease
  lease-retry-interval = 5s
}
# //#sharding-ext-config

akka.cluster {
  configuration-compatibility-check {
    checkers {
      akka-cluster-sharding = "akka.cluster.sharding.JoinConfigCompatCheckSharding"
    }
  }
}

# Protobuf serializer for Cluster Sharding messages
akka.actor {
  serializers {
    akka-sharding = "akka.cluster.sharding.protobuf.ClusterShardingMessageSerializer"
  }
  serialization-bindings {
    "akka.cluster.sharding.ClusterShardingSerializable" = akka-sharding
  }
  serialization-identifiers {
    "akka.cluster.sharding.protobuf.ClusterShardingMessageSerializer" = 13
  }
}