##############################################
# Akka Distributed Data Reference Config File #
##############################################

# This is the reference config file that contains all the default settings.
# Make your edits/overrides in your application.conf.

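# A minimal sketch of overriding these defaults in application.conf; the
# values below are hypothetical and only illustrate the syntax:
#
# akka.cluster.distributed-data {
#   role = "ddata"
#   gossip-interval = 1 s
# }
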
#//#distributed-data
# Settings for the DistributedData extension
akka.cluster.distributed-data {
  # Actor name of the Replicator actor, /system/ddataReplicator
  name = ddataReplicator

  # Replicas are running on members tagged with this role.
  # All members are used if undefined or empty.
  role = ""

  # How often the Replicator should send out gossip information
  gossip-interval = 2 s

  # How often the subscribers will be notified of changes, if any
  notify-subscribers-interval = 500 ms

  # Log data with a payload size in bytes larger than this value.
  # The maximum detected size per key is logged once, with an increase
  # threshold of 10%. This can be disabled by setting the property to off.
  log-data-size-exceeding = 10 KiB

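  # For example, to turn this logging off entirely, as described above:
  #
  # log-data-size-exceeding = off
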
  # Maximum number of entries to transfer in one gossip message when
  # synchronizing the replicas. The next chunk will be transferred in the
  # next round of gossip.
  max-delta-elements = 500

  # The id of the dispatcher to use for Replicator actors.
  # If specified, you need to define the settings of the actual dispatcher.
  use-dispatcher = "akka.actor.internal-dispatcher"

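  # A minimal sketch of defining such a dispatcher yourself, assuming a
  # hypothetical id "my-ddata-dispatcher" declared at the top level of your
  # application.conf and referenced via use-dispatcher above:
  #
  # my-ddata-dispatcher {
  #   type = Dispatcher
  #   executor = "fork-join-executor"
  #   fork-join-executor {
  #     parallelism-min = 2
  #     parallelism-max = 4
  #   }
  # }
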
  # How often the Replicator checks for pruning of data associated with
  # removed cluster nodes. If this is set to 'off' the pruning feature will
  # be completely disabled.
  pruning-interval = 120 s

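  # For example, to disable the pruning feature completely, as described above:
  #
  # pruning-interval = off
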
  # How long it takes to spread the data to all other replica nodes.
  # This is used when initiating and completing the pruning process of data
  # associated with removed cluster nodes. The time measurement is stopped
  # when any replica is unreachable, but it's still recommended to configure
  # this with a certain margin. It should be in the magnitude of minutes even
  # though typical dissemination time is shorter (it grows logarithmically
  # with the number of nodes). There is no advantage in setting this too low.
  # Setting it to a large value will delay the pruning process.
  max-pruning-dissemination = 300 s

  # The markers recording that pruning has been performed for a removed node
  # are kept for this time and thereafter removed. If an old data entry that
  # was never pruned is somehow injected and merged with existing data after
  # this time the value will not be correct. This would be possible (although
  # unlikely) in the case of a long network partition. It should be in the
  # magnitude of hours. For durable data it is configured by
  # 'akka.cluster.distributed-data.durable.pruning-marker-time-to-live'.
  pruning-marker-time-to-live = 6 h

  # Serialized Write and Read messages are cached when they are sent to
  # several nodes. If there is no further activity they are removed from
  # the cache after this duration.
  serializer-cache-time-to-live = 10 s

  # Update and Get operations are sent to the oldest nodes first.
  # This is useful together with Cluster Singleton, which is running on
  # the oldest nodes.
  prefer-oldest = off

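  # For example, if the replicated data is updated from a Cluster Singleton
  # you would enable this (a hypothetical override, following the comment
  # above):
  #
  # prefer-oldest = on
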
  # Settings for delta-CRDT
  delta-crdt {
    # Enable or disable delta-CRDT replication.
    enabled = on

    # Some complex deltas grow in size for each update and above this
    # threshold such deltas are discarded and the full state is sent instead.
    # This is a number of elements or a similar size hint, not a size in bytes.
    max-delta-size = 50
  }

  durable {
    # List of keys that are durable. Prefix matching is supported by using
    # * at the end of a key.
    keys = []

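    # For example, to make the key "settings" and every key starting with
    # "cache-" durable (hypothetical key names, using the prefix matching
    # described above):
    #
    # keys = ["settings", "cache-*"]
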
    # The markers recording that pruning has been performed for a removed node
    # are kept for this time and thereafter removed. If an old data entry that
    # was never pruned is injected and merged with existing data after this
    # time the value will not be correct. This would be possible if a replica
    # with durable data didn't participate in the pruning (e.g. it was shut
    # down) and was started again after this time. A durable replica should
    # not be stopped for a longer time than this duration, and if it is
    # joining again after this duration its data should first be manually
    # removed (from the lmdb directory). It should be in the magnitude of
    # days. Note that there is a corresponding setting for non-durable data:
    # 'akka.cluster.distributed-data.pruning-marker-time-to-live'.
    pruning-marker-time-to-live = 10 d

    # Fully qualified class name of the durable store actor. It must be a
    # subclass of akka.actor.Actor and handle the protocol defined in
    # akka.cluster.ddata.DurableStore. The class must have a constructor
    # with a com.typesafe.config.Config parameter.
    store-actor-class = akka.cluster.ddata.LmdbDurableStore

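    # To plug in your own durable store, point this setting at your actor
    # implementation; the class name below is purely hypothetical:
    #
    # store-actor-class = com.example.MyDurableStore
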
    use-dispatcher = akka.cluster.distributed-data.durable.pinned-store

    pinned-store {
      executor = thread-pool-executor
      type = PinnedDispatcher
    }

    # Config for the LmdbDurableStore
    lmdb {
      # Directory of the LMDB file. There are two options:
      # 1. A relative or absolute path to a directory that ends with 'ddata';
      #    the full name of the directory will contain the name of the
      #    ActorSystem and its remote port.
      # 2. Otherwise the path is used as is, as a relative or absolute path
      #    to a directory.
      #
      # When running in production you may want to configure this to a
      # specific path (alternative 2), since the default directory contains
      # the remote port of the actor system to make the name unique. If using
      # a dynamically assigned port (0) it will be different each time and
      # the previously stored data will not be loaded.
      dir = "ddata"

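      # For example, a fixed production path (alternative 2 above; the path
      # itself is hypothetical):
      #
      # dir = "/var/lib/myapp/ddata"
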
      # Size in bytes of the memory mapped file.
      map-size = 100 MiB

      # Accumulating changes before storing improves performance, with the
      # risk of losing the last writes if the JVM crashes.
      # The interval is by default set to 'off' to write each update
      # immediately. Enabling write-behind by specifying a duration, e.g.
      # 200 ms, is especially efficient when performing many writes to the
      # same key, because only the last value for each key will be serialized
      # and stored.
      # write-behind-interval = 200 ms
      write-behind-interval = off
    }
  }

}
#//#distributed-data

# Protobuf serializer for cluster DistributedData messages
akka.actor {
  serializers {
    akka-data-replication = "akka.cluster.ddata.protobuf.ReplicatorMessageSerializer"
    akka-replicated-data = "akka.cluster.ddata.protobuf.ReplicatedDataSerializer"
  }
  serialization-bindings {
    "akka.cluster.ddata.Replicator$ReplicatorMessage" = akka-data-replication
    "akka.cluster.ddata.ReplicatedDataSerialization" = akka-replicated-data
  }
  serialization-identifiers {
    "akka.cluster.ddata.protobuf.ReplicatedDataSerializer" = 11
    "akka.cluster.ddata.protobuf.ReplicatorMessageSerializer" = 12
  }
}