##############################
# Akka Reference Config File #
##############################
# This reference config file contains all of the default settings.
# All of these could be removed with no visible effect.
# Modify as needed.
# This file is imported by the 'akka.conf' file. Make your edits/overrides there.
akka {
version = "2.0-SNAPSHOT" # Akka version, checked against the runtime version of Akka.
enabled-modules = [] # Comma separated list of the enabled modules. Options: ["cluster", "camel", "http"]
time-unit = "seconds" # Time unit for all timeout properties throughout the config
event-handlers = ["akka.event.EventHandler$DefaultListener"] # Event handlers to register at boot time (EventHandler$DefaultListener logs to STDOUT)
event-handler-level = "INFO" # Options: ERROR, WARNING, INFO, DEBUG
# These boot classes are loaded (and created) automatically when the Akka Microkernel boots up
# Can be used to bootstrap your application(s)
# Should be the FQN (Fully Qualified Name) of the boot class which needs to have a default constructor
# boot = ["sample.camel.Boot",
# "sample.rest.java.Boot",
# "sample.rest.scala.Boot",
# "sample.security.Boot"]
boot = []
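# A minimal, illustrative override of these top-level settings in your own
# 'akka.conf' (the chosen modules and log level below are just examples):
#
# akka {
#   enabled-modules = ["camel", "http"]
#   event-handler-level = "DEBUG"
# }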
actor {
timeout = 5 # Default timeout for Future based invocations
# - Actor: ask && ?
# - UntypedActor: ask
# - TypedActor: methods with non-void return type
serialize-messages = off # Does a deep clone of (non-primitive) messages to ensure immutability
throughput = 5 # Default throughput for all Dispatchers, set to 1 for complete fairness
throughput-deadline-time = -1 # Default throughput deadline for all Dispatchers, set to 0 or negative for no deadline
dispatcher-shutdown-timeout = 1 # How long (in akka.time-unit) dispatchers will by default wait for new actors before they shut down
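# For instance, to trade throughput for fairness you might override the actor
# defaults in 'akka.conf' (the values below are illustrative, not recommendations):
#
# akka.actor {
#   timeout = 10
#   throughput = 1 # process one message per actor before yielding the thread
# }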
deployment {
service-ping { # stateless actor with replication factor 3 and a "least-cpu" load-balancer
router = "least-cpu" # routing (load-balance) scheme to use
# available: "direct", "round-robin", "random",
# "least-cpu", "least-ram", "least-messages"
# or: fully qualified class name of the router class
# default is "direct";
# if 'replication' is used then the only available router is "direct"
failure-detector = "remove-connection-on-first-remote-failure" # failure detection scheme to use
# available: "remove-connection-on-first-local-failure"
# "remove-connection-on-first-remote-failure"
# "circuit-breaker"
# or: fully qualified class name of the failure detector class
# default is "remove-connection-on-first-remote-failure";
clustered { # makes the actor available in the cluster registry
# default (if omitted) is local non-clustered actor
preferred-nodes = ["node:node1"] # a list of preferred nodes on which to instantiate the actor instances
# defined as node name
# available: "node:<node name>"
replication-factor = 3 # number of actor instances in the cluster
# available: positive integer (0-N) or the string "auto" for auto-scaling
# if "auto" is used then 'home' has no meaning
# default is '0', meaning no replicas;
# if the "direct" router is used then this element is ignored (always '1')
replication { # use replication or not? only makes sense for a stateful actor
# FIXME should we have this config option here? If so, implement it all through.
serialize-mailbox = off # should the actor mailbox be part of the serialized snapshot?
# default is 'off'
storage = "transaction-log" # storage model for replication
# available: "transaction-log" and "data-grid"
# default is "transaction-log"
strategy = "write-through" # guarantees for replication
# available: "write-through" and "write-behind"
# default is "write-through"
}
}
}
}
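# Sketch of an additional deployment entry in 'akka.conf', assuming an actor
# registered under the hypothetical address 'service-pong':
#
# akka.actor.deployment {
#   service-pong {
#     router = "round-robin"
#     failure-detector = "circuit-breaker"
#     clustered {
#       preferred-nodes = ["node:node2"]
#       replication-factor = 2
#     }
#   }
# }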
default-dispatcher {
type = "Dispatcher" # Must be one of the following
# Dispatcher or BalancingDispatcher (the latter is only valid when all actors using it are of the same type),
# A FQCN to a class inheriting MessageDispatcherConfigurator with a no-arg visible constructor
name = "MyDispatcher" # Optional, will be a generated UUID if omitted
keep-alive-time = 60 # Keep alive time for threads
core-pool-size-factor = 8.0 # No of core threads ... ceil(available processors * factor)
max-pool-size-factor = 8.0 # Max no of threads ... ceil(available processors * factor)
executor-bounds = -1 # Makes the Executor bounded, -1 is unbounded
task-queue-size = -1 # Specifies the bounded capacity of the task queue (< 1 == unbounded)
task-queue-type = "linked" # Specifies which type of task queue will be used, can be "array" or "linked" (default)
allow-core-timeout = on # Allow core threads to time out
rejection-policy = "caller-runs" # abort, caller-runs, discard-oldest, discard
throughput = 5 # Throughput for Dispatcher, set to 1 for complete fairness
throughput-deadline-time = -1 # Throughput deadline for Dispatcher, set to 0 or negative for no deadline
mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default)
# If positive then a bounded mailbox is used and the capacity is set using the property
# NOTE: setting a mailbox to 'blocking' can be dangerous; it could lead to deadlock, so use with care
# The following are only used for Dispatcher and only if mailbox-capacity > 0
mailbox-push-timeout-time = 10 # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout
# (in unit defined by the time-unit property)
}
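# Illustrative tuning of the default dispatcher in 'akka.conf', e.g. to bound
# the task queue and favour fairness (values are examples only):
#
# akka.actor.default-dispatcher {
#   throughput = 1
#   task-queue-size = 1000
#   task-queue-type = "array"
#   mailbox-capacity = 1000
#   mailbox-push-timeout-time = 10
# }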
debug {
receive = off # enable the logging performed by Actor.loggable(), which logs any received message at DEBUG level
autoreceive = off # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill and the like)
lifecycle = off # enable DEBUG logging of actor lifecycle changes
}
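# During development you might switch all three flags on in 'akka.conf' to
# trace message flow and actor lifecycles; you will likely also want
# 'event-handler-level = "DEBUG"' so the messages are actually visible:
#
# akka.actor.debug {
#   receive = on
#   autoreceive = on
#   lifecycle = on
# }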
mailbox {
file-based {
directory-path = "./_mb"
max-items = 2147483647
max-size = 2147483647
max-age = 0
max-journal-size = 16777216 # 16 * 1024 * 1024
max-memory-size = 134217728 # 128 * 1024 * 1024
max-journal-overflow = 10
max-journal-size-absolute = 9223372036854775807
discard-old-when-full = on
keep-journal = on
sync-journal = off
}
redis {
hostname = "127.0.0.1"
port = 6379
}
mongodb {
# Any specified collection name will be used as a prefix for collections that use durable mongo mailboxes
uri = "mongodb://localhost/akka.mailbox" # Follow Mongo URI Spec - http://www.mongodb.org/display/DOCS/Connections
# Configurable timeouts for certain ops
timeout {
read = 3000 # number of milliseconds to wait for a read to succeed before timing out the future
write = 3000 # number of milliseconds to wait for a write to succeed before timing out the future
}
}
zookeeper {
server-addresses = "localhost:2181"
session-timeout = 60
connection-timeout = 60
blocking-queue = on
}
beanstalk {
hostname = "127.0.0.1"
port = 11300
reconnect-window = 5
message-submit-delay = 0
message-submit-timeout = 5
message-time-to-live = 120
}
}
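# Illustrative overrides for the durable mailbox backends in 'akka.conf'
# (the path and hosts below are placeholders):
#
# akka.actor.mailbox {
#   file-based.directory-path = "/var/lib/myapp/mailboxes"
#   mongodb.uri = "mongodb://db-host:27017/myapp.mailbox"
#   redis {
#     hostname = "redis-host"
#     port = 6379
#   }
# }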
# Entries for pluggable serializers and their bindings. If a binding for a specific class is not found,
# then the default serializer (Java serialization) is used.
#
# serializers {
# java = "akka.serialization.JavaSerializer"
# proto = "akka.testing.ProtobufSerializer"
# sjson = "akka.testing.SJSONSerializer"
# default = "akka.serialization.JavaSerializer"
# }
# serialization-bindings {
# java = ["akka.serialization.SerializeSpec$Address",
# "akka.serialization.MyJavaSerializableActor",
# "akka.serialization.MyStatelessActorWithMessagesInMailbox",
# "akka.serialization.MyActorWithProtobufMessagesInMailbox"]
# sjson = ["akka.serialization.SerializeSpec$Person"]
# proto = ["com.google.protobuf.Message",
# "akka.actor.ProtobufProtocol$MyMessage"]
# }
}
cluster {
name = "test-cluster"
zookeeper-server-addresses = "localhost:2181" # comma-separated list of '<hostname>:<port>' elements
remote-server-port = 2552
max-time-to-wait-until-connected = 30
session-timeout = 60
connection-timeout = 60
use-compression = off
remote-daemon-ack-timeout = 30 # Timeout for ACK of cluster operations, like checking actors out etc.
include-ref-node-in-replica-set = on # Can a replica be instantiated on the same node as the cluster reference to the actor
# Default: on
compression-scheme = "" # Options: "zlib" (lzf to come), leave out for no compression
zlib-compression-level = 6 # Options: 0-9 (1 being fastest and 9 being the most compressed), default is 6
# FIXME rename to transport
layer = "akka.cluster.netty.NettyRemoteSupport"
secure-cookie = "" # Generate your own with '$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh'
# or using 'akka.util.Crypt.generateSecureCookie'
log-directory = "_akka_cluster" # Where ZooKeeper should store the logs and data files
replication {
digest-type = "MAC" # Options: CRC32 (cheap & unsafe), MAC (expensive & secure using password)
password = "secret" # FIXME: store open in file?
ensemble-size = 3
quorum-size = 2
snapshot-frequency = 1000 # The number of messages that should be logged between every actor snapshot
timeout = 30 # Timeout for asynchronous (write-behind) operations
}
server {
port = 2552 # The default remote server port clients should connect to. Default is 2552 (AKKA)
message-frame-size = 1048576 # Increase this if you want to be able to send messages with large payloads
connection-timeout = 120 # Length in time-unit
require-cookie = off # Should the remote server require that its peers share the same secure-cookie (defined above in the 'cluster' section)?
untrusted-mode = off # Enable untrusted mode for full security of server-managed actors; allows untrusted clients to connect
backlog = 4096 # Sets the size of the connection backlog
execution-pool-keepalive = 60 # Length in akka.time-unit how long core threads will be kept alive if idling
execution-pool-size = 16 # Size of the core pool of the remote execution unit
max-channel-memory-size = 0 # Maximum channel size, 0 for off
max-total-memory-size = 0 # Maximum total size of all channels, 0 for off
}
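# Illustrative server-side tuning in 'akka.conf', e.g. for larger payloads and
# cookie-based authentication (sizes below are placeholders):
#
# akka.cluster.server {
#   message-frame-size = 10485760 # ~10 MiB payloads
#   require-cookie = on # also set 'akka.cluster.secure-cookie'
#   backlog = 8192
# }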
client {
buffering {
retry-message-send-on-failure = false # Should message buffering on remote client error be used (buffer flushed on successful reconnect)
capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default)
# If positive then a bounded mailbox is used and the capacity is set using the property
}
reconnect-delay = 5
read-timeout = 3600
message-frame-size = 1048576
reap-futures-delay = 5
reconnection-time-window = 600 # Maximum time window that a client should try to reconnect for
}
}
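# Illustrative cluster override in 'akka.conf' (cluster name and ZooKeeper
# hosts below are placeholders):
#
# akka.cluster {
#   name = "my-cluster"
#   zookeeper-server-addresses = "zk1:2181,zk2:2181,zk3:2181"
#   compression-scheme = "zlib"
#   zlib-compression-level = 9
# }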
stm {
fair = on # Should global transactions be fair or non-fair (non-fair yields better performance)
max-retries = 1000
timeout = 5 # Default timeout for blocking transactions and transaction set (in unit defined by
# the time-unit property)
write-skew = true
blocking-allowed = false
interruptible = false
speculative = true
quick-release = true
propagation = "requires"
trace-level = "none"
}
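# Illustrative STM override in 'akka.conf', e.g. for longer-running blocking
# transactions (values are examples only):
#
# akka.stm {
#   fair = off
#   timeout = 10
#   blocking-allowed = true
#   interruptible = true
# }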
http {
hostname = "localhost"
port = 9998
mist-dispatcher { # If you are using akka.http.AkkaMistServlet
#type = "GlobalDispatcher" # Uncomment if you want to use a different dispatcher than the default one for Comet
}
connection-close = true # toggles the addition of the "Connection" response header with a "close" value
root-actor-id = "_httproot" # the id of the actor to use as the root endpoint
root-actor-builtin = true # toggles the use of the built-in root endpoint base class
timeout = 1000 # the default timeout for all async requests (in ms)
expired-header-name = "Async-Timeout" # the name of the response header to use when an async request expires
expired-header-value = "expired" # the value of the response header to use when an async request expires
}
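# Illustrative override to expose the HTTP endpoint on another interface
# (host and port below are placeholders):
#
# akka.http {
#   hostname = "0.0.0.0"
#   port = 8080
#   timeout = 5000 # ms
# }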
test {
timefactor = "1.0" # factor by which to scale timeouts during tests, e.g. to account for shared build system load
}
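# For example, on a heavily loaded CI machine you might stretch all test
# timeouts by a factor of three:
#
# akka.test.timefactor = "3.0"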
}