diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala index e38ea1c3d4..ff13edb373 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala @@ -51,6 +51,7 @@ class DeployerSpec extends AkkaSpec(DeployerSpec.deployerConf) { "A Deployer" must { "be able to parse 'akka.actor.deployment._' with all default values" in { + println(system.settings.toString) val service = "/user/service1" val deployment = system.asInstanceOf[ActorSystemImpl].provider.deployer.lookup(service) deployment must be('defined) diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index f31e61bcbe..8814a65a3b 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -3,27 +3,37 @@ ############################## # This the reference config file has all the default settings. -# Make your edits/overrides in your akka.conf. +# Make your edits/overrides in your application.conf. akka { - version = "2.0-SNAPSHOT" # Akka version, checked against the runtime version of Akka. - - home = "" # Home directory of Akka, modules in the deploy directory will be loaded + # Akka version, checked against the runtime version of Akka. + version = "2.0-SNAPSHOT" - enabled-modules = [] # Comma separated list of the enabled modules. 
Options: ["cluster", "camel", "http"] + # Home directory of Akka, modules in the deploy directory will be loaded + home = "" - event-handlers = ["akka.event.Logging$DefaultLogger"] # Event handlers to register at boot time (Logging$DefaultLogger logs to STDOUT) - loglevel = "INFO" # Options: ERROR, WARNING, INFO, DEBUG - # this level is used by the configured loggers (see "event-handlers") as soon - # as they have been started; before that, see "stdout-loglevel" - stdout-loglevel = "WARNING" # Loglevel for the very basic logger activated during AkkaApplication startup - # FIXME: Is there any sensible reason why we have 2 different log levels? + # Comma separated list of the enabled modules. Options: ["cluster", "camel", "http"] + enabled-modules = [] - logConfigOnStart = off # Log the complete configuration at INFO level when the actor system is started. - # This is useful when you are uncertain of what configuration is used. + # Event handlers to register at boot time (Logging$DefaultLogger logs to STDOUT) + event-handlers = ["akka.event.Logging$DefaultLogger"] - extensions = [] # List FQCN of extensions which shall be loaded at actor system startup. - # FIXME: clarify "extensions" here, "Akka Extensions ()" + # Log level used by the configured loggers (see "event-handlers") as soon + # as they have been started; before that, see "stdout-loglevel" + # Options: ERROR, WARNING, INFO, DEBUG + loglevel = "INFO" + + # Log level for the very basic logger activated during AkkaApplication startup + # Options: ERROR, WARNING, INFO, DEBUG + stdout-loglevel = "WARNING" + + # Log the complete configuration at INFO level when the actor system is started. + # This is useful when you are uncertain of what configuration is used. + logConfigOnStart = off + + # List FQCN of extensions which shall be loaded at actor system startup. 
+ # FIXME: clarify "extensions" here, "Akka Extensions ()" + extensions = [] # These boot classes are loaded (and created) automatically when the Akka Microkernel boots up # Can be used to bootstrap your application(s) @@ -35,88 +45,127 @@ akka { boot = [] actor { + provider = "akka.actor.LocalActorRefProvider" - creation-timeout = 20s # Timeout for ActorSystem.actorOf - timeout = 5s # Default timeout for Future based invocations - # - Actor: ask && ? - # - UntypedActor: ask - # - TypedActor: methods with non-void return type - serialize-messages = off # Does a deep clone of (non-primitive) messages to ensure immutability - dispatcher-shutdown-timeout = 1s # How long dispatchers by default will wait for new actors until they shut down + + # Timeout for ActorSystem.actorOf + creation-timeout = 20s + + # Default timeout for Future based invocations + # - Actor: ask && ? + # - UntypedActor: ask + # - TypedActor: methods with non-void return type + timeout = 5s + + # Does a deep clone of (non-primitive) messages to ensure immutability + serialize-messages = off + + # How long dispatchers by default will wait for new actors until they shut down + dispatcher-shutdown-timeout = 1s deployment { - - default { # deployment id pattern, e.g. /app/service-ping - router = "direct" # routing (load-balance) scheme to use - # available: "direct", "round-robin", "random", "scatter-gather" - # or: fully qualified class name of the router class - # default is "direct"; - # In case of non-direct routing, the actors to be routed to can be specified - # in several ways: - # - nr-of-instances: will create that many children given the actor factory - # supplied in the source code (overridable using create-as below) - # - target.paths: will look the paths up using actorFor and route to - # them, i.e. will not create children + # deployment id pattern, e.g. 
/user/service-ping + default { - nr-of-instances = 1 # number of children to create in case of a non-direct router; this setting - # is ignored if target.paths is given - create-as { # FIXME document 'create-as' - class = "" # fully qualified class name of recipe implementation + # routing (load-balance) scheme to use + # available: "direct", "round-robin", "random", "scatter-gather" + # or: fully qualified class name of the router class + # default is "direct"; + # In case of non-direct routing, the actors to be routed to can be specified + # in several ways: + # - nr-of-instances: will create that many children given the actor factory + # supplied in the source code (overridable using create-as below) + # - target.paths: will look the paths up using actorFor and route to + # them, i.e. will not create children + router = "direct" + + # number of children to create in case of a non-direct router; this setting + # is ignored if target.paths is given + nr-of-instances = 1 + + # FIXME document 'create-as' + create-as { + # fully qualified class name of recipe implementation + class = "" } target { - paths = [] # Alternatively to giving nr-of-instances you can specify the full paths of - # those actors which should be routed to. This setting takes precedence over - # nr-of-instances + # Alternatively to giving nr-of-instances you can specify the full paths of + # those actors which should be routed to. This setting takes precedence over + # nr-of-instances + paths = [] } - + } } default-dispatcher { - type = "Dispatcher" # Must be one of the following - # Dispatcher, (BalancingDispatcher, only valid when all actors using it are of the same type), - # A FQCN to a class inheriting MessageDispatcherConfigurator with a no-arg visible constructor - name = "DefaultDispatcher" # Name used in log messages and thread names. 
- daemonic = off # Toggles whether the threads created by this dispatcher should be daemons or not - keep-alive-time = 60s # Keep alive time for threads - core-pool-size-min = 8 # minimum number of threads to cap factor-based core number to - core-pool-size-factor = 8.0 # No of core threads ... ceil(available processors * factor) - core-pool-size-max = 4096 # maximum number of threads to cap factor-based number to - # Hint: max-pool-size is only used for bounded task queues - max-pool-size-min = 8 # minimum number of threads to cap factor-based max number to - max-pool-size-factor = 8.0 # Max no of threads ... ceil(available processors * factor) - max-pool-size-max = 4096 # maximum number of threads to cap factor-based max number to - task-queue-size = -1 # Specifies the bounded capacity of the task queue (< 1 == unbounded) - task-queue-type = "linked" # Specifies which type of task queue will be used, can be "array" or "linked" (default) - allow-core-timeout = on # Allow core threads to time out - throughput = 5 # Throughput defines the number of messages that are processed in a batch before the - # thread is returned to the pool. Set to 1 for as fair as possible. 
- throughput-deadline-time = 0ms # Throughput deadline for Dispatcher, set to 0 or negative for no deadline - mailbox-capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) - # If positive then a bounded mailbox is used and the capacity is set using the property - # NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to deadlock, use with care - # The following are only used for Dispatcher and only if mailbox-capacity > 0 - mailbox-push-timeout-time = 10s # Specifies the timeout to add a new message to a mailbox that is full - negative number means infinite timeout + # Must be one of the following + # Dispatcher, (BalancingDispatcher, only valid when all actors using it are of the same type), + # A FQCN to a class inheriting MessageDispatcherConfigurator with a no-arg visible constructor + type = "Dispatcher" + # Name used in log messages and thread names. + name = "DefaultDispatcher" + # Toggles whether the threads created by this dispatcher should be daemons or not + daemonic = off + # Keep alive time for threads + keep-alive-time = 60s + # minimum number of threads to cap factor-based core number to + core-pool-size-min = 8 + # No of core threads ... ceil(available processors * factor) + core-pool-size-factor = 8.0 + # maximum number of threads to cap factor-based number to + core-pool-size-max = 4096 + # Hint: max-pool-size is only used for bounded task queues + # minimum number of threads to cap factor-based max number to + max-pool-size-min = 8 + # Max no of threads ... 
ceil(available processors * factor) + max-pool-size-factor = 8.0 + # maximum number of threads to cap factor-based max number to + max-pool-size-max = 4096 + # Specifies the bounded capacity of the task queue (< 1 == unbounded) + task-queue-size = -1 + # Specifies which type of task queue will be used, can be "array" or "linked" (default) + task-queue-type = "linked" + # Allow core threads to time out + allow-core-timeout = on + # Throughput defines the number of messages that are processed in a batch before the + # thread is returned to the pool. Set to 1 for as fair as possible. + throughput = 5 + # Throughput deadline for Dispatcher, set to 0 or negative for no deadline + throughput-deadline-time = 0ms + # If negative (or zero) then an unbounded mailbox is used (default) + # If positive then a bounded mailbox is used and the capacity is set using the property + # NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to deadlock, use with care + # The following are only used for Dispatcher and only if mailbox-capacity > 0 + mailbox-capacity = -1 + # Specifies the timeout to add a new message to a mailbox that is full - + # negative number means infinite timeout + mailbox-push-timeout-time = 10s } debug { - receive = off # enable function of Actor.loggable(), which is to log any received message at DEBUG level - autoreceive = off # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill and the like) - lifecycle = off # enable DEBUG logging of actor lifecycle changes - fsm = off # enable DEBUG logging of all LoggingFSMs for events, transitions and timers - event-stream = off # enable DEBUG logging of subscription changes on the eventStream + # enable function of Actor.loggable(), which is to log any received message at DEBUG level + receive = off + # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill and the like) + autoreceive = off + # enable DEBUG logging of actor lifecycle changes + lifecycle = off + # enable DEBUG 
logging of all LoggingFSMs for events, transitions and timers + fsm = off + # enable DEBUG logging of subscription changes on the eventStream + event-stream = off } - + # Entries for pluggable serializers and their bindings. If a binding for a specific class is not found, # then the default serializer (Java serialization) is used. - # serializers { # java = "akka.serialization.JavaSerializer" # proto = "akka.testing.ProtobufSerializer" # sjson = "akka.testing.SJSONSerializer" + default = "akka.serialization.JavaSerializer" } @@ -137,7 +186,6 @@ akka { # scheduler { # The HashedWheelTimer (HWT) implementation from Netty is used as the default scheduler in the system. - # # HWT does not execute the scheduled tasks on exact time. # It will, on every tick, check if there are any tasks behind the schedule and execute them. # You can increase or decrease the accuracy of the execution timing by specifying smaller or larger tick duration. @@ -146,5 +194,5 @@ akka { tickDuration = 100ms ticksPerWheel = 512 } - + } diff --git a/akka-docs/general/configuration.rst b/akka-docs/general/configuration.rst index 6f00cae81f..5bbb012a1d 100644 --- a/akka-docs/general/configuration.rst +++ b/akka-docs/general/configuration.rst @@ -11,26 +11,26 @@ Configuration Specifying the configuration file --------------------------------- -If you don't specify a configuration file then Akka uses default values, corresponding to the reference -configuration files that you see below. You can specify your own configuration file to override any -property in the reference config. You only have to define the properties that differ from the default +If you don't specify a configuration file then Akka uses default values, corresponding to the reference +configuration files that you see below. You can specify your own configuration file to override any +property in the reference config. You only have to define the properties that differ from the default configuration. 
-By default the ``ConfigFactory.load`` method is used, which will load all ``application.conf`` (and +By default the ``ConfigFactory.load`` method is used, which will load all ``application.conf`` (and ``application.json`` and ``application.properties``) from the root of the classpath, if they exists. -It uses ``ConfigFactory.defaultOverrides``, i.e. system properties, before falling back to +It uses ``ConfigFactory.defaultOverrides``, i.e. system properties, before falling back to application and reference configuration. Note that *all* ``application.{conf,json,properties}`` classpath resources, from all directories and -jar files, are loaded and merged. Therefore it is a good practice to define separate sub-trees in the +jar files, are loaded and merged. Therefore it is a good practice to define separate sub-trees in the configuration for each actor system, and grab the specific configuration when instantiating the ActorSystem. :: - - myapp1 { + + myapp1 { akka.loglevel = WARNING } - myapp2 { + myapp2 { akka.loglevel = ERROR } @@ -44,7 +44,7 @@ classpath resource, file, or URL specified in those properties will be used rath ``application.{conf,json,properties}`` classpath resources. Note that classpath resource names start with ``/``. ``-Dconfig.resource=/dev.conf`` will load the ``dev.conf`` from the root of the classpath. -You may also specify and parse the configuration programmatically in other ways when instantiating +You may also specify and parse the configuration programmatically in other ways when instantiating the ``ActorSystem``. .. includecode:: code/akka/docs/config/ConfigDocSpec.scala @@ -66,7 +66,7 @@ Each Akka module has a reference configuration file with the default values. .. literalinclude:: ../../akka-remote/src/main/resources/reference.conf :language: none - + *akka-testkit:* .. 
literalinclude:: ../../akka-testkit/src/main/resources/reference.conf @@ -103,30 +103,30 @@ A custom ``application.conf`` might look like this:: # Copy in parts of the reference files and modify as you please. akka { + + # Event handlers to register at boot time (Logging$DefaultLogger logs to STDOUT) event-handlers = ["akka.event.slf4j.Slf4jEventHandler"] - loglevel = DEBUG # Options: ERROR, WARNING, INFO, DEBUG - # this level is used by the configured loggers (see "event-handlers") as soon - # as they have been started; before that, see "stdout-loglevel" - stdout-loglevel = DEBUG # Loglevel for the very basic logger activated during AkkaApplication startup - # Comma separated list of the enabled modules. - enabled-modules = ["camel", "remote"] + # Log level used by the configured loggers (see "event-handlers") as soon + # as they have been started; before that, see "stdout-loglevel" + # Options: ERROR, WARNING, INFO, DEBUG + loglevel = DEBUG - # These boot classes are loaded (and created) automatically when the Akka Microkernel boots up - # Can be used to bootstrap your application(s) - # Should be the FQN (Fully Qualified Name) of the boot class which needs to have a default constructor - boot = ["sample.camel.Boot", - "sample.myservice.Boot"] + # Log level for the very basic logger activated during AkkaApplication startup + # Options: ERROR, WARNING, INFO, DEBUG + stdout-loglevel = DEBUG actor { default-dispatcher { - throughput = 10 # Throughput for default Dispatcher, set to 1 for as fair as possible + # Throughput for default Dispatcher, set to 1 for as fair as possible + throughput = 10 } } remote { server { - port = 2562 # The port clients should connect to. Default is 2552 (AKKA) + # The port clients should connect to. Default is 2552 (AKKA) + port = 2562 } } } @@ -136,7 +136,7 @@ Config file format ------------------ The configuration file syntax is described in the `HOCON `_ -specification. Note that it supports three formats; conf, json, and properties. 
+specification. Note that it supports three formats; conf, json, and properties. Including files @@ -145,7 +145,7 @@ Including files Sometimes it can be useful to include another configuration file, for example if you have one ``application.conf`` with all environment independent settings and then override some settings for specific environments. -Specifying system property with ``-Dconfig.resource=/dev.conf`` will load the ``dev.conf`` file, which includes the ``application.conf`` +Specifying system property with ``-Dconfig.resource=/dev.conf`` will load the ``dev.conf`` file, which includes the ``application.conf`` dev.conf: @@ -166,6 +166,6 @@ specification. Logging of Configuration ------------------------ -If the system or config property ``akka.logConfigOnStart`` is set to ``on``, then the -complete configuration at INFO level when the actor system is started. This is useful +If the system or config property ``akka.logConfigOnStart`` is set to ``on``, then the +complete configuration is logged at INFO level when the actor system is started. This is useful when you are uncertain of what configuration is used. diff --git a/akka-docs/java/dispatchers.rst b/akka-docs/java/dispatchers.rst index 79397b7c66..d053501d78 100644 --- a/akka-docs/java/dispatchers.rst +++ b/akka-docs/java/dispatchers.rst @@ -6,7 +6,7 @@ Dispatchers (Java) .. sidebar:: Contents .. contents:: :local: - + The Dispatcher is an important piece that allows you to configure the right semantics and parameters for optimal performance, throughput and scalability. Different Actors have different needs. Akka supports dispatchers for both event-driven lightweight threads, allowing creation of millions of threads on a single workstation, and thread-based Actors, where each dispatcher is bound to a dedicated OS thread. 
-Example of a custom event-based dispatcher, which can be fetched with ``system.dispatcherFactory().lookup("my-dispatcher")`` +Example of a custom event-based dispatcher, which can be fetched with ``system.dispatcherFactory().lookup("my-dispatcher")`` as in the example above: .. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#my-dispatcher-config @@ -115,7 +115,7 @@ Priority event-based ^^^^^^^^^^^^^^^^^^^^ Sometimes it's useful to be able to specify priority order of messages, that is done by using Dispatcher and supply -an UnboundedPriorityMailbox or BoundedPriorityMailbox with a ``java.util.Comparator[Envelope]`` or use a +an UnboundedPriorityMailbox or BoundedPriorityMailbox with a ``java.util.Comparator[Envelope]`` or use a ``akka.dispatch.PriorityGenerator`` (recommended). Creating a Dispatcher using PriorityGenerator: @@ -129,9 +129,9 @@ Work-sharing event-based The ``BalancingDispatcher`` is a variation of the ``Dispatcher`` in which Actors of the same type can be set up to share this dispatcher and during execution time the different actors will steal messages from other actors if they -have less messages to process. +have less messages to process. Although the technique used in this implementation is commonly known as "work stealing", the actual implementation is probably -best described as "work donating" because the actor of which work is being stolen takes the initiative. +best described as "work donating" because the actor of which work is being stolen takes the initiative. This can be a great way to improve throughput at the cost of a little higher latency. .. includecode:: ../scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala#my-balancing-config @@ -154,8 +154,9 @@ if not specified otherwise. 
akka { actor { default-dispatcher { - task-queue-size = 1000 # If negative (or zero) then an unbounded mailbox is used (default) - # If positive then a bounded mailbox is used and the capacity is set to the number specified + # If negative (or zero) then an unbounded mailbox is used (default) + # If positive then a bounded mailbox is used and the capacity is set to the number specified + task-queue-size = 1000 } } } diff --git a/akka-docs/java/logging.rst b/akka-docs/java/logging.rst index c9ad9256fc..20920d940b 100644 --- a/akka-docs/java/logging.rst +++ b/akka-docs/java/logging.rst @@ -25,14 +25,14 @@ The source object is translated to a String according to the following rules: * in case of a class an approximation of its simpleName * and in all other cases the simpleName of its class -The log message may contain argument placeholders ``{}``, which will be substituted if the log level +The log message may contain argument placeholders ``{}``, which will be substituted if the log level is enabled. Event Handler ============= -Logging is performed asynchronously through an event bus. You can configure which event handlers that should -subscribe to the logging events. That is done using the 'event-handlers' element in the :ref:`configuration`. +Logging is performed asynchronously through an event bus. You can configure which event handlers that should +subscribe to the logging events. That is done using the 'event-handlers' element in the :ref:`configuration`. Here you can also define the log level. .. code-block:: ruby @@ -40,16 +40,17 @@ Here you can also define the log level. akka { # Event handlers to register at boot time (Logging$DefaultLogger logs to STDOUT) event-handlers = ["akka.event.Logging$DefaultLogger"] - loglevel = "DEBUG" # Options: ERROR, WARNING, INFO, DEBUG + # Options: ERROR, WARNING, INFO, DEBUG + loglevel = "DEBUG" } -The default one logs to STDOUT and is registered by default. It is not intended to be used for production. 
There is also an :ref:`slf4j-java` +The default one logs to STDOUT and is registered by default. It is not intended to be used for production. There is also an :ref:`slf4j-java` event handler available in the 'akka-slf4j' module. Example of creating a listener: .. includecode:: code/akka/docs/event/LoggingDocTestBase.java - :include: imports,imports-listener,my-event-listener + :include: imports,imports-listener,my-event-listener .. _slf4j-java: @@ -57,7 +58,7 @@ Example of creating a listener: SLF4J ===== -Akka provides an event handler for `SL4FJ `_. This module is available in the 'akka-slf4j.jar'. +Akka provides an event handler for `SLF4J `_. This module is available in the 'akka-slf4j.jar'. It has one single dependency; the slf4j-api jar. In runtime you also need a SLF4J backend, we recommend `Logback `_: .. code-block:: xml @@ -69,10 +70,10 @@ It has one single dependency; the slf4j-api jar. In runtime you also need a SLF4 runtime -You need to enable the Slf4jEventHandler in the 'event-handlers' element in -the :ref:`configuration`. Here you can also define the log level of the event bus. +You need to enable the Slf4jEventHandler in the 'event-handlers' element in +the :ref:`configuration`. Here you can also define the log level of the event bus. More fine grained log levels can be defined in the configuration of the SLF4J backend -(e.g. logback.xml). The String representation of the source object that is used when +(e.g. logback.xml). The String representation of the source object that is used when creating the ``LoggingAdapter`` correspond to the name of the SL4FJ logger. .. code-block:: ruby @@ -89,9 +90,9 @@ Since the logging is done asynchronously the thread in which the logging was per Mapped Diagnostic Context (MDC) with attribute name ``sourceThread``. 
With Logback the thread name is available with ``%X{sourceThread}`` specifier within the pattern layout configuration:: - - - %date{ISO8601} %-5level %logger{36} %X{sourceThread} - %msg%n - - + + + %date{ISO8601} %-5level %logger{36} %X{sourceThread} - %msg%n + + diff --git a/akka-docs/modules/camel.rst b/akka-docs/modules/camel.rst index 4aa988d609..f556914853 100644 --- a/akka-docs/modules/camel.rst +++ b/akka-docs/modules/camel.rst @@ -1528,7 +1528,8 @@ when camel is added to the enabled-modules list in :ref:`configuration`, for exa akka { ... - enabled-modules = ["camel"] # Options: ["remote", "camel", "http"] + # Options: ["remote", "camel", "http"] + enabled-modules = ["camel"] ... } diff --git a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala index 27d3995c1c..ffe0e4ed4b 100644 --- a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala @@ -16,27 +16,35 @@ object DispatcherDocSpec { val config = """ //#my-dispatcher-config my-dispatcher { - type = Dispatcher # Dispatcher is the name of the event-based dispatcher - daemonic = off # Toggles whether the threads created by this dispatcher should be daemons or not - core-pool-size-min = 2 # minimum number of threads to cap factor-based core number to - core-pool-size-factor = 2.0 # No of core threads ... ceil(available processors * factor) - core-pool-size-max = 10 # maximum number of threads to cap factor-based number to - throughput = 100 # Throughput defines the number of messages that are processed in a batch before the - # thread is returned to the pool. Set to 1 for as fair as possible. 
+ # Dispatcher is the name of the event-based dispatcher + type = Dispatcher + # Toggles whether the threads created by this dispatcher should be daemons or not + daemonic = off + # minimum number of threads to cap factor-based core number to + core-pool-size-min = 2 + # No of core threads ... ceil(available processors * factor) + core-pool-size-factor = 2.0 + # maximum number of threads to cap factor-based number to + core-pool-size-max = 10 + # Throughput defines the number of messages that are processed in a batch before the + # thread is returned to the pool. Set to 1 for as fair as possible. + throughput = 100 } //#my-dispatcher-config - + //#my-bounded-config my-dispatcher-bounded-queue { type = Dispatcher core-pool-size-factor = 8.0 max-pool-size-factor = 16.0 - task-queue-size = 100 # Specifies the bounded capacity of the task queue - task-queue-type = "array" # Specifies which type of task queue will be used, can be "array" or "linked" (default) + # Specifies the bounded capacity of the task queue + task-queue-size = 100 + # Specifies which type of task queue will be used, can be "array" or "linked" (default) + task-queue-type = "array" throughput = 3 } //#my-bounded-config - + //#my-balancing-config my-balancing-dispatcher { type = BalancingDispatcher diff --git a/akka-docs/scala/dispatchers.rst b/akka-docs/scala/dispatchers.rst index dc3dd50e12..a5a4453e89 100644 --- a/akka-docs/scala/dispatchers.rst +++ b/akka-docs/scala/dispatchers.rst @@ -6,7 +6,7 @@ Dispatchers (Scala) .. sidebar:: Contents .. contents:: :local: - + The Dispatcher is an important piece that allows you to configure the right semantics and parameters for optimal performance, throughput and scalability. Different Actors have different needs. Akka supports dispatchers for both event-driven lightweight threads, allowing creation of millions of threads on a single workstation, and thread-based Actors, where each dispatcher is bound to a dedicated OS thread. 
@@ -127,9 +127,9 @@ Work-sharing event-based The ``BalancingDispatcher`` is a variation of the ``Dispatcher`` in which Actors of the same type can be set up to share this dispatcher and during execution time the different actors will steal messages from other actors if they -have less messages to process. +have fewer messages to process. Although the technique used in this implementation is commonly known as "work stealing", the actual implementation is probably -best described as "work donating" because the actor of which work is being stolen takes the initiative. +best described as "work donating" because the actor from which work is being stolen takes the initiative. This can be a great way to improve throughput at the cost of a little higher latency. .. includecode:: code/akka/docs/dispatcher/DispatcherDocSpec.scala#my-balancing-config @@ -152,8 +152,9 @@ if not specified otherwise. akka { actor { default-dispatcher { - task-queue-size = 1000 # If negative (or zero) then an unbounded mailbox is used (default) - # If positive then a bounded mailbox is used and the capacity is set to the number specified + # If negative (or zero) then an unbounded mailbox is used (default) + # If positive then a bounded mailbox is used and the capacity is set to the number specified + task-queue-size = 1000 } } } diff --git a/akka-docs/scala/logging.rst b/akka-docs/scala/logging.rst index e5cc7597a9..35f4e838ff 100644 --- a/akka-docs/scala/logging.rst +++ b/akka-docs/scala/logging.rst @@ -21,7 +21,7 @@ For convenience you can mixin the ``log`` member into actors, instead of definin .. code-block:: scala - class MyActor extends Actor with akka.actor.ActorLogging { + class MyActor extends Actor with akka.actor.ActorLogging { The second parameter to the ``Logging`` is the source of this logging channel. 
The source object is translated to a String according to the following rules: @@ -31,14 +31,14 @@ The source object is translated to a String according to the following rules: * in case of a class an approximation of its simpleName * and in all other cases the simpleName of its class -The log message may contain argument placeholders ``{}``, which will be substituted if the log level +The log message may contain argument placeholders ``{}``, which will be substituted if the log level is enabled. Event Handler ============= -Logging is performed asynchronously through an event bus. You can configure which event handlers that should -subscribe to the logging events. That is done using the 'event-handlers' element in the :ref:`configuration`. +Logging is performed asynchronously through an event bus. You can configure which event handlers should +subscribe to the logging events. That is done using the 'event-handlers' element in the :ref:`configuration`. Here you can also define the log level. .. code-block:: ruby @@ -46,10 +46,11 @@ Here you can also define the log level. akka { # Event handlers to register at boot time (Logging$DefaultLogger logs to STDOUT) event-handlers = ["akka.event.Logging$DefaultLogger"] - loglevel = "DEBUG" # Options: ERROR, WARNING, INFO, DEBUG + # Options: ERROR, WARNING, INFO, DEBUG + loglevel = "DEBUG" } -The default one logs to STDOUT and is registered by default. It is not intended to be used for production. There is also an :ref:`slf4j-scala` +The default one logs to STDOUT and is registered by default. It is not intended to be used for production. There is also an :ref:`slf4j-scala` event handler available in the 'akka-slf4j' module. Example of creating a listener: @@ -63,7 +64,7 @@ Example of creating a listener: SLF4J ===== -Akka provides an event handler for `SL4FJ `_. This module is available in the 'akka-slf4j.jar'. +Akka provides an event handler for `SLF4J `_. This module is available in the 'akka-slf4j.jar'. 
It has one single dependency; the slf4j-api jar. In runtime you also need a SLF4J backend, we recommend `Logback `_: .. code-block:: scala @@ -71,10 +72,10 @@ It has one single dependency; the slf4j-api jar. In runtime you also need a SLF4 lazy val logback = "ch.qos.logback" % "logback-classic" % "1.0.0" % "runtime" -You need to enable the Slf4jEventHandler in the 'event-handlers' element in -the :ref:`configuration`. Here you can also define the log level of the event bus. +You need to enable the Slf4jEventHandler in the 'event-handlers' element in +the :ref:`configuration`. Here you can also define the log level of the event bus. More fine grained log levels can be defined in the configuration of the SLF4J backend -(e.g. logback.xml). The String representation of the source object that is used when +(e.g. logback.xml). The String representation of the source object that is used when creating the ``LoggingAdapter`` correspond to the name of the SL4FJ logger. .. code-block:: ruby @@ -91,9 +92,9 @@ Since the logging is done asynchronously the thread in which the logging was per Mapped Diagnostic Context (MDC) with attribute name ``sourceThread``. With Logback the thread name is available with ``%X{sourceThread}`` specifier within the pattern layout configuration:: - - - %date{ISO8601} %-5level %logger{36} %X{sourceThread} - %msg%n - - + + + %date{ISO8601} %-5level %logger{36} %X{sourceThread} - %msg%n + + diff --git a/akka-docs/scala/typed-actors.rst b/akka-docs/scala/typed-actors.rst index 0e70acd282..5f6920138a 100644 --- a/akka-docs/scala/typed-actors.rst +++ b/akka-docs/scala/typed-actors.rst @@ -4,7 +4,7 @@ Typed Actors (Scala) .. sidebar:: Contents .. contents:: :local: - + The Typed Actors are implemented through `Typed Actors `_. It uses AOP through `AspectWerkz `_ to turn regular POJOs into asynchronous non-blocking Actors with semantics of the Actor Model. 
Each method dispatch is turned into a message that is put on a queue to be processed by the Typed Actor sequentially one by one. If you are using the `Spring Framework `_ then take a look at Akka's `Spring integration `_. @@ -182,7 +182,8 @@ Akka can help you in this regard. It allows you to turn on an option for seriali akka { actor { - serialize-messages = on # does a deep clone of messages to ensure immutability + # does a deep clone of messages to ensure immutability + serialize-messages = on } } diff --git a/akka-durable-mailboxes/akka-beanstalk-mailbox/src/main/resources/reference.conf b/akka-durable-mailboxes/akka-beanstalk-mailbox/src/main/resources/reference.conf index 3e6b914bf7..82beeeddd8 100644 --- a/akka-durable-mailboxes/akka-beanstalk-mailbox/src/main/resources/reference.conf +++ b/akka-durable-mailboxes/akka-beanstalk-mailbox/src/main/resources/reference.conf @@ -3,7 +3,7 @@ ################################################## # This the reference config file has all the default settings. -# Make your edits/overrides in your akka.conf. +# Make your edits/overrides in your application.conf. akka { actor { diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf b/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf index f81f8995f9..93ee52fcc7 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf @@ -3,7 +3,7 @@ ############################################# # This the reference config file has all the default settings. -# Make your edits/overrides in your akka.conf. +# Make your edits/overrides in your application.conf. 
akka { actor { diff --git a/akka-durable-mailboxes/akka-mongo-mailbox/src/main/resources/reference.conf b/akka-durable-mailboxes/akka-mongo-mailbox/src/main/resources/reference.conf index 09a0c316ec..991f638053 100644 --- a/akka-durable-mailboxes/akka-mongo-mailbox/src/main/resources/reference.conf +++ b/akka-durable-mailboxes/akka-mongo-mailbox/src/main/resources/reference.conf @@ -3,19 +3,23 @@ ################################################ # This the reference config file has all the default settings. -# Make your edits/overrides in your akka.conf. +# Make your edits/overrides in your application.conf. akka { actor { mailbox { mongodb { + # Any specified collection name will be used as a prefix for collections that use durable mongo mailboxes - uri = "mongodb://localhost/akka.mailbox" # Follow Mongo URI Spec - http://www.mongodb.org/display/DOCS/Connections + # Follow Mongo URI Spec - http://www.mongodb.org/display/DOCS/Connections + uri = "mongodb://localhost/akka.mailbox" # Configurable timeouts for certain ops timeout { - read = 3000ms # time to wait for a read to succeed before timing out the future - write = 3000ms # time to wait for a write to succeed before timing out the future + # time to wait for a read to succeed before timing out the future + read = 3000ms + # time to wait for a write to succeed before timing out the future + write = 3000ms } } } diff --git a/akka-durable-mailboxes/akka-redis-mailbox/src/main/resources/reference.conf b/akka-durable-mailboxes/akka-redis-mailbox/src/main/resources/reference.conf index 20f1d03abd..7b12dc24b2 100644 --- a/akka-durable-mailboxes/akka-redis-mailbox/src/main/resources/reference.conf +++ b/akka-durable-mailboxes/akka-redis-mailbox/src/main/resources/reference.conf @@ -3,7 +3,7 @@ ############################################## # This the reference config file has all the default settings. -# Make your edits/overrides in your akka.conf. +# Make your edits/overrides in your application.conf. 
akka { actor { diff --git a/akka-durable-mailboxes/akka-zookeeper-mailbox/src/main/resources/reference.conf b/akka-durable-mailboxes/akka-zookeeper-mailbox/src/main/resources/reference.conf index b31de45f76..3dfea7a944 100644 --- a/akka-durable-mailboxes/akka-zookeeper-mailbox/src/main/resources/reference.conf +++ b/akka-durable-mailboxes/akka-zookeeper-mailbox/src/main/resources/reference.conf @@ -3,7 +3,7 @@ ################################################## # This the reference config file has all the default settings. -# Make your edits/overrides in your akka.conf. +# Make your edits/overrides in your application.conf. akka { actor { diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 4083a64ea2..1838ae47fa 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -3,7 +3,7 @@ ##################################### # This the reference config file has all the default settings. -# Make your edits/overrides in your akka.conf. +# Make your edits/overrides in your application.conf. akka { @@ -13,18 +13,22 @@ akka { default { - remote = "" # if this is set to a valid remote address, the named actor will be deployed at that node - # e.g. "akka://sys@host:port" + # if this is set to a valid remote address, the named actor will be deployed at that node + # e.g. "akka://sys@host:port" + remote = "" target { - nodes = [] # A list of hostnames and ports for instantiating the children of a non-direct router - # The format should be on "akka://sys@host:port", where: - # - sys is the remote actor system name - # - hostname can be either hostname or IP address the remote actor should connect to - # - port should be the port for the remote server on the other node - # The number of actor instances to be spawned is still taken from the nr-of-instances - # setting as for local routers; the instances will be distributed round-robin among the - # given nodes. 
+ + # A list of hostnames and ports for instantiating the children of a non-direct router + # The format should be of the form "akka://sys@host:port", where: + # - sys is the remote actor system name + # - hostname can be either hostname or IP address the remote actor should connect to + # - port should be the port for the remote server on the other node + # The number of actor instances to be spawned is still taken from the nr-of-instances + # setting as for local routers; the instances will be distributed round-robin among the + # given nodes. + nodes = [] + } } } @@ -35,50 +39,68 @@ akka { use-compression = off - secure-cookie = "" # Generate your own with '$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh' - # or using 'akka.util.Crypt.generateSecureCookie' + # Generate your own with '$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh' + # or using 'akka.util.Crypt.generateSecureCookie' + secure-cookie = "" - remote-daemon-ack-timeout = 30s # Timeout for ACK of cluster operations, lik checking actor out etc. + # Timeout for ACK of cluster operations, like checking an actor out etc. + remote-daemon-ack-timeout = 30s - use-passive-connections = on # Reuse inbound connections for outbound messages + # Reuse inbound connections for outbound messages + use-passive-connections = on - failure-detector { # accrual failure detection config - threshold = 8 # defines the failure detector threshold - # A low threshold is prone to generate many wrong suspicions but ensures a - # quick detection in the event of a real crash. Conversely, a high threshold - # generates fewer mistakes but needs more time to detect actual crashes + # accrual failure detection config + failure-detector { + # defines the failure detector threshold + # A low threshold is prone to generate many wrong suspicions but ensures a + # quick detection in the event of a real crash. 
Conversely, a high threshold + # generates fewer mistakes but needs more time to detect actual crashes + threshold = 8 max-sample-size = 1000 } - + gossip { initialDelay = 5s frequency = 1s } - - compute-grid-dispatcher { # The dispatcher used for remote system messages - name = ComputeGridDispatcher # defaults to same settings as default-dispatcher + + # The dispatcher used for remote system messages + compute-grid-dispatcher { + # defaults to same settings as default-dispatcher + name = ComputeGridDispatcher } server { - hostname = "" # The hostname or ip to bind the remoting to, InetAddress.getLocalHost.getHostAddress is used if empty - port = 2552 # The default remote server port clients should connect to. Default is 2552 (AKKA) - message-frame-size = 1 MiB # Increase this if you want to be able to send messages with large payloads - connection-timeout = 120s # Timeout duration - require-cookie = off # Should the remote server require that it peers share the same secure-cookie (defined in the 'remote' section)? - untrusted-mode = off # Enable untrusted mode for full security of server managed actors, allows untrusted clients to connect. - backlog = 4096 # Sets the size of the connection backlog + # The hostname or ip to bind the remoting to, InetAddress.getLocalHost.getHostAddress is used if empty + hostname = "" + # The default remote server port clients should connect to. Default is 2552 (AKKA) + port = 2552 + # Increase this if you want to be able to send messages with large payloads + message-frame-size = 1 MiB + # Timeout duration + connection-timeout = 120s + # Should the remote server require that its peers share the same secure-cookie (defined in the 'remote' section)? + require-cookie = off + # Enable untrusted mode for full security of server managed actors, allows untrusted clients to connect. 
+ untrusted-mode = off + # Sets the size of the connection backlog + backlog = 4096 } client { buffering { - retry-message-send-on-failure = off # Should message buffering on remote client error be used (buffer flushed on successful reconnect) - capacity = -1 # If negative (or zero) then an unbounded mailbox is used (default) - # If positive then a bounded mailbox is used and the capacity is set using the property + # Should message buffering on remote client error be used (buffer flushed on successful reconnect) + retry-message-send-on-failure = off + # If negative (or zero) then an unbounded mailbox is used (default) + # If positive then a bounded mailbox is used and the capacity is set using the property + capacity = -1 + } reconnect-delay = 5s read-timeout = 3600s message-frame-size = 1 MiB - reconnection-time-window = 600s # Maximum time window that a client should try to reconnect for + # Maximum time window that a client should try to reconnect for + reconnection-time-window = 600s } } diff --git a/akka-stm/src/main/resources/reference.conf b/akka-stm/src/main/resources/reference.conf index 98a3e70d5d..05aa9b433c 100644 --- a/akka-stm/src/main/resources/reference.conf +++ b/akka-stm/src/main/resources/reference.conf @@ -3,19 +3,21 @@ ################################## # This the reference config file has all the default settings. -# Make your edits/overrides in your akka.conf. +# Make your edits/overrides in your application.conf. 
akka { stm { - fair = on # Should global transactions be fair or non-fair (non fair yield better performance) + # Should global transactions be fair or non-fair (non fair yield better performance) + fair = on max-retries = 1000 - timeout = 5s # Default timeout for blocking transactions and transaction set - write-skew = on - blocking-allowed = off - interruptible = off - speculative = on - quick-release = on + # Default timeout for blocking transactions and transaction set + timeout = 5s + write-skew = on + blocking-allowed = off + interruptible = off + speculative = on + quick-release = on propagation = "requires" trace-level = "none" } diff --git a/akka-testkit/src/main/resources/reference.conf b/akka-testkit/src/main/resources/reference.conf index 0aa150e4b5..d2a4859c30 100644 --- a/akka-testkit/src/main/resources/reference.conf +++ b/akka-testkit/src/main/resources/reference.conf @@ -3,12 +3,15 @@ ###################################### # This the reference config file has all the default settings. -# Make your edits/overrides in your akka.conf. +# Make your edits/overrides in your application.conf. akka { test { - timefactor = 1.0 # factor by which to scale timeouts during tests, e.g. to account for shared build system load - filter-leeway = 3s # duration of EventFilter.intercept waits after the block is finished until all required messages are received - single-expect-default = 3s # duration to wait in expectMsg and friends outside of within() block by default + # factor by which to scale timeouts during tests, e.g. to account for shared build system load + timefactor = 1.0 + # duration of EventFilter.intercept waits after the block is finished until all required messages are received + filter-leeway = 3s + # duration to wait in expectMsg and friends outside of within() block by default + single-expect-default = 3s } }