Merge branch 'master' into wip-2349-multi-node-and-multi-jvm-doc-ban

Conflicts:
	project/plugins.sbt
This commit is contained in:
Björn Antonsson 2012-09-21 17:00:34 +02:00
commit 78597ed7c1
317 changed files with 1302 additions and 805 deletions

View file

@@ -0,0 +1,25 @@
package docs.osgi
case object SomeMessage
class SomeActor extends akka.actor.Actor {
def receive = { case SomeMessage => }
}
//#Activator
import akka.actor.{ Props, ActorSystem }
import org.osgi.framework.BundleContext
import akka.osgi.ActorSystemActivator
class Activator extends ActorSystemActivator {
def configure(context: BundleContext, system: ActorSystem) {
// optionally register the ActorSystem in the OSGi Service Registry
registerService(context, system)
val someActor = system.actorOf(Props[SomeActor], name = "someName")
someActor ! SomeMessage
}
}
//#Activator

View file

@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
xmlns:akka="http://akka.io/xmlns/blueprint/v1.0.0">
<akka:actor-system name="BlueprintSystem" />
<akka:actor-system name="BlueprintSystemWithConfig">
<akka:config>
some.config {
key=value
}
</akka:config>
</akka:actor-system>
</blueprint>

View file

@@ -0,0 +1,9 @@
Additional Information
======================
.. toctree::
:maxdepth: 2
recipes
language-bindings
osgi

View file

@@ -0,0 +1,17 @@
Other Language Bindings
=======================
JRuby
-----
Read more here: `<https://github.com/iconara/mikka>`_.
Groovy/Groovy++
---------------
Read more here: `<https://gist.github.com/620439>`_.
Clojure
-------
Read more here: `<http://blog.darevay.com/2011/06/clojure-and-akka-a-match-made-in/>`_.

View file

@@ -0,0 +1,27 @@
Akka in OSGi
============
Configuring the OSGi Framework
------------------------------
To use Akka in an OSGi environment, the ``org.osgi.framework.bootdelegation``
property must be set to always delegate the ``sun.misc`` package to the boot classloader
instead of resolving it through the normal OSGi class space.
Activator
---------
To bootstrap Akka inside an OSGi environment, you can use the ``akka.osgi.ActorSystemActivator`` class
to conveniently set up the ActorSystem.
.. includecode:: code/osgi/Activator.scala#Activator
Blueprint
---------
For the Apache Aries Blueprint implementation, there's also a namespace handler available. The namespace URI
is http://akka.io/xmlns/blueprint/v1.0.0 and it can be used to set up an ActorSystem.
.. includecode:: code/osgi/blueprint.xml

View file

@@ -0,0 +1,4 @@
Here is a list of recipes for all things Akka
=============================================
* `Martin Krasser's Akka Event Sourcing example <https://github.com/krasserm/eventsourcing-example>`_

View file

@@ -0,0 +1,481 @@
.. _cluster_usage:
###############
Cluster Usage
###############
.. note:: This module is :ref:`experimental <experimental>`. This document describes how to use the features implemented so far. More features are coming in Akka Coltrane. Track progress of the Coltrane milestone in `Assembla <http://www.assembla.com/spaces/akka/tickets>`_ and the `Roadmap <https://docs.google.com/document/d/18W9-fKs55wiFNjXL9q50PYOnR7-nnsImzJqHOPPbM4E/edit?hl=en_US>`_.
For an introduction to the Akka Cluster concepts please see :ref:`cluster`.
Preparing Your Project for Clustering
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The Akka cluster is a separate jar file. Make sure that you have the following dependency in your project:
.. parsed-literal::
"com.typesafe.akka" %% "akka-cluster" % "@version@" @crossString@
If you are using the latest nightly build you should pick a timestamped Akka
version from
`<http://repo.typesafe.com/typesafe/snapshots/com/typesafe/akka/akka-cluster-experimental_@binVersion@/>`_.
We recommend against using ``SNAPSHOT`` in order to obtain stable builds.
A Simple Cluster Example
^^^^^^^^^^^^^^^^^^^^^^^^
The following small program together with its configuration starts an ``ActorSystem``
with the Cluster extension enabled. It joins the cluster and logs some membership events.
Try it out:
1. Add the following ``application.conf`` in your project, place it in ``src/main/resources``:
.. literalinclude:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf
:language: none
To enable cluster capabilities in your Akka project you should, at a minimum, add the :ref:`remoting-scala`
settings, but with ``akka.cluster.ClusterActorRefProvider`` as the ``akka.actor.provider``.
The ``akka.cluster.seed-nodes`` setting and the cluster extension should normally also be added to your
``application.conf`` file.
The seed nodes are configured contact points for the initial, automatic join of the cluster.
Note that if you are going to start the nodes on different machines you need to specify the
IP addresses or host names of the machines in ``application.conf`` instead of ``127.0.0.1``.
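If the sample configuration file is not at hand, a minimal ``application.conf`` along these
lines should be close (a sketch only; the actor system name ``ClusterSystem``, the ports, and the
exact remoting keys are assumptions, and the included sample file above is authoritative)::

  akka {
    actor.provider = "akka.cluster.ClusterActorRefProvider"
    remote.netty {
      hostname = "127.0.0.1"
      port = 0   # 0 picks a random available port; the sample passes a fixed port per node
    }
    cluster.seed-nodes = [
      "akka://ClusterSystem@127.0.0.1:2551",
      "akka://ClusterSystem@127.0.0.1:2552"]
  }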
2. Add the following main program to your project, place it in ``src/main/scala``:
.. literalinclude:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala
:language: scala
3. Start the first seed node. Open an sbt session in one terminal window and run::
run-main sample.cluster.simple.SimpleClusterApp 2551
2551 corresponds to the port of the first seed-nodes element in the configuration.
In the log output you see that the cluster node has been started and changed status to 'Up'.
4. Start the second seed node. Open an sbt session in another terminal window and run::
run-main sample.cluster.simple.SimpleClusterApp 2552
2552 corresponds to the port of the second seed-nodes element in the configuration.
In the log output you see that the cluster node has been started, joins the other seed node
and becomes a member of the cluster. Its status changes to 'Up'.
Switch over to the first terminal window and see in the log output that the member joined.
5. Start another node. Open an sbt session in yet another terminal window and run::
run-main sample.cluster.simple.SimpleClusterApp
Now you don't need to specify the port number, and it will use a random available port.
It joins one of the configured seed nodes. Look at the log output in the different terminal
windows.
Start even more nodes in the same way, if you like.
6. Shut down one of the nodes by pressing 'ctrl-c' in one of the terminal windows.
The other nodes will detect the failure after a while, which you can see in the log
output in the other terminals.
Look at the source code of the program again. What it does is create an actor
and register it as a subscriber of certain cluster events. It is first notified with
a snapshot event, ``CurrentClusterState``, that holds the full state information of
the cluster. After that it receives events for changes that happen in the cluster.
Automatic vs. Manual Joining
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You may decide whether joining the cluster should be done automatically or manually.
By default it is automatic and you need to define the seed nodes in configuration
so that a new node has an initial contact point. When a new node is started it
sends a message to all seed nodes and then sends a join command to the one that
answers first. If none of the seed nodes reply (they might not be started yet)
it retries this procedure until successful or shutdown.
There is one thing to be aware of regarding the seed node configured as the
first element in the ``seed-nodes`` configuration list.
The seed nodes can be started in any order and it is not necessary to have all
seed nodes running, but the first seed node must be started when initially
starting a cluster, otherwise the other seed-nodes will not become initialized
and no other node can join the cluster. Once more than two seed nodes have been
started it is no problem to shut down the first seed node. If it goes down it
must be manually joined to the cluster again.
Automatic joining of the first seed node is not possible; it would only join
itself. It is only the first seed node that has this restriction.
You can disable automatic joining with configuration::

  akka.cluster.auto-join = off
Then you need to join manually, using :ref:`cluster_jmx` or :ref:`cluster_command_line`.
You can join any node in the cluster. It doesn't have to be configured as
a seed node. If you are not using auto-join there is no need to configure
seed nodes at all.
Joining can also be performed programmatically with ``Cluster(system).join``.
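For example (a sketch; the actor system name, host, and port are assumptions)::

  import akka.actor.Address
  import akka.cluster.Cluster

  // Any node already in the cluster can be the target; it need not be a seed node
  val address = Address("akka", "ClusterSystem", "127.0.0.1", 2551)
  Cluster(system).join(address)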
Automatic vs. Manual Downing
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When a member is considered by the failure detector to be unreachable the
leader is not allowed to perform its duties, such as changing the status of
newly joining members to 'Up'. The status of the unreachable member must be
changed to 'Down'. This can be performed automatically or manually. By
default it must be done manually, using :ref:`cluster_jmx` or
:ref:`cluster_command_line`.
It can also be performed programmatically with ``Cluster(system).down``.
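For example (a sketch; the address is assumed to be that of the unreachable member)::

  import akka.actor.Address
  import akka.cluster.Cluster

  // Mark the unreachable member as 'Down' so that the leader can resume its duties
  val unreachableMember = Address("akka", "ClusterSystem", "127.0.0.1", 2552)
  Cluster(system).down(unreachableMember)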
You can enable automatic downing with configuration::

  akka.cluster.auto-down = on
Be aware that using auto-down implies that two separate clusters will
automatically be formed in the case of a network partition. That might be
desired by some applications but not by others.
Subscribe to Cluster Events
^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can subscribe to change notifications of the cluster membership by using
``Cluster(system).subscribe``. A snapshot of the full state,
``akka.cluster.ClusterEvent.CurrentClusterState``, is sent to the subscriber
as the first event, followed by events for incremental updates.
There are several types of change events; consult the API documentation
of the classes that extend ``akka.cluster.ClusterEvent.ClusterDomainEvent``
for details about the events.
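A minimal subscriber could look like the following sketch (only ``MemberUp`` is handled
here; the other incremental event types are omitted, and the exact event classes may
vary with the Akka version)::

  import akka.actor.Actor
  import akka.cluster.Cluster
  import akka.cluster.ClusterEvent.{ CurrentClusterState, MemberUp }

  class SimpleClusterListener extends Actor {
    override def preStart(): Unit =
      Cluster(context.system).subscribe(self, classOf[MemberUp])

    override def postStop(): Unit =
      Cluster(context.system).unsubscribe(self)

    def receive = {
      case state: CurrentClusterState =>
        println("Current members: " + state.members)
      case MemberUp(member) =>
        println("Member is Up: " + member)
    }
  }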
Worker Dial-in Example
----------------------
Let's take a look at an example that illustrates how workers, here named *backend*,
can detect and register to new master nodes, here named *frontend*.
The example application provides a service to transform text. When some text
is sent to one of the frontend services, it will be delegated to one of the
backend workers, which performs the transformation job, and sends the result back to
the original client. New backend nodes, as well as new frontend nodes, can be
added or removed to the cluster dynamically.
In this example the following imports are used:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/transformation/TransformationSample.scala#imports
Messages:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/transformation/TransformationSample.scala#messages
The backend worker that performs the transformation job:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/transformation/TransformationSample.scala#backend
Note that the ``TransformationBackend`` actor subscribes to cluster events to detect new,
potential frontend nodes, and sends them a registration message so that they know
that they can use the backend worker.
The frontend that receives user jobs and delegates to one of the registered backend workers:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/transformation/TransformationSample.scala#frontend
Note that the ``TransformationFrontend`` actor watches the registered backend workers,
to be able to remove them from its list of available backend workers.
Death watch uses the cluster failure detector for nodes in the cluster, i.e. it detects
network failures and JVM crashes, in addition to graceful termination of the watched
actor.
This example is included in ``akka-samples/akka-sample-cluster``
and you can try it out by starting nodes in different terminal windows. For example, starting 2
frontend nodes and 3 backend nodes::
sbt
project akka-sample-cluster-experimental
run-main sample.cluster.transformation.TransformationFrontend 2551
run-main sample.cluster.transformation.TransformationBackend 2552
run-main sample.cluster.transformation.TransformationBackend
run-main sample.cluster.transformation.TransformationBackend
run-main sample.cluster.transformation.TransformationFrontend
.. note:: The above example should probably be designed as two separate, frontend/backend, clusters, when there is a `cluster client for decoupling clusters <https://www.assembla.com/spaces/akka/tickets/1165>`_.
Failure Detector
^^^^^^^^^^^^^^^^
The nodes in the cluster monitor each other by sending heartbeats to detect if a node is
unreachable from the rest of the cluster. The heartbeat arrival times are interpreted
by an implementation of
`The Phi Accrual Failure Detector <http://ddg.jaist.ac.jp/pub/HDY+04.pdf>`_.
The suspicion level of failure is given by a value called *phi*.
The basic idea of the phi failure detector is to express the value of *phi* on a scale that
is dynamically adjusted to reflect current network conditions.
The value of *phi* is calculated as::
phi = -log10(1 - F(timeSinceLastHeartbeat))
where F is the cumulative distribution function of a normal distribution with mean
and standard deviation estimated from historical heartbeat inter-arrival times.
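As a rough illustration, the calculation can be sketched like this, using a common
closed-form approximation of the normal CDF (an illustration only, not necessarily the
exact code used by Akka)::

  def phi(timeSinceLastHeartbeat: Double, mean: Double, stdDev: Double): Double = {
    val y = (timeSinceLastHeartbeat - mean) / stdDev
    // Logistic approximation of the cumulative normal distribution F
    val f = 1.0 / (1.0 + math.exp(-y * (1.5976 + 0.070566 * y * y)))
    -math.log10(1.0 - f)
  }

  // With mean 1000 ms and standard deviation 100 ms phi grows quickly:
  // phi(1000, 1000, 100) ~ 0.3
  // phi(1300, 1000, 100) ~ 2.9
  // phi(1500, 1000, 100) ~ 7.3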
In the :ref:`cluster_configuration` you can adjust the ``akka.cluster.failure-detector.threshold``
to define when a *phi* value is considered to be a failure.
A low ``threshold`` is prone to generate many false positives but ensures
a quick detection in the event of a real crash. Conversely, a high ``threshold``
generates fewer mistakes but needs more time to detect actual crashes. The
default ``threshold`` is 8 and is appropriate for most situations. However in
cloud environments, such as Amazon EC2, the value could be increased to 12 in
order to account for network issues that sometimes occur on such platforms.
The following chart illustrates how *phi* increases with increasing time since the
previous heartbeat.
.. image:: images/phi1.png
Phi is calculated from the mean and standard deviation of historical
inter-arrival times. The previous chart is an example for a standard deviation
of 200 ms. If the heartbeats arrive with less deviation the curve becomes steeper,
i.e. it is possible to determine failure more quickly. The curve looks like this for
a standard deviation of 100 ms.
.. image:: images/phi2.png
To be able to survive sudden abnormalities, such as garbage collection pauses and
transient network failures, the failure detector is configured with a margin,
``akka.cluster.failure-detector.acceptable-heartbeat-pause``. You may want to
adjust the :ref:`cluster_configuration` of this depending on your environment.
This is how the curve looks for ``acceptable-heartbeat-pause`` configured to
3 seconds.
.. image:: images/phi3.png
Cluster Aware Routers
^^^^^^^^^^^^^^^^^^^^^
All :ref:`routers <routing-scala>` can be made aware of member nodes in the cluster, i.e.
deploying new routees or looking up routees on nodes in the cluster.
When a node becomes unavailable or leaves the cluster the routees of that node are
automatically unregistered from the router. When new nodes join the cluster additional
routees are added to the router, according to the configuration.
When using a router with routees looked up on the cluster member nodes, i.e. the routees
are already running, the configuration for a router looks like this:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala#router-lookup-config
It's the relative actor path defined in ``routees-path`` that identifies which actor to look up.
``nr-of-instances`` defines the total number of routees in the cluster, but there will not be
more than one per node. Setting ``nr-of-instances`` to a high value will result in new routees
being added to the router when nodes join the cluster.
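If the sample sources are not at hand, such a lookup configuration typically looks
something like this sketch (the deployment path, actor names, and router type are
assumptions; see the included sample for the authoritative settings)::

  akka.actor.deployment {
    /statsService/workerRouter {
      router = consistent-hashing
      nr-of-instances = 100
      cluster {
        enabled = on
        routees-path = "/user/statsWorker"
        allow-local-routees = on
      }
    }
  }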
The same type of router could also have been defined in code:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#router-lookup-in-code
When using a router with routees created and deployed on the cluster member nodes
the configuration for a router looks like this:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala#router-deploy-config
``nr-of-instances`` defines the total number of routees in the cluster, but the number of routees
per node, ``max-nr-of-instances-per-node``, will not be exceeded. Setting ``nr-of-instances``
to a high value will result in additional routees being created and deployed when new nodes join
the cluster.
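Again as a sketch (the deployment path and the exact values are assumptions), a deploy
configuration typically adds the per-node cap and creates the routees remotely instead of
looking them up::

  akka.actor.deployment {
    /singleton/statsService/workerRouter {
      router = consistent-hashing
      nr-of-instances = 100
      cluster {
        enabled = on
        max-nr-of-instances-per-node = 3
        allow-local-routees = off
      }
    }
  }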
The same type of router could also have been defined in code:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#router-deploy-in-code
See the :ref:`cluster_configuration` section for further descriptions of the settings.
Router Example
--------------
Let's take a look at how to use cluster aware routers.
The example application provides a service to calculate statistics for a text.
When some text is sent to the service it splits it into words, and delegates the task
of counting the number of characters in each word to a separate worker, a routee of a router.
The character count for each word is sent back to an aggregator that calculates
the average number of characters per word when all results have been collected.
In this example we use the following imports:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#imports
Messages:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#messages
The worker that counts the number of characters in each word:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#worker
The service that receives text from users and splits it up into words, delegates to workers and aggregates:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#service
Note, nothing cluster specific so far, just plain actors.
We can use these actors with two different types of router setup: either with lookup of routees,
or with creation and deployment of routees. Remember, routees are the workers in this case.
We start with the router setup with lookup of routees. All nodes start ``StatsService`` and
``StatsWorker`` actors and the router is configured with ``routees-path``:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#start-router-lookup
This means that user requests can be sent to ``StatsService`` on any node and it will use
``StatsWorker`` on all nodes. There can only be one worker per node, but that worker could easily
fan out to local children if more parallelism is needed.
This example is included in ``akka-samples/akka-sample-cluster``
and you can try it out by starting nodes in different terminal windows. For example, starting 3
service nodes and 1 client::
run-main sample.cluster.stats.StatsSample 2551
run-main sample.cluster.stats.StatsSample 2552
run-main sample.cluster.stats.StatsSampleClient
run-main sample.cluster.stats.StatsSample
The above setup is nice for this example, but we will also take a look at how to use
a single master node that creates and deploys workers. To keep track of a single
master we need one additional actor:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#facade
The ``StatsFacade`` receives text from users and delegates to the current ``StatsService``, the single
master. It listens to cluster events to create or look up the ``StatsService`` depending on whether
it is on the same node or on another node. We run the master on the same node as the leader of
the cluster members, which is nothing more than the node whose address is currently sorted first in the member ring,
i.e. it can change when new nodes join or when the current leader leaves.
All nodes start ``StatsFacade`` and the router is now configured like this:
.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala#start-router-deploy
This example is included in ``akka-samples/akka-sample-cluster``
and you can try it out by starting nodes in different terminal windows. For example, starting 3
service nodes and 1 client::
run-main sample.cluster.stats.StatsSampleOneMaster 2551
run-main sample.cluster.stats.StatsSampleOneMaster 2552
run-main sample.cluster.stats.StatsSampleOneMasterClient
run-main sample.cluster.stats.StatsSampleOneMaster
.. note:: The above example, especially the last part, will be simplified when the cluster handles automatic actor partitioning.
.. _cluster_jmx:
JMX
^^^
Information and management of the cluster is available as JMX MBeans with the root name ``akka.Cluster``.
The JMX information can be displayed with an ordinary JMX console such as JConsole or JVisualVM.
From JMX you can:
* see which members are part of the cluster
* see the status of this node
* join this node to another node in the cluster
* mark any node in the cluster as down
* tell any node in the cluster to leave
Member nodes are identified by their address, in the format ``akka://actor-system-name@hostname:port``.
.. _cluster_command_line:
Command Line Management
^^^^^^^^^^^^^^^^^^^^^^^
The cluster can be managed with the script `bin/akka-cluster` provided in the
Akka distribution.
Run it without parameters to see instructions about how to use the script::
Usage: bin/akka-cluster <node-hostname:jmx-port> <command> ...
Supported commands are:
join <node-url> - Sends a request to JOIN the node with the specified URL
leave <node-url> - Sends a request for node with URL to LEAVE the cluster
down <node-url> - Sends a request for marking node with URL as DOWN
member-status - Asks the member node for its current status
cluster-status - Asks the cluster for its current status (member ring,
unavailable nodes, meta data etc.)
leader - Asks the cluster who the current leader is
is-singleton - Checks if the cluster is a singleton cluster (single
node cluster)
is-available - Checks if the member node is available
is-running - Checks if the member node is running
has-convergence - Checks if there is a cluster convergence
Where the <node-url> should be in the format 'akka://actor-system-name@hostname:port'
Examples: bin/akka-cluster localhost:9999 is-available
bin/akka-cluster localhost:9999 join akka://MySystem@darkstar:2552
bin/akka-cluster localhost:9999 cluster-status
To be able to use the script you must enable remote monitoring and management when starting the JVMs of the cluster nodes,
as described in `Monitoring and Management Using JMX Technology <http://docs.oracle.com/javase/6/docs/technotes/guides/management/agent.html>`_.
Example of system properties to enable remote monitoring and management::
java -Dcom.sun.management.jmxremote.port=9999 \
-Dcom.sun.management.jmxremote.authenticate=false \
-Dcom.sun.management.jmxremote.ssl=false
.. _cluster_configuration:
Configuration
^^^^^^^^^^^^^
There are several configuration properties for the cluster. We refer to the following
reference file for more information:
.. literalinclude:: ../../../akka-cluster/src/main/resources/reference.conf
:language: none
Cluster Scheduler
-----------------
It is recommended that you change the ``tick-duration`` of the default scheduler
to 33 ms or less when using the cluster, unless you need it configured to a longer
duration for other reasons. If you don't do this,
a dedicated scheduler will be used for the periodic tasks of the cluster, which
introduces the extra overhead of another thread.
::
# shorter tick-duration of default scheduler when using cluster
akka.scheduler.tick-duration = 33ms

View file

@@ -0,0 +1,644 @@
.. _cluster:
######################
Cluster Specification
######################
.. note:: This module is :ref:`experimental <experimental>`. This document describes the design concepts of the new clustering coming in Akka Coltrane. Not everything described here is implemented yet.
Intro
=====
Akka Cluster provides a fault-tolerant, elastic, decentralized peer-to-peer
cluster with no single point of failure (SPOF) or single point of bottleneck
(SPOB). It implements a Dynamo-style system using gossip protocols, automatic
failure detection, automatic partitioning, handoff, and cluster rebalancing, but
with some differences due to the fact that it is not just managing passive data,
but actors - active, sometimes stateful components that also have requirements
on message ordering, the number of active instances in the cluster, etc.
Terms
=====
These terms are used throughout the documentation.
**node**
A logical member of a cluster. There could be multiple nodes on a physical
machine. Defined by a `hostname:port` tuple.
**cluster**
A set of nodes. Contains distributed Akka applications.
**partition**
An actor or subtree of actors in the Akka application that is distributed
within the cluster.
**partition point**
The actor at the head of a partition. The point around which a partition is
formed.
**partition path**
Also referred to as the actor address. Has the format `actor1/actor2/actor3`
**instance count**
The number of instances of a partition in the cluster. Also referred to as the
``N-value`` of the partition.
**instance node**
A node that an actor instance is assigned to.
**partition table**
A mapping from partition path to a set of instance nodes (where the nodes are
referred to by their ordinal position, given the nodes in sorted order).
**leader**
A single node in the cluster that acts as the leader, managing cluster convergence,
partitions, fail-over, rebalancing, etc.
Membership
==========
A cluster is made up of a set of member nodes. The identifier for each node is a
``hostname:port`` pair. An Akka application is distributed over a cluster with
each node hosting some part of the application. Cluster membership and
partitioning of the application are decoupled. A node could be a member of a
cluster without hosting any actors.
Singleton Cluster
-----------------
If a node does not have a preconfigured contact point to join in the Akka
configuration, then it is considered a singleton cluster (single node cluster)
and will automatically transition from ``joining`` to ``up``. Singleton clusters
can later explicitly send a ``Join`` message to another node to form an N-node
cluster. It is also possible to link multiple N-node clusters by ``joining`` them.
Gossip
------
The cluster membership used in Akka is based on Amazon's `Dynamo`_ system and
particularly the approach taken in Basho's `Riak`_ distributed database.
Cluster membership is communicated using a `Gossip Protocol`_, where the current
state of the cluster is gossiped randomly through the cluster. Joining a cluster
is initiated by issuing a ``Join`` command to one of the nodes in the cluster to
join.
.. _Gossip Protocol: http://en.wikipedia.org/wiki/Gossip_protocol
.. _Dynamo: http://www.allthingsdistributed.com/files/amazon-dynamo-sosp2007.pdf
.. _Riak: http://basho.com/technology/architecture/
Vector Clocks
^^^^^^^^^^^^^
`Vector clocks`_ are an algorithm for generating a partial ordering of events in
a distributed system and detecting causality violations.
We use vector clocks to reconcile and merge differences in cluster state
during gossiping. A vector clock is a set of (node, counter) pairs. Each update
to the cluster state has an accompanying update to the vector clock.
One problem with vector clocks is that their history can become very long over time,
which both makes comparisons slower and takes up unnecessary
memory. To solve that problem we prune the vector clocks according to
the `pruning algorithm`_ in Riak.
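To make the idea concrete, here is a toy vector clock in Scala (an illustration of the
algorithm only, not Akka's internal implementation, and without the pruning described
above)::

  case class VectorClock(versions: Map[String, Long] = Map.empty) {
    // Record an update made by `node`
    def +(node: String): VectorClock =
      copy(versions = versions + (node -> (versions.getOrElse(node, 0L) + 1L)))

    // Happened-before: every counter is <= the corresponding counter in `that`
    def <=(that: VectorClock): Boolean =
      versions.forall { case (n, v) => v <= that.versions.getOrElse(n, 0L) }

    // Pointwise maximum, used when merging concurrent (incomparable) states
    def merge(that: VectorClock): VectorClock =
      VectorClock((versions.keySet ++ that.versions.keySet).map { n =>
        n -> math.max(versions.getOrElse(n, 0L), that.versions.getOrElse(n, 0L))
      }.toMap)
  }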
.. _Vector Clocks: http://en.wikipedia.org/wiki/Vector_clock
.. _pruning algorithm: http://wiki.basho.com/Vector-Clocks.html#Vector-Clock-Pruning
Gossip Convergence
^^^^^^^^^^^^^^^^^^
Information about the cluster converges at certain points of time. This is when
all nodes have seen the same cluster state. Convergence is recognised by passing
a map from node to current state version during gossip. This information is
referred to as the gossip overview. When all versions in the overview are equal
there is convergence. Gossip convergence cannot occur while any nodes are
unreachable; either the nodes become reachable again, or they need to be
moved into the ``down`` or ``removed`` states (see the section on `Member states`_
below).
Failure Detector
^^^^^^^^^^^^^^^^
The failure detector is responsible for trying to detect if a node is
unreachable from the rest of the cluster. For this we are using an
implementation of `The Phi Accrual Failure Detector`_ by Hayashibara et al.
An accrual failure detector decouples monitoring from interpretation. That makes
it applicable to a wider range of scenarios and better suited to building generic
failure detection services. The idea is that it keeps a history of failure
statistics, calculated from heartbeats received from other nodes, and
makes educated guesses by taking multiple factors, and how they
accumulate over time, into account, in order to come up with a better guess whether a
specific node is up or down. Rather than just answering "yes" or "no" to the
question "is the node down?" it returns a ``phi`` value representing the
likelihood that the node is down.
The ``threshold`` that is the basis for the calculation is configurable by the
user. A low ``threshold`` is prone to generate many wrong suspicions but ensures
a quick detection in the event of a real crash. Conversely, a high ``threshold``
generates fewer mistakes but needs more time to detect actual crashes. The
default ``threshold`` is 8 and is appropriate for most situations. However in
cloud environments, such as Amazon EC2, the value could be increased to 12 in
order to account for network issues that sometimes occur on such platforms.
.. _The Phi Accrual Failure Detector: http://ddg.jaist.ac.jp/pub/HDY+04.pdf
Leader
^^^^^^
After gossip convergence a ``leader`` for the cluster can be determined. There is no
``leader`` election process; the ``leader`` can always be recognised deterministically
by any node whenever there is gossip convergence. The ``leader`` is simply the first
node in sorted order that is able to take the leadership role, where the only
allowed member states for a ``leader`` are ``up``, ``leaving`` or ``exiting`` (see
below for more information about member states).
The role of the ``leader`` is to shift members in and out of the cluster, changing
``joining`` members to the ``up`` state or ``exiting`` members to the
``removed`` state, and to schedule rebalancing across the cluster. Currently
``leader`` actions are only triggered by receiving a new cluster state with gossip
convergence but it may also be possible for the user to explicitly rebalance the
cluster by specifying migrations, or to rebalance the cluster automatically
based on metrics from member nodes. Metrics may be spread using the gossip
protocol or possibly more efficiently using a *random chord* method, where the
``leader`` contacts several random nodes around the cluster ring and each contacted
node gathers information from their immediate neighbours, giving a random
sampling of load information.
The ``leader`` also has the power, if configured so, to "auto-down" a node that
according to the Failure Detector is considered unreachable. This means setting
the unreachable node status to ``down`` automatically.
Seed Nodes
^^^^^^^^^^
The seed nodes are configured contact points for the initial join of the cluster.
When a new node is started it sends a message to all seed nodes and
then sends a join command to the one that answers first.
It is possible to turn off automatic joining.
Gossip Protocol
^^^^^^^^^^^^^^^
A variation of *push-pull gossip* is used to reduce the amount of gossip
information sent around the cluster. In push-pull gossip a digest is sent
representing current versions but not actual values; the recipient of the gossip
can then send back any values for which it has newer versions and also request
values for which it has outdated versions. Akka uses a single shared state with
a vector clock for versioning, so the variant of push-pull gossip used in Akka
makes use of the gossip overview (containing the current state versions for all
nodes) to only push the actual state as needed. This also allows any node to
easily determine which other nodes have newer or older information, not just the
nodes involved in a gossip exchange.
Periodically, by default every 1 second, each node chooses another random
node to initiate a round of gossip with. The choice of node is random but can
also include extra gossiping nodes with either newer or older state versions.
The gossip overview contains the current state version for all nodes and also a
list of unreachable nodes. Whenever a node receives a gossip overview it updates
the `Failure Detector`_ with the liveness information.
The nodes defined as ``seed`` nodes are just regular member nodes whose only
"special role" is to function as contact points in the cluster.
During each round of gossip exchange a node, with some probability, sends gossip to a
random node with newer or older state information, if any, based on the current gossip
overview. Otherwise it gossips to any random live node.
The gossiper only sends the gossip overview to the chosen node. The recipient of
the gossip can use the gossip overview to determine whether:
1. it has a newer version of the gossip state, in which case it sends that back
to the gossiper, or
2. it has an outdated version of the state, in which case the recipient requests
the current state from the gossiper
If the recipient and the gossiper have the same version then the gossip state is
not sent or requested.
The main structures used in gossiping are the gossip overview and the gossip
state::
GossipOverview {
versions: Map[Node, VectorClock],
unreachable: Set[Node]
}
GossipState {
version: VectorClock,
members: SortedSet[Member],
partitions: Tree[PartitionPath, Node],
pending: Set[PartitionChange],
meta: Option[Map[String, Array[Byte]]]
}
Some of the other structures used are::
Node = InetSocketAddress
Member {
node: Node,
state: MemberState
}
MemberState = Joining | Up | Leaving | Exiting | Down | Removed
PartitionChange {
from: Node,
to: Node,
path: PartitionPath,
status: PartitionChangeStatus
}
PartitionChangeStatus = Awaiting | Complete
Membership Lifecycle
--------------------
A node begins in the ``joining`` state. Once all nodes have seen that the new
node is joining (through gossip convergence) the ``leader`` will set the member
state to ``up`` and can start assigning partitions to the new node.
If a node is leaving the cluster in a safe, expected manner then it switches to
the ``leaving`` state. The ``leader`` will reassign partitions across the cluster
(it is possible for a leaving node to itself be the ``leader``). When all partition
handoff has completed then the node will change to the ``exiting`` state. Once
all nodes have seen the exiting state (convergence) the ``leader`` will remove the
node from the cluster, marking it as ``removed``.
If a node is unreachable then gossip convergence is not possible and therefore
any ``leader`` actions are also not possible (for instance, allowing a node to
become a part of the cluster, or changing actor distribution). To be able to
move forward the state of the unreachable nodes must be changed. If the
unreachable node is experiencing only transient difficulties then it can be
explicitly marked as ``down`` using the ``down`` user action. When this node
comes back up and begins gossiping it will automatically go through the joining
process again. If the unreachable node will be permanently down then it can be
removed from the cluster directly by shutting the actor system down or killing it
through an external ``SIGKILL`` signal, invocation of ``System.exit(status)`` or
similar. The cluster can, through the leader, also *auto-down* a node.
This means that nodes can join and leave the cluster at any point in time, i.e.
the cluster provides elasticity.
State Diagram for the Member States
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. image:: images/member-states.png
Member States
^^^^^^^^^^^^^
- **joining**
transient state when joining a cluster
- **up**
normal operating state
- **leaving** / **exiting**
states during graceful removal
- **down**
marked as down/offline/unreachable
- **removed**
tombstone state (no longer a member)
User Actions
^^^^^^^^^^^^
- **join**
join a single node to a cluster - can be explicit or automatic on
startup if a node to join has been specified in the configuration
- **leave**
tell a node to leave the cluster gracefully
- **down**
mark a node as temporarily down
Leader Actions
^^^^^^^^^^^^^^
The ``leader`` has the following duties:
- shifting members in and out of the cluster
- joining -> up
- exiting -> removed
- partition distribution
- scheduling handoffs (pending changes)
- setting the partition table (partition path -> base node)
- automatic rebalancing based on runtime metrics in the system (such as CPU,
RAM, Garbage Collection, mailbox depth etc.)
Partitioning
============
Each partition (an actor or actor subtree) in the actor system is assigned to a
set of nodes in the cluster. The actor at the head of the partition is referred
to as the partition point. The mapping from partition path (actor address of the
format "a/b/c") to instance nodes is stored in the partition table and is
maintained as part of the cluster state through the gossip protocol. The
partition table is only updated by the ``leader`` node. Currently the only possible
partition points are *routed* actors.
Routed actors can have an instance count greater than one. The instance count is
also referred to as the ``N-value``. If the ``N-value`` is greater than one then
a set of instance nodes will be given in the partition table.
Note that in the first implementation there may be a restriction such that only
top-level partitions are possible (the highest possible partition points are
used and sub-partitioning is not allowed). Still to be explored in more detail.
The cluster ``leader`` determines the current instance count for a partition based
on two axes: fault-tolerance and scaling.
Fault-tolerance determines a minimum number of instances for a routed actor
(allowing N-1 nodes to crash while still maintaining at least one running actor
instance). The user can specify a function from the current number of nodes to the
number of acceptable node failures: ``n: Int => f: Int`` where ``f < n``.
Scaling reflects the number of instances needed to maintain good throughput and
is influenced by metrics from the system, particularly a history of mailbox
size, CPU load, and GC percentages. It may also be possible to accept scaling
hints from the user that indicate expected load.
The balancing of partitions can be determined in a very simple way in the first
implementation, where the overlap of partitions is minimized. Partitions are
spread over the cluster ring in a circular fashion, with each instance node in
the first available space. For example, given a cluster with ten nodes and three
partitions, A, B, and C, having N-values of 4, 3, and 5; partition A would have
instances on nodes 1-4; partition B would have instances on nodes 5-7; partition
C would have instances on nodes 8-10 and 1-2. The only overlap is on nodes 1 and
2.
The distribution of partitions is not limited, however, to having instances on
adjacent nodes in the sorted ring order. Each instance can be assigned to any
node and the more advanced load balancing algorithms will make use of this. The
partition table contains a mapping from path to instance nodes. The partitioning
for the above example would be::
A -> { 1, 2, 3, 4 }
B -> { 5, 6, 7 }
C -> { 8, 9, 10, 1, 2 }
If 5 new nodes join the cluster and in sorted order these nodes appear after the
current nodes 2, 4, 5, 7, and 8, then the partition table could be updated to
the following, with all instances on the same physical nodes as before::
A -> { 1, 2, 4, 5 }
B -> { 7, 9, 10 }
C -> { 12, 14, 15, 1, 2 }
When rebalancing is required the ``leader`` will schedule handoffs, gossiping a set
of pending changes, and when each change is complete the ``leader`` will update the
partition table.
Handoff
-------
Handoff for an actor-based system is different from that for a data-based system. The
most important point is that message ordering (from a given node to a given
actor instance) may need to be maintained. If an actor is a singleton actor
(only one instance possible throughout the cluster) then the cluster may also
need to assure that there is only one such actor active at any one time. Both of
these situations can be handled by forwarding and buffering messages during
transitions.
A *graceful handoff* (one where the previous host node is up and running during
the handoff), given a previous host node ``N1``, a new host node ``N2``, and an
actor partition ``A`` to be migrated from ``N1`` to ``N2``, has this general
structure:
1. the ``leader`` sets a pending change for ``N1`` to handoff ``A`` to ``N2``
2. ``N1`` notices the pending change and sends an initialization message to ``N2``
3. in response ``N2`` creates ``A`` and sends back a ready message
4. after receiving the ready message ``N1`` marks the change as
complete and shuts down ``A``
5. the ``leader`` sees the migration is complete and updates the partition table
6. all nodes eventually see the new partitioning and use ``N2``
Transitions
^^^^^^^^^^^
There are transition times in the handoff process where different approaches can
be used to give different guarantees.
Migration Transition
~~~~~~~~~~~~~~~~~~~~
The first transition starts when ``N1`` initiates the moving of ``A`` and ends
when ``N1`` receives the ready message, and is referred to as the *migration
transition*.
The first question is, during the migration transition, should:
- ``N1`` continue to process messages for ``A``?
- Or is it important that no messages for ``A`` are processed on
``N1`` once migration begins?
If it is okay for the previous host node ``N1`` to process messages during
migration then there is nothing that needs to be done at this point.
If no messages are to be processed on the previous host node during migration
then there are two possibilities: the messages are forwarded to the new host and
buffered until the actor is ready, or the messages are simply dropped by
terminating the actor and allowing the normal dead letter process to be used.
Update Transition
~~~~~~~~~~~~~~~~~
The second transition begins when the migration is marked as complete and ends
when all nodes have the updated partition table (when all nodes will use ``N2``
as the host for ``A``, i.e. we have convergence) and is referred to as the
*update transition*.
Once the update transition begins ``N1`` can forward any messages it receives
for ``A`` to the new host ``N2``. The question is whether or not message
ordering needs to be preserved. If messages sent to the previous host node
``N1`` are being forwarded, then it is possible that a message sent to ``N1``
could be forwarded after a direct message to the new host ``N2``, breaking
message ordering from a client to actor ``A``.
In this situation ``N2`` can keep a buffer for messages per sending node. Each
buffer is flushed and removed when an acknowledgement (``ack``) message has been
received. When each node in the cluster sees the partition update it first sends
an ``ack`` message to the previous host node ``N1`` before beginning to use
``N2`` as the new host for ``A``. Any messages sent from the client node
directly to ``N2`` will be buffered. ``N1`` can count down the number of acks to
determine when no more forwarding is needed. The ``ack`` message from any node
will always follow any other messages sent to ``N1``. When ``N1`` receives the
``ack`` message it also forwards it to ``N2`` and again this ``ack`` message
will follow any other messages already forwarded for ``A``. When ``N2`` receives
an ``ack`` message, the buffer for the sending node can be flushed and removed.
Any subsequent messages from this sending node can be queued normally. Once all
nodes in the cluster have acknowledged the partition change and ``N2`` has
cleared all buffers, the handoff is complete and message ordering has been
preserved. In practice the buffers should remain small as it is only those
messages sent directly to ``N2`` before the acknowledgement has been forwarded
that will be buffered.
Graceful Handoff
^^^^^^^^^^^^^^^^
A more complete process for graceful handoff would be:
1. the ``leader`` sets a pending change for ``N1`` to handoff ``A`` to ``N2``
2. ``N1`` notices the pending change and sends an initialization message to
``N2``. Options:
a. keep ``A`` on ``N1`` active, continuing to process messages as normal
b. ``N1`` forwards all messages for ``A`` to ``N2``
c. ``N1`` drops all messages for ``A`` (terminate ``A`` with messages
becoming dead letters)
3. in response ``N2`` creates ``A`` and sends back a ready message. Options:
a. ``N2`` simply processes messages for ``A`` as normal
b. ``N2`` creates a buffer per sending node for ``A``. Each buffer is
opened (flushed and removed) when an acknowledgement for the sending
node has been received (via ``N1``)
4. after receiving the ready message ``N1`` marks the change as complete. Options:
a. ``N1`` forwards all messages for ``A`` to ``N2`` during the update transition
b. ``N1`` drops all messages for ``A`` (terminate ``A`` with messages
becoming dead letters)
5. the ``leader`` sees the migration is complete and updates the partition table
6. all nodes eventually see the new partitioning and use ``N2``
i. each node sends an acknowledgement message to ``N1``
ii. when ``N1`` receives the acknowledgement it can count down the pending
acknowledgements and remove forwarding when complete
iii. when ``N2`` receives the acknowledgement it can open the buffer for the
sending node (if buffers are used)
The default approach is to take options 2a, 3a, and 4a - allowing ``A`` on
``N1`` to continue processing messages during migration and then forwarding any
messages during the update transition. This assumes stateless actors that do not
have a dependency on message ordering from any given source.
- If an actor has a distributed durable mailbox then nothing needs to be done,
other than migrating the actor.
- If message ordering needs to be maintained during the update transition then
option 3b can be used, creating buffers per sending node.
- If the actors are robust to message send failures then the dropping messages
approach can be used (with no forwarding or buffering needed).
- If an actor is a singleton (only one instance possible throughout the cluster)
and state is transferred during the migration initialization, then options 2b
and 3b would be required.
Stateful Actor Replication
==========================
Support for stateful singleton actors will come in future releases of Akka, and
is scheduled for Akka 2.2. Having a Dynamo base for the clustering already, we
should use the same infrastructure to provide stateful actor clustering and a
datastore as well. The stateful actor clustering should be layered on top of the
distributed datastore. See the next section for a rough outline on how the
distributed datastore could be implemented.
Implementing a Dynamo-style Distributed Database on top of Akka Cluster
-----------------------------------------------------------------------
The missing pieces to implement a full Dynamo-style eventually consistent data
storage on top of the Akka Cluster as described in this document are:
- Configuration of ``READ`` and ``WRITE`` consistency levels according to the
``N/R/W`` numbers defined in the Dynamo paper.
- R = read replica count
- W = write replica count
- N = replication factor
- Q = QUORUM = N / 2 + 1
- W + R > N = full consistency (see the sketch at the end of this section)
- Define a versioned data message wrapper::
Versioned[T](hash: Long, version: VectorClock, data: T)
- Define a single system data broker actor on each node that uses a ``Consistent
Hashing Router`` and that has instances on all other nodes in the node ring.
- For ``WRITE``:
1. Wrap the data in a ``Versioned Message``
2. Send the ``Versioned Message`` with the data to a number of nodes
matching the ``W-value``.
- For ``READ``:
1. Read in the ``Versioned Message`` with the data from as many replicas as
you need for the consistency level required by the ``R-value``.
2. Do a comparison of the versions (using `Vector Clocks`_)
3. If the versions differ then do a `Read Repair`_ to update the inconsistent
nodes.
4. Return the latest versioned data.
.. _Read Repair: http://wiki.apache.org/cassandra/ReadRepair
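Putting the consistency-level arithmetic above into code, a minimal sketch (the names
are made up)::

  final case class ReplicationConfig(n: Int, r: Int, w: Int) {
    require(r > 0 && w > 0 && r <= n && w <= n, "R and W must be between 1 and N")

    // Q = QUORUM = N / 2 + 1
    def quorum: Int = n / 2 + 1

    // W + R > N guarantees that read and write replica sets overlap,
    // so a read at this level sees the latest acknowledged write
    def fullyConsistent: Boolean = r + w > n
  }

  // Example: N = 3 with quorum reads and writes is fully consistent
  val cfg = ReplicationConfig(n = 3, r = 2, w = 2)
  assert(cfg.quorum == 2 && cfg.fullyConsistent)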

(Five binary image files added by this commit are not shown; sizes 38 KiB, 1.5 KiB, 26 KiB, 25 KiB, and 26 KiB.)

View file

@@ -0,0 +1,8 @@
Cluster
=======
.. toctree::
:maxdepth: 2
cluster
cluster-usage

View file

@@ -0,0 +1,109 @@
.. _circuit-breaker:
###############
Circuit Breaker
###############
==================
Why are they used?
==================
A circuit breaker is used to provide stability and prevent cascading failures in distributed
systems. These should be used in conjunction with judicious timeouts at the interfaces between
remote systems to prevent the failure of a single component from bringing down all components.
As an example, we have a web application interacting with a remote third party web service.
Let's say the third party has oversold their capacity and their database melts down under load.
Assume that the database fails in such a way that it takes a very long time to hand back an
error to the third party web service. This in turn makes calls fail after a long period of
time. Back in our web application, the users have noticed that their form submissions take
much longer and seem to hang. The users do what they know to do, which is to use the refresh
button, adding more requests to their already running requests. This eventually causes the
failure of the web application due to resource exhaustion. This will affect all users, even
those who are not using functionality dependent on this third party web service.
Introducing circuit breakers on the web service call would cause the requests to begin to
fail-fast, letting the user know that something is wrong and that they need not refresh
their request. This also confines the failure behavior to only those users that are using
functionality dependent on the third party; other users are no longer affected, as there is no
resource exhaustion. Circuit breakers can also allow savvy developers to mark portions of
the site that use the functionality as unavailable, or perhaps show some cached content as
appropriate while the breaker is open.
The Akka library provides an implementation of a circuit breaker called
:class:`akka.pattern.CircuitBreaker` which has the behavior described below.
=================
What do they do?
=================
* During normal operation, a circuit breaker is in the `Closed` state:
* Exceptions or calls exceeding the configured `callTimeout` increment a failure counter
* Successes reset the failure count to zero
* When the failure counter reaches a `maxFailures` count, the breaker is tripped into `Open` state
* While in `Open` state:
* All calls fail-fast with a :class:`CircuitBreakerOpenException`
* After the configured `resetTimeout`, the circuit breaker enters a `Half-Open` state
* In `Half-Open` state:
* The first call attempted is allowed through without failing fast
* All other calls fail-fast with an exception just as in `Open` state
* If the first call succeeds, the breaker is reset back to `Closed` state
* If the first call fails, the breaker is tripped again into the `Open` state for another full `resetTimeout`
* State transition listeners:
* Callbacks can be provided for every state entry via `onOpen`, `onClose`, and `onHalfOpen`
* These are executed in the :class:`ExecutionContext` provided.
.. image:: ../images/circuit-breaker-states.png
========
Examples
========
--------------
Initialization
--------------
Here's how a :class:`CircuitBreaker` would be configured for:
* 5 maximum failures
* a call timeout of 10 seconds
* a reset timeout of 1 minute
^^^^^^^
Scala
^^^^^^^
.. includecode:: code/docs/circuitbreaker/CircuitBreakerDocSpec.scala
:include: imports1,circuit-breaker-initialization
^^^^^^^
Java
^^^^^^^
.. includecode:: code/docs/circuitbreaker/DangerousJavaActor.java
:include: imports1,circuit-breaker-initialization
---------------
Call Protection
---------------
Here's how the :class:`CircuitBreaker` would be used to protect an asynchronous
call as well as a synchronous one:
^^^^^^^
Scala
^^^^^^^
.. includecode:: code/docs/circuitbreaker/CircuitBreakerDocSpec.scala
:include: circuit-breaker-usage
^^^^^^
Java
^^^^^^
.. includecode:: code/docs/circuitbreaker/DangerousJavaActor.java
:include: circuit-breaker-usage
.. note::
Using the :class:`CircuitBreaker` companion object's `apply` or `create` methods
will return a :class:`CircuitBreaker` where callbacks are executed in the caller's thread.
This can be useful if the asynchronous :class:`Future` behavior is unnecessary, for
example invoking a synchronous-only API.

View file

@@ -0,0 +1,43 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.circuitbreaker
//#imports1
import scala.concurrent.util.duration._ // small d is important here
import akka.pattern.CircuitBreaker
import akka.actor.Actor
import scala.concurrent.Future
import akka.event.Logging
//#imports1
class CircuitBreakerDocSpec {}
//#circuit-breaker-initialization
class DangerousActor extends Actor {
val log = Logging(context.system, this)
implicit val executionContext = context.dispatcher
val breaker =
new CircuitBreaker(context.system.scheduler, 5, 10.seconds, 1.minute)
.onOpen(notifyMeOnOpen)
def notifyMeOnOpen =
log.warning("My CircuitBreaker is now open, and will not close for one minute")
//#circuit-breaker-initialization
//#circuit-breaker-usage
def dangerousCall: String = "This really isn't that dangerous of a call after all"
def receive = {
case "is my middle name"
sender ! breaker.withCircuitBreaker(Future(dangerousCall))
case "block for me"
sender ! breaker.withSyncCircuitBreaker(dangerousCall)
}
//#circuit-breaker-usage
}

View file

@@ -0,0 +1,83 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.circuitbreaker;
//#imports1
import akka.actor.UntypedActor;
import scala.concurrent.Future;
import akka.event.LoggingAdapter;
import scala.concurrent.util.Duration;
import akka.pattern.CircuitBreaker;
import akka.event.Logging;
import static akka.dispatch.Futures.future;
import java.util.concurrent.Callable;
//#imports1
//#circuit-breaker-initialization
public class DangerousJavaActor extends UntypedActor {
private final CircuitBreaker breaker;
private final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
public DangerousJavaActor() {
this.breaker = new CircuitBreaker(
getContext().dispatcher(), getContext().system().scheduler(),
5, Duration.create(10, "s"), Duration.create(1, "m"))
.onOpen(new Callable<Object>() {
public Object call() throws Exception {
notifyMeOnOpen();
return null;
}
});
}
public void notifyMeOnOpen() {
log.warning("My CircuitBreaker is now open, and will not close for one minute");
}
//#circuit-breaker-initialization
//#circuit-breaker-usage
public String dangerousCall() {
return "This really isn't that dangerous of a call after all";
}
@Override
public void onReceive(Object message) {
if (message instanceof String) {
String m = (String) message;
if ("is my middle name".equals(m)) {
final Future<String> f = future(
new Callable<String>() {
public String call() {
return dangerousCall();
}
}, getContext().dispatcher());
getSender().tell(breaker
.callWithCircuitBreaker(
new Callable<Future<String>>() {
public Future<String> call() throws Exception {
return f;
}
}), getSelf());
}
if ("block for me".equals(m)) {
getSender().tell(breaker
.callWithSyncCircuitBreaker(
new Callable<String>() {
@Override
public String call() throws Exception {
return dangerousCall();
}
}), getSelf());
}
}
}
//#circuit-breaker-usage
}

View file

@@ -0,0 +1,27 @@
/**
* Copyright (C) 2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.duration;
//#import
import scala.concurrent.util.Duration;
import scala.concurrent.util.Deadline;
//#import
class Java {
public void demo() {
//#dsl
final Duration fivesec = Duration.create(5, "seconds");
final Duration threemillis = Duration.parse("3 millis");
final Duration diff = fivesec.minus(threemillis);
assert diff.lt(fivesec);
assert Duration.Zero().lt(Duration.Inf());
//#dsl
//#deadline
final Deadline deadline = Duration.create(10, "seconds").fromNow();
final Duration rest = deadline.timeLeft();
//#deadline
rest.toString();
}
}

View file

@ -0,0 +1,24 @@
/**
* Copyright (C) 2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.duration
object Scala {
//#dsl
import scala.concurrent.util.duration._ // notice the small d
val fivesec = 5.seconds
val threemillis = 3.millis
val diff = fivesec - threemillis
assert(diff < fivesec)
val fourmillis = threemillis * 4 / 3 // though you cannot write it the other way around
val n = threemillis / (1 millisecond)
//#dsl
//#deadline
val deadline = 10.seconds.fromNow
// do something
val rest = deadline.timeLeft
//#deadline
}

View file

@ -0,0 +1,60 @@
.. _Duration:
########
Duration
########
Durations are used throughout the Akka library, wherefore this concept is
represented by a special data type, :class:`scala.concurrent.util.Duration`.
Values of this type may represent infinite (:obj:`Duration.Inf`,
:obj:`Duration.MinusInf`) or finite durations, or be :obj:`Duration.Undefined`.
Finite vs. Infinite
===================
Since trying to convert an infinite duration into a concrete time unit like
seconds will throw an exception, there are different types available for
distinguishing the two kinds at compile time:
* :class:`FiniteDuration` is guaranteed to be finite, calling :meth:`toNanos`
and friends is safe
* :class:`Duration` can be finite or infinite, so this type should only be used
when finite-ness does not matter; this is a supertype of :class:`FiniteDuration`
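For example, here is a minimal sketch (not taken from the Akka sources) of
using the two types to handle a possibly infinite duration safely::

   import scala.concurrent.util.{ Duration, FiniteDuration }

   def safeMillis(d: Duration): Option[Long] = d match {
     case f: FiniteDuration ⇒ Some(f.toMillis) // finite, so the conversion cannot throw
     case _                 ⇒ None             // Inf, MinusInf or Undefined
   }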
Scala
=====
In Scala durations are constructable using a mini-DSL and support all expected
arithmetic operations:
.. includecode:: code/docs/duration/Sample.scala#dsl
.. note::
You may leave out the dot if the expression is clearly delimited (e.g.
within parentheses or in an argument list), but it is recommended to use it
if the time unit is the last token on a line, otherwise semi-colon inference
might go wrong, depending on what starts the next line.
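To illustrate the note (a hypothetical snippet, not part of the shipped samples)::

   import scala.concurrent.util.duration._

   val a = (5 seconds) // clearly delimited by parentheses, so the dot may be omitted
   val b = 5.seconds   // recommended when the time unit is the last token on the line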
Java
====
Java provides less syntactic sugar, so you have to spell out the operations as
method calls instead:
.. includecode:: code/docs/duration/Java.java#import
.. includecode:: code/docs/duration/Java.java#dsl
Deadline
========
Durations have a brother named :class:`Deadline`, which is a class holding a representation
of an absolute point in time, and supports deriving a duration from it by calculating the
difference between now and the deadline. This is useful when you want to keep one overall
deadline without having to take care of the book-keeping wrt. the passing of time yourself:
.. includecode:: code/docs/duration/Sample.scala#deadline
In Java you create these from durations:
.. includecode:: code/docs/duration/Java.java#deadline
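Since :meth:`timeLeft` is computed against the current time on every call, one
deadline can be consulted repeatedly, e.g. to derive a fresh timeout for each
iteration of a loop. A minimal sketch, using only the API shown above::

   import scala.concurrent.util.Duration
   import scala.concurrent.util.duration._

   val deadline = 30.seconds.fromNow

   def poll(): Unit =
     if (deadline.timeLeft > Duration.Zero) {
       // always ask the one overall deadline how much time remains
       println("still " + deadline.timeLeft.toMillis + " ms to go")
       Thread.sleep(1000)
       poll()
     }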

View file

@ -0,0 +1,8 @@
Common utilities
==========================
.. toctree::
:maxdepth: 2
duration
circuitbreaker

akka-docs/rst/conf.py Normal file
View file

@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
#
# Akka documentation build configuration file.
#
import sys, os
# -- General configuration -----------------------------------------------------
sys.path.append(os.path.abspath('../_sphinx/exts'))
extensions = ['sphinx.ext.todo', 'includecode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
exclude_patterns = ['_build', 'pending', 'disabled']
project = u'Akka'
copyright = u'2011, Typesafe Inc'
version = '@version@'
release = '@version@'
pygments_style = 'simple'
highlight_language = 'scala'
add_function_parentheses = False
show_authors = True
# -- Options for HTML output ---------------------------------------------------
html_theme = 'akka'
html_theme_path = ['../_sphinx/themes']
html_favicon = '../_sphinx/static/favicon.ico'
html_title = 'Akka Documentation'
html_logo = '../_sphinx/static/logo.png'
#html_favicon = None
html_static_path = ['../_sphinx/static']
html_last_updated_fmt = '%b %d, %Y'
#html_sidebars = {}
#html_additional_pages = {}
html_domain_indices = False
html_use_index = False
html_show_sourcelink = False
html_show_sphinx = False
html_show_copyright = True
htmlhelp_basename = 'Akkadoc'
html_use_smartypants = False
html_add_permalinks = ''
html_context = {
'include_analytics': 'online' in tags
}
# -- Options for EPUB output ---------------------------------------------------
epub_author = "Typesafe Inc"
epub_language = "en"
epub_publisher = epub_author
epub_identifier = "http://doc.akka.io/docs/akka/snapshot/"
epub_scheme = "URL"
epub_cover = ("../_sphinx/static/akka.png", "")
# -- Options for LaTeX output --------------------------------------------------
def setup(app):
from sphinx.util.texescape import tex_replacements
tex_replacements.append((u'⇒', ur'\(\Rightarrow\)'))
latex_paper_size = 'a4'
latex_font_size = '10pt'
latex_documents = [
('index', 'Akka.tex', u' Akka Documentation',
u'Typesafe Inc', 'manual'),
]
latex_elements = {
'classoptions': ',oneside,openany',
'babel': '\\usepackage[english]{babel}',
'fontpkg': '\\PassOptionsToPackage{warn}{textcomp} \\usepackage{times}',
'preamble': '\\definecolor{VerbatimColor}{rgb}{0.935,0.935,0.935}'
}
# latex_logo = '_sphinx/static/akka.png'

View file

@ -0,0 +1,146 @@
.. highlightlang:: none
.. _building-akka:
###############
Building Akka
###############
This page describes how to build and run Akka from the latest source code.
Get the Source Code
===================
Akka uses `Git`_ and is hosted at `Github`_.
.. _Git: http://git-scm.com
.. _Github: http://github.com
You first need Git installed on your machine. You can then clone the source
repository from http://github.com/akka/akka.
For example::
git clone git://github.com/akka/akka.git
If you have already cloned the repository previously then you can update the
code with ``git pull``::
git pull origin master
sbt - Simple Build Tool
=======================
Akka is using the excellent `sbt`_ build system. So the first thing you have to
do is to download and install sbt. You can read more about how to do that in the
`sbt setup`_ documentation.
.. _sbt: https://github.com/harrah/xsbt
.. _sbt setup: https://github.com/harrah/xsbt/wiki/Setup
The sbt commands that you'll need to build Akka are all included below. If you
want to find out more about sbt and using it for your own projects do read the
`sbt documentation`_.
.. _sbt documentation: https://github.com/harrah/xsbt/wiki
The Akka sbt build file is ``project/AkkaBuild.scala``.
Building Akka
=============
First make sure that you are in the akka code directory::
cd akka
Building
--------
To compile all the Akka core modules use the ``compile`` command::
sbt compile
You can run all tests with the ``test`` command::
sbt test
If compiling and testing are successful then you have everything working for the
latest Akka development version.
Parallel Execution
------------------
By default the tests are executed sequentially. They can be executed in parallel to reduce build times,
if the hardware can handle the increased memory and CPU usage. Add the following system property to the
sbt launch script to activate parallel execution::
-Dakka.parallelExecution=true
Long Running and Time Sensitive Tests
-------------------------------------
By default, the long running tests (mainly cluster tests) and time sensitive tests (dependent on the
performance of the machine they are running on) are disabled. You can enable them by adding one of the flags::
-Dakka.test.tags.include=long-running
-Dakka.test.tags.include=timing
Or if you need to enable them both::
-Dakka.test.tags.include=long-running,timing
Publish to Local Ivy Repository
-------------------------------
If you want to deploy the artifacts to your local Ivy repository (for example,
to use from an sbt project) use the ``publish-local`` command::
sbt publish-local
sbt Interactive Mode
--------------------
Note that in the examples above we are calling ``sbt compile`` and ``sbt test``
and so on, but sbt also has an interactive mode. If you just run ``sbt`` you
enter the interactive sbt prompt and can enter the commands directly. This saves
starting up a new JVM instance for each command and can be much faster and more
convenient.
For example, building Akka as above is more commonly done like this::
% sbt
[info] Set current project to default (in build file:/.../akka/project/plugins/)
[info] Set current project to akka (in build file:/.../akka/)
> compile
...
> test
...
sbt Batch Mode
--------------
It's also possible to combine commands in a single call. For example, testing,
and publishing Akka to the local Ivy repository can be done with::
sbt test publish-local
.. _dependencies:
Dependencies
============
You can look at the Ivy dependency resolution information that is created on
``sbt update`` and found in ``~/.ivy2/cache``. For example, the
``~/.ivy2/cache/com.typesafe.akka-akka-remote-compile.xml`` file contains
the resolution information for the akka-remote module compile dependencies. If
you open this file in a web browser you will get an easy to navigate view of
dependencies.

View file

@ -0,0 +1,66 @@
.. _developer_guidelines:
Developer Guidelines
====================
Code Style
----------
The Akka code style follows the `Scala Style Guide <http://docs.scala-lang.org/style/>`_ .
Akka is using ``Scalariform`` to format the source code as part of the build. So just hack away and then run ``sbt compile`` and it will reformat the code according to Akka standards.
Process
-------
* Make sure you have signed the Akka CLA, if not, `sign it online <http://www.typesafe.com/contribute/cla>`_.
* Pick a ticket, if there is no ticket for your work then create one first.
* Start working in a feature branch. Name it something like ``wip-<ticket number>-<descriptive name>-<your username>``.
* When you are done, create a GitHub Pull-Request towards the targeted branch and email the Akka Mailing List that you want it reviewed
* When there's consensus on the review, someone from the Akka Core Team will merge it.
Commit messages
---------------
Please follow these guidelines when creating public commits and writing commit messages.
1. If your work spans multiple local commits (for example, if you do safe point commits while working in a topic branch, or if you work in a branch for a long time doing merges/rebases etc.) then please do **not** commit it all as-is, but rewrite the history by squashing the commits into a single big commit for which you write a good commit message (as discussed below). Here is a great article on how to do that: `http://sandofsky.com/blog/git-workflow.html <http://sandofsky.com/blog/git-workflow.html>`_. Every commit should be usable in isolation, cherry-picked etc.
2. First line should be a descriptive sentence what the commit is doing. It should be possible to fully understand what the commit does by just reading this single line. It is **not** ok to only list the ticket number, type "minor fix" or similar. Include reference to ticket number, prefixed with #, at the end of the first line. If the commit is a **small** fix, then you are done. If not, go to 3.
3. Following the single line description should be a blank line followed by an enumerated list with the details of the commit.
Example::
Completed replication over BookKeeper based transaction log with configurable actor snapshotting every X message. Fixes #XXX
* Details 1
* Details 2
* Details 3
Testing
-------
All code that is checked in **should** have tests. All testing is done with ``ScalaTest`` and ``ScalaCheck``.
* Name tests as **Test.scala** if they do not depend on any external stuff. That keeps surefire happy.
* Name tests as **Spec.scala** if they have external dependencies.
There is a testing standard that should be followed: `Ticket001Spec <https://github.com/akka/akka/blob/master/akka-actor-tests/src/test/scala/akka/ticket/Ticket001Spec.scala>`_
Actor TestKit
^^^^^^^^^^^^^
There is a useful test kit for testing actors: `akka.testkit.TestKit <https://github.com/akka/akka/tree/master/akka-testkit/src/main/scala/akka/testkit/TestKit.scala>`_. It enables assertions concerning replies received and their timing; there is more documentation in the :ref:`akka-testkit` module.
Multi-JVM Testing
^^^^^^^^^^^^^^^^^
Included in the example is an sbt trait for multi-JVM testing which will fork
JVMs for multi-node testing. There is support for running applications (objects
with main methods) and running ScalaTest tests.
NetworkFailureTest
^^^^^^^^^^^^^^^^^^
You can use the 'NetworkFailureTest' trait to test network failure.

View file

@ -0,0 +1,149 @@
.. highlightlang:: rest
.. _documentation:
#########################
Documentation Guidelines
#########################
The Akka documentation uses `reStructuredText`_ as its markup language and is
built using `Sphinx`_.
.. _reStructuredText: http://docutils.sourceforge.net/rst.html
.. _sphinx: http://sphinx.pocoo.org
Sphinx
======
More to come...
reStructuredText
================
More to come...
Sections
--------
Section headings are very flexible in reST. We use the following convention in
the Akka documentation:
* ``#`` (over and under) for module headings
* ``=`` for sections
* ``-`` for subsections
* ``^`` for subsubsections
* ``~`` for subsubsubsections
Cross-referencing
-----------------
Sections that may be cross-referenced across the documentation should be marked
with a reference. To mark a section use ``.. _ref-name:`` before the section
heading. The section can then be linked with ``:ref:`ref-name```. These are
unique references across the entire documentation.
For example::
.. _akka-module:
#############
Akka Module
#############
This is the module documentation.
.. _akka-section:
Akka Section
============
Akka Subsection
---------------
Here is a reference to "akka section": :ref:`akka-section` which will have the
name "Akka Section".
Build the documentation
=======================
First install `Sphinx`_. See below.
Building
--------
::
cd akka-docs
make html
open _build/html/index.html
make pdf
open _build/latex/Akka.pdf
Installing Sphinx on OS X
-------------------------
Install `Homebrew <https://github.com/mxcl/homebrew>`_
Install Python and pip:
::
brew install python
/usr/local/share/python/easy_install pip
Add the Homebrew Python path to your $PATH:
::
/usr/local/Cellar/python/2.7.1/bin
More information in case of trouble:
https://github.com/mxcl/homebrew/wiki/Homebrew-and-Python
Install sphinx:
::
pip install sphinx
Add sphinx_build to your $PATH:
::
/usr/local/share/python
Install BasicTeX package from:
http://www.tug.org/mactex/morepackages.html
Add texlive bin to $PATH:
::
/usr/local/texlive/2010basic/bin/universal-darwin
Add missing tex packages:
::
sudo tlmgr update --self
sudo tlmgr install titlesec
sudo tlmgr install framed
sudo tlmgr install threeparttable
sudo tlmgr install wrapfig
sudo tlmgr install helvetic
sudo tlmgr install courier
Link the akka pygments style:
::
cd /usr/local/Cellar/python/2.7.1/lib/python2.7/site-packages/pygments/styles
ln -s /path/to/akka/akka-docs/themes/akka/pygments/akka.py akka.py

View file

@ -0,0 +1,12 @@
Information for Developers
==========================
.. toctree::
:maxdepth: 2
building-akka
multi-jvm-testing
developer-guidelines
documentation
team

View file

@ -0,0 +1,229 @@
.. _multi-jvm-testing:
###################
Multi JVM Testing
###################
Supports running applications (objects with main methods) and ScalaTest tests in multiple JVMs at the same time.
Useful for integration testing where multiple systems communicate with each other.
Setup
=====
The multi-JVM testing is an sbt plugin that you can find at `<http://github.com/typesafehub/sbt-multi-jvm>`_.
You can add it as a plugin by adding the following to your project/plugins.sbt:
.. includecode:: ../../project/plugins.sbt#sbt-multi-jvm
You can then add multi-JVM testing to ``project/Build.scala`` by including the ``MultiJvm``
settings and config. For example, here is how the akka-remote-tests project adds
multi-JVM testing (simplified for clarity):
.. parsed-literal::
import sbt._
import Keys._
import com.typesafe.sbt.SbtMultiJvm
import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys.{ MultiJvm, extraOptions }
object AkkaBuild extends Build {
lazy val remoteTests = Project(
id = "akka-remote-tests",
base = file("akka-remote-tests"),
dependencies = Seq(remote, actorTests % "test->test", testkit % "test->test"),
settings = defaultSettings ++ Seq(
// disable parallel tests
parallelExecution in Test := false,
extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src =>
(name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq
},
test in Test <<= (test in Test) dependsOn (test in MultiJvm)
)
) configs (MultiJvm)
lazy val buildSettings = Defaults.defaultSettings ++ SbtMultiJvm.multiJvmSettings ++ Seq(
organization := "com.typesafe.akka",
version := "@version@",
scalaVersion := "@scalaVersion@",
crossPaths := false
)
lazy val defaultSettings = buildSettings ++ Seq(
resolvers += "Typesafe Repo" at "http://repo.typesafe.com/typesafe/releases/"
)
}
You can specify JVM options for the forked JVMs::
jvmOptions in MultiJvm := Seq("-Xmx256M")
Running tests
=============
The multi-jvm tasks are similar to the normal tasks: ``test``, ``test-only``,
and ``run``, but are under the ``multi-jvm`` configuration.
So in Akka, to run all the multi-JVM tests in the akka-remote project use (at
the sbt prompt):
.. code-block:: none
akka-remote-tests/multi-jvm:test
Or one can change to the ``akka-remote-tests`` project first, and then run the
tests:
.. code-block:: none
project akka-remote-tests
multi-jvm:test
To run individual tests use ``test-only``:
.. code-block:: none
multi-jvm:test-only akka.remote.RandomRoutedRemoteActor
More than one test name can be listed to run multiple specific
tests. Tab-completion in sbt makes it easy to complete the test names.
It's also possible to specify JVM options with ``test-only`` by including those
options after the test names and ``--``. For example:
.. code-block:: none
multi-jvm:test-only akka.remote.RandomRoutedRemoteActor -- -Dsome.option=something
Creating application tests
==========================
The tests are discovered, and combined, through a naming convention. MultiJvm tests are
located in ``src/multi-jvm/scala`` directory. A test is named with the following pattern:
.. code-block:: none
{TestName}MultiJvm{NodeName}
That is, each test has ``MultiJvm`` in the middle of its name. The part before
it groups together tests/applications under a single ``TestName`` that will run
together. The part after, the ``NodeName``, is a distinguishing name for each
forked JVM.
So to create a 3-node test called ``Sample``, you can create three applications
like the following::
package sample
object SampleMultiJvmNode1 {
def main(args: Array[String]) {
println("Hello from node 1")
}
}
object SampleMultiJvmNode2 {
def main(args: Array[String]) {
println("Hello from node 2")
}
}
object SampleMultiJvmNode3 {
def main(args: Array[String]) {
println("Hello from node 3")
}
}
When you call ``multi-jvm:run sample.Sample`` at the sbt prompt, three JVMs will be
spawned, one for each node. It will look like this:
.. code-block:: none
> multi-jvm:run sample.Sample
...
[info] Starting JVM-Node1 for sample.SampleMultiJvmNode1
[info] Starting JVM-Node2 for sample.SampleMultiJvmNode2
[info] Starting JVM-Node3 for sample.SampleMultiJvmNode3
[JVM-Node1] Hello from node 1
[JVM-Node2] Hello from node 2
[JVM-Node3] Hello from node 3
[success] Total time: ...
Naming
======
You can change what the ``MultiJvm`` identifier is. For example, to change it to
``ClusterTest`` use the ``multiJvmMarker`` setting::
multiJvmMarker in MultiJvm := "ClusterTest"
Your tests should now be named ``{TestName}ClusterTest{NodeName}``.
Configuration of the JVM instances
==================================
You can define specific JVM options for each of the spawned JVMs. You do that by creating
a file named after the node in the test with suffix ``.opts`` and put them in the same
directory as the test.
For example, to feed the JVM options ``-Dakka.remote.port=9991`` to ``SampleMultiJvmNode1``,
let's create three ``*.opts`` files and add the options to them.
``SampleMultiJvmNode1.opts``::
-Dakka.remote.port=9991
``SampleMultiJvmNode2.opts``::
-Dakka.remote.port=9992
``SampleMultiJvmNode3.opts``::
-Dakka.remote.port=9993
ScalaTest
=========
There is also support for creating ScalaTest tests rather than applications. To
do this use the same naming convention as above, but create ScalaTest suites
rather than objects with main methods. You need to have ScalaTest on the
classpath. Here is a similar example to the one above but using ScalaTest::
package sample
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
class SpecMultiJvmNode1 extends WordSpec with MustMatchers {
"A node" should {
"be able to say hello" in {
val message = "Hello from node 1"
message must be("Hello from node 1")
}
}
}
class SpecMultiJvmNode2 extends WordSpec with MustMatchers {
"A node" should {
"be able to say hello" in {
val message = "Hello from node 2"
message must be("Hello from node 2")
}
}
}
To run just these tests you would call ``multi-jvm:test-only sample.Spec`` at
the sbt prompt.
Multi Node Additions
====================
There have also been some additions made to the ``SbtMultiJvm`` plugin to accommodate the
:ref:`experimental <experimental>` module :ref:`multi node testing <multi-node-testing>`,
described in that section.

View file

@ -0,0 +1,35 @@
.. _team:
######
Team
######
=================== ========================== ====================================
Name Role Email
=================== ========================== ====================================
Jonas Bonér Founder, Despot, Committer jonas AT jonasboner DOT com
Viktor Klang Project Owner viktor DOT klang AT gmail DOT com
Roland Kuhn Committer
Patrik Nordwall Committer patrik DOT nordwall AT gmail DOT com
Derek Williams Committer derek AT nebvin DOT ca
Henrik Engström Committer
Peter Vlugter Committer
Martin Krasser Committer krasserm AT googlemail DOT com
Raymond Roestenburg Committer
Piotr Gabryanczyk Committer
Debasish Ghosh Alumni dghosh AT acm DOT org
Ross McDonald Alumni rossajmcd AT gmail DOT com
Eckhart Hertzler Alumni
Mikael Högqvist Alumni
Tim Perrett Alumni
Jeanfrancois Arcand Alumni jfarcand AT apache DOT org
Jan Van Besien Alumni
Michael Kober Alumni
Peter Veentjer Alumni
Irmo Manie Alumni
Heiko Seeberger Alumni
Hiram Chirino Alumni
Scott Clasen Alumni
=================== ========================== ====================================

View file

@ -0,0 +1,27 @@
.. _experimental:
####################
Experimental Modules
####################
The following modules of Akka are marked as experimental, which means
that they are in early access mode, which also means that they are not
covered by commercial support. The purpose of releasing them early, as
experimental, is to make them easily available and improve based on
feedback, or even discover that the module wasn't useful.
An experimental module doesn't have to obey the rule of staying binary
compatible between minor releases. Breaking API changes may be introduced
in minor releases without notice as we refine and simplify based on your
feedback. An experimental module may be dropped in major releases without
prior deprecation.
Another reason for marking a module as experimental is that it's too early
to tell if the module has a maintainer who can take responsibility for
the module over time.
.. toctree::
:maxdepth: 2
../cluster/index
../dev/multi-node-testing

Binary file not shown. (added image, 41 KiB)

Binary file not shown. (added image, 56 KiB)

View file

@ -0,0 +1,125 @@
.. _actor-systems:
Actor Systems
=============
Actors are objects which encapsulate state and behavior; they communicate
exclusively by exchanging messages which are placed into the recipient's
mailbox. In a sense, actors are the most stringent form of object-oriented
programming, but it serves better to view them as persons: while modeling a
solution with actors, envision a group of people and assign sub-tasks to them,
arrange their functions into an organizational structure and think about how to
escalate failure (all with the benefit of not actually dealing with people,
which means that we need not concern ourselves with their emotional state or
moral issues). The result can then serve as a mental scaffolding for building
the software implementation.
.. note::
An ActorSystem is a heavyweight structure that will allocate 1…N Threads,
so create one per logical application.
Hierarchical Structure
----------------------
Like in an economic organization, actors naturally form hierarchies. One actor,
which is to oversee a certain function in the program, might want to split up
its task into smaller, more manageable pieces. For this purpose it starts child
actors which it supervises. While the details of supervision are explained
:ref:`here <supervision>`, we shall concentrate on the underlying concepts in
this section. The only prerequisite is to know that each actor has exactly one
supervisor, which is the actor that created it.
The quintessential feature of actor systems is that tasks are split up and
delegated until they become small enough to be handled in one piece. In doing
so, not only is the task itself clearly structured, but the resulting actors
can be reasoned about in terms of which messages they should process, how they
should react normally and how failure should be handled. If one actor does not
have the means for dealing with a certain situation, it sends a corresponding
failure message to its supervisor, asking for help. The recursive structure
then allows failure to be handled at the right level.
Compare this to layered software design which easily devolves into defensive
programming with the aim of not leaking any failure out: if the problem is
communicated to the right person, a better solution can be found than if
trying to keep everything “under the carpet”.
Now, the difficulty in designing such a system is how to decide who should
supervise what. There is of course no single best solution, but there are a few
guidelines which might be helpful:
- If one actor manages the work another actor is doing, e.g. by passing on
sub-tasks, then the manager should supervise the child. The reason is that
the manager knows which kind of failures are expected and how to handle
them.
- If one actor carries very important data (i.e. its state shall not be lost
if avoidable), this actor should source out any possibly dangerous sub-tasks
to children it supervises and handle failures of these children as
appropriate. Depending on the nature of the requests, it may be best to
create a new child for each request, which simplifies state management for
collecting the replies. This is known as the “Error Kernel Pattern” from
Erlang.
- If one actor depends on another actor for carrying out its duty, it should
watch that other actor's liveness and act upon receiving a termination
notice. This is different from supervision, as the watching party has no
influence on the supervisor strategy, and it should be noted that a
functional dependency alone is not a criterion for deciding where to place a
certain child actor in the hierarchy.
There are of course always exceptions to these rules, but no matter whether you
follow the rules or break them, you should always have a reason.
Configuration Container
-----------------------
The actor system as a collaborating ensemble of actors is the natural unit for
managing shared facilities like scheduling services, configuration, logging,
etc. Several actor systems with different configuration may co-exist within the
same JVM without problems; there is no global shared state within Akka itself.
Couple this with the transparent communication between actor systems—within one
node or across a network connection—to see that actor systems themselves can be
used as building blocks in a functional hierarchy.
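As a quick sketch (hypothetical system names, using the standard ``ConfigFactory``
API), two differently configured systems can live side by side::

   import akka.actor.ActorSystem
   import com.typesafe.config.ConfigFactory

   val verbose = ActorSystem("Verbose", ConfigFactory
     .parseString("akka.loglevel = DEBUG").withFallback(ConfigFactory.load()))
   val quiet = ActorSystem("Quiet", ConfigFactory
     .parseString("akka.loglevel = ERROR").withFallback(ConfigFactory.load()))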
Actor Best Practices
--------------------
#. Actors should be like nice co-workers: do their job efficiently without
bothering everyone else needlessly and avoid hogging resources. Translated
to programming this means to process events and generate responses (or more
requests) in an event-driven manner. Actors should not block (i.e. passively
wait while occupying a Thread) on some external entity, which might be a
lock, a network socket, etc. The blocking operations should be done in some
special-cased thread which sends messages to the actors which shall act on
them (see the sketch after this list).
#. Do not pass mutable objects between actors. In order to ensure that, prefer
immutable messages. If the encapsulation of actors is broken by exposing
their mutable state to the outside, you are back in normal Java concurrency
land with all the drawbacks.
#. Actors are made to be containers for behavior and state, embracing this
means to not routinely send behavior within messages (which may be tempting
using Scala closures). One of the risks is to accidentally share mutable
state between actors, and this violation of the actor model unfortunately
breaks all the properties which make programming in actors such a nice
experience.
#. Top-level actors are the innermost part of your Error Kernel, so create them
sparingly and prefer truly hierarchical systems. This has benefits wrt.
fault-handling (both considering the granularity of configuration and the
performance) and it also reduces the number of blocking calls made, since
the creation of top-level actors involves synchronous messaging.
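A sketch of the first guideline, assuming the ``akka.pattern.pipe`` helper and
using a blocking file read to stand in for any blocking operation::

   import akka.actor.Actor
   import akka.pattern.pipe
   import scala.concurrent.Future

   class FileReader extends Actor {
     // a real application would configure a dedicated dispatcher for blocking work
     import context.dispatcher

     def receive = {
       case path: String ⇒
         // run the blocking call off the actor's thread and pipe the
         // result back to the sender as an ordinary message
         Future(scala.io.Source.fromFile(path).mkString) pipeTo sender
     }
   }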
What you should not concern yourself with
-----------------------------------------
An actor system manages the resources it is configured to use in order to run
the actors which it contains. There may be millions of actors within one such
system; after all, the mantra is to view them as abundant, and they weigh in at
an overhead of only roughly 300 bytes per instance. Naturally, the exact order
in which messages are processed in large systems is not controllable by the
application author, but this is also not intended. Take a step back and relax
while Akka does the heavy lifting under the hood.

View file

@ -0,0 +1,146 @@
.. _actors-general:
What is an Actor?
=================
The previous section about :ref:`actor-systems` explained how actors form
hierarchies and are the smallest unit when building an application. This
section looks at one such actor in isolation, explaining the concepts you
encounter while implementing it. For a more in-depth reference with all the
details, please refer to :ref:`actors-scala` and :ref:`untyped-actors-java`.
An actor is a container for `State`_, `Behavior`_, a `Mailbox`_, `Children`_
and a `Supervisor Strategy`_. All of this is encapsulated behind an `Actor
Reference`_. Finally, this happens `When an Actor Terminates`_.
Actor Reference
---------------
As detailed below, an actor object needs to be shielded from the outside in
order to benefit from the actor model. Therefore, actors are represented to the
outside using actor references, which are objects that can be passed around
freely and without restriction. This split into inner and outer object enables
transparency for all the desired operations: restarting an actor without
needing to update references elsewhere, placing the actual actor object on
remote hosts, sending messages to actors in completely different applications.
But the most important aspect is that it is not possible to look inside an
actor and get hold of its state from the outside, unless the actor unwisely
publishes this information itself.
State
-----
Actor objects will typically contain some variables which reflect possible
states the actor may be in. This can be an explicit state machine (e.g. using
the :ref:`fsm-scala` module), or it could be a counter, set of listeners,
pending requests, etc. These data are what make an actor valuable, and they
must be protected from corruption by other actors. The good news is that Akka
actors conceptually each have their own light-weight thread, which is
completely shielded from the rest of the system. This means that instead of
having to synchronize access using locks you can just write your actor code
without worrying about concurrency at all.
Behind the scenes Akka will run sets of actors on sets of real threads, where
typically many actors share one thread, and subsequent invocations of one actor
may end up being processed on different threads. Akka ensures that this
implementation detail does not affect the single-threadedness of handling the
actor's state.
Because the internal state is vital to an actor's operations, having
inconsistent state is fatal. Thus, when the actor fails and is restarted by its
supervisor, the state will be created from scratch, like upon first creating
the actor. This enables the system to heal itself.
Behavior
--------
Every time a message is processed, it is matched against the current behavior
of the actor. Behavior means a function which defines the actions to be taken
in reaction to the message at that point in time, say forward a request if the
client is authorized, deny it otherwise. This behavior may change over time,
e.g. because different clients obtain authorization over time, or because the
actor may go into an “out-of-service” mode and later come back. These changes
are achieved by either encoding them in state variables which are read from the
behavior logic, or the function itself may be swapped out at runtime, see the
``become`` and ``unbecome`` operations. However, the initial behavior defined
during construction of the actor object is special in the sense that a restart
of the actor will reset its behavior to this initial one.
.. note::
The initial behavior of an Actor is extracted before the constructor is run,
so if you want to base your initial behavior on member state, you should
use ``become`` in the constructor.
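A minimal sketch of the note above (hypothetical behaviors)::

   import akka.actor.Actor

   class Greeter(cheerful: Boolean) extends Actor {
     def happy: Receive = { case _ ⇒ sender ! "hello!" }
     def grumpy: Receive = { case _ ⇒ sender ! "hmph" }

     // base the initial behavior on constructor state by using become
     context.become(if (cheerful) happy else grumpy)

     def receive = { case _ ⇒ } // replaced before the first message is processed
   }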
Mailbox
-------
An actor's purpose is the processing of messages, and these messages were sent
to the actor from other actors (or from outside the actor system). The piece
which connects sender and receiver is the actor's mailbox: each actor has
exactly one mailbox to which all senders enqueue their messages. Enqueuing
happens in the time-order of send operations, which means that messages sent
from different actors may not have a defined order at runtime due to the
apparent randomness of distributing actors across threads. Sending multiple
messages to the same target from the same actor, on the other hand, will
enqueue them in the same order.
There are different mailbox implementations to choose from, the default being a
FIFO: the order of the messages processed by the actor matches the order in
which they were enqueued. This is usually a good default, but applications may
need to prioritize some messages over others. In this case, a priority mailbox
will not always enqueue at the end but at a position given by the message
priority, which might even be at the front. While using such a queue, the order
of messages processed will naturally be defined by the queue's algorithm and in
general not be FIFO.
An important feature in which Akka differs from some other actor model
implementations is that the current behavior must always handle the next
dequeued message, there is no scanning the mailbox for the next matching one.
Failure to handle a message will typically be treated as a failure, unless this
behavior is overridden.
Children
--------
Each actor is potentially a supervisor: if it creates children for delegating
sub-tasks, it will automatically supervise them. The list of children is
maintained within the actor's context, and the actor has access to it.
Modifications to the list are done by creating (``context.actorOf(...)``) or
stopping (``context.stop(child)``) children and these actions are reflected
immediately. The actual creation and termination actions happen behind the
scenes in an asynchronous way, so they do not “block” their supervisor.
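A small sketch of creating and stopping children (hypothetical actors)::

   import akka.actor.{ Actor, Props }

   class Worker extends Actor {
     def receive = { case _ ⇒ /* perform the sub-task */ }
   }

   class Manager extends Actor {
     // the new child is reflected in this actor's context immediately
     val worker = context.actorOf(Props[Worker], name = "worker")

     def receive = {
       case "shutdown" ⇒ context.stop(worker) // actual termination happens asynchronously
       case msg        ⇒ worker forward msg
     }
   }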
Supervisor Strategy
-------------------
The final piece of an actor is its strategy for handling faults of its
children. Fault handling is then done transparently by Akka, applying one
of the strategies described in :ref:`supervision` for each incoming failure.
As this strategy is fundamental to how an actor system is structured, it
cannot be changed once an actor has been created.
Considering that there is only one such strategy for each actor, this means
that if different strategies apply to the various children of an actor, the
children should be grouped beneath intermediate supervisors with matching
strategies, preferring once more the structuring of actor systems according to
the splitting of tasks into sub-tasks.
When an Actor Terminates
------------------------
Once an actor terminates, i.e. fails in a way which is not handled by a
restart, stops itself or is stopped by its supervisor, it will free up its
resources, draining all remaining messages from its mailbox into the system's
“dead letter mailbox” which will forward them to the EventStream as DeadLetters.
The mailbox is then replaced within the actor reference with a system mailbox,
redirecting all new messages to the EventStream as DeadLetters. This
is done on a best effort basis, though, so do not rely on it in order to
construct “guaranteed delivery”.
The reason for not just silently dumping the messages was inspired by our
tests: we register the TestEventListener on the event bus to which the dead
letters are forwarded, and that will log a warning for every dead letter
received—this has been very helpful for deciphering test failures more quickly.
It is conceivable that this feature may also be of use for other purposes.

View file

@ -0,0 +1,351 @@
.. _addressing:
Actor References, Paths and Addresses
=====================================
This chapter describes how actors are identified and located within a possibly
distributed actor system. It ties into the central idea that
:ref:`actor-systems` form intrinsic supervision hierarchies as well as that
communication between actors is transparent with respect to their placement
across multiple network nodes.
.. image:: ActorPath.png
The above image displays the relationship between the most important entities
within an actor system, please read on for the details.
What is an Actor Reference?
---------------------------
An actor reference is a subtype of :class:`ActorRef`, whose foremost purpose is
to support sending messages to the actor it represents. Each actor has access
to its canonical (local) reference through the :meth:`self` field; this
reference is also included as sender reference by default for all messages sent
to other actors. Conversely, during message processing the actor has access to
a reference representing the sender of the current message through the
:meth:`sender` field.
There are several different types of actor references that are supported
depending on the configuration of the actor system:
- Purely local actor references are used by actor systems which are not
configured to support networking functions. These actor references cannot
ever be sent across a network connection while retaining their functionality.
- Local actor references when remoting is enabled are used by actor systems
which support networking functions for those references which represent
actors within the same JVM. In order to be recognizable also when sent to
other network nodes, these references include protocol and remote addressing
information.
- There is a subtype of local actor references which is used for routers (i.e.
actors mixing in the :class:`Router` trait). Its logical structure is the
same as for the aforementioned local references, but sending a message to
them dispatches to one of their children directly instead.
- Remote actor references represent actors which are reachable using remote
communication, i.e. sending messages to them will serialize the messages
transparently and send them to the other JVM.
- There are several special types of actor references which behave like local
actor references for all practical purposes:
- :class:`PromiseActorRef` is the special representation of a :meth:`Promise` for
the purpose of being completed by the response from an actor; it is created
by the :meth:`ActorRef.ask` invocation.
- :class:`DeadLetterActorRef` is the default implementation of the dead
letters service, to which all messages whose recipients are shut down or
non-existent are re-routed.
- :class:`EmptyLocalActorRef` is what is returned when looking up a
non-existing local actor path: it is equivalent to a
:class:`DeadLetterActorRef`, but it retains its path so that it can be sent
over the network and compared to other existing actor refs for that path,
some of which might have been obtained before the actor stopped existing.
- And then there are some one-off internal implementations which you should
never really see:
- There is an actor reference which does not represent an actor but acts only
as a pseudo-supervisor for the root guardian, we call it “the one who walks
the bubbles of space-time”.
- The first logging service started before actually firing up actor creation
facilities is a fake actor reference which accepts log events and prints
them directly to standard output; it is :class:`Logging.StandardOutLogger`.
- **(Future Extension)** Cluster actor references represent clustered actor
services which may be replicated, migrated or load-balanced across multiple
cluster nodes. As such they are virtual names which the cluster service
translates into local or remote actor references as appropriate.
What is an Actor Path?
----------------------
Since actors are created in a strictly hierarchical fashion, there exists a
unique sequence of actor names given by recursively following the supervision
links between child and parent down towards the root of the actor system. This
sequence can be seen as enclosing folders in a file system, hence we adopted
the name “path” to refer to it. As in some real file-systems there also are
“symbolic links”, i.e. one actor may be reachable using more than one path,
where all but one involve some translation which decouples part of the path
from the actor's actual supervision ancestor line; these specialities are
described in the sub-sections to follow.
An actor path consists of an anchor, which identifies the actor system,
followed by the concatenation of the path elements, from root guardian to the
designated actor; the path elements are the names of the traversed actors and
are separated by slashes.
Actor Path Anchors
^^^^^^^^^^^^^^^^^^
Each actor path has an address component, describing the protocol and location
by which the corresponding actor is reachable, followed by the names of the
actors in the hierarchy from the root up. Examples are::
"akka://my-system/user/service-a/worker1" // purely local
"akka://my-system@serv.example.com:5678/user/service-b" // local or remote
"cluster://my-cluster/service-c" // clustered (Future Extension)
Here, ``akka`` is the default remote protocol for the 2.0 release, and others
are pluggable. The interpretation of the host & port part (i.e.
``serv.example.com:5678`` in the example) depends on the transport mechanism
used, but it must abide by the URI structural rules.
Logical Actor Paths
^^^^^^^^^^^^^^^^^^^
The unique path obtained by following the parental supervision links towards
the root guardian is called the logical actor path. This path matches exactly
the creation ancestry of an actor, so it is completely deterministic as soon as
the actor system's remoting configuration (and with it the address component of
the path) is set.
Physical Actor Paths
^^^^^^^^^^^^^^^^^^^^
While the logical actor path describes the functional location within one actor
system, configuration-based remote deployment means that an actor may be
created on a different network host than its parent, i.e. within a different
actor system. In this case, following the actor path from the root guardian up
entails traversing the network, which is a costly operation. Therefore, each
actor also has a physical path, starting at the root guardian of the actor
system where the actual actor object resides. Using this path as sender
reference when querying other actors will let them reply directly to this
actor, minimizing delays incurred by routing.
One important aspect is that a physical actor path never spans multiple actor
systems or JVMs. This means that the logical path (supervision hierarchy) and
the physical path (actor deployment) of an actor may diverge if one of its
ancestors is remotely supervised.
Virtual Actor Paths **(Future Extension)**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In order to be able to replicate and migrate actors across a cluster of Akka
nodes, another level of indirection has to be introduced. The cluster component
therefore provides a translation from virtual paths to physical paths which may
change in reaction to node failures, cluster rebalancing, etc.
*This area is still under active development; expect updates in this section
for the 2.1 release.*
How are Actor References obtained?
----------------------------------
There are two general categories to how actor references may be obtained: by
creating actors or by looking them up, where the latter functionality comes in
the two flavours of creating actor references from concrete actor paths and
querying the logical actor hierarchy.
*While local and remote actor references and their paths work in the same way
concerning the facilities mentioned below, the exact semantics of clustered
actor references and their paths—while certainly as similar as possible—may
differ in certain aspects, owing to the virtual nature of those paths. Expect
updates for the 2.1 release.*
Creating Actors
^^^^^^^^^^^^^^^
An actor system is typically started by creating actors beneath the guardian
actor using the :meth:`ActorSystem.actorOf` method and then using
:meth:`ActorContext.actorOf` from within the created actors to spawn the actor
tree. These methods return a reference to the newly created actor. Each actor
has direct access (through its ``ActorContext``) to references for its parent,
itself and its children. These references may be sent within messages to other actors,
enabling those to reply directly.
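For example, a hypothetical sketch of both creation styles::

   import akka.actor.{ Actor, ActorSystem, Props }

   class Worker extends Actor {
     def receive = { case _ ⇒ }
   }

   class Service extends Actor {
     // spawn a child through this actor's own context
     val worker = context.actorOf(Props[Worker], name = "worker")
     def receive = { case msg ⇒ worker forward msg }
   }

   object Main extends App {
     val system = ActorSystem("example")
     // create a top-level actor beneath the guardian
     val service = system.actorOf(Props[Service], name = "service")
   }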
Looking up Actors by Concrete Path
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In addition, actor references may be looked up using the
:meth:`ActorSystem.actorFor` method, which returns an (unverified) local,
remote or clustered actor reference. Sending messages to such a reference or
attempting to observe its liveness will traverse the actor hierarchy of the
actor system from top to bottom by passing messages from parent to child until
either the target is reached or failure is certain, i.e. a name in the path
does not exist (in practice this process will be optimized using caches, but it
still has added cost compared to using the physical actor path, which can for
example be obtained from the sender reference included in replies from that
actor). The messages passed are handled automatically by Akka, so this process
is not visible to client code.
Absolute vs. Relative Paths
```````````````````````````
In addition to :meth:`ActorSystem.actorFor` there is also
:meth:`ActorContext.actorFor`, which is available inside any actor as
``context.actorFor``. This yields an actor reference much like its twin on
:class:`ActorSystem`, but instead of looking up the path starting from the root
of the actor tree it starts out on the current actor. Path elements consisting
of two dots (``".."``) may be used to access the parent actor. You can for
example send a message to a specific sibling::
context.actorFor("../brother") ! msg
Absolute paths may of course also be looked up on `context` in the usual way, i.e.
.. code-block:: scala
context.actorFor("/user/serviceA") ! msg
will work as expected.
Querying the Logical Actor Hierarchy
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Since the actor system forms a file-system like hierarchy, matching on paths is
possible in the same way as supported by Unix shells: you may replace (parts
of) path element names with wildcards (`«*»` and `«?»`) to formulate a
selection which may match zero or more actual actors. Because the result is not
a single actor reference, it has a different type :class:`ActorSelection` and
does not support the full set of operations an :class:`ActorRef` does.
Selections may be formulated using the :meth:`ActorSystem.actorSelection` and
:meth:`ActorContext.actorSelection` methods and do support sending messages::
context.actorSelection("../*") ! msg
will send `msg` to all siblings including the current actor. As for references
obtained using `actorFor`, a traversal of the supervision hierarchy is done in
order to perform the message send. As the exact set of actors which match a
selection may change even while a message is making its way to the recipients,
it is not possible to watch a selection for liveness changes. In order to do
that, resolve the uncertainty by sending a request and gathering all answers,
extracting the sender references, and then watch all discovered concrete
actors. This scheme of resolving a selection may be improved upon in a future
release.
.. _actorOf-vs-actorFor:
Summary: ``actorOf`` vs. ``actorFor``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
What the above sections described in some detail can be summarized and
memorized easily as follows:
- ``actorOf`` only ever creates a new actor, and it creates it as a direct
child of the context on which this method is invoked (which may be any
actor or actor system).
- ``actorFor`` only ever looks up an existing actor, i.e. does not create
one.
Reusing Actor Paths
-------------------
When an actor is terminated, its path will point to the dead letter mailbox,
DeathWatch will publish its final transition and in general it is not expected
to come back to life again (since the actor life cycle does not allow this).
While it is possible to create an actor at a later time with an identical
path—simply due to it being impossible to enforce the opposite without keeping
the set of all actors ever created available—this is not good practice: remote
actor references which “died” suddenly start to work again, but without any
guarantee of ordering between this transition and any other event, hence the
new inhabitant of the path may receive messages which were destined for the
previous tenant.
It may be the right thing to do in very specific circumstances, but make sure
to confine the handling of this precisely to the actor's supervisor, because
that is the only actor which can reliably detect proper deregistration of the
name, before which creation of the new child will fail.
It may also be required during testing, when the test subject depends on being
instantiated at a specific path. In that case it is best to mock its supervisor
so that it will forward the Terminated message to the appropriate point in the
test procedure, enabling the latter to await proper deregistration of the name.
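A sketch of such a mock supervisor (hypothetical names; ``Terminated`` is the
DeathWatch notification)::

   import akka.actor.{ Actor, ActorRef, Props, Terminated }

   class MockSupervisor(probe: ActorRef, subjectProps: Props) extends Actor {
     val subject = context.watch(context.actorOf(subjectProps, name = "fixed-name"))

     def receive = {
       // the test procedure can await this message before re-creating the name
       case t: Terminated ⇒ probe ! t
       case msg           ⇒ subject forward msg
     }
   }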
The Interplay with Remote Deployment
------------------------------------
When an actor creates a child, the actor system's deployer will decide whether
the new actor resides in the same JVM or on another node. In the second case,
creation of the actor will be triggered via a network connection to happen in a
different JVM and consequently within a different actor system. The remote
system will place the new actor below a special path reserved for this purpose
and the supervisor of the new actor will be a remote actor reference
(representing that actor which triggered its creation). In this case,
:meth:`context.parent` (the supervisor reference) and
:meth:`context.path.parent` (the parent node in the actors path) do not
represent the same actor. However, looking up the child's name within the
supervisor will find it on the remote node, preserving logical structure e.g.
when sending to an unresolved actor reference.
.. image:: RemoteDeployment.png
The Interplay with Clustering **(Future Extension)**
----------------------------------------------------
*This section is subject to change!*
When creating a scaled-out actor subtree, a cluster name is created for a
routed actor reference, where sending to this reference will send to one (or
more) of the actual actors created in the cluster. In order for those actors to
be able to query other actors while processing their messages, their sender
reference must be unique for each of the replicas, which means that physical
paths will be used as ``self`` references for these instances. In the case
of replication for achieving fault-tolerance the opposite is required: the
``self`` reference will be a virtual (cluster) path so that in case of
migration or fail-over communication is resumed with the fresh instance.
What is the Address part used for?
----------------------------------
When sending an actor reference across the network, it is represented by its
path. Hence, the path must fully encode all information necessary to send
messages to the underlying actor. This is achieved by encoding protocol, host
and port in the address part of the path string. When an actor system receives
an actor path from a remote node, it checks whether that path's address matches
the address of this actor system, in which case it will be resolved to the
actor's local reference. Otherwise, it will be represented by a remote actor
reference.
.. _toplevel-paths:
Top-Level Scopes for Actor Paths
--------------------------------
At the root of the path hierarchy resides the root guardian above which all
other actors are found; its name is ``"/"``. The next level consists of the
following:
- ``"/user"`` is the guardian actor for all user-created top-level actors;
actors created using :meth:`ActorSystem.actorOf` are found below this one.
- ``"/system"`` is the guardian actor for all system-created top-level actors,
e.g. logging listeners or actors automatically deployed by configuration at
the start of the actor system.
- ``"/deadLetters"`` is the dead letter actor, which is where all messages sent to
stopped or non-existing actors are re-routed (on a best-effort basis: messages
may be lost even within the local JVM).
- ``"/temp"`` is the guardian for all short-lived system-created actors, e.g.
those which are used in the implementation of :meth:`ActorRef.ask`.
- ``"/remote"`` is an artificial path below which all actors reside whose
supervisors are remote actor references.
The need to structure the name space for actors like this arises from a central
and very simple design goal: everything in the hierarchy is an actor, and all
actors function in the same way. Hence you can not only look up the actors you
created; you can also look up the system guardian and send it a message (which
it will dutifully discard in this case). This powerful principle means that
there are no quirks to remember, it makes the whole system more uniform and
consistent.
If you want to read more about the top-level structure of an actor system, have
a look at :ref:`toplevel-supervisors`.

View file

@ -0,0 +1,33 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.config;
import akka.actor.ActorSystem;
import com.typesafe.config.*;
public class ConfigDoc {
public ActorSystem createConfiguredSystem() {
//#java-custom-config
// make a Config with just your special setting
Config myConfig =
ConfigFactory.parseString("something=somethingElse");
// load the normal config stack (system props,
// then application.conf, then reference.conf)
Config regularConfig =
ConfigFactory.load();
// override regular stack with myConfig
Config combined =
myConfig.withFallback(regularConfig);
// put the result in between the overrides
// (system props) and defaults again
Config complete =
ConfigFactory.load(combined);
// create ActorSystem
ActorSystem system =
ActorSystem.create("myname", complete);
//#java-custom-config
return system;
}
}

View file

@ -0,0 +1,33 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.config
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
//#imports
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
//#imports
class ConfigDocSpec extends WordSpec with MustMatchers {
"programmatically configure ActorSystem" in {
//#custom-config
val customConf = ConfigFactory.parseString("""
akka.actor.deployment {
/my-service {
router = round-robin
nr-of-instances = 3
}
}
""")
// ConfigFactory.load sandwiches customConf between default reference
// config and default overrides, and then resolves it.
val system = ActorSystem("MySystem", ConfigFactory.load(customConf))
//#custom-config
system.shutdown()
}
}

View file

@ -0,0 +1,377 @@
.. _configuration:
Configuration
=============
Akka uses the `Typesafe Config Library
<https://github.com/typesafehub/config>`_, which might also be a good choice
for the configuration of your own application or library built with or without
Akka. This library is implemented in Java with no external dependencies; you
should have a look at its documentation (in particular about `ConfigFactory
<http://typesafehub.github.com/config/latest/api/com/typesafe/config/ConfigFactory.html>`_),
which is only summarized in the following.
.. warning::

   If you use Akka from the Scala REPL from the 2.9.x series,
   and you do not provide your own ClassLoader to the ActorSystem,
   start the REPL with "-Yrepl-sync" to work around a deficiency in
   the REPL's provided Context ClassLoader.
Where configuration is read from
--------------------------------
All configuration for Akka is held within instances of :class:`ActorSystem`, or
put differently, as viewed from the outside, :class:`ActorSystem` is the only
consumer of configuration information. While constructing an actor system, you
can either pass in a :class:`Config` object or not, where the second case is
equivalent to passing ``ConfigFactory.load()`` (with the right class loader).
This means roughly that the default is to parse all ``application.conf``,
``application.json`` and ``application.properties`` found at the root of the
class path—please refer to the aforementioned documentation for details. The
actor system then merges in all ``reference.conf`` resources found at the root
of the class path to form the fallback configuration, i.e. it internally uses
.. code-block:: scala

   appConfig.withFallback(ConfigFactory.defaultReference(classLoader))
The philosophy is that code never contains default values, but instead relies
upon their presence in the ``reference.conf`` supplied with the library in
question.
Highest precedence is given to overrides given as system properties, see `the
HOCON specification
<https://github.com/typesafehub/config/blob/master/HOCON.md>`_ (near the
bottom). Also noteworthy is that the application configuration—which defaults
to ``application``—may be overridden using the ``config.resource`` property
(there are more, please refer to the `Config docs
<https://github.com/typesafehub/config/blob/master/README.md>`_).
.. note::

   If you are writing an Akka application, keep your configuration in
   ``application.conf`` at the root of the class path. If you are writing an
   Akka-based library, keep its configuration in ``reference.conf`` at the root
   of the JAR file.
When using JarJar, OneJar, Assembly or any jar-bundler
------------------------------------------------------
.. warning::

   Akka's configuration approach relies heavily on the notion of every
   module/jar having its own ``reference.conf`` file; all of these will be
   discovered by the configuration and loaded. Unfortunately this also means
   that if you put/merge multiple jars into the same jar, you need to merge all
   the ``reference.conf`` files as well: otherwise all defaults will be lost
   and Akka will not function.
Custom application.conf
-----------------------
A custom ``application.conf`` might look like this::
   # In this file you can override any option defined in the reference files.
   # Copy in parts of the reference files and modify as you please.

   akka {

     # Event handlers to register at boot time (Logging$DefaultLogger logs to STDOUT)
     event-handlers = ["akka.event.slf4j.Slf4jEventHandler"]

     # Log level used by the configured loggers (see "event-handlers") as soon
     # as they have been started; before that, see "stdout-loglevel"
     # Options: ERROR, WARNING, INFO, DEBUG
     loglevel = DEBUG

     # Log level for the very basic logger activated during AkkaApplication startup
     # Options: ERROR, WARNING, INFO, DEBUG
     stdout-loglevel = DEBUG

     actor {
       default-dispatcher {
         # Throughput for default Dispatcher, set to 1 for as fair as possible
         throughput = 10
       }
     }

     remote {
       server {
         # The port clients should connect to. Default is 2552 (AKKA)
         port = 2562
       }
     }
   }
Including files
---------------
Sometimes it can be useful to include another configuration file, for example if you have one ``application.conf`` with all
environment independent settings and then override some settings for specific environments.
Specifying the system property ``-Dconfig.resource=/dev.conf`` will load the ``dev.conf`` file, which includes the ``application.conf``.
dev.conf:
::

   include "application"

   akka {
     loglevel = "DEBUG"
   }
More advanced include and substitution mechanisms are explained in the `HOCON <https://github.com/typesafehub/config/blob/master/HOCON.md>`_
specification.
.. _-Dakka.log-config-on-start:
Logging of Configuration
------------------------
If the system or config property ``akka.log-config-on-start`` is set to ``on``, then the
complete configuration is logged at INFO level when the actor system is started. This is useful
when you are uncertain of what configuration is used.
If in doubt, you can also easily and nicely inspect configuration objects
before or after using them to construct an actor system:
.. parsed-literal::

   Welcome to Scala version @scalaVersion@ (Java HotSpot(TM) 64-Bit Server VM, Java 1.6.0_27).
   Type in expressions to have them evaluated.
   Type :help for more information.

   scala> import com.typesafe.config._
   import com.typesafe.config._

   scala> ConfigFactory.parseString("a.b=12")
   res0: com.typesafe.config.Config = Config(SimpleConfigObject({"a" : {"b" : 12}}))

   scala> res0.root.render
   res1: java.lang.String =
   {
       # String: 1
       "a" : {
           # String: 1
           "b" : 12
       }
   }
The comments preceding every item give detailed information about the origin of
the setting (file & line number) plus possible comments which were present,
e.g. in the reference configuration. The settings as merged with the reference
and parsed by the actor system can be displayed like this:
.. code-block:: java

   final ActorSystem system = ActorSystem.create();
   System.out.println(system.settings());
   // this is a shortcut for system.settings().config().root().render()
A Word About ClassLoaders
-------------------------
In several places of the configuration file it is possible to specify the
fully-qualified class name of something to be instantiated by Akka. This is
done using Java reflection, which in turn uses a :class:`ClassLoader`. Getting
the right one in challenging environments like application containers or OSGi
bundles is not always trivial; the current approach of Akka is that each
:class:`ActorSystem` implementation stores the current thread's context class
loader (if available, otherwise just its own loader as in
``this.getClass.getClassLoader``) and uses that for all reflective accesses.
This implies that putting Akka on the boot class path will yield
:class:`NullPointerException` from strange places: this is simply not
supported.
Application specific settings
-----------------------------
The configuration can also be used for application specific settings.
A good practice is to place those settings in an Extension, as described in:
* Scala API: :ref:`extending-akka-scala.settings`
* Java API: :ref:`extending-akka-java.settings`
Configuring multiple ActorSystem
--------------------------------
If you have more than one ``ActorSystem`` (or you're writing a
library and have an ``ActorSystem`` that may be separate from the
application's) you may want to separate the configuration for each
system.
Given that ``ConfigFactory.load()`` merges all resources with matching name
from the whole class path, it is easiest to utilize that functionality and
differentiate actor systems within the hierarchy of the configuration::
   myapp1 {
     akka.loglevel = WARNING
     my.own.setting = 43
   }
   myapp2 {
     akka.loglevel = ERROR
     app2.setting = "appname"
   }
   my.own.setting = 42
   my.other.setting = "hello"
.. code-block:: scala

   val config = ConfigFactory.load()
   val app1 = ActorSystem("MyApp1", config.getConfig("myapp1").withFallback(config))
   val app2 = ActorSystem("MyApp2", config.getConfig("myapp2").withOnlyPath("akka").withFallback(config))
These two samples demonstrate different variations of the “lift-a-subtree”
trick: in the first case, the configuration accessible from within the actor
system is this
.. code-block:: ruby

   akka.loglevel = WARNING
   my.own.setting = 43
   my.other.setting = "hello"
   // plus myapp1 and myapp2 subtrees
while in the second one, only the “akka” subtree is lifted, with the following
result::
   akka.loglevel = ERROR
   my.own.setting = 42
   my.other.setting = "hello"
   // plus myapp1 and myapp2 subtrees
.. note::

   The configuration library is really powerful; explaining all of its features
   exceeds the scope of this document. In particular it does not cover how to
   include other configuration files within other files (see a small example at
   `Including files`_) or how to copy parts of the configuration tree by way of
   path substitutions.
You may also specify and parse the configuration programmatically in other ways when instantiating
the ``ActorSystem``.
.. includecode:: code/docs/config/ConfigDocSpec.scala
   :include: imports,custom-config
Reading configuration from a custom location
--------------------------------------------
You can replace or supplement ``application.conf`` either in code
or using system properties.
If you're using ``ConfigFactory.load()`` (which Akka does by
default) you can replace ``application.conf`` by defining
``-Dconfig.resource=whatever``, ``-Dconfig.file=whatever``, or
``-Dconfig.url=whatever``.
From inside your replacement file specified with
``-Dconfig.resource`` and friends, you can ``include
"application"`` if you still want to use
``application.{conf,json,properties}`` as well. Settings
specified before ``include "application"`` would be overridden by
the included file, while those after would override the included
file.
In code, there are many customization options.
There are several overloads of ``ConfigFactory.load()``; these
allow you to specify something to be sandwiched between system
properties (which override) and the defaults (from
``reference.conf``), replacing the usual
``application.{conf,json,properties}`` and replacing
``-Dconfig.file`` and friends.
The simplest variant of ``ConfigFactory.load()`` takes a resource
basename (instead of ``application``); ``myname.conf``,
``myname.json``, and ``myname.properties`` would then be used
instead of ``application.{conf,json,properties}``.
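
For example, a minimal sketch (the resource basename ``myname`` is made up for
illustration and is assumed to exist at the root of the class path):

.. code-block:: scala

   import akka.actor.ActorSystem
   import com.typesafe.config.ConfigFactory

   // uses myname.conf, myname.json and myname.properties
   // instead of application.{conf,json,properties}
   val system = ActorSystem("MySystem", ConfigFactory.load("myname"))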
The most flexible variant takes a ``Config`` object, which
you can load using any method in ``ConfigFactory``. For example
you could put a config string in code using
``ConfigFactory.parseString()`` or you could make a map and
``ConfigFactory.parseMap()``, or you could load a file.
You can also combine your custom config with the usual config,
that might look like:
.. includecode:: code/docs/config/ConfigDoc.java
   :include: java-custom-config
When working with ``Config`` objects, keep in mind that there are
three "layers" in the cake:
- ``ConfigFactory.defaultOverrides()`` (system properties)
- the app's settings
- ``ConfigFactory.defaultReference()`` (reference.conf)
The normal goal is to customize the middle layer while leaving the
other two alone.
- ``ConfigFactory.load()`` loads the whole stack
- the overloads of ``ConfigFactory.load()`` let you specify a
different middle layer
- the ``ConfigFactory.parse()`` variations load single files or resources
To stack two layers, use ``override.withFallback(fallback)``; try
to keep system props (``defaultOverrides()``) on top and
``reference.conf`` (``defaultReference()``) on the bottom.
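
As a rough sketch, stacking the three layers by hand could look like the
following (the setting name is made up; ``ConfigFactory.load(appConfig)`` does
the same thing for you, including the final resolution step):

.. code-block:: scala

   import com.typesafe.config.ConfigFactory

   val appConfig = ConfigFactory.parseString("my.own.setting = 42")
   val complete =
     ConfigFactory.defaultOverrides()                  // system properties on top
       .withFallback(appConfig)                        // the app's middle layer
       .withFallback(ConfigFactory.defaultReference()) // reference.conf at the bottom
       .resolve()                                      // resolve substitutions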
Do keep in mind, you can often just add another ``include``
statement in ``application.conf`` rather than writing code.
Includes at the top of ``application.conf`` will be overridden by
the rest of ``application.conf``, while those at the bottom will
override the earlier stuff.
Listing of the Reference Configuration
--------------------------------------
Each Akka module has a reference configuration file with the default values.
akka-actor
~~~~~~~~~~

.. literalinclude:: ../../../akka-actor/src/main/resources/reference.conf
   :language: none

akka-remote
~~~~~~~~~~~

.. literalinclude:: ../../../akka-remote/src/main/resources/reference.conf
   :language: none

akka-testkit
~~~~~~~~~~~~

.. literalinclude:: ../../../akka-testkit/src/main/resources/reference.conf
   :language: none

akka-transactor
~~~~~~~~~~~~~~~

.. literalinclude:: ../../../akka-transactor/src/main/resources/reference.conf
   :language: none

akka-agent
~~~~~~~~~~

.. literalinclude:: ../../../akka-agent/src/main/resources/reference.conf
   :language: none

akka-zeromq
~~~~~~~~~~~

.. literalinclude:: ../../../akka-zeromq/src/main/resources/reference.conf
   :language: none

akka-file-mailbox
~~~~~~~~~~~~~~~~~

.. literalinclude:: ../../../akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf
   :language: none

View file

@ -0,0 +1,14 @@
General
=======
.. toctree::
   :maxdepth: 2

   actor-systems
   actors
   supervision
   addressing
   remoting
   jmm
   message-send-semantics
   configuration

View file

@ -0,0 +1,123 @@
.. _jmm:
Akka and the Java Memory Model
================================
A major benefit of using the Typesafe Stack, including Scala and Akka, is that it simplifies the process of writing
concurrent software. This article discusses how the Typesafe Stack, and Akka in particular, approaches shared memory
in concurrent applications.
The Java Memory Model
---------------------
Prior to Java 5, the Java Memory Model (JMM) was ill defined. It was possible to get all kinds of strange results when
shared memory was accessed by multiple threads, such as:
* a thread not seeing values written by other threads: a visibility problem
* a thread observing 'impossible' behavior of other threads, caused by instructions not being executed in the order
  expected: an instruction reordering problem.
With the implementation of JSR 133 in Java 5, a lot of these issues have been resolved. The JMM is a set of rules based
on the "happens-before" relation, which constrain when one memory access must happen before another, and conversely,
when they are allowed to happen out of order. Two examples of these rules are:
* **The monitor lock rule:** a release of a lock happens before every subsequent acquire of the same lock.
* **The volatile variable rule:** a write of a volatile variable happens before every subsequent read of the same volatile variable
Although the JMM can seem complicated, the specification tries to find a balance between ease of use and the ability to
write performant and scalable concurrent data structures.
Actors and the Java Memory Model
--------------------------------
With the Actors implementation in Akka, there are two ways multiple threads can execute actions on shared memory:
* if a message is sent to an actor (e.g. by another actor). In most cases messages are immutable, but if that message
  is not a properly constructed immutable object, without a "happens before" rule, it would be possible for the receiver
  to see partially initialized data structures and possibly even values out of thin air (longs/doubles).
* if an actor makes changes to its internal state while processing a message, and accesses that state while processing
  another message moments later. It is important to realize that with the actor model you don't get any guarantee that
  the same thread will be executing the same actor for different messages.
To prevent visibility and reordering problems on actors, Akka guarantees the following two "happens before" rules:
* **The actor send rule:** the send of the message to an actor happens before the receive of that message by the same actor.
* **The actor subsequent processing rule:** processing of one message happens before processing of the next message by the same actor.
.. note::

   In layman's terms this means that changes to internal fields of the actor are visible when the next message
   is processed by that actor. So fields in your actor need not be volatile or equivalent.
Both rules only apply for the same actor instance and are not valid if different actors are used.
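
As a small illustration (a sketch, not part of the Akka sources): thanks to
these two rules, a plain ``var`` that is only touched from within ``receive`` is
safe without any synchronization.

.. code-block:: scala

   import akka.actor.Actor

   class Counter extends Actor {
     var count = 0 // no @volatile or lock needed, only accessed from receive
     def receive = {
       case "increment" => count += 1
       case "get"       => sender ! count
     }
   }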
Futures and the Java Memory Model
---------------------------------
The completion of a Future "happens before" the invocation of any callbacks
registered to it.

We recommend not closing over non-final fields (final in Java and val in
Scala); if you *do* choose to close over non-final fields, they must be marked
*volatile* in order for the current value of the field to be visible to the
callback.

If you close over a reference, you must also ensure that the instance that is referred to is thread safe.
We highly recommend staying away from objects that use locking, since it can introduce performance problems and in the worst case, deadlocks.
Such are the perils of synchronized.
STM and the Java Memory Model
-----------------------------
Akka's Software Transactional Memory (STM) also provides a "happens before" rule:
* **The transactional reference rule:** a successful write during commit, on a transactional reference, happens before every
  subsequent read of the same transactional reference.
This rule looks a lot like the 'volatile variable' rule from the JMM. Currently the Akka STM only supports deferred writes,
so the actual writing to shared memory is deferred until the transaction commits. Writes during the transaction are placed
in a local buffer (the writeset of the transaction) and are not visible to other transactions. That is why dirty reads are
not possible.
How these rules are realized in Akka is an implementation detail and can change over time, and the exact details could
even depend on the used configuration. But they will build on the other JMM rules like the monitor lock rule or the
volatile variable rule. This means that you, the Akka user, do not need to worry about adding synchronization to provide
such a "happens before" relation, because it is the responsibility of Akka. So you have your hands free to deal with your
business logic, and the Akka framework makes sure that those rules are guaranteed on your behalf.
.. _jmm-shared-state:
Actors and shared mutable state
-------------------------------
Since Akka runs on the JVM there are still some rules to be followed.
* Closing over internal Actor state and exposing it to other threads
.. code-block:: scala
   class MyActor extends Actor {
     var state = ...
     def receive = {
       case _ =>
         //Wrongs

         // Very bad, shared mutable state,
         // will break your application in weird ways
         Future { state = NewState }
         anotherActor ? message onSuccess { r => state = r }

         // Very bad, "sender" changes for every message,
         // shared mutable state bug
         Future { expensiveCalculation(sender) }

         //Rights

         // Completely safe, "self" is OK to close over
         // and it's an ActorRef, which is thread-safe
         Future { expensiveCalculation() } onComplete { f => self ! f.value.get }

         // Completely safe, we close over a fixed value
         // and it's an ActorRef, which is thread-safe
         val currentSender = sender
         Future { expensiveCalculation(currentSender) }
     }
   }
* Messages **should** be immutable; this is to avoid the shared mutable state trap (see the sketch below).
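
A sketch of an immutable message type:

.. code-block:: scala

   // immutable message: only val fields and an immutable collection
   case class Register(user: String, roles: List[String])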

View file

@ -0,0 +1,116 @@
.. _message-send-semantics:
#######################
Message send semantics
#######################
Guaranteed Delivery?
====================
Akka does *not* support guaranteed delivery.

First, it is close to impossible to actually give guarantees like that;
second, it is extremely costly trying to do so.
The network is inherently unreliable and there is no such thing as 100%
guaranteed delivery, so it can never be guaranteed.
The question is what to guarantee. That:
1. The message is sent out on the network?
2. The message is received by the other host?
3. The message is put on the target actor's mailbox?
4. The message is applied to the target actor?
5. The message is starting to be executed by the target actor?
6. The message is finished executing by the target actor?
Each one of these has different challenges and costs.
Akka embraces distributed computing and the network and makes it explicit
through message passing; therefore it does not try to lie and emulate a
leaky abstraction. This is a model that has been used with great success
in Erlang and requires the user to model his application around it. You can
read more about this approach in the `Erlang documentation`_ (sections
10.9 and 10.10); Akka follows it closely.

Bottom line: you as a developer know what guarantees you need in your
application and can solve it fastest and most reliably by explicit ``ACK`` and
``RETRY`` (if you really need it; most often you don't); a sketch of this
pattern follows below. Using Akka's Durable Mailboxes could also help with this.
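
For illustration, here is a minimal sketch of such an explicit ``ACK``/``RETRY``
scheme; the ``Deliver``/``Ack`` messages and the retry interval are made up for
this example, and since a message may be delivered more than once the receiver
must process deliveries idempotently (imports assume Akka 2.1; for 2.0 use
``akka.util.duration._``):

.. code-block:: scala

   import akka.actor._
   import scala.concurrent.duration._

   case class Deliver(payload: Any)
   case object Ack

   class ReliableSender(target: ActorRef, payload: Any) extends Actor {
     import context.dispatcher
     var retry: Cancellable = _

     override def preStart() = send()

     def send() {
       target ! Deliver(payload)
       // re-send until the target confirms reception
       retry = context.system.scheduler.scheduleOnce(1.second, self, "retry")
     }

     def receive = {
       case "retry" => send()
       case Ack =>
         retry.cancel()
         context.stop(self)
     }
   }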
Delivery semantics
==================
At-most-once
------------
Actual transports may provide stronger semantics,
but at-most-once is the semantics you should expect.
The alternatives would be once-and-only-once, which is extremely costly,
or at-least-once which essentially requires idempotency of message processing,
which is a user-level concern.
Ordering is preserved on a per-sender basis
-------------------------------------------
Actor ``A1`` sends messages ``M1``, ``M2``, ``M3`` to ``A2``
Actor ``A3`` sends messages ``M4``, ``M5``, ``M6`` to ``A2``
This means that:
1) If ``M1`` is delivered it must be delivered before ``M2`` and ``M3``
2) If ``M2`` is delivered it must be delivered before ``M3``
3) If ``M4`` is delivered it must be delivered before ``M5`` and ``M6``
4) If ``M5`` is delivered it must be delivered before ``M6``
5) ``A2`` can see messages from ``A1`` interleaved with messages from ``A3``
6) Since there is no guaranteed delivery, none, some or all of the messages may arrive at ``A2``.
.. _deadletters:
Dead Letters
============
Messages which cannot be delivered (and for which this can be ascertained) will
be delivered to a synthetic actor called ``/deadLetters``. This delivery
happens on a best-effort basis; it may fail even within the local JVM (e.g.
during actor termination). Messages sent via unreliable network transports will
be lost without turning up as dead letters.
How do I Receive Dead Letters?
------------------------------
An actor can subscribe to class :class:`akka.actor.DeadLetter` on the event
stream, see :ref:`event-stream-java` (Java) or :ref:`event-stream-scala`
(Scala) for how to do that. The subscribed actor will then receive all dead
letters published in the (local) system from that point onwards. Dead letters
are not propagated over the network, if you want to collect them in one place
you will have to subscribe one actor per network node and forward them
manually. Also consider that dead letters are generated at the node which can
determine that a send operation has failed, which for a remote send can be the
local system (if no network connection can be established) or the remote one
(if the actor you are sending to does not exist at that point in time).
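
For example, subscribing a listener on a node could look like this (a sketch;
the listener simply prints what it sees):

.. code-block:: scala

   import akka.actor.{ Actor, ActorSystem, DeadLetter, Props }

   class DeadLetterListener extends Actor {
     def receive = {
       case d: DeadLetter => println("dead letter: " + d.message)
     }
   }

   val system = ActorSystem("Example")
   val listener = system.actorOf(Props[DeadLetterListener], "deadLetterListener")
   system.eventStream.subscribe(listener, classOf[DeadLetter])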
What Should I Use Dead Letters For?
-----------------------------------
The dead letter service follows the same rules with respect to delivery
guarantees as all other message sends, hence it cannot be used to implement
guaranteed delivery. The main use is for debugging, especially if an actor send
does not arrive consistently (where usually inspecting the dead letters will
tell you that the sender or recipient was set wrong somewhere along the way).
Dead Letters Which are (Usually) not Worrisome
----------------------------------------------
Every time an actor does not terminate by its own decision, there is a chance
that some messages which it sends to itself are lost. There is one which
happens quite easily in complex shutdown scenarios that is usually benign:
seeing a :class:`akka.dispatch.Terminate` message dropped means that two
termination requests were given, but of course only one can succeed. In the
same vein, you might see :class:`akka.actor.Terminated` messages from children
while stopping a hierarchy of actors turning up in dead letters if the parent
is still watching the child when the parent terminates.
.. _Erlang documentation: http://www.erlang.org/faq/academic.html

View file

@ -0,0 +1,69 @@
.. _remoting:
Location Transparency
=====================
The previous section describes how actor paths are used to enable location
transparency. This special feature deserves some extra explanation, because the
related term “transparent remoting” was used quite differently in the context
of programming languages, platforms and technologies.
Distributed by Default
----------------------
Everything in Akka is designed to work in a distributed setting: all
interactions of actors use purely message passing and everything is
asynchronous. This effort has been undertaken to ensure that all functions are
available equally when running within a single JVM or on a cluster of hundreds
of machines. The key for enabling this is to go from remote to local by way of
optimization instead of trying to go from local to remote by way of
generalization. See `this classic paper
<http://labs.oracle.com/techrep/1994/abstract-29.html>`_ for a detailed
discussion on why the second approach is bound to fail.
Ways in which Transparency is Broken
------------------------------------
What is true of Akka need not be true of the application which uses it, since
designing for distributed execution poses some restrictions on what is
possible. The most obvious one is that all messages sent over the wire must be
serializable. While being a little less obvious this includes closures which
are used as actor factories (i.e. within :class:`Props`) if the actor is to be
created on a remote node.
Another consequence is that everything needs to be aware of all interactions
being fully asynchronous, which in a computer network might mean that it may
take several minutes for a message to reach its recipient (depending on
configuration). It also means that the probability for a message to be lost is
much higher than within one JVM, where it is close to zero (still: no hard
guarantee!).
How is Remoting Used?
---------------------
We took the idea of transparency to the limit in that there is nearly no API
for the remoting layer of Akka: it is purely driven by configuration. Just
write your application according to the principles outlined in the previous
sections, then specify remote deployment of actor sub-trees in the
configuration file. This way, your application can be scaled out without having
to touch the code. The only piece of the API which allows programmatic
influence on remote deployment is that :class:`Props` contain a field which may
be set to a specific :class:`Deploy` instance; this has the same effect as
putting an equivalent deployment into the configuration file (if both are
given, configuration file wins).
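
A sketch of this programmatic variant is shown below; it assumes ``akka-remote``
on the class path, remoting enabled in the configuration, and a made-up remote
address (in Akka 2.1 the protocol name is ``akka``):

.. code-block:: scala

   import akka.actor.{ Actor, ActorSystem, Address, Deploy, Props }
   import akka.remote.RemoteScope

   class Echo extends Actor {
     def receive = { case msg => sender ! msg }
   }

   val system = ActorSystem("Local")
   // deploy the actor onto the given remote node instead of locally
   val address = Address("akka", "RemoteSystem", "remotehost", 2552)
   val echo = system.actorOf(
     Props[Echo].withDeploy(Deploy(scope = RemoteScope(address))), "echo")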
Marking Points for Scaling Up with Routers
------------------------------------------
In addition to being able to run different parts of an actor system on
different nodes of a cluster, it is also possible to scale up onto more cores
by multiplying actor sub-trees which support parallelization (think for example
a search engine processing different queries in parallel). The clones can then
be routed to in different fashions, e.g. round-robin. The only thing necessary
to achieve this is to declare a certain actor as
“withRouter”; then, in its stead, a router actor will be created which will spawn
up a configurable number of children of the desired type and route to them in
the configured fashion. Once such a router has been declared, its configuration
can be freely overridden from the configuration file, including mixing it with
the remote deployment of (some of) the children. Read more about
this in :ref:`routing-scala` and :ref:`routing-java`.
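
For illustration, a minimal sketch of such a declaration in code (the ``Worker``
actor and the pool size are made up; the same can be expressed purely in the
configuration file):

.. code-block:: scala

   import akka.actor.{ Actor, ActorSystem, Props }
   import akka.routing.RoundRobinRouter

   class Worker extends Actor {
     def receive = { case work => println("handling " + work) }
   }

   val system = ActorSystem("Example")
   // a router actor which creates five Worker children and routes to them
   val router = system.actorOf(
     Props[Worker].withRouter(RoundRobinRouter(nrOfInstances = 5)), "workers")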

View file

@ -0,0 +1,225 @@
.. _supervision:
Supervision and Monitoring
==========================
This chapter outlines the concept behind supervision, the primitives offered
and their semantics. For details on how that translates into real code, please
refer to the corresponding chapters for Scala and Java APIs.
.. _supervision-directives:
What Supervision Means
----------------------
As described in :ref:`actor-systems` supervision describes a dependency
relationship between actors: the supervisor delegates tasks to subordinates and
therefore must respond to their failures. When a subordinate detects a failure
(i.e. throws an exception), it suspends itself and all its subordinates and
sends a message to its supervisor, signaling failure. Depending on the nature
of the work to be supervised and the nature of the failure, the supervisor has
a choice of the following four options:
#. Resume the subordinate, keeping its accumulated internal state
#. Restart the subordinate, clearing out its accumulated internal state
#. Terminate the subordinate permanently
#. Escalate the failure
It is important to always view an actor as part of a supervision hierarchy,
which explains the existence of the fourth choice (as a supervisor also is
subordinate to another supervisor higher up) and has implications on the first
three: resuming an actor resumes all its subordinates, restarting an actor
entails restarting all its subordinates (but see below for more details),
similarly terminating an actor will also terminate all its subordinates. It
should be noted that the default behavior of the :meth:`preRestart` hook of the
:class:`Actor` class is to terminate all its children before restarting, but
this hook can be overridden; the recursive restart applies to all children left
after this hook has been executed.
Each supervisor is configured with a function translating all possible failure
causes (i.e. exceptions) into one of the four choices given above; notably,
this function does not take the failed actor's identity as an input. It is
quite easy to come up with examples of structures where this might not seem
flexible enough, e.g. wishing for different strategies to be applied to
different subordinates. At this point it is vital to understand that
supervision is about forming a recursive fault handling structure. If you try
to do too much at one level, it will become hard to reason about, hence the
recommended way in this case is to add a level of supervision.
Akka implements a specific form called “parental supervision”. Actors can only
be created by other actors—where the top-level actor is provided by the
library—and each created actor is supervised by its parent. This restriction
makes the formation of actor supervision hierarchies implicit and encourages
sound design decisions. It should be noted that this also guarantees that
actors cannot be orphaned or attached to supervisors from the outside, which
might otherwise catch them unawares. In addition, this yields a natural and
clean shutdown procedure for (sub-trees of) actor applications.
.. _toplevel-supervisors:
The Top-Level Supervisors
-------------------------
.. image:: guardians.png
   :align: center
   :width: 360
An actor system will during its creation start at least three actors, shown in
the image above. For more information about the consequences for actor paths
see :ref:`toplevel-paths`.
.. _user-guardian:
``/user``: The Guardian Actor
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The actor which is probably most interacted with is the parent of all
user-created actors, the guardian named ``"/user"``. Actors created using
``system.actorOf()`` are children of this actor. This means that when this
guardian terminates, all normal actors in the system will be shutdown, too. It
also means that this guardian's supervisor strategy determines how the
top-level normal actors are supervised. Since Akka 2.1 it is possible to
configure this using the setting ``akka.actor.guardian-supervisor-strategy``,
which takes the fully-qualified class-name of a
:class:`SupervisorStrategyConfigurator`. When the guardian escalates a failure,
the root guardian's response will be to terminate the guardian, which in effect
will shut down the whole actor system.
``/system``: The System Guardian
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This special guardian has been introduced in order to achieve an orderly
shut-down sequence where logging remains active while all normal actors
terminate, even though logging itself is implemented using actors. This is
realized by having the system guardian watch the user guardian and initiate its own
shut-down upon reception of the :class:`Terminated` message. The top-level
system actors are supervised using a strategy which will restart indefinitely
upon all types of :class:`Exception` except for
:class:`ActorInitializationException` and :class:`ActorKilledException`, which
will terminate the child in question. All other throwables are escalated,
which will shut down the whole actor system.
``/``: The Root Guardian
^^^^^^^^^^^^^^^^^^^^^^^^
The root guardian is the grand-parent of all so-called “top-level” actors and
supervises all the special actors mentioned in :ref:`toplevel-paths` using the
``SupervisorStrategy.stoppingStrategy``, whose purpose is to terminate the
child upon any type of :class:`Exception`. All other throwables will be
escalated … but to whom? Since every real actor has a supervisor, the
supervisor of the root guardian cannot be a real actor. And because this means
that it is “outside of the bubble”, it is called the “bubble-walker”. This is a
synthetic :class:`ActorRef` which in effect stops its child upon the first sign
of trouble and sets the actor system's ``isTerminated`` status to ``true`` as
soon as the root guardian is fully terminated (all children recursively
stopped).
.. _supervision-restart:
What Restarting Means
---------------------
When presented with an actor which failed while processing a certain message,
causes for the failure fall into three categories:
* Systematic (i.e. programming) error for the specific message received
* (Transient) failure of some external resource used during processing the message
* Corrupt internal state of the actor
Unless the failure is specifically recognizable, the third cause cannot be
ruled out, which leads to the conclusion that the internal state needs to be
cleared out. If the supervisor decides that its other children or itself are not
affected by the corruption (e.g. because of conscious application of the error
kernel pattern), it is therefore best to restart the child. This is carried out
by creating a new instance of the underlying :class:`Actor` class and replacing
the failed instance with the fresh one inside the child's :class:`ActorRef`;
the ability to do this is one of the reasons for encapsulating actors within
special references. The new actor then resumes processing its mailbox, meaning
that the restart is not visible outside of the actor itself with the notable
exception that the message during which the failure occurred is not
re-processed.
The precise sequence of events during a restart is the following:
#. suspend the actor (which means that it will not process normal messages until
   resumed), and recursively suspend all children
#. call the old instance's :meth:`preRestart` hook (defaults to sending
   termination requests to all children and calling :meth:`postStop`)
#. wait for all children which were requested to terminate (using
   ``context.stop()``) during :meth:`preRestart` to actually terminate
#. create new actor instance by invoking the originally provided factory again
#. invoke :meth:`postRestart` on the new instance (which by default also calls :meth:`preStart`)
#. send restart request to all children (they will follow the same process
   recursively, from step 2)
#. resume the actor
What Lifecycle Monitoring Means
-------------------------------
.. note::
Lifecycle Monitoring in Akka is usually referred to as ``DeathWatch``
In contrast to the special relationship between parent and child described
above, each actor may monitor any other actor. Since actors emerge from
creation fully alive and restarts are not visible outside of the affected
supervisors, the only state change available for monitoring is the transition
from alive to dead. Monitoring is thus used to tie one actor to another so that
it may react to the other actor's termination, in contrast to supervision which
reacts to failure.
Lifecycle monitoring is implemented using a :class:`Terminated` message to be
received by the monitoring actor, where the default behavior is to throw a
special :class:`DeathPactException` if not otherwise handled. To start
listening for :class:`Terminated` messages, use ``ActorContext.watch(targetActorRef)``,
and to stop listening use ``ActorContext.unwatch(targetActorRef)``.
One important property is that the message will be delivered irrespective of the order in
which the monitoring request and the target's termination occur, i.e. you still get
the message even if at the time of registration the target is already dead.
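
For illustration, a small sketch of DeathWatch in use (the empty child actor is
a placeholder):

.. code-block:: scala

   import akka.actor.{ Actor, Props, Terminated }

   class Watcher extends Actor {
     val child = context.actorOf(Props.empty, "child")
     context.watch(child) // subscribe to the child's Terminated message

     def receive = {
       case Terminated(ref) if ref == child =>
         // react to the termination, e.g. re-create the child or give up
         context.stop(self)
     }
   }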
Monitoring is particularly useful if a supervisor cannot simply restart its
children and has to terminate them, e.g. in case of errors during actor
initialization. In that case it should monitor those children and re-create
them or schedule itself to retry this at a later time.
Another common use case is that an actor needs to fail in the absence of an
external resource, which may also be one of its own children. If a third party
terminates a child by way of the ``system.stop(child)`` method or sending a
:class:`PoisonPill`, the supervisor might well be affected.
One-For-One Strategy vs. All-For-One Strategy
---------------------------------------------
There are two classes of supervision strategies which come with Akka:
:class:`OneForOneStrategy` and :class:`AllForOneStrategy`. Both are configured
with a mapping from exception type to supervision directive (see
:ref:`above <supervision-directives>`) and limits on how often a child is allowed to fail
before terminating it. The difference between them is that the former applies
the obtained directive only to the failed child, whereas the latter applies it
to all siblings as well. Normally, you should use the
:class:`OneForOneStrategy`, which also is the default if none is specified
explicitly.
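
For illustration, a sketch of declaring a :class:`OneForOneStrategy` in Scala
(the exception-to-directive mapping is made up; the duration import assumes
Akka 2.1, for 2.0 use ``akka.util.duration._``):

.. code-block:: scala

   import akka.actor._
   import akka.actor.SupervisorStrategy._
   import scala.concurrent.duration._

   class Supervisor extends Actor {
     override val supervisorStrategy =
       OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1.minute) {
         case _: ArithmeticException  => Resume   // keep the child's state, go on
         case _: NullPointerException => Restart  // clear out the child's state
         case _: Exception            => Escalate // let our own supervisor decide
       }

     def receive = {
       case p: Props => sender ! context.actorOf(p)
     }
   }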
The :class:`AllForOneStrategy` is applicable in cases where the ensemble of
children has such tight dependencies among them that a failure of one child
affects the function of the others, i.e. they are inextricably linked. Since a
restart does not clear out the mailbox, it is often best to terminate the children
upon failure and re-create them explicitly from the supervisor (by watching the
children's lifecycle); otherwise you have to make sure that it is no problem
for any of the actors to receive a message which was queued before the restart
but processed afterwards.
Normally stopping a child (i.e. not in response to a failure) will not
automatically terminate the other children in an all-for-one strategy; that can
easily be done by watching their lifecycle: if the :class:`Terminated` message
is not handled by the supervisor, it will throw a :class:`DeathPactException`
which (depending on its supervisor) will restart it, and the default
:meth:`preRestart` action will terminate all children. Of course this can be
handled explicitly as well.
Please note that creating one-off actors from an all-for-one supervisor entails
that failures escalated by the temporary actor will affect all the permanent
ones. If this is not desired, install an intermediate supervisor; this can very
easily be done by declaring a router of size 1 for the worker, see
:ref:`routing-scala` or :ref:`routing-java`.

View file

@ -0,0 +1,19 @@
digraph circuit_breaker {
   rankdir = "LR";
   size = "6,5";
   graph [ bgcolor = "transparent" ]
   node [ fontname = "Helvetica",
          fontsize = 14,
          shape = circle,
          color = white,
          style = filled ];
   edge [ fontname = "Helvetica", fontsize = 12 ]

   Closed [ fillcolor = green2 ];
   "Half-Open" [ fillcolor = yellow2 ];
   Open [ fillcolor = red2 ];

   Closed -> Closed [ label = "Success" ];
   "Half-Open" -> Open [ label = "Trip Breaker" ];
   "Half-Open" -> Closed [ label = "Reset Breaker" ];
   Closed -> Open [ label = "Trip Breaker" ];
   Open -> Open [ label = "Calls failing fast" ];
   Open -> "Half-Open" [ label = "Attempt Reset" ];
}

akka-docs/rst/index.rst
View file

@ -0,0 +1,34 @@
Contents
========
.. toctree::
   :maxdepth: 2

   intro/index
   general/index
   common/index
   java/index
   scala/index
   modules/index
   experimental/index
   dev/index
   project/index
   additional/index
Links
=====
* :ref:`migration`
* `Downloads <http://akka.io/downloads/>`_
* `Source Code <http://github.com/akka/akka>`_
* :ref:`scaladoc`
* :ref:`other-doc`
* :ref:`issue_tracking`
* :ref:`support`

View file

@ -0,0 +1,32 @@
.. _deployment-scenarios:
###################################
Use-case and Deployment Scenarios
###################################
How can I use and deploy Akka?
==============================
Akka can be used in different ways:
- As a library: used as a regular JAR on the classpath and/or in a web app, to
be put into ``WEB-INF/lib``
- As a stand-alone application by instantiating ActorSystem in a main class or
using the :ref:`microkernel-scala` / :ref:`microkernel-java`
Using Akka as library
---------------------
This is most likely what you want if you are building Web applications. There
are several ways you can use Akka in Library mode by adding more and more
modules to the stack.
Using Akka as a stand alone microkernel
----------------------------------------
Akka can also be run as a stand-alone microkernel. See
:ref:`microkernel-scala` / :ref:`microkernel-java` for
more information.

View file

@ -0,0 +1,164 @@
Getting Started
===============
Prerequisites
-------------
Akka requires that you have `Java 1.6 <http://www.oracle.com/technetwork/java/javase/downloads/index.html>`_ or
later installed on your machine.
Getting Started Guides and Template Projects
--------------------------------------------
The best way to start learning Akka is to download the Typesafe Stack and either try out
the Akka Getting Started Tutorials or check out one of the Akka Template Projects. Both come
in several flavours depending on your development environment preferences.
- `Download Typesafe Stack <http://typesafe.com/stack/download>`_
- `Getting Started Tutorials <http://typesafe.com/resources/getting-started>`_
- `Template Projects <http://typesafe.com/stack/download#template>`_
Download
--------
There are several ways to download Akka. You can download it as part of the Typesafe Stack
(as described above). You can download the full distribution with microkernel, which includes
all modules. Or you can use a build tool like Maven or SBT to download dependencies from the
Akka Maven repository.
Modules
-------
Akka is very modular and consists of several JARs containing different features.
- ``akka-actor`` -- Classic Actors, Typed Actors, IO Actor etc.
- ``akka-remote`` -- Remote Actors
- ``akka-testkit`` -- Toolkit for testing Actor systems
- ``akka-kernel`` -- Akka microkernel for running a bare-bones mini application server
- ``akka-transactor`` -- Transactors - transactional actors, integrated with Scala STM
- ``akka-agent`` -- Agents, integrated with Scala STM
- ``akka-camel`` -- Apache Camel integration
- ``akka-zeromq`` -- ZeroMQ integration
- ``akka-slf4j`` -- SLF4J Event Handler Listener
- ``akka-filebased-mailbox`` -- Akka durable mailbox (find more among community projects)
The filename of the actual JAR is for example ``@jarName@`` (and analogously for
the other modules).
How to see the JAR dependencies of each Akka module is described in the
:ref:`dependencies` section.
Using a release distribution
----------------------------
Download the release you need from http://akka.io/downloads and unzip it.
Using a snapshot version
------------------------
The Akka nightly snapshots are published to http://repo.akka.io/snapshots/ and are
versioned with both ``SNAPSHOT`` and timestamps. You can choose a timestamped
version to work with and can decide when to update to a newer version. The Akka
snapshots repository is also proxied through http://repo.typesafe.com/typesafe/snapshots/
which includes proxies for several other repositories that Akka modules depend on.
Microkernel
-----------
The Akka distribution includes the microkernel. To run the microkernel put your
application jar in the ``deploy`` directory and use the scripts in the ``bin``
directory.
More information is available in the documentation of the
:ref:`microkernel-scala` / :ref:`microkernel-java`.
Using a build tool
------------------
Akka can be used with build tools that support Maven repositories. The Akka
Maven repository can be found at http://repo.akka.io/releases/ and Typesafe provides
http://repo.typesafe.com/typesafe/releases/ that proxies several other
repositories, including akka.io.
Using Akka with Maven
---------------------
The simplest way to get started with Akka and Maven is to check out the
`Akka/Maven template <http://typesafe.com/resources/getting-started/typesafe-stack/downloading-installing.html#template-projects-for-scala-akka-and-play>`_
project.
Since Akka is published to Maven Central (for versions since 2.1-M2), it is
enough to add the Akka dependencies to the POM. For example, here is the
dependency for akka-actor:
.. code-block:: xml

   <dependency>
     <groupId>com.typesafe.akka</groupId>
     <artifactId>akka-actor_@binVersion@</artifactId>
     <version>@version@</version>
   </dependency>
**Note**: for snapshot versions both ``SNAPSHOT`` and timestamped versions are published.
Using Akka with SBT
-------------------
The simplest way to get started with Akka and SBT is to check out the
`Akka/SBT template <http://typesafe.com/resources/getting-started/typesafe-stack/downloading-installing.html#template-projects-for-scala-akka-and-play>`_
project.
Summary of the essential parts for using Akka with SBT:
SBT installation instructions can be found at `https://github.com/harrah/xsbt/wiki/Setup <https://github.com/harrah/xsbt/wiki/Setup>`_
``build.sbt`` file:
.. parsed-literal::

   name := "My Project"

   version := "1.0"

   scalaVersion := "@scalaVersion@"

   resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"

   libraryDependencies +=
     "com.typesafe.akka" %% "akka-actor" % "@version@" @crossString@
Using Akka with Eclipse
-----------------------
Set up an SBT project and then use `sbteclipse <https://github.com/typesafehub/sbteclipse>`_ to generate an Eclipse project.
Using Akka with IntelliJ IDEA
-----------------------------
Set up an SBT project and then use `sbt-idea <https://github.com/mpeltonen/sbt-idea>`_ to generate an IntelliJ IDEA project.
Using Akka with NetBeans
------------------------
Set up an SBT project and then use `sbt-netbeans-plugin <https://github.com/remeniuk/sbt-netbeans-plugin>`_ to generate a NetBeans project.
Build from sources
------------------
Akka uses Git and is hosted at `Github <http://github.com>`_.
* Akka: clone the Akka repository from `<http://github.com/akka/akka>`_
Continue reading the page on :ref:`building-akka`
Need help?
----------
If you have questions you can get help on the `Akka Mailing List <http://groups.google.com/group/akka-user>`_.
You can also ask for `commercial support <http://typesafe.com>`_.
Thanks for being a part of the Akka community.

View file

@ -0,0 +1,12 @@
Introduction
============
.. toctree::
   :maxdepth: 2

   what-is-akka
   why-akka
   getting-started
   deployment-scenarios
   use-cases

View file

@ -0,0 +1,59 @@
.. _use-cases:
################################
Examples of use-cases for Akka
################################
We see Akka being adopted by many large organizations in a broad range of industries,
from investment and merchant banking, retail and social media, simulation,
gaming and betting, automobile and traffic systems, health care, and data analytics
to much more. Any system that has the need for high throughput and low latency
is a good candidate for using Akka.

There is a great discussion on use-cases for Akka, with some good write-ups by production
users, `here <http://stackoverflow.com/questions/4493001/good-use-case-for-akka/4494512#4494512>`_.
Here are some of the areas where Akka is being deployed into production
=======================================================================
Transaction processing (Online Gaming, Finance/Banking, Trading, Statistics, Betting, Social Media, Telecom)
------------------------------------------------------------------------------------------------------------
Scale up, scale out, fault-tolerance / HA
Service backend (any industry, any app)
---------------------------------------
Serve REST, SOAP, Cometd, WebSockets etc.
Act as message hub / integration layer
Scale up, scale out, fault-tolerance / HA
Concurrency/parallelism (any app)
---------------------------------
Correct
Simple to work with and understand
Just add the jars to your existing JVM project (use Scala, Java, Groovy or JRuby)
Simulation
----------
Master/Worker, Compute Grid, MapReduce etc.
Batch processing (any industry)
-------------------------------
Camel integration to hook up with batch data sources
Actors divide and conquer the batch workloads
Communications Hub (Telecom, Web media, Mobile media)
-----------------------------------------------------
Scale up, scale out, fault-tolerance / HA
Gaming and Betting (MOM, online gaming, betting)
------------------------------------------------
Scale up, scale out, fault-tolerance / HA
Business Intelligence/Data Mining/general purpose crunching
-----------------------------------------------------------
Scale up, scale out, fault-tolerance / HA
Complex Event Stream Processing
-------------------------------
Scale up, scale out, fault-tolerance / HA

View file

@ -0,0 +1,121 @@
.. _what-is-akka:
###############
What is Akka?
###############
**Scalable real-time transaction processing**
We believe that writing correct concurrent, fault-tolerant and scalable
applications is too hard. Most of the time it's because we are using the wrong
tools and the wrong level of abstraction. Akka is here to change that. Using the
Actor Model we raise the abstraction level and provide a better platform to build
correct, concurrent, and scalable applications. For fault-tolerance we adopt the
"Let it crash" model which the telecom industry has used with great success to
build applications that self-heal and systems that never stop. Actors also provide
the abstraction for transparent distribution and the basis for truly scalable and
fault-tolerant applications.
Akka is Open Source and available under the Apache 2 License.
Download from http://akka.io/downloads/
Akka implements a unique hybrid
===============================
Actors
------
Actors give you:
- Simple and high-level abstractions for concurrency and parallelism.
- Asynchronous, non-blocking and highly performant event-driven programming model.
- Very lightweight event-driven processes (approximately 2.7 million actors per GB RAM).
See :ref:`actors-scala` and :ref:`untyped-actors-java`
Fault Tolerance
---------------
Fault tolerance through supervisor hierarchies with "let-it-crash"
semantics. Excellent for writing highly fault-tolerant systems that never stop,
systems that self-heal. Supervisor hierarchies can span over multiple JVMs to
provide truly fault-tolerant systems.
See :ref:`fault-tolerance-scala` and :ref:`fault-tolerance-java`
Location Transparency
---------------------
Everything in Akka is designed to work in a distributed environment: all
interactions of actors use purely message passing and everything is asynchronous.
For an overview of the remoting see :ref:`remoting`
Transactors
-----------
Transactors combine actors and STM (Software Transactional Memory) into transactional actors.
It allows you to compose atomic message flows with automatic retry and rollback.
See :ref:`transactors-scala` and :ref:`transactors-java`
Scala and Java APIs
===================
Akka has both a :ref:`scala-api` and a :ref:`java-api`.
Akka can be used in two different ways
======================================
- As a library: used by a web app, to be put into ``WEB-INF/lib`` or as a regular
JAR on your classpath.
- As a microkernel: stand-alone kernel to drop your application into.
See the :ref:`deployment-scenarios` for details.
What happened to Cloudy Akka?
=============================
The commercial offering was earlier referred to as Cloudy Akka. This offering
consisted of two things:
- Cluster support for Akka
- Monitoring & Management (formerly called Atmos)
Cloudy Akka has been discontinued and the Cluster support is now being moved into the
Open Source version of Akka (the upcoming Akka 2.1), while the Monitoring & Management
(Atmos) has been rebranded as the Typesafe Console, which is part of the commercial subscription
for the Typesafe Stack (see below for details).
Typesafe Stack
==============
Akka is now also part of the `Typesafe Stack <http://typesafe.com/stack>`_.
The Typesafe Stack is a modern software platform that makes it easy for developers
to build scalable software applications. It combines the Scala programming language,
Akka, the Play! web framework and robust developer tools in a simple package that
integrates seamlessly with existing Java infrastructure.
The Typesafe Stack is all fully open source.
Typesafe Console
================
On top of the Typesafe Stack we also have a commercial product called Typesafe
Console which provides the following features:
#. Slick Web UI with real-time view into the system
#. Management through Dashboard, JMX and REST
#. Dapper-style tracing of messages across components and remote nodes
#. Real-time statistics
#. Very low overhead monitoring agents (should always be on in production)
#. Consolidation of statistics and logging information to a single node
#. Storage of statistics data for later processing
#. Provisioning and rolling upgrades
Read more `here <http://typesafe.com/products/typesafe-subscription>`_.

View file

@ -0,0 +1,47 @@
Why Akka?
=========
What features can the Akka platform offer, over the competition?
----------------------------------------------------------------
Akka provides scalable real-time transaction processing.
Akka is a unified runtime and programming model for:
- Scale up (Concurrency)
- Scale out (Remoting)
- Fault tolerance
One thing to learn and administer, with high cohesion and coherent semantics.
Akka is a very scalable piece of software, not only in the performance sense,
but in the size of applications it is useful for. The core of Akka, akka-actor,
is very small and easily dropped into an existing project where you need
asynchronicity and lockless concurrency without hassle.
You can choose to include only the parts of Akka you need in your application,
and then there's the whole package, the Akka Microkernel, which is a standalone
container to deploy your Akka application in. With CPUs gaining more and more
cores every cycle, Akka is the alternative that provides outstanding performance
even if you're only running it on one machine. Akka also supplies a wide array
of concurrency paradigms, allowing users to choose the right tool for the
job.
What's a good use-case for Akka?
--------------------------------
We see Akka being adopted by many large organizations in a big range of industries
all from investment and merchant banking, retail and social media, simulation,
gaming and betting, automobile and traffic systems, health care, data analytics
and much more. Any system that has the need for high throughput and low latency
is a good candidate for using Akka.
Actors let you manage service failures (Supervisors), load management (back-off
strategies, timeouts and processing-isolation), and both horizontal and vertical
scalability (add more cores and/or add more machines).
Here's what some of the Akka users have to say about how they are using Akka:
http://stackoverflow.com/questions/4493001/good-use-case-for-akka
All this in the ApacheV2-licensed open source project.

View file

@ -0,0 +1,108 @@
.. _agents-java:
##############
Agents (Java)
##############
Agents in Akka are inspired by `agents in Clojure`_.
.. _agents in Clojure: http://clojure.org/agents
Agents provide asynchronous change of individual locations. Agents are bound to
a single storage location for their lifetime, and only allow mutation of that
location (to a new state) to occur as a result of an action. Update actions are
functions that are asynchronously applied to the Agent's state and whose return
value becomes the Agent's new state. The state of an Agent should be immutable.
While updates to Agents are asynchronous, the state of an Agent is always
immediately available for reading by any thread (using ``get``) without any
messages.
Agents are reactive. The update actions of all Agents get interleaved amongst
threads in a thread pool. At any point in time, at most one ``send`` action for
each Agent is being executed. Actions dispatched to an agent from another thread
will occur in the order they were sent, potentially interleaved with actions
dispatched to the same agent from other sources.
If an Agent is used within an enclosing transaction, then it will participate in
that transaction. Agents are integrated with the STM - any dispatches made in
a transaction are held until that transaction commits, and are discarded if it
is retried or aborted.
Creating and stopping Agents
============================
Agents are created by invoking ``new Agent(value, system)`` passing in the
Agent's initial value and a reference to the ``ActorSystem`` for your
application. An ``ActorSystem`` is required to create the underlying Actors. See
:ref:`actor-systems` for more information about actor systems.
Here is an example of creating an Agent:
.. includecode:: code/docs/agent/AgentDocTest.java
:include: import-system,import-agent
:language: java
.. includecode:: code/docs/agent/AgentDocTest.java#create
:language: java
An Agent will be running until you invoke ``close`` on it. Then it will be
eligible for garbage collection (unless you hold on to it in some way).
.. includecode:: code/docs/agent/AgentDocTest.java#close
:language: java
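
Putting creation and closing together, a minimal sketch might look like the
following (the system name and the counter value are illustrative):

.. code-block:: java

   import akka.actor.ActorSystem;
   import akka.agent.Agent;

   public class AgentLifecycle {
     public static void main(String[] args) {
       ActorSystem system = ActorSystem.create("AgentSystem");
       // create an Agent with the initial value 5
       Agent<Integer> counter = new Agent<Integer>(5, system);
       // ... use the agent ...
       // close the Agent so it becomes eligible for garbage collection
       counter.close();
       system.shutdown();
     }
   }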
Updating Agents
===============
You update an Agent by sending a function that transforms the current value or
by sending just a new value. The Agent will apply the new value or function
atomically and asynchronously. The update is done in a fire-forget manner and
you are only guaranteed that it will be applied. There is no guarantee of when
the update will be applied but dispatches to an Agent from a single thread will
occur in order. You apply a value or a function by invoking the ``send``
function.
.. includecode:: code/docs/agent/AgentDocTest.java#import-function
:language: java
.. includecode:: code/docs/agent/AgentDocTest.java#send
:language: java
You can also dispatch a function to update the internal state but on its own
thread. This does not use the reactive thread pool and can be used for
long-running or blocking operations. You do this with the ``sendOff``
method. Dispatches using either ``sendOff`` or ``send`` will still be executed
in order.
.. includecode:: code/docs/agent/AgentDocTest.java#send-off
:language: java
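
In case the referenced snippets are not shown inline, a rough sketch of both
variants, assuming the ``akka.japi.Function`` overloads of ``send`` and
``sendOff``, might look like this:

.. code-block:: java

   import akka.agent.Agent;
   import akka.japi.Function;

   public class AgentUpdates {
     public static void update(Agent<Integer> counter) {
       // send a new value; it is applied asynchronously
       counter.send(7);

       // send a function that transforms the current value
       counter.send(new Function<Integer, Integer>() {
         public Integer apply(Integer i) {
           return i * 2;
         }
       });

       // sendOff runs the update on its own thread, suitable for blocking work
       counter.sendOff(new Function<Integer, Integer>() {
         public Integer apply(Integer i) {
           // imagine a long-running or blocking computation here
           return i + 1;
         }
       });
     }
   }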
Reading an Agent's value
========================
Agents can be dereferenced (you can get an Agent's value) by calling the ``get``
method:
.. includecode:: code/docs/agent/AgentDocTest.java#read-get
:language: java
Reading an Agent's current value does not involve any message passing and
happens immediately. So while updates to an Agent are asynchronous, reading the
state of an Agent is synchronous.
Awaiting an Agent's value
=========================
It is also possible to read the value after all currently queued sends have
completed. You can do this with ``await``:
.. includecode:: code/docs/agent/AgentDocTest.java#import-timeout
:language: java
.. includecode:: code/docs/agent/AgentDocTest.java#read-await
:language: java
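
Putting reading and awaiting together, a small sketch, assuming ``await`` takes
an ``akka.util.Timeout``, might look like this:

.. code-block:: java

   import static java.util.concurrent.TimeUnit.SECONDS;
   import akka.agent.Agent;
   import akka.util.Timeout;

   public class AgentReads {
     public static void read(Agent<Integer> counter) {
       // read the current value immediately, no message passing involved
       int current = counter.get();

       // read the value once all currently queued updates have been applied
       int settled = counter.await(new Timeout(5, SECONDS));

       System.out.println(current + " / " + settled);
     }
   }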

View file

@ -0,0 +1,582 @@
.. _camel-java:
#############
Camel (Java)
#############
Additional Resources
====================
For an introduction to akka-camel 2, see also the Peter Gabryanczyk's talk `Migrating akka-camel module to Akka 2.x`_.
For an introduction to akka-camel 1, see also the `Appendix E - Akka and Camel`_
(pdf) of the book `Camel in Action`_.
.. _Appendix E - Akka and Camel: http://www.manning.com/ibsen/appEsample.pdf
.. _Camel in Action: http://www.manning.com/ibsen/
.. _Migrating akka-camel module to Akka 2.x: http://skillsmatter.com/podcast/scala/akka-2-x
Other, more advanced external articles (for version 1) are:
* `Akka Consumer Actors: New Features and Best Practices <http://krasserm.blogspot.com/2011/02/akka-consumer-actors-new-features-and.html>`_
* `Akka Producer Actors: New Features and Best Practices <http://krasserm.blogspot.com/2011/02/akka-producer-actor-new-features-and.html>`_
Introduction
============
The akka-camel module allows Untyped Actors to receive
and send messages over a great variety of protocols and APIs.
In addition to the native Scala and Java actor API, actors can now exchange messages with other systems over a large number
of protocols and APIs such as HTTP, SOAP, TCP, FTP, SMTP or JMS, to mention a
few. At the moment, approximately 80 protocols and APIs are supported.
Apache Camel
------------
The akka-camel module is based on `Apache Camel`_, a powerful and light-weight
integration framework for the JVM. For an introduction to Apache Camel you may
want to read this `Apache Camel article`_. Camel comes with a
large number of `components`_ that provide bindings to different protocols and
APIs. The `camel-extra`_ project provides further components.
.. _Apache Camel: http://camel.apache.org/
.. _Apache Camel article: http://architects.dzone.com/articles/apache-camel-integration
.. _components: http://camel.apache.org/components.html
.. _camel-extra: http://code.google.com/p/camel-extra/
Consumer
--------
Here's an example of using Camel's integration components in Akka.
.. includecode:: code/docs/camel/MyEndpoint.java#Consumer-mina
The above example exposes an actor over a TCP endpoint via Apache
Camel's `Mina component`_. The actor implements the `getEndpointUri` method to define
an endpoint from which it can receive messages. After starting the actor, TCP
clients can immediately send messages to and receive responses from that
actor. If the message exchange should go over HTTP (via Camel's `Jetty
component`_), the actor's `getEndpointUri` method should return a different URI, for instance "jetty:http://localhost:8877/example".
In the above case an extra constructor is added that can set the endpoint URI, which would result in
the `getEndpointUri` returning the URI that was set using this constructor.
.. _Mina component: http://camel.apache.org/mina.html
.. _Jetty component: http://camel.apache.org/jetty.html
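
In case the referenced sample is not shown inline, such a consumer might be
sketched roughly as follows (class name, endpoint URI and reply text are
illustrative):

.. code-block:: java

   import akka.camel.CamelMessage;
   import akka.camel.javaapi.UntypedConsumerActor;

   public class MyEndpoint extends UntypedConsumerActor {
     // expose this actor over TCP via Camel's Mina component
     public String getEndpointUri() {
       return "mina:tcp://localhost:6200?textline=true";
     }

     public void onReceive(Object message) {
       if (message instanceof CamelMessage) {
         CamelMessage camelMessage = (CamelMessage) message;
         String body = camelMessage.getBodyAs(String.class, getCamelContext());
         // reply to the TCP client
         getSender().tell("Hello " + body, getSelf());
       } else
         unhandled(message);
     }
   }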
Producer
--------
Actors can also trigger message exchanges with external systems, i.e. produce
messages to Camel endpoints.
.. includecode:: code/docs/camel/Orders.java#Producer
In the above example, any message sent to this actor will be sent to
the JMS queue ``Orders``. Producer actors may choose from the same set of Camel
components as Consumer actors do.
Below is an example of how to send a message to the Orders producer.
.. includecode:: code/docs/camel/ProducerTestBase.java#TellProducer
CamelMessage
------------
The number of Camel components is constantly increasing. The akka-camel module
can support these in a plug-and-play manner. Just add them to your application's
classpath, define a component-specific endpoint URI and use it to exchange
messages over the component-specific protocols or APIs. This is possible because
Camel components bind protocol-specific message formats to a Camel-specific
`normalized message format`__. The normalized message format hides
protocol-specific details from Akka and makes it therefore very easy to support
a large number of protocols through a uniform Camel component interface. The
akka-camel module further converts mutable Camel messages into immutable
representations which are used by Consumer and Producer actors for pattern
matching, transformation, serialization or storage. In the above example of the Orders Producer,
the XML message is put in the body of a newly created Camel Message with an empty set of headers.
You can also create a CamelMessage yourself with the appropriate body and headers as you see fit.
__ https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Message.java
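
For example, creating a CamelMessage yourself, assuming the Java-friendly
constructor that takes a ``java.util.Map`` of headers, might look like this
(body and header are illustrative):

.. code-block:: java

   import java.util.HashMap;
   import java.util.Map;
   import akka.camel.CamelMessage;

   public class MessageFactory {
     public static CamelMessage order() {
       Map<String, Object> headers = new HashMap<String, Object>();
       headers.put("customer", "Anke"); // illustrative header
       // assumes the Java-friendly constructor taking a java.util.Map
       return new CamelMessage("<order amount=\"100\"/>", headers);
     }
   }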
CamelExtension
--------------
The akka-camel module is implemented as an Akka Extension, the ``CamelExtension`` object.
Extensions will only be loaded once per ``ActorSystem``, which will be managed by Akka.
The ``CamelExtension`` object provides access to the `Camel`_ interface.
The `Camel`_ interface in turn provides access to two important Apache Camel objects, the `CamelContext`_ and the `ProducerTemplate`_.
Below you can see how you can get access to these Apache Camel objects.
.. includecode:: code/docs/camel/CamelExtensionTestBase.java#CamelExtension
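
In case the referenced snippet is not shown inline, getting at these objects
boils down to roughly the following sketch:

.. code-block:: java

   import akka.actor.ActorSystem;
   import akka.camel.Camel;
   import akka.camel.CamelExtension;
   import org.apache.camel.CamelContext;
   import org.apache.camel.ProducerTemplate;

   public class CamelObjects {
     public static void main(String[] args) {
       ActorSystem system = ActorSystem.create("some-system");
       Camel camel = CamelExtension.get(system);
       // the single CamelContext and ProducerTemplate of this ActorSystem
       CamelContext camelContext = camel.context();
       ProducerTemplate producerTemplate = camel.template();
       system.shutdown();
     }
   }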
The ``CamelExtension`` is loaded only once per ``ActorSystem``, which makes it safe to call the ``CamelExtension`` at any point in your code to get to the
Apache Camel objects associated with it. There is one `CamelContext`_ and one `ProducerTemplate`_ per ``ActorSystem`` that uses a ``CamelExtension``.
Below is an example of how to add the ActiveMQ component to the `CamelContext`_, which is required when you would like to use the ActiveMQ component.
.. includecode:: code/docs/camel/CamelExtensionTestBase.java#CamelExtensionAddComponent
The `CamelContext`_ joins the lifecycle of the ``ActorSystem`` and ``CamelExtension`` it is associated with; the `CamelContext`_ is started when
the ``CamelExtension`` is created, and it is shut down when the associated ``ActorSystem`` is shut down. The same is true for the `ProducerTemplate`_.
The ``CamelExtension`` is used by both `Producer` and `Consumer` actors to interact with Apache Camel internally.
You can access the ``CamelExtension`` inside a `Producer` or a `Consumer` using the ``camel`` method, or get directly at the `CamelContext`
using the ``getCamelContext`` method and at the `ProducerTemplate` using the ``getProducerTemplate`` method.
Actors are created and started asynchronously. When a `Consumer` actor is created, the `Consumer` is published at its Camel endpoint
(more precisely, the route is added to the `CamelContext`_ from the `Endpoint`_ to the actor).
When a `Producer` actor is created, a `SendProcessor`_ and `Endpoint`_ are created so that the Producer can send messages to the endpoint.
Publication is done asynchronously; setting up an endpoint may still be in progress after you have
requested the actor to be created. Some Camel components can take a while to start up, and in some cases you might want to know when the endpoints are activated and ready to be used.
The `Camel`_ interface allows you to find out when the endpoint is activated or deactivated.
.. includecode:: code/docs/camel/ActivationTestBase.java#CamelActivation
The above code shows that you can get a ``Future`` to the activation of the route from the endpoint to the actor, or you can wait in a blocking fashion on the activation of the route.
An ``ActivationTimeoutException`` is thrown if the endpoint could not be activated within the specified timeout. Deactivation works in a similar fashion:
.. includecode:: code/docs/camel/ActivationTestBase.java#CamelDeactivation
Deactivation of a Consumer or a Producer actor happens when the actor is terminated. For a Consumer, the route to the actor is stopped. For a Producer, the `SendProcessor`_ is stopped.
A ``DeActivationTimeoutException`` is thrown if the associated camel objects could not be deactivated within the specified timeout.
.. _Camel: http://github.com/akka/akka/blob/master/akka-camel/src/main/scala/akka/camel/Camel.scala
.. _CamelContext: https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/CamelContext.java
.. _ProducerTemplate: https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/ProducerTemplate.java
.. _SendProcessor: https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/processor/SendProcessor.java
.. _Endpoint: https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Endpoint.java
Consumer Actors
================
For actors to receive messages from Camel endpoints, they must inherit from the `UntypedConsumerActor`_
class. For example, the following actor class (Consumer1) implements the
`getEndpointUri` method, which is declared in the `UntypedConsumerActor`_ class, in order to receive
messages from the ``file:data/input/actor`` Camel endpoint.
.. _UntypedConsumerActor: http://github.com/akka/akka/blob/master/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumer.scala
.. includecode:: code/docs/camel/Consumer1.java#Consumer1
Whenever a file is put into the data/input/actor directory, its content is
picked up by the Camel `file component`_ and sent as message to the
actor. Messages consumed by actors from Camel endpoints are of type
`CamelMessage`_. These are immutable representations of Camel messages.
.. _file component: http://camel.apache.org/file2.html
.. _CamelMessage: http://github.com/akka/akka/blob/master/akka-camel/src/main/scala/akka/camel/CamelMessage.scala
Here's another example that sets the endpointUri to
``jetty:http://localhost:8877/camel/default``. It causes Camel's `Jetty
component`_ to start an embedded `Jetty`_ server, accepting HTTP connections
from localhost on port 8877.
.. _Jetty component: http://camel.apache.org/jetty.html
.. _Jetty: http://www.eclipse.org/jetty/
.. includecode:: code/docs/camel/Consumer2.java#Consumer2
After starting the actor, clients can send messages to that actor by POSTing to
``http://localhost:8877/camel/default``. The actor sends a response by using the
getSender().tell method. For returning a message body and headers to the HTTP
client the response type should be `CamelMessage`_. For any other response type, a
new CamelMessage object is created by akka-camel with the actor response as message
body.
.. _camel-acknowledgements-java:
Delivery acknowledgements
-------------------------
With in-out message exchanges, clients usually know that a message exchange is
done when they receive a reply from a consumer actor. The reply message can be a
CamelMessage (or any object which is then internally converted to a CamelMessage) on
success, and a Failure message on failure.
With in-only message exchanges, by default, an exchange is done when a message
is added to the consumer actor's mailbox. Any failure or exception that occurs
during processing of that message by the consumer actor cannot be reported back
to the endpoint in this case. To allow consumer actors to positively or
negatively acknowledge the receipt of a message from an in-only message
exchange, they need to override the ``autoAck`` method to return false.
In this case, consumer actors must reply either with a
special akka.camel.Ack message (positive acknowledgement) or a akka.actor.Status.Failure (negative
acknowledgement).
.. includecode:: code/docs/camel/Consumer3.java#Consumer3
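
In case the referenced sample is not shown inline, a sketch of such a consumer,
assuming ``Ack.getInstance()`` as the Java accessor for the ``akka.camel.Ack``
message, might look like this (the endpoint URI is illustrative):

.. code-block:: java

   import akka.camel.Ack;
   import akka.camel.CamelMessage;
   import akka.camel.javaapi.UntypedConsumerActor;

   public class AckingConsumer extends UntypedConsumerActor {
     @Override
     public boolean autoAck() {
       return false; // switch to application-level acknowledgements
     }

     public String getEndpointUri() {
       return "jms:queue:test";
     }

     public void onReceive(Object message) {
       if (message instanceof CamelMessage) {
         // positive acknowledgement; on failure reply with
         // new akka.actor.Status.Failure(exception) instead
         getSender().tell(Ack.getInstance(), getSelf());
       } else
         unhandled(message);
     }
   }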
.. _camel-timeout-java:
Consumer timeout
----------------
Camel Exchanges (and their corresponding endpoints) that support two-way communications need to wait for a response from
an actor before returning it to the initiating client.
For some endpoint types, timeout values can be defined in an endpoint-specific
way which is described in the documentation of the individual `Camel
components`_. Another option is to configure timeouts on the level of consumer actors.
.. _Camel components: http://camel.apache.org/components.html
Two-way communications between a Camel endpoint and an actor are
initiated by sending the request message to the actor with the `ask`_ pattern
and the actor replies to the endpoint when the response is ready. The ask request to the actor can timeout, which will
result in the `Exchange`_ failing with a TimeoutException set on the failure of the `Exchange`_.
The timeout on the consumer actor can be overridden with the ``replyTimeout``, as shown below.
.. includecode:: code/docs/camel/Consumer4.java#Consumer4
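
In case the referenced sample is not shown inline, a sketch, assuming
``replyTimeout`` returns a ``scala.concurrent.util.Duration`` as used elsewhere
in this commit, might look like this:

.. code-block:: java

   import java.util.concurrent.TimeUnit;
   import scala.concurrent.util.Duration;
   import akka.camel.CamelMessage;
   import akka.camel.javaapi.UntypedConsumerActor;

   public class SlowConsumer extends UntypedConsumerActor {
     private static final Duration timeout = Duration.create(500, TimeUnit.MILLISECONDS);

     @Override
     public Duration replyTimeout() {
       return timeout; // fail the exchange if no reply is produced in time
     }

     public String getEndpointUri() {
       return "jetty:http://localhost:8877/camel/default";
     }

     public void onReceive(Object message) {
       if (message instanceof CamelMessage)
         getSender().tell("OK", getSelf());
       else
         unhandled(message);
     }
   }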
.. _Exchange: https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/Exchange.java
.. _ask: http://github.com/akka/akka/blob/master/akka-actor/src/main/scala/akka/pattern/Patterns.scala
Producer Actors
===============
For sending messages to Camel endpoints, actors need to inherit from the `UntypedProducerActor`_ class and implement the getEndpointUri method.
.. includecode:: code/docs/camel/Producer1.java#Producer1
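
In case the referenced sample is not shown inline, Producer1 is essentially of
this shape:

.. code-block:: java

   import akka.camel.javaapi.UntypedProducerActor;

   public class Producer1 extends UntypedProducerActor {
     // every message sent to this actor is produced to this endpoint
     public String getEndpointUri() {
       return "http://localhost:8080/news";
     }
   }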
Producer1 inherits a default implementation of the onReceive method from the
`UntypedProducerActor`_ class. To customize a producer actor's default behavior you must override the `UntypedProducerActor`_.onTransformResponse and
`UntypedProducerActor`_.onTransformOutgoingMessage methods. This is explained later in more detail.
Producer Actors cannot override the `UntypedProducerActor`_.onReceive method.
Any message sent to a Producer actor will be sent to
the associated Camel endpoint, in the above example to
``http://localhost:8080/news``. The `UntypedProducerActor`_ always sends messages asynchronously. Response messages (if supported by the
configured endpoint) will, by default, be returned to the original sender. The
following example uses the ask pattern to send a message to a
Producer actor and waits for a response.
.. includecode:: code/docs/camel/ProducerTestBase.java#AskProducer
The future contains the response CamelMessage or, if an error occurred, an ``AkkaCamelException``, which contains the headers of the response.
.. _camel-custom-processing-java:
Custom Processing
-----------------
Instead of replying to the initial sender, producer actors can implement custom
response processing by overriding the onRouteResponse method. In the following example, the response
message is forwarded to a target actor instead of being replied to the original
sender.
.. includecode:: code/docs/camel/ResponseReceiver.java#RouteResponse
.. includecode:: code/docs/camel/Forwarder.java#RouteResponse
.. includecode:: code/docs/camel/OnRouteResponseTestBase.java#RouteResponse
Before producing messages to endpoints, producer actors can pre-process them by
overriding the `UntypedProducerActor`_.onTransformOutgoingMessage method.
.. includecode:: code/docs/camel/Transformer.java#TransformOutgoingMessage
Producer configuration options
------------------------------
The interaction of producer actors with Camel endpoints can be configured to be
one-way or two-way (by initiating in-only or in-out message exchanges,
respectively). By default, the producer initiates an in-out message exchange
with the endpoint. For initiating an in-only exchange, producer actors have to override the isOneway method to return true.
.. includecode:: code/docs/camel/OnewaySender.java#Oneway
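
In case the referenced sample is not shown inline, a sketch of such a producer
(the endpoint URI is illustrative) might look like this:

.. code-block:: java

   import akka.camel.javaapi.UntypedProducerActor;

   public class OnewaySender extends UntypedProducerActor {
     public String getEndpointUri() {
       return "activemq:FOO.BAR";
     }

     @Override
     public boolean isOneway() {
       return true; // initiate in-only message exchanges
     }
   }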
Message correlation
-------------------
To correlate request with response messages, applications can set the
`Message.MessageExchangeId` message header.
.. includecode:: code/docs/camel/ProducerTestBase.java#Correlate
ProducerTemplate
----------------
The `UntypedProducerActor`_ class is a very convenient way for actors to produce messages to Camel endpoints.
Actors may also use a Camel `ProducerTemplate`_ for producing messages to endpoints.
.. includecode:: code/docs/camel/MyActor.java#ProducerTemplate
For initiating a two-way message exchange, one of the
``ProducerTemplate.request*`` methods must be used.
.. includecode:: code/docs/camel/RequestBodyActor.java#RequestProducerTemplate
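
In case the referenced sample is not shown inline, such an actor might be
sketched as follows (the ``direct:news`` endpoint is illustrative):

.. code-block:: java

   import akka.actor.UntypedActor;
   import akka.camel.Camel;
   import akka.camel.CamelExtension;
   import org.apache.camel.ProducerTemplate;

   public class RequestBodyActor extends UntypedActor {
     public void onReceive(Object message) {
       Camel camel = CamelExtension.get(getContext().system());
       ProducerTemplate template = camel.template();
       // requestBody initiates an in-out exchange and returns the reply
       getSender().tell(template.requestBody("direct:news", message), getSelf());
     }
   }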
.. _UntypedProducerActor: http://github.com/akka/akka/blob/master/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala
.. _ProducerTemplate: https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/ProducerTemplate.java
.. _camel-asynchronous-routing-java:
Asynchronous routing
====================
In-out message exchanges between endpoints and actors are
designed to be asynchronous. This is the case for both consumer and producer
actors.
* A consumer endpoint sends request messages to its consumer actor using the ``!``
(tell) operator and the actor returns responses with ``sender !`` once they are
ready.
* A producer actor sends request messages to its endpoint using Camel's
asynchronous routing engine. Asynchronous responses are wrapped and added to the
producer actor's mailbox for later processing. By default, response messages are
returned to the initial sender but this can be overridden by Producer
implementations (see also description of the ``onRouteResponse`` method
in :ref:`camel-custom-processing-java`).
However, asynchronous two-way message exchanges, without allocating a thread for
the full duration of the exchange, cannot be generically supported by Camel's
asynchronous routing engine alone. This must be supported by the individual
`Camel components`_ (from which endpoints are created) as well. They must be
able to suspend any work started for request processing (thereby freeing threads
to do other work) and resume processing when the response is ready. This is
currently the case for a `subset of components`_ such as the `Jetty component`_.
All other Camel components can still be used, of course, but they will cause
allocation of a thread for the duration of an in-out message exchange. There's
also a :ref:`camel-async-example-java` that implements both an asynchronous
consumer and an asynchronous producer, using the Jetty component.
.. _Camel components: http://camel.apache.org/components.html
.. _subset of components: http://camel.apache.org/asynchronous-routing-engine.html
.. _Jetty component: http://camel.apache.org/jetty.html
Custom Camel routes
===================
In all the examples so far, routes to consumer actors have been automatically
constructed by akka-camel when the actor was started. Although the default
route construction templates, used by akka-camel internally, are sufficient for
most use cases, some applications may require more specialized routes to actors.
The akka-camel module provides two mechanisms for customizing routes to actors,
which will be explained in this section. These are:
* Usage of :ref:`camel-components-java` to access actors.
Any Camel route can use these components to access Akka actors.
* :ref:`camel-intercepting-route-construction-java` to actors.
This option gives you the ability to change routes that have already been added to Camel.
Consumer actors have a hook into the route definition process which can be used to change the route.
.. _camel-components-java:
Akka Camel components
---------------------
Akka actors can be accessed from Camel routes using the `actor`_ Camel component. This component can be used to
access any Akka actor (not only consumer actors) from Camel routes, as described in the following sections.
.. _actor: http://github.com/akka/akka/blob/master/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala
.. _access-to-actors-java:
Access to actors
----------------
To access actors from custom Camel routes, the `actor`_ Camel
component should be used. It fully supports Camel's `asynchronous routing
engine`_.
.. _asynchronous routing engine: http://camel.apache.org/asynchronous-routing-engine.html
This component accepts the following endpoint URI format:
* ``[<actor-path>]?<options>``
where ``<actor-path>`` is the ``ActorPath`` to the actor. The ``<options>`` are
name-value pairs separated by ``&`` (i.e. ``name1=value1&name2=value2&...``).
URI options
^^^^^^^^^^^
The following URI options are supported:
+--------------+----------+---------+------------------------------------------------+
| Name | Type | Default | Description |
+==============+==========+=========+================================================+
| replyTimeout | Duration | false | The reply timeout, specified in the same |
| | | | way that you use the duration in akka, |
| | | | for instance ``10 seconds`` except that |
| | | | in the url it is handy to use a + |
| | | | between the amount and the unit, like |
| | | | for example ``200+millis`` |
| | | | |
| | | | See also :ref:`camel-timeout-java`. |
+--------------+----------+---------+------------------------------------------------+
| autoAck | Boolean | true | If set to true, in-only message exchanges |
| | | | are auto-acknowledged when the message is |
| | | | added to the actor's mailbox. If set to |
| | | | false, actors must acknowledge the |
| | | | receipt of the message. |
| | | | |
| | | | See also :ref:`camel-acknowledgements-java`. |
+--------------+----------+---------+------------------------------------------------+
Here's an actor endpoint URI example containing an actor path::
akka://some-system/user/myconsumer?autoAck=false&replyTimeout=100+millis
In the following example, a custom route to an actor is created, using the
actor's path.
.. includecode:: code/docs/camel/Responder.java#CustomRoute
.. includecode:: code/docs/camel/CustomRouteBuilder.java#CustomRoute
.. includecode:: code/docs/camel/CustomRouteTestBase.java#CustomRoute
``CamelPath.toCamelUri`` converts the ``ActorRef`` to the Camel actor component URI format, which points to the actor endpoint as described above.
When a message is received on the jetty endpoint, it is routed to the Responder actor, which in turn replies to the client of
the HTTP request.
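
As a rough sketch, such a custom route could also be written directly against
the actor endpoint URI format described above (system name, actor path and
options are illustrative):

.. code-block:: java

   import org.apache.camel.builder.RouteBuilder;

   public class ResponderRoute extends RouteBuilder {
     public void configure() {
       // route HTTP requests to the actor endpoint described above
       from("jetty:http://localhost:8877/camel/custom")
         .to("akka://some-system/user/responder?autoAck=false&replyTimeout=1+second");
     }
   }

The route builder would then be added to the current CamelContext, e.g. via
``CamelExtension.get(system).context().addRoutes(new ResponderRoute())``.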
.. _camel-intercepting-route-construction-java:
Intercepting route construction
-------------------------------
The previous section, :ref:`camel-components-java`, explained how to set up a route to
an actor manually.
It was the application's responsibility to define the route and add it to the current CamelContext.
This section explains a more convenient way to define custom routes: akka-camel still sets up the routes to consumer actors
(and adds these routes to the current CamelContext), but applications can define extensions to these routes.
Extensions can be defined with Camel's `Java DSL`_ or `Scala DSL`_. For example, an extension could be a custom error handler that redelivers messages from an endpoint to an actor's bounded mailbox when the mailbox was full.
.. _Java DSL: http://camel.apache.org/dsl.html
.. _Scala DSL: http://camel.apache.org/scala-dsl.html
The following examples demonstrate how to extend a route to a consumer actor for
handling exceptions thrown by that actor.
.. includecode:: code/docs/camel/ErrorThrowingConsumer.java#ErrorThrowingConsumer
The above ErrorThrowingConsumer sends the Failure back to the sender in preRestart,
because the Exception thrown in the actor would otherwise just crash the actor;
by default the actor would be restarted, and the response would never reach the client of the Consumer.
The akka-camel module creates a RouteDefinition instance by calling
from(endpointUri) on a Camel RouteBuilder (where endpointUri is the endpoint URI
of the consumer actor) and passes that instance as argument to the route
definition handler \*). The route definition handler then extends the route and
returns a ProcessorDefinition (in the above example, the ProcessorDefinition
returned by the end method. See the `org.apache.camel.model`__ package for
details). After executing the route definition handler, akka-camel finally calls
to(targetActorUri) on the returned ProcessorDefinition to complete the
route to the consumer actor (where targetActorUri is the actor component URI as described in :ref:`access-to-actors-java`).
If the actor cannot be found, an `ActorNotRegisteredException` is thrown.
\*) Before passing the RouteDefinition instance to the route definition handler,
akka-camel may make some further modifications to it.
__ https://svn.apache.org/repos/asf/camel/tags/camel-2.8.0/camel-core/src/main/java/org/apache/camel/model/
.. _camel-examples-java:
Examples
========
.. _camel-async-example-java:
Asynchronous routing and transformation example
-----------------------------------------------
This example demonstrates how to implement consumer and producer actors that
support :ref:`camel-asynchronous-routing-java` with their Camel endpoints. The sample
application transforms the content of the Akka homepage, http://akka.io, by
replacing every occurrence of *Akka* with *AKKA*. To run this example, add
a Boot class that starts the actors. After starting
the :ref:`microkernel-java`, direct the browser to http://localhost:8875 and the
transformed Akka homepage should be displayed. Please note that this example
will probably not work if you're behind an HTTP proxy.
The following figure gives an overview how the example actors interact with
external systems and with each other. A browser sends a GET request to
http://localhost:8875 which is the published endpoint of the ``HttpConsumer``
actor. The ``HttpConsumer`` actor forwards the requests to the ``HttpProducer``
actor which retrieves the Akka homepage from http://akka.io. The retrieved HTML
is then forwarded to the ``HttpTransformer`` actor which replaces all occurrences
of *Akka* with *AKKA*. The transformation result is sent back to the HttpConsumer,
which finally returns it to the browser.
.. image:: ../modules/camel-async-interact.png
Implementing the example actor classes and wiring them together is rather easy
as shown in the following snippet.
.. includecode:: code/docs/camel/sample/http/HttpConsumer.java#HttpExample
.. includecode:: code/docs/camel/sample/http/HttpProducer.java#HttpExample
.. includecode:: code/docs/camel/sample/http/HttpTransformer.java#HttpExample
.. includecode:: code/docs/camel/sample/http/HttpSample.java#HttpExample
The `jetty endpoints`_ of HttpConsumer and HttpProducer support asynchronous
in-out message exchanges and do not allocate threads for the full duration of
the exchange. This is achieved by using `Jetty continuations`_ on the
consumer-side and by using `Jetty's asynchronous HTTP client`_ on the producer
side. The following high-level sequence diagram illustrates that.
.. _jetty endpoints: http://camel.apache.org/jetty.html
.. _Jetty continuations: http://wiki.eclipse.org/Jetty/Feature/Continuations
.. _Jetty's asynchronous HTTP client: http://wiki.eclipse.org/Jetty/Tutorial/HttpClient
.. image:: ../modules/camel-async-sequence.png
Custom Camel route example
--------------------------
This section also demonstrates the combined usage of a ``Producer`` and a
``Consumer`` actor as well as the inclusion of a custom Camel route. The
following figure gives an overview.
.. image:: ../modules/camel-custom-route.png
* A consumer actor receives a message from an HTTP client
* It forwards the message to another actor that transforms the message (encloses
the original message into hyphens)
* The transformer actor forwards the transformed message to a producer actor
* The producer actor sends the message to a custom Camel route beginning at the
``direct:welcome`` endpoint
* A processor (transformer) in the custom Camel route prepends "Welcome" to the
original message and creates a result message
* The producer actor sends the result back to the consumer actor which returns
it to the HTTP client
The consumer, transformer and
producer actor implementations are as follows.
.. includecode:: code/docs/camel/sample/route/Consumer3.java#CustomRouteExample
.. includecode:: code/docs/camel/sample/route/Transformer.java#CustomRouteExample
.. includecode:: code/docs/camel/sample/route/Producer1.java#CustomRouteExample
.. includecode:: code/docs/camel/sample/route/CustomRouteSample.java#CustomRouteExample
The producer actor knows where to send the reply because the consumer and
transformer actors have forwarded the original sender reference as well. The
application configuration and the route starting from direct:welcome are done in the code above.
To run the example, add the lines shown in the example to a Boot class, then start the :ref:`microkernel-java` and POST a message to
``http://localhost:8877/camel/welcome``.
.. code-block:: none
curl -H "Content-Type: text/plain" -d "Anke" http://localhost:8877/camel/welcome
The response should be:
.. code-block:: none
Welcome - Anke -
Quartz Scheduler Example
------------------------
Here is an example showing how simple it is to implement a cron-style scheduler by
using the Camel Quartz component in Akka.
The following example creates a "timer" actor which fires a message every 2
seconds:
.. includecode:: code/docs/camel/sample/quartz/MyQuartzActor.java#QuartzExample
.. includecode:: code/docs/camel/sample/quartz/QuartzSample.java#QuartzExample
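
In case the referenced files are not shown inline, the timer actor might be
sketched like this (the cron expression fires every 2 seconds; names are
illustrative):

.. code-block:: java

   import akka.camel.CamelMessage;
   import akka.camel.javaapi.UntypedConsumerActor;

   public class MyQuartzActor extends UntypedConsumerActor {
     public String getEndpointUri() {
       // Camel Quartz endpoint, firing every 2 seconds
       return "quartz://example?cron=0/2+*+*+*+*+?";
     }

     public void onReceive(Object message) {
       if (message instanceof CamelMessage)
         System.out.println("tick");
       else
         unhandled(message);
     }
   }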
For more information about the Camel Quartz component, see here:
http://camel.apache.org/quartz.html

View file

@ -0,0 +1,8 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor
import org.scalatest.junit.JUnitSuite
class FSMDocTest extends FSMDocTestBase with JUnitSuite

View file

@ -0,0 +1,194 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor;
//#imports-data
import java.util.ArrayList;
import java.util.List;
import akka.actor.ActorRef;
//#imports-data
//#imports-actor
import akka.event.LoggingAdapter;
import akka.event.Logging;
import akka.actor.UntypedActor;
//#imports-actor
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.testkit.TestProbe;
import akka.testkit.AkkaSpec;
public class FSMDocTestBase {
//#data
public static final class SetTarget {
final ActorRef ref;
public SetTarget(ActorRef ref) {
this.ref = ref;
}
}
public static final class Queue {
final Object o;
public Queue(Object o) {
this.o = o;
}
}
public static final Object flush = new Object();
public static final class Batch {
final List<Object> objects;
public Batch(List<Object> objects) {
this.objects = objects;
}
}
//#data
//#base
static abstract class MyFSMBase extends UntypedActor {
/*
* This is the mutable state of this state machine.
*/
protected enum State {
IDLE, ACTIVE;
}
private State state = State.IDLE;
private ActorRef target;
private List<Object> queue;
/*
* Then come all the mutator methods:
*/
protected void init(ActorRef target) {
this.target = target;
queue = new ArrayList<Object>();
}
protected void setState(State s) {
if (state != s) {
transition(state, s);
state = s;
}
}
protected void enqueue(Object o) {
if (queue != null)
queue.add(o);
}
protected List<Object> drainQueue() {
final List<Object> q = queue;
if (q == null)
throw new IllegalStateException("drainQueue(): not yet initialized");
queue = new ArrayList<Object>();
return q;
}
/*
* Here are the interrogation methods:
*/
protected boolean isInitialized() {
return target != null;
}
protected State getState() {
return state;
}
protected ActorRef getTarget() {
if (target == null)
throw new IllegalStateException("getTarget(): not yet initialized");
return target;
}
/*
* And finally the callbacks (only one in this example: react to state change)
*/
abstract protected void transition(State old, State next);
}
//#base
//#actor
static public class MyFSM extends MyFSMBase {
private final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
@Override
public void onReceive(Object o) {
if (getState() == State.IDLE) {
if (o instanceof SetTarget)
init(((SetTarget) o).ref);
else
whenUnhandled(o);
} else if (getState() == State.ACTIVE) {
if (o == flush)
setState(State.IDLE);
else
whenUnhandled(o);
}
}
@Override
public void transition(State old, State next) {
if (old == State.ACTIVE) {
getTarget().tell(new Batch(drainQueue()), getSelf());
}
}
private void whenUnhandled(Object o) {
if (o instanceof Queue && isInitialized()) {
enqueue(((Queue) o).o);
setState(State.ACTIVE);
} else {
log.warning("received unknown message {} in state {}", o, getState());
}
}
}
//#actor
ActorSystem system;
@org.junit.Before
public void setUp() {
system = ActorSystem.create("FSMSystem", AkkaSpec.testConf());
}
@org.junit.Test
public void mustBunch() {
final ActorRef buncher = system.actorOf(new Props(MyFSM.class));
final TestProbe probe = new TestProbe(system);
buncher.tell(new SetTarget(probe.ref()), null);
buncher.tell(new Queue(1), null);
buncher.tell(new Queue(2), null);
buncher.tell(flush, null);
buncher.tell(new Queue(3), null);
final Batch b = probe.expectMsgClass(Batch.class);
assert b.objects.size() == 2;
assert b.objects.contains(1);
assert b.objects.contains(2);
}
@org.junit.After
public void cleanup() {
system.shutdown();
}
}

View file

@ -0,0 +1,7 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor
import org.scalatest.junit.JUnitSuite
class FaultHandlingTest extends FaultHandlingTestBase with JUnitSuite

View file

@ -0,0 +1,214 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor;
//#testkit
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.SupervisorStrategy;
import static akka.actor.SupervisorStrategy.*;
import akka.actor.OneForOneStrategy;
import akka.actor.Props;
import akka.actor.Terminated;
import akka.actor.UntypedActor;
import scala.concurrent.Await;
import static akka.pattern.Patterns.ask;
import scala.concurrent.util.Duration;
import akka.testkit.AkkaSpec;
import akka.testkit.TestProbe;
//#testkit
import akka.testkit.ErrorFilter;
import akka.testkit.EventFilter;
import akka.testkit.TestEvent;
import static java.util.concurrent.TimeUnit.SECONDS;
import akka.japi.Function;
import scala.Option;
import scala.collection.JavaConverters;
import scala.collection.Seq;
import org.junit.Test;
import org.junit.BeforeClass;
import org.junit.AfterClass;
//#testkit
public class FaultHandlingTestBase {
//#testkit
//#supervisor
static public class Supervisor extends UntypedActor {
//#strategy
private static SupervisorStrategy strategy = new OneForOneStrategy(10, Duration.parse("1 minute"),
new Function<Throwable, Directive>() {
@Override
public Directive apply(Throwable t) {
if (t instanceof ArithmeticException) {
return resume();
} else if (t instanceof NullPointerException) {
return restart();
} else if (t instanceof IllegalArgumentException) {
return stop();
} else {
return escalate();
}
}
});
@Override
public SupervisorStrategy supervisorStrategy() {
return strategy;
}
//#strategy
public void onReceive(Object o) {
if (o instanceof Props) {
getSender().tell(getContext().actorOf((Props) o), getSelf());
} else {
unhandled(o);
}
}
}
//#supervisor
//#supervisor2
static public class Supervisor2 extends UntypedActor {
//#strategy2
private static SupervisorStrategy strategy = new OneForOneStrategy(10, Duration.parse("1 minute"),
new Function<Throwable, Directive>() {
@Override
public Directive apply(Throwable t) {
if (t instanceof ArithmeticException) {
return resume();
} else if (t instanceof NullPointerException) {
return restart();
} else if (t instanceof IllegalArgumentException) {
return stop();
} else {
return escalate();
}
}
});
@Override
public SupervisorStrategy supervisorStrategy() {
return strategy;
}
//#strategy2
public void onReceive(Object o) {
if (o instanceof Props) {
getSender().tell(getContext().actorOf((Props) o), getSelf());
} else {
unhandled(o);
}
}
@Override
public void preRestart(Throwable cause, Option<Object> msg) {
// do not kill all children, which is the default here
}
}
//#supervisor2
//#child
static public class Child extends UntypedActor {
int state = 0;
public void onReceive(Object o) throws Exception {
if (o instanceof Exception) {
throw (Exception) o;
} else if (o instanceof Integer) {
state = (Integer) o;
} else if (o.equals("get")) {
getSender().tell(state, getSelf());
} else {
unhandled(o);
}
}
}
//#child
//#testkit
static ActorSystem system;
Duration timeout = Duration.create(5, SECONDS);
@BeforeClass
public static void start() {
system = ActorSystem.create("test", AkkaSpec.testConf());
}
@AfterClass
public static void cleanup() {
system.shutdown();
}
@Test
public void mustEmploySupervisorStrategy() throws Exception {
// code here
//#testkit
EventFilter ex1 = (EventFilter) new ErrorFilter(ArithmeticException.class);
EventFilter ex2 = (EventFilter) new ErrorFilter(NullPointerException.class);
EventFilter ex3 = (EventFilter) new ErrorFilter(IllegalArgumentException.class);
EventFilter ex4 = (EventFilter) new ErrorFilter(Exception.class);
Seq<EventFilter> ignoreExceptions = seq(ex1, ex2, ex3, ex4);
system.eventStream().publish(new TestEvent.Mute(ignoreExceptions));
//#create
Props superprops = new Props(Supervisor.class);
ActorRef supervisor = system.actorOf(superprops, "supervisor");
ActorRef child = (ActorRef) Await.result(ask(supervisor, new Props(Child.class), 5000), timeout);
//#create
//#resume
child.tell(42, null);
assert Await.result(ask(child, "get", 5000), timeout).equals(42);
child.tell(new ArithmeticException(), null);
assert Await.result(ask(child, "get", 5000), timeout).equals(42);
//#resume
//#restart
child.tell(new NullPointerException(), null);
assert Await.result(ask(child, "get", 5000), timeout).equals(0);
//#restart
//#stop
final TestProbe probe = new TestProbe(system);
probe.watch(child);
child.tell(new IllegalArgumentException(), null);
probe.expectMsgClass(Terminated.class);
//#stop
//#escalate-kill
child = (ActorRef) Await.result(ask(supervisor, new Props(Child.class), 5000), timeout);
probe.watch(child);
assert Await.result(ask(child, "get", 5000), timeout).equals(0);
child.tell(new Exception(), null);
probe.expectMsgClass(Terminated.class);
//#escalate-kill
//#escalate-restart
superprops = new Props(Supervisor2.class);
supervisor = system.actorOf(superprops);
child = (ActorRef) Await.result(ask(supervisor, new Props(Child.class), 5000), timeout);
child.tell(23, null);
assert Await.result(ask(child, "get", 5000), timeout).equals(23);
child.tell(new Exception(), null);
assert Await.result(ask(child, "get", 5000), timeout).equals(0);
//#escalate-restart
//#testkit
}
//#testkit
public <A> Seq<A> seq(A... args) {
return JavaConverters.collectionAsScalaIterableConverter(java.util.Arrays.asList(args)).asScala().toSeq();
}
//#testkit
}
//#testkit

View file

@ -0,0 +1,21 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.PoisonPill;
import akka.actor.UntypedActor;
//#context-actorOf
public class FirstUntypedActor extends UntypedActor {
ActorRef myActor = getContext().actorOf(new Props(MyActor.class), "myactor");
//#context-actorOf
public void onReceive(Object message) {
myActor.forward(message, getContext());
myActor.tell(PoisonPill.getInstance(), null);
}
}

View file

@ -0,0 +1,28 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
//#immutable-message
public class ImmutableMessage {
private final int sequenceNumber;
private final List<String> values;
public ImmutableMessage(int sequenceNumber, List<String> values) {
this.sequenceNumber = sequenceNumber;
this.values = Collections.unmodifiableList(new ArrayList<String>(values));
}
public int getSequenceNumber() {
return sequenceNumber;
}
public List<String> getValues() {
return values;
}
}
//#immutable-message

View file

@ -0,0 +1,27 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor;
//#receive-timeout
import akka.actor.ReceiveTimeout;
import akka.actor.UntypedActor;
import scala.concurrent.util.Duration;
public class MyReceivedTimeoutUntypedActor extends UntypedActor {
public MyReceivedTimeoutUntypedActor() {
getContext().setReceiveTimeout(Duration.parse("30 seconds"));
}
public void onReceive(Object message) {
if (message.equals("Hello")) {
getSender().tell("Hello world", getSelf());
} else if (message == ReceiveTimeout.getInstance()) {
throw new RuntimeException("received timeout");
} else {
unhandled(message);
}
}
}
//#receive-timeout

View file

@ -0,0 +1,22 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor;
//#my-untyped-actor
import akka.actor.UntypedActor;
import akka.event.Logging;
import akka.event.LoggingAdapter;
public class MyUntypedActor extends UntypedActor {
LoggingAdapter log = Logging.getLogger(getContext().system(), this);
public void onReceive(Object message) throws Exception {
if (message instanceof String)
log.info("Received String message: {}", message);
else
unhandled(message);
}
}
//#my-untyped-actor

View file

@ -0,0 +1,8 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor
import org.scalatest.junit.JUnitSuite
class SchedulerDocTest extends SchedulerDocTestBase with JUnitSuite

View file

@ -0,0 +1,89 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor;
//#imports1
import akka.actor.Props;
import scala.concurrent.util.Duration;
import java.util.concurrent.TimeUnit;
//#imports1
//#imports2
import akka.actor.UntypedActor;
import akka.actor.UntypedActorFactory;
import akka.actor.Cancellable;
//#imports2
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.testkit.AkkaSpec;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class SchedulerDocTestBase {
ActorSystem system;
ActorRef testActor;
@Before
public void setUp() {
system = ActorSystem.create("MySystem", AkkaSpec.testConf());
testActor = system.actorOf(new Props(MyUntypedActor.class));
}
@After
public void tearDown() {
system.shutdown();
}
@Test
public void scheduleOneOffTask() {
//#schedule-one-off-message
//Schedules to send the "foo"-message to the testActor after 50ms
system.scheduler().scheduleOnce(Duration.create(50, TimeUnit.MILLISECONDS), testActor, "foo", system.dispatcher());
//#schedule-one-off-message
//#schedule-one-off-thunk
//Schedules a Runnable to be executed (send the current time) to the testActor after 50ms
system.scheduler().scheduleOnce(Duration.create(50, TimeUnit.MILLISECONDS), new Runnable() {
@Override
public void run() {
testActor.tell(System.currentTimeMillis(), null);
}
}, system.dispatcher());
//#schedule-one-off-thunk
}
@Test
public void scheduleRecurringTask() {
//#schedule-recurring
ActorRef tickActor = system.actorOf(new Props().withCreator(new UntypedActorFactory() {
public UntypedActor create() {
return new UntypedActor() {
public void onReceive(Object message) {
if (message.equals("Tick")) {
// Do something
} else {
unhandled(message);
}
}
};
}
}));
//This will schedule to send the Tick-message
//to the tickActor after 0ms repeating every 50ms
Cancellable cancellable = system.scheduler().schedule(Duration.Zero(), Duration.create(50, TimeUnit.MILLISECONDS),
tickActor, "Tick", system.dispatcher());
//This cancels further Ticks to be sent
cancellable.cancel();
//#schedule-recurring
system.stop(tickActor);
}
}

View file

@ -0,0 +1,8 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor
import org.scalatest.junit.JUnitSuite
class TypedActorDocTest extends TypedActorDocTestBase with JUnitSuite

View file

@ -0,0 +1,187 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor;
//#imports
import akka.actor.TypedActor;
import akka.actor.*;
import akka.japi.*;
import akka.dispatch.Futures;
import scala.concurrent.Await;
import scala.concurrent.Future;
import scala.concurrent.util.Duration;
import java.util.concurrent.TimeUnit;
//#imports
import java.lang.Exception;
import org.junit.Test;
import static org.junit.Assert.*;
public class TypedActorDocTestBase {
Object someReference = null;
ActorSystem system = null;
//#typed-actor-iface
public static interface Squarer {
//#typed-actor-iface-methods
void squareDontCare(int i); //fire-forget
Future<Integer> square(int i); //non-blocking send-request-reply
Option<Integer> squareNowPlease(int i);//blocking send-request-reply
int squareNow(int i); //blocking send-request-reply
//#typed-actor-iface-methods
}
//#typed-actor-iface
//#typed-actor-impl
static class SquarerImpl implements Squarer {
private String name;
public SquarerImpl() {
this.name = "default";
}
public SquarerImpl(String name) {
this.name = name;
}
//#typed-actor-impl-methods
public void squareDontCare(int i) {
int sq = i * i; //Nobody cares :(
}
public Future<Integer> square(int i) {
return Futures.successful(i * i);
}
public Option<Integer> squareNowPlease(int i) {
return Option.some(i * i);
}
public int squareNow(int i) {
return i * i;
}
//#typed-actor-impl-methods
}
//#typed-actor-impl
@Test public void mustGetTheTypedActorExtension() {
try {
//#typed-actor-extension-tools
//Returns the Typed Actor Extension
TypedActorExtension extension =
TypedActor.get(system); //system is an instance of ActorSystem
//Returns whether the reference is a Typed Actor Proxy or not
TypedActor.get(system).isTypedActor(someReference);
//Returns the backing Akka Actor behind an external Typed Actor Proxy
TypedActor.get(system).getActorRefFor(someReference);
//Returns the current ActorContext,
// method only valid within methods of a TypedActor implementation
ActorContext context = TypedActor.context();
//Returns the external proxy of the current Typed Actor,
// method only valid within methods of a TypedActor implementation
Squarer sq = TypedActor.<Squarer>self();
//Returns a contextual instance of the Typed Actor Extension
//this means that if you create other Typed Actors with this,
//they will become children to the current Typed Actor.
TypedActor.get(TypedActor.context());
//#typed-actor-extension-tools
} catch (Exception e) {
//dun care
}
}
@Test public void createATypedActor() {
try {
//#typed-actor-create1
Squarer mySquarer =
TypedActor.get(system).typedActorOf(new TypedProps<SquarerImpl>(Squarer.class, SquarerImpl.class));
//#typed-actor-create1
//#typed-actor-create2
Squarer otherSquarer =
TypedActor.get(system).typedActorOf(new TypedProps<SquarerImpl>(Squarer.class,
new Creator<SquarerImpl>() {
public SquarerImpl create() { return new SquarerImpl("foo"); }
}),
"name");
//#typed-actor-create2
//#typed-actor-calls
//#typed-actor-call-oneway
mySquarer.squareDontCare(10);
//#typed-actor-call-oneway
//#typed-actor-call-future
Future<Integer> fSquare = mySquarer.square(10); //A Future[Int]
//#typed-actor-call-future
//#typed-actor-call-option
Option<Integer> oSquare = mySquarer.squareNowPlease(10); //Option[Int]
//#typed-actor-call-option
//#typed-actor-call-strict
int iSquare = mySquarer.squareNow(10); //Int
//#typed-actor-call-strict
//#typed-actor-calls
assertEquals(100, Await.result(fSquare, Duration.create(3, TimeUnit.SECONDS)).intValue());
assertEquals(100, oSquare.get().intValue());
assertEquals(100, iSquare);
//#typed-actor-stop
TypedActor.get(system).stop(mySquarer);
//#typed-actor-stop
//#typed-actor-poisonpill
TypedActor.get(system).poisonPill(otherSquarer);
//#typed-actor-poisonpill
} catch(Exception e) {
//Ignore
}
}
@Test public void createHierarchies() {
try {
//#typed-actor-hierarchy
Squarer childSquarer =
TypedActor.get(TypedActor.context()).
typedActorOf(
new TypedProps<SquarerImpl>(Squarer.class, SquarerImpl.class)
);
//Use "childSquarer" as a Squarer
//#typed-actor-hierarchy
} catch (Exception e) {
//dun care
}
}
@Test public void proxyAnyActorRef() {
try {
//#typed-actor-remote
Squarer typedActor =
TypedActor.get(system).
typedActorOf(
new TypedProps<Squarer>(Squarer.class),
system.actorFor("akka://SomeSystem@somehost:2552/user/some/foobar")
);
//Use "typedActor" as a FooBar
//#typed-actor-remote
} catch (Exception e) {
//dun care
}
}
}

View file

@ -0,0 +1,8 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor
import org.scalatest.junit.JUnitSuite
class UntypedActorDocTest extends UntypedActorDocTestBase with JUnitSuite

View file

@ -0,0 +1,396 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor;
//#imports
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
//#imports
//#import-future
import scala.concurrent.Future;
import akka.dispatch.Futures;
import akka.dispatch.Mapper;
import scala.concurrent.Await;
import scala.concurrent.util.Duration;
import akka.util.Timeout;
//#import-future
//#import-actors
import akka.actor.PoisonPill;
import akka.actor.Kill;
//#import-actors
//#import-procedure
import akka.japi.Procedure;
//#import-procedure
//#import-watch
import akka.actor.Terminated;
//#import-watch
//#import-gracefulStop
import static akka.pattern.Patterns.gracefulStop;
import scala.concurrent.Future;
import scala.concurrent.Await;
import scala.concurrent.util.Duration;
import akka.pattern.AskTimeoutException;
//#import-gracefulStop
//#import-askPipe
import static akka.pattern.Patterns.ask;
import static akka.pattern.Patterns.pipe;
import scala.concurrent.Future;
import akka.dispatch.Futures;
import scala.concurrent.util.Duration;
import akka.util.Timeout;
import java.util.concurrent.TimeUnit;
import java.util.ArrayList;
//#import-askPipe
//#import-stash
import akka.actor.UntypedActorWithStash;
//#import-stash
import akka.actor.UntypedActor;
import akka.actor.UntypedActorFactory;
import org.junit.Test;
import scala.Option;
import java.lang.Object;
import java.util.Iterator;
import akka.pattern.Patterns;
public class UntypedActorDocTestBase {
@Test
public void createProps() {
//#creating-props-config
Props props1 = new Props();
Props props2 = new Props(MyUntypedActor.class);
Props props3 = new Props(new UntypedActorFactory() {
public UntypedActor create() {
return new MyUntypedActor();
}
});
Props props4 = props1.withCreator(new UntypedActorFactory() {
public UntypedActor create() {
return new MyUntypedActor();
}
});
//#creating-props-config
}
@Test
public void systemActorOf() {
//#system-actorOf
ActorSystem system = ActorSystem.create("MySystem");
ActorRef myActor = system.actorOf(new Props(MyUntypedActor.class), "myactor");
//#system-actorOf
myActor.tell("test", null);
system.shutdown();
}
@Test
public void contextActorOf() {
//#context-actorOf
ActorSystem system = ActorSystem.create("MySystem");
ActorRef myActor = system.actorOf(new Props(MyUntypedActor.class), "myactor");
//#context-actorOf
myActor.tell("test", null);
system.shutdown();
}
@Test
public void constructorActorOf() {
ActorSystem system = ActorSystem.create("MySystem");
//#creating-constructor
// allows passing in arguments to the MyActor constructor
ActorRef myActor = system.actorOf(new Props(new UntypedActorFactory() {
public UntypedActor create() {
return new MyActor("...");
}
}), "myactor");
//#creating-constructor
myActor.tell("test", null);
system.shutdown();
}
@Test
public void propsActorOf() {
ActorSystem system = ActorSystem.create("MySystem");
//#creating-props
ActorRef myActor = system.actorOf(new Props(MyUntypedActor.class).withDispatcher("my-dispatcher"), "myactor");
//#creating-props
myActor.tell("test", null);
system.shutdown();
}
@Test
public void usingAsk() throws Exception {
ActorSystem system = ActorSystem.create("MySystem");
ActorRef myActor = system.actorOf(new Props(new UntypedActorFactory() {
public UntypedActor create() {
return new MyAskActor();
}
}), "myactor");
//#using-ask
Future<Object> future = Patterns.ask(myActor, "Hello", 1000);
Object result = Await.result(future, Duration.create(1, TimeUnit.SECONDS));
//#using-ask
system.shutdown();
}
@Test
public void receiveTimeout() {
ActorSystem system = ActorSystem.create("MySystem");
ActorRef myActor = system.actorOf(new Props(MyReceivedTimeoutUntypedActor.class));
myActor.tell("Hello", null);
system.shutdown();
}
@Test
public void usePoisonPill() {
ActorSystem system = ActorSystem.create("MySystem");
ActorRef myActor = system.actorOf(new Props(MyUntypedActor.class));
//#poison-pill
myActor.tell(PoisonPill.getInstance(), null);
//#poison-pill
system.shutdown();
}
@Test
public void useKill() {
ActorSystem system = ActorSystem.create("MySystem");
ActorRef victim = system.actorOf(new Props(MyUntypedActor.class));
//#kill
victim.tell(Kill.getInstance(), null);
//#kill
system.shutdown();
}
@Test
public void useBecome() {
ActorSystem system = ActorSystem.create("MySystem");
ActorRef myActor = system.actorOf(new Props(new UntypedActorFactory() {
public UntypedActor create() {
return new HotSwapActor();
}
}));
myActor.tell("foo", null);
myActor.tell("bar", null);
myActor.tell("bar", null);
system.shutdown();
}
@Test
public void useWatch() throws Exception {
ActorSystem system = ActorSystem.create("MySystem");
ActorRef myActor = system.actorOf(new Props(WatchActor.class));
Future<Object> future = Patterns.ask(myActor, "kill", 1000);
assert Await.result(future, Duration.parse("1 second")).equals("finished");
system.shutdown();
}
@Test
public void usePatternsGracefulStop() throws Exception {
ActorSystem system = ActorSystem.create("MySystem");
ActorRef actorRef = system.actorOf(new Props(MyUntypedActor.class));
//#gracefulStop
try {
Future<Boolean> stopped = gracefulStop(actorRef, Duration.create(5, TimeUnit.SECONDS), system);
Await.result(stopped, Duration.create(6, TimeUnit.SECONDS));
// the actor has been stopped
} catch (AskTimeoutException e) {
// the actor wasn't stopped within 5 seconds
}
//#gracefulStop
system.shutdown();
}
class Result {
final int x;
final String s;
public Result(int x, String s) {
this.x = x;
this.s = s;
}
}
@Test
public void usePatternsAskPipe() {
ActorSystem system = ActorSystem.create("MySystem");
ActorRef actorA = system.actorOf(new Props(MyUntypedActor.class));
ActorRef actorB = system.actorOf(new Props(MyUntypedActor.class));
ActorRef actorC = system.actorOf(new Props(MyUntypedActor.class));
//#ask-pipe
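// gather the replies as Futures, combine them with map, and pipe the final
// Result to actorC; no thread is blocked while waiting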
final Timeout t = new Timeout(Duration.create(5, TimeUnit.SECONDS));
final ArrayList<Future<Object>> futures = new ArrayList<Future<Object>>();
futures.add(ask(actorA, "request", 1000)); // using 1000ms timeout
futures.add(ask(actorB, "another request", t)); // using timeout from above
final Future<Iterable<Object>> aggregate = Futures.sequence(futures, system.dispatcher());
final Future<Result> transformed = aggregate.map(new Mapper<Iterable<Object>, Result>() {
public Result apply(Iterable<Object> coll) {
final Iterator<Object> it = coll.iterator();
final String s = (String) it.next();
final int x = (Integer) it.next();
return new Result(x, s);
}
}, system.dispatcher());
pipe(transformed, system.dispatcher()).to(actorC);
//#ask-pipe
system.shutdown();
}
public static class MyActor extends UntypedActor {
public MyActor(String s) {
}
public void onReceive(Object message) throws Exception {
try {
operation();
} catch (Exception e) {
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
throw e;
}
}
private void operation() {
}
//#lifecycle-callbacks
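// the bodies below mirror the default implementations:
// preRestart stops all children and calls postStop,
// postRestart initializes the fresh instance by calling preStart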
public void preStart() {
}
public void preRestart(Throwable reason, Option<Object> message) {
for (ActorRef each : getContext().getChildren())
getContext().stop(each);
postStop();
}
public void postRestart(Throwable reason) {
preStart();
}
public void postStop() {
}
//#lifecycle-callbacks
}
public static class MyAskActor extends UntypedActor {
public void onReceive(Object message) throws Exception {
//#reply-exception
try {
String result = operation();
getSender().tell(result, getSelf());
} catch (Exception e) {
getSender().tell(new akka.actor.Status.Failure(e), getSelf());
throw e;
}
//#reply-exception
}
private String operation() {
return "Hi";
}
}
//#hot-swap-actor
public static class HotSwapActor extends UntypedActor {
Procedure<Object> angry = new Procedure<Object>() {
@Override
public void apply(Object message) {
if (message.equals("bar")) {
getSender().tell("I am already angry?", getSelf());
} else if (message.equals("foo")) {
getContext().become(happy);
}
}
};
Procedure<Object> happy = new Procedure<Object>() {
@Override
public void apply(Object message) {
if (message.equals("bar")) {
getSender().tell("I am already happy :-)", getSelf());
} else if (message.equals("foo")) {
getContext().become(angry);
}
}
};
public void onReceive(Object message) {
if (message.equals("bar")) {
getContext().become(angry);
} else if (message.equals("foo")) {
getContext().become(happy);
} else {
unhandled(message);
}
}
}
//#hot-swap-actor
//#stash
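// models an open/closed protocol: messages that do not fit the current state
// are stashed and replayed by unstashAll() on the next state change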
public static class ActorWithProtocol extends UntypedActorWithStash {
private boolean isOpen = false;
public void onReceive(Object msg) {
if (isOpen) {
if (msg.equals("write")) {
// do writing...
} else if (msg.equals("close")) {
unstashAll();
isOpen = false;
} else {
stash();
}
} else {
if (msg.equals("open")) {
unstashAll();
isOpen = true;
} else {
stash();
}
}
}
}
//#stash
//#watch
public static class WatchActor extends UntypedActor {
final ActorRef child = this.getContext().actorOf(Props.empty(), "child");
{
this.getContext().watch(child); // <-- this is the only call needed for registration
}
ActorRef lastSender = getContext().system().deadLetters();
@Override
public void onReceive(Object message) {
if (message.equals("kill")) {
getContext().stop(child);
lastSender = getSender();
} else if (message instanceof Terminated) {
final Terminated t = (Terminated) message;
if (t.getActor() == child) {
lastSender.tell("finished", getSelf());
}
} else {
unhandled(message);
}
}
}
//#watch
}

View file

@ -0,0 +1,56 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor;
import static docs.actor.UntypedActorSwapper.Swap.SWAP;
import akka.actor.ActorRef;
import akka.actor.Props;
import akka.actor.ActorSystem;
import akka.actor.UntypedActor;
import akka.event.Logging;
import akka.event.LoggingAdapter;
import akka.japi.Procedure;
//#swapper
public class UntypedActorSwapper {
public static class Swap {
public static final Swap SWAP = new Swap();
private Swap() {
}
}
public static class Swapper extends UntypedActor {
LoggingAdapter log = Logging.getLogger(getContext().system(), this);
public void onReceive(Object message) {
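// reference equality is safe here because SWAP is a singleton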
if (message == SWAP) {
log.info("Hi");
getContext().become(new Procedure<Object>() {
@Override
public void apply(Object message) {
log.info("Ho");
getContext().unbecome(); // resets the latest 'become' (just for fun)
}
});
} else {
unhandled(message);
}
}
}
public static void main(String... args) {
ActorSystem system = ActorSystem.create("MySystem");
ActorRef swap = system.actorOf(new Props(Swapper.class));
swap.tell(SWAP, null); // logs Hi
swap.tell(SWAP, null); // logs Ho
swap.tell(SWAP, null); // logs Hi
swap.tell(SWAP, null); // logs Ho
swap.tell(SWAP, null); // logs Hi
swap.tell(SWAP, null); // logs Ho
}
}
//#swapper

View file

@ -0,0 +1,479 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.actor.japi;
//#all
//#imports
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import akka.actor.*;
import akka.dispatch.Mapper;
import akka.japi.Function;
import scala.concurrent.util.Duration;
import akka.util.Timeout;
import akka.event.Logging;
import akka.event.LoggingAdapter;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import static akka.japi.Util.classTag;
import static akka.actor.SupervisorStrategy.*;
import static akka.pattern.Patterns.ask;
import static akka.pattern.Patterns.pipe;
import static docs.actor.japi.FaultHandlingDocSample.WorkerApi.*;
import static docs.actor.japi.FaultHandlingDocSample.CounterServiceApi.*;
import static docs.actor.japi.FaultHandlingDocSample.CounterApi.*;
import static docs.actor.japi.FaultHandlingDocSample.StorageApi.*;
//#imports
public class FaultHandlingDocSample {
/**
* Runs the sample
*/
public static void main(String[] args) {
Config config = ConfigFactory.parseString("akka.loglevel = DEBUG \n" + "akka.actor.debug.lifecycle = on");
ActorSystem system = ActorSystem.create("FaultToleranceSample", config);
ActorRef worker = system.actorOf(new Props(Worker.class), "worker");
ActorRef listener = system.actorOf(new Props(Listener.class), "listener");
// start the work and listen on progress
// note that the listener is used as sender of the tell,
// i.e. it will receive replies from the worker
worker.tell(Start, listener);
}
/**
* Listens on progress from the worker and shuts down the system when enough
* work has been done.
*/
public static class Listener extends UntypedActor {
final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
@Override
public void preStart() {
// If we don't get any progress within 15 seconds then the service is unavailable
getContext().setReceiveTimeout(Duration.parse("15 seconds"));
}
public void onReceive(Object msg) {
log.debug("received message {}", msg);
if (msg instanceof Progress) {
Progress progress = (Progress) msg;
log.info("Current progress: {} %", progress.percent);
if (progress.percent >= 100.0) {
log.info("That's all, shutting down");
getContext().system().shutdown();
}
} else if (msg == ReceiveTimeout.getInstance()) {
// No progress within 15 seconds, ServiceUnavailable
log.error("Shutting down due to unavailable service");
getContext().system().shutdown();
} else {
unhandled(msg);
}
}
}
//#messages
public interface WorkerApi {
public static final Object Start = "Start";
public static final Object Do = "Do";
public static class Progress {
public final double percent;
public Progress(double percent) {
this.percent = percent;
}
public String toString() {
return String.format("%s(%s)", getClass().getSimpleName(), percent);
}
}
}
//#messages
/**
* Worker performs some work when it receives the Start message. It will
* continuously notify the sender of the Start message of current Progress.
* The Worker supervises the CounterService.
*/
public static class Worker extends UntypedActor {
final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
final Timeout askTimeout = new Timeout(Duration.create(5, "seconds"));
// The sender of the initial Start message will continuously be notified about progress
ActorRef progressListener;
final ActorRef counterService = getContext().actorOf(new Props(CounterService.class), "counter");
final int totalCount = 51;
// Stop the CounterService child if it throws ServiceUnavailable
private static SupervisorStrategy strategy = new OneForOneStrategy(-1, Duration.Inf(),
new Function<Throwable, Directive>() {
@Override
public Directive apply(Throwable t) {
if (t instanceof ServiceUnavailable) {
return stop();
} else {
return escalate();
}
}
});
@Override
public SupervisorStrategy supervisorStrategy() {
return strategy;
}
public void onReceive(Object msg) {
log.debug("received message {}", msg);
if (msg.equals(Start) && progressListener == null) {
progressListener = getSender();
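// schedule a Do message to ourselves once a second to drive the work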
getContext().system().scheduler().schedule(
Duration.Zero(), Duration.create(1, "second"), getSelf(), Do, getContext().dispatcher()
);
} else if (msg.equals(Do)) {
counterService.tell(new Increment(1), getSelf());
counterService.tell(new Increment(1), getSelf());
counterService.tell(new Increment(1), getSelf());
// Send current progress to the initial sender
pipe(ask(counterService, GetCurrentCount, askTimeout)
.mapTo(classTag(CurrentCount.class))
.map(new Mapper<CurrentCount, Progress>() {
public Progress apply(CurrentCount c) {
return new Progress(100.0 * c.count / totalCount);
}
}, getContext().dispatcher()), getContext().dispatcher())
.to(progressListener);
} else {
unhandled(msg);
}
}
}
//#messages
public interface CounterServiceApi {
public static final Object GetCurrentCount = "GetCurrentCount";
public static class CurrentCount {
public final String key;
public final long count;
public CurrentCount(String key, long count) {
this.key = key;
this.count = count;
}
public String toString() {
return String.format("%s(%s, %s)", getClass().getSimpleName(), key, count);
}
}
public static class Increment {
public final long n;
public Increment(long n) {
this.n = n;
}
public String toString() {
return String.format("%s(%s)", getClass().getSimpleName(), n);
}
}
public static class ServiceUnavailable extends RuntimeException {
public ServiceUnavailable(String msg) {
super(msg);
}
}
}
//#messages
/**
* Adds the value received in an Increment message to a persistent counter.
* Replies with CurrentCount when it receives a GetCurrentCount message.
* CounterService supervises Storage and Counter.
*/
public static class CounterService extends UntypedActor {
// Reconnect message
static final Object Reconnect = "Reconnect";
private static class SenderMsgPair {
final ActorRef sender;
final Object msg;
SenderMsgPair(ActorRef sender, Object msg) {
this.msg = msg;
this.sender = sender;
}
}
final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
final String key = getSelf().path().name();
ActorRef storage;
ActorRef counter;
final List<SenderMsgPair> backlog = new ArrayList<SenderMsgPair>();
final int MAX_BACKLOG = 10000;
// Restart the storage child when StorageException is thrown.
// After 3 restarts within 5 seconds it will be stopped.
private static SupervisorStrategy strategy = new OneForOneStrategy(3, Duration.parse("5 seconds"),
new Function<Throwable, Directive>() {
@Override
public Directive apply(Throwable t) {
if (t instanceof StorageException) {
return restart();
} else {
return escalate();
}
}
});
@Override
public SupervisorStrategy supervisorStrategy() {
return strategy;
}
@Override
public void preStart() {
initStorage();
}
/**
* The child storage is restarted in case of failure, but if it is still
* failing after 3 restarts it will be stopped. Better to back off than to
* keep failing. When it has been stopped we schedule a Reconnect after a
* delay. We watch the child so that we receive a Terminated message when
* it has been terminated.
*/
void initStorage() {
storage = getContext().watch(getContext().actorOf(new Props(Storage.class), "storage"));
// Tell the counter, if any, to use the new storage
if (counter != null)
counter.tell(new UseStorage(storage), getSelf());
// We need the initial value to be able to operate
storage.tell(new Get(key), getSelf());
}
@Override
public void onReceive(Object msg) {
log.debug("received message {}", msg);
if (msg instanceof Entry && ((Entry) msg).key.equals(key) && counter == null) {
// Reply from Storage of the initial value, now we can create the Counter
final long value = ((Entry) msg).value;
counter = getContext().actorOf(new Props().withCreator(new UntypedActorFactory() {
public Actor create() {
return new Counter(key, value);
}
}));
// Tell the counter to use current storage
counter.tell(new UseStorage(storage), getSelf());
// and send the buffered backlog to the counter
for (SenderMsgPair each : backlog) {
counter.tell(each.msg, each.sender);
}
backlog.clear();
} else if (msg instanceof Increment) {
forwardOrPlaceInBacklog(msg);
} else if (msg.equals(GetCurrentCount)) {
forwardOrPlaceInBacklog(msg);
} else if (msg instanceof Terminated) {
// After 3 restarts the storage child is stopped.
// We receive Terminated because we watch the child, see initStorage.
storage = null;
// Tell the counter that there is no storage for the moment
counter.tell(new UseStorage(null), getSelf());
// Try to re-establish storage after a while
getContext().system().scheduler().scheduleOnce(
Duration.create(10, "seconds"), getSelf(), Reconnect, getContext().dispatcher()
);
} else if (msg.equals(Reconnect)) {
// Re-establish storage after the scheduled delay
initStorage();
} else {
unhandled(msg);
}
}
void forwardOrPlaceInBacklog(Object msg) {
// We need the initial value from storage before we can start delegating to
// the counter. Until then we place the messages in a backlog, to be sent to
// the counter when it has been initialized.
if (counter == null) {
if (backlog.size() >= MAX_BACKLOG)
throw new ServiceUnavailable("CounterService not available, lack of initial value");
backlog.add(new SenderMsgPair(getSender(), msg));
} else {
counter.forward(msg, getContext());
}
}
}
//#messages
public interface CounterApi {
public static class UseStorage {
public final ActorRef storage;
public UseStorage(ActorRef storage) {
this.storage = storage;
}
public String toString() {
return String.format("%s(%s)", getClass().getSimpleName(), storage);
}
}
}
//#messages
/**
* The in-memory counter that sends its current value to the Storage,
* if there is any storage available at the moment.
*/
public static class Counter extends UntypedActor {
final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
final String key;
long count;
ActorRef storage;
public Counter(String key, long initialValue) {
this.key = key;
this.count = initialValue;
}
@Override
public void onReceive(Object msg) {
log.debug("received message {}", msg);
if (msg instanceof UseStorage) {
storage = ((UseStorage) msg).storage;
storeCount();
} else if (msg instanceof Increment) {
count += ((Increment) msg).n;
storeCount();
} else if (msg.equals(GetCurrentCount)) {
getSender().tell(new CurrentCount(key, count), getSelf());
} else {
unhandled(msg);
}
}
void storeCount() {
// Delegate dangerous work, to protect our valuable state.
// We can continue without storage.
if (storage != null) {
storage.tell(new Store(new Entry(key, count)), getSelf());
}
}
}
//#messages
public interface StorageApi {
public static class Store {
public final Entry entry;
public Store(Entry entry) {
this.entry = entry;
}
public String toString() {
return String.format("%s(%s)", getClass().getSimpleName(), entry);
}
}
public static class Entry {
public final String key;
public final long value;
public Entry(String key, long value) {
this.key = key;
this.value = value;
}
public String toString() {
return String.format("%s(%s, %s)", getClass().getSimpleName(), key, value);
}
}
public static class Get {
public final String key;
public Get(String key) {
this.key = key;
}
public String toString() {
return String.format("%s(%s)", getClass().getSimpleName(), key);
}
}
public static class StorageException extends RuntimeException {
public StorageException(String msg) {
super(msg);
}
}
}
//#messages
/**
* Saves key/value pairs to persistent storage when receiving a Store message.
* Replies with the current value when receiving a Get message. Throws
* StorageException if the underlying data store is out of order.
*/
public static class Storage extends UntypedActor {
final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
final DummyDB db = DummyDB.instance;
@Override
public void onReceive(Object msg) {
log.debug("received message {}", msg);
if (msg instanceof Store) {
Store store = (Store) msg;
db.save(store.entry.key, store.entry.value);
} else if (msg instanceof Get) {
Get get = (Get) msg;
Long value = db.load(get.key);
getSender().tell(new Entry(get.key, value == null ? Long.valueOf(0L) : value), getSelf());
} else {
unhandled(msg);
}
}
}
//#dummydb
public static class DummyDB {
public static final DummyDB instance = new DummyDB();
private final Map<String, Long> db = new HashMap<String, Long>();
private DummyDB() {
}
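// simulate a flaky store: counts 11 through 14 always fail,
// which triggers the supervision strategy in CounterService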
public synchronized void save(String key, Long value) throws StorageException {
if (11 <= value && value <= 14)
throw new StorageException("Simulated store failure " + value);
db.put(key, value);
}
public synchronized Long load(String key) throws StorageException {
return db.get(key);
}
}
//#dummydb
}
//#all

View file

@ -0,0 +1,10 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.agent
import org.scalatest.junit.JUnitWrapperSuite
class AgentDocJavaSpec extends JUnitWrapperSuite(
"docs.agent.AgentDocTest",
Thread.currentThread.getContextClassLoader)

View file

@ -0,0 +1,111 @@
/**
* Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
*/
package docs.agent;
import static org.junit.Assert.*;
import scala.concurrent.ExecutionContext;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import akka.testkit.AkkaSpec;
//#import-system
import akka.actor.ActorSystem;
//#import-system
//#import-agent
import akka.agent.Agent;
//#import-agent
//#import-function
import akka.japi.Function;
//#import-function
//#import-timeout
import akka.util.Timeout;
import static java.util.concurrent.TimeUnit.SECONDS;
//#import-timeout
public class AgentDocTest {
private static ActorSystem testSystem;
private static ExecutionContext ec;
@BeforeClass
public static void beforeAll() {
testSystem = ActorSystem.create("AgentDocTest", AkkaSpec.testConf());
ec = testSystem.dispatcher();
}
@AfterClass
public static void afterAll() {
testSystem.shutdown();
testSystem = null;
}
@Test
public void createAndClose() {
//#create
ActorSystem system = ActorSystem.create("app");
Agent<Integer> agent = new Agent<Integer>(5, system);
//#create
//#close
agent.close();
//#close
system.shutdown();
}
@Test
public void sendAndSendOffAndReadAwait() {
Agent<Integer> agent = new Agent<Integer>(5, testSystem);
//#send
// send a value
agent.send(7);
// send a function
agent.send(new Function<Integer, Integer>() {
public Integer apply(Integer i) {
return i * 2;
}
});
//#send
Function<Integer, Integer> longRunningOrBlockingFunction = new Function<Integer, Integer>() {
public Integer apply(Integer i) {
return i * 1;
}
};
//#send-off
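// sendOff dispatches the update on the given ExecutionContext so a
// long-running or blocking function does not stall other agent updates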
// sendOff a function
agent.sendOff(longRunningOrBlockingFunction, ec);
//#send-off
//#read-await
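// await blocks until all currently queued updates have been applied
// (or the timeout expires) and then returns the resulting value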
Integer result = agent.await(new Timeout(5, SECONDS));
//#read-await
assertEquals(new Integer(14), result);
agent.close();
}
@Test
public void readWithGet() {
Agent<Integer> agent = new Agent<Integer>(5, testSystem);
//#read-get
Integer result = agent.get();
//#read-get
assertEquals(new Integer(5), result);
agent.close();
}
}

View file

@ -0,0 +1,49 @@
package docs.camel;
//#CamelActivation
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.camel.Camel;
import akka.camel.CamelExtension;
import akka.camel.javaapi.UntypedConsumerActor;
import scala.concurrent.Future;
import scala.concurrent.util.Duration;
import scala.concurrent.util.FiniteDuration;
import static java.util.concurrent.TimeUnit.SECONDS;
//#CamelActivation
import org.junit.Test;
public class ActivationTestBase {
@Test
public void testActivation() {
//#CamelActivation
// ..
ActorSystem system = ActorSystem.create("some-system");
Props props = new Props(MyConsumer.class);
ActorRef consumer = system.actorOf(props, "myconsumer");
Camel camel = CamelExtension.get(system);
// get a future reference to the activation of the endpoint of the Consumer Actor
FiniteDuration duration = Duration.create(10, SECONDS);
Future<ActorRef> activationFuture = camel.activationFutureFor(consumer, duration, system.dispatcher());
//#CamelActivation
//#CamelDeactivation
// ..
system.stop(consumer);
// get a future reference to the deactivation of the endpoint of the Consumer Actor
Future<ActorRef> deactivationFuture = camel.deactivationFutureFor(consumer, duration, system.dispatcher());
//#CamelDeactivation
system.shutdown();
}
public static class MyConsumer extends UntypedConsumerActor {
public String getEndpointUri() {
return "direct:test";
}
public void onReceive(Object message) {
}
}
}

View file

@ -0,0 +1,5 @@
package docs.camel
import org.scalatest.junit.JUnitSuite
class CamelExtensionDocTest extends CamelExtensionTestBase with JUnitSuite

View file

@ -0,0 +1,31 @@
package docs.camel;
import akka.actor.ActorSystem;
import akka.camel.Camel;
import akka.camel.CamelExtension;
import org.apache.camel.CamelContext;
import org.apache.camel.ProducerTemplate;
import org.junit.Test;
public class CamelExtensionTestBase {
@Test
public void getCamelExtension() {
//#CamelExtension
ActorSystem system = ActorSystem.create("some-system");
Camel camel = CamelExtension.get(system);
CamelContext camelContext = camel.context();
ProducerTemplate producerTemplate = camel.template();
//#CamelExtension
system.shutdown();
}
public void addActiveMQComponent() {
//#CamelExtensionAddComponent
ActorSystem system = ActorSystem.create("some-system");
Camel camel = CamelExtension.get(system);
CamelContext camelContext = camel.context();
// camelContext.addComponent("activemq", ActiveMQComponent.activeMQComponent("vm://localhost?broker.persistent=false"))
//#CamelExtensionAddComponent
system.shutdown();
}
}

View file

@ -0,0 +1,24 @@
package docs.camel;
//#Consumer1
import akka.camel.CamelMessage;
import akka.camel.javaapi.UntypedConsumerActor;
import akka.event.Logging;
import akka.event.LoggingAdapter;
public class Consumer1 extends UntypedConsumerActor {
LoggingAdapter log = Logging.getLogger(getContext().system(), this);
public String getEndpointUri() {
return "file:data/input/actor";
}
public void onReceive(Object message) {
if (message instanceof CamelMessage) {
CamelMessage camelMessage = (CamelMessage) message;
String body = camelMessage.getBodyAs(String.class, getCamelContext());
log.info("Received message: {}", body);
} else
unhandled(message);
}
}
//#Consumer1

View file

@ -0,0 +1,20 @@
package docs.camel;
//#Consumer2
import akka.camel.CamelMessage;
import akka.camel.javaapi.UntypedConsumerActor;
public class Consumer2 extends UntypedConsumerActor {
public String getEndpointUri() {
return "jetty:http://localhost:8877/camel/default";
}
public void onReceive(Object message) {
if (message instanceof CamelMessage) {
CamelMessage camelMessage = (CamelMessage) message;
String body = camelMessage.getBodyAs(String.class, getCamelContext());
getSender().tell(String.format("Received message: %s",body), getSelf());
} else
unhandled(message);
}
}
//#Consumer2

View file

@ -0,0 +1,31 @@
package docs.camel;
//#Consumer3
import akka.actor.Status;
import akka.camel.Ack;
import akka.camel.CamelMessage;
import akka.camel.javaapi.UntypedConsumerActor;
public class Consumer3 extends UntypedConsumerActor {
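// with autoAck disabled the actor itself must acknowledge every message,
// either with Ack or with a Status.Failure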
@Override
public boolean autoAck() {
return false;
}
public String getEndpointUri() {
return "jms:queue:test";
}
public void onReceive(Object message) {
if (message instanceof CamelMessage) {
getSender().tell(Ack.getInstance(), getSelf());
// on success
// ..
Exception someException = new Exception("e1");
// on failure
getSender().tell(new Status.Failure(someException), getSelf());
} else
unhandled(message);
}
}
//#Consumer3

View file

@ -0,0 +1,31 @@
package docs.camel;
//#Consumer4
import akka.camel.CamelMessage;
import akka.camel.javaapi.UntypedConsumerActor;
import scala.concurrent.util.Duration;
import scala.concurrent.util.FiniteDuration;
import java.util.concurrent.TimeUnit;
public class Consumer4 extends UntypedConsumerActor {
private final static FiniteDuration timeout = Duration.create(500, TimeUnit.MILLISECONDS);
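// if onReceive does not reply within this timeout, the endpoint
// completes the exchange with a timeout failure instead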
@Override
public Duration replyTimeout() {
return timeout;
}
public String getEndpointUri() {
return "jetty:http://localhost:8877/camel/default";
}
public void onReceive(Object message) {
if (message instanceof CamelMessage) {
CamelMessage camelMessage = (CamelMessage) message;
String body = camelMessage.getBodyAs(String.class, getCamelContext());
getSender().tell(String.format("Hello %s",body), getSelf());
} else
unhandled(message);
}
}
//#Consumer4

View file

@ -0,0 +1,18 @@
package docs.camel;
//#CustomRoute
import akka.actor.ActorRef;
import akka.camel.internal.component.CamelPath;
import org.apache.camel.builder.RouteBuilder;
public class CustomRouteBuilder extends RouteBuilder {
private String uri;
public CustomRouteBuilder(ActorRef responder) {
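// CamelPath.toUri turns the ActorRef into an actor endpoint URI,
// so the Jetty route below can deliver messages straight to the actor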
uri = CamelPath.toUri(responder);
}
public void configure() throws Exception {
from("jetty:http://localhost:8877/camel/custom").to(uri);
}
}
//#CustomRoute

View file

@ -0,0 +1,20 @@
package docs.camel;
import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.camel.Camel;
import akka.camel.CamelExtension;
public class CustomRouteTestBase {
public void customRoute() throws Exception{
//#CustomRoute
ActorSystem system = ActorSystem.create("some-system");
Camel camel = CamelExtension.get(system);
ActorRef responder = system.actorOf(new Props(Responder.class), "TestResponder");
camel.context().addRoutes(new CustomRouteBuilder(responder));
//#CustomRoute
system.stop(responder);
system.shutdown();
}
}

View file

@ -0,0 +1,42 @@
package docs.camel;
//#ErrorThrowingConsumer
import akka.actor.Status;
import akka.camel.CamelMessage;
import akka.camel.javaapi.UntypedConsumerActor;
import org.apache.camel.builder.Builder;
import org.apache.camel.model.ProcessorDefinition;
import org.apache.camel.model.RouteDefinition;
import scala.Option;
public class ErrorThrowingConsumer extends UntypedConsumerActor {
private String uri;
public ErrorThrowingConsumer(String uri){
this.uri = uri;
}
public String getEndpointUri() {
return uri;
}
public void onReceive(Object message) throws Exception{
if (message instanceof CamelMessage) {
CamelMessage camelMessage = (CamelMessage) message;
String body = camelMessage.getBodyAs(String.class, getCamelContext());
throw new Exception(String.format("error: %s",body));
} else
unhandled(message);
}
@Override
public ProcessorDefinition<?> onRouteDefinition(RouteDefinition rd) {
// Catch any exception and handle it by returning the exception message as response
return rd.onException(Exception.class).handled(true).transform(Builder.exceptionMessage()).end();
}
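// send the failure to the original sender before restarting,
// so the Camel exchange is completed with an error response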
@Override
public void preRestart(Throwable reason, Option<Object> message) {
getSender().tell(new Status.Failure(reason), getSelf());
}
}
//#ErrorThrowingConsumer

View file

@ -0,0 +1,10 @@
package docs.camel;
//#Producer1
import akka.camel.javaapi.UntypedProducerActor;
public class FirstProducer extends UntypedProducerActor {
public String getEndpointUri() {
return "http://localhost:8080/news";
}
}
//#Producer1

View file

@ -0,0 +1,24 @@
package docs.camel;
//#RouteResponse
import akka.actor.ActorRef;
import akka.camel.javaapi.UntypedProducerActor;
public class Forwarder extends UntypedProducerActor {
private String uri;
private ActorRef target;
public Forwarder(String uri, ActorRef target) {
this.uri = uri;
this.target = target;
}
public String getEndpointUri() {
return uri;
}
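// route the endpoint's reply on to the target actor
// instead of returning it to the original sender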
@Override
public void onRouteResponse(Object message) {
target.forward(message, getContext());
}
}
//#RouteResponse

View file

@ -0,0 +1,15 @@
package docs.camel;
//#ProducerTemplate
import akka.actor.UntypedActor;
import akka.camel.Camel;
import akka.camel.CamelExtension;
import org.apache.camel.ProducerTemplate;
public class MyActor extends UntypedActor {
public void onReceive(Object message) {
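// sendBody performs a one-way (in-only) send of the message
// body to the direct:news endpoint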
Camel camel = CamelExtension.get(getContext().system());
ProducerTemplate template = camel.template();
template.sendBody("direct:news", message);
}
}
//#ProducerTemplate

View file

@ -0,0 +1,31 @@
package docs.camel;
//#Consumer-mina
import akka.camel.CamelMessage;
import akka.camel.javaapi.UntypedConsumerActor;
public class MyEndpoint extends UntypedConsumerActor {
private String uri;
public String getEndpointUri() {
return uri;
}
public void onReceive(Object message) throws Exception {
if (message instanceof CamelMessage) {
/* ... */
} else
unhandled(message);
}
// Extra constructor to change the default uri,
// for instance to "jetty:http://localhost:8877/example"
public MyEndpoint(String uri) {
this.uri = uri;
}
public MyEndpoint() {
this.uri = "mina:tcp://localhost:6200?textline=true";
}
}
//#Consumer-mina

Some files were not shown because too many files have changed in this diff