Begin using compiled code examples in the docs. See #781

This commit is contained in:
Peter Vlugter 2011-10-05 17:41:00 +02:00
parent 56f8f858be
commit 7884691d3a
14 changed files with 849 additions and 581 deletions

View file

@ -38,9 +38,7 @@ Here is the reference configuration file:
.. literalinclude:: ../../config/akka-reference.conf
:language: none
A custom ``akka.conf`` might look like this:
::
A custom ``akka.conf`` might look like this::
# In this file you can override any option defined in the 'akka-reference.conf' file.
# Copy in all or parts of the 'akka-reference.conf' file and modify as you please.
@ -122,4 +120,3 @@ Summary of System Properties
* :ref:`akka.config <-Dakka.config>`: explicit configuration file location
* :ref:`akka.mode <-Dakka.mode>` (``AKKA_MODE``): modify configuration file name for multiple profiles
* :ref:`akka.output.config.source <-Dakka.output.config.source>`: whether to print configuration source to console

View file

@ -30,11 +30,11 @@ Actors and the Java Memory Model
With the Actors implementation in Akka, there are two ways multiple threads can execute actions on shared memory:
* if a message is sent to an actor (e.g. by another actor). In most cases messages are immutable, but if that message
is not a properly constructed immutable object, without a "happens before" rule, it would be possible for the receiver
to see partially initialized data structures and possibly even values out of thin air (longs/doubles).
is not a properly constructed immutable object, without a "happens before" rule, it would be possible for the receiver
to see partially initialized data structures and possibly even values out of thin air (longs/doubles).
* if an actor makes changes to its internal state while processing a message, and accesses that state while processing
another message moments later. It is important to realize that with the actor model you don't get any guarantee that
the same thread will be executing the same actor for different messages.
another message moments later. It is important to realize that with the actor model you don't get any guarantee that
the same thread will be executing the same actor for different messages.
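To make the first point concrete, here is a minimal sketch (the ``Position`` classes are hypothetical, not part of the Akka sources) contrasting a properly constructed immutable message with a mutable one whose fields might be observed half-initialized by the receiving actor::

  // immutable: all fields are vals set in the constructor, so a receiving
  // actor always sees a fully constructed, consistent value
  case class Position(x: Double, y: Double)

  // mutable: the sender can keep changing the fields after sending, and
  // without a "happens before" rule the receiver could even observe
  // partially written values
  class MutablePosition {
    var x: Double = 0.0
    var y: Double = 0.0
  }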
To prevent visibility and reordering problems on actors, Akka guarantees the following two "happens before" rules:

View file

@ -0,0 +1,131 @@
/**
* Copyright (C) 2009-2011 Typesafe Inc. <http://www.typesafe.com>
*/
//#imports
package akka.tutorial.first.scala
import akka.actor.{ Actor, PoisonPill }
import akka.actor.Actor._
import akka.routing.Routing.Broadcast
import akka.routing.{ RoutedProps, Routing }
import java.util.concurrent.CountDownLatch
//#imports
//#app
object Pi extends App {
calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000)
//#actors-and-messages
// ====================
// ===== Messages =====
// ====================
//#messages
sealed trait PiMessage
case object Calculate extends PiMessage
case class Work(start: Int, nrOfElements: Int) extends PiMessage
case class Result(value: Double) extends PiMessage
//#messages
// ==================
// ===== Worker =====
// ==================
//#worker
class Worker extends Actor {
// define the work
//#calculatePiFor
def calculatePiFor(start: Int, nrOfElements: Int): Double = {
var acc = 0.0
for (i <- start until (start + nrOfElements))
acc += 4.0 * (1 - (i % 2) * 2) / (2 * i + 1)
acc
}
//#calculatePiFor
def receive = {
case Work(start, nrOfElements) =>
reply(Result(calculatePiFor(start, nrOfElements))) // perform the work
}
}
//#worker
// ==================
// ===== Master =====
// ==================
//#master
class Master(nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch) extends Actor {
var pi: Double = _
var nrOfResults: Int = _
var start: Long = _
//#create-workers
// create the workers
val workers = Vector.fill(nrOfWorkers)(actorOf[Worker])
// wrap them with a load-balancing router
val router = Routing.actorOf(RoutedProps().withRoundRobinRouter.withConnections(workers), "pi")
//#create-workers
//#master-receive
// message handler
def receive = {
//#handle-messages
case Calculate =>
// schedule work
for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements)
// send a PoisonPill to all workers telling them to shut down themselves
router ! Broadcast(PoisonPill)
// send a PoisonPill to the router, telling him to shut himself down
router ! PoisonPill
case Result(value) =>
// handle result from the worker
pi += value
nrOfResults += 1
if (nrOfResults == nrOfMessages) self.stop()
//#handle-messages
}
//#master-receive
override def preStart() {
start = System.currentTimeMillis
}
override def postStop() {
// tell the world that the calculation is complete
println(
"\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis"
.format(pi, (System.currentTimeMillis - start)))
latch.countDown()
}
}
//#master
//#actors-and-messages
// ==================
// ===== Run it =====
// ==================
def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) {
// this latch is only plumbing to know when the calculation is completed
val latch = new CountDownLatch(1)
// create the master
val master = actorOf(new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch))
// start the calculation
master ! Calculate
// wait for master to shut down
latch.await()
}
}
//#app

View file

@ -49,26 +49,37 @@ Using the Akka sbt plugin to package your application
The Akka sbt plugin can create a full Akka microkernel deployment for your sbt
project.
To use the plugin, first add a plugin definition to your SBT project by creating
``project/plugins/Plugins.scala`` with::
To use the plugin, first add a plugin definition to your sbt project by creating
``project/plugins.sbt`` with::
resolvers += Classpaths.typesafeResolver
addSbtPlugin("se.scalablesolutions.akka" % "akka-sbt-plugin" % "2.0-SNAPSHOT")
Then use the AkkaKernelPlugin settings. In a 'light' configuration (build.sbt)::
seq(akka.sbt.AkkaKernelPlugin.distSettings: _*)
Or in a 'full' configuration (Build.scala). For example::
import sbt._
import sbt.Keys._
import akka.sbt.AkkaKernelPlugin
class Plugins(info: ProjectInfo) extends PluginDefinition(info) {
val akkaRepo = "Akka Repo" at "http://akka.io/repository"
val akkaPlugin = "se.scalablesolutions.akka" % "akka-sbt-plugin" % "2.0-SNAPSHOT"
object SomeBuild extends Build {
lazy val someProject = Project(
id = "some-project",
base = file("."),
settings = Defaults.defaultSettings ++ AkkaKernelPlugin.distSettings ++ Seq(
organization := "org.some",
version := "0.1",
scalaVersion := "2.9.1",
resolvers += "Typesafe Repo" at "http://repo.typesafe.com/typesafe/releases/",
libraryDependencies += "se.scalablesolutions.akka" % "akka-kernel" % "2.0-SNAPSHOT"
)
)
}
Then mix the ``AkkaKernelProject`` trait into your project definition. For
example::
class MyProject(info: ProjectInfo) extends DefaultProject(info) with AkkaKernelProject
This will automatically add all the Akka dependencies needed for a microkernel
deployment (download them with ``sbt update``).
Place your config files in ``src/main/config``.
To build a microkernel deployment use the ``dist`` task::
sbt dist

View file

@ -1,12 +1,17 @@
.. _getting-started-first-scala:
Getting Started Tutorial (Scala): First Chapter
===============================================
#################################################
Getting Started Tutorial (Scala): First Chapter
#################################################
Introduction
------------
============
Welcome to the first tutorial on how to get started with Akka and Scala. We assume that you already know what Akka and Scala are and will now focus on the steps necessary to start your first project.
Welcome to the first tutorial on how to get started with Akka and Scala. We
assume that you already know what Akka and Scala are and will now focus on the
steps necessary to start your first project.
There are two variations of this first tutorial:
@ -15,38 +20,38 @@ There are two variations of this first tutorial:
Since they are so similar we will present them both.
The sample application that we will create is using actors to calculate the value of Pi. Calculating Pi is a CPU intensive operation and we will utilize Akka Actors to write a concurrent solution that scales out to multi-core processors. This sample will be extended in future tutorials to use Akka Remote Actors to scale out on multiple machines in a cluster.
The sample application that we will create uses actors to calculate the
value of Pi. Calculating Pi is a CPU-intensive operation and we will utilize
Akka Actors to write a concurrent solution that scales out to multi-core
processors. This sample will be extended in future tutorials to use Akka Remote
Actors to scale out on multiple machines in a cluster.
We will be using an algorithm that is called "embarrassingly parallel" which just means that each job is completely isolated and not coupled with any other job. Since this algorithm is so parallelizable it suits the actor model very well.
We will be using an algorithm that is called "embarrassingly parallel" which
just means that each job is completely isolated and not coupled with any other
job. Since this algorithm is so parallelizable it suits the actor model very
well.
Here is the formula for the algorithm we will use:
.. image:: ../images/pi-formula.png
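For readers of the source, the series in the image can be transcribed as follows (this is my rendering of it; it matches the per-term expression ``4.0 * (1 - (i % 2) * 2) / (2 * i + 1)`` accumulated by the worker code later in this tutorial):

.. math::

   \pi = \sum_{i=0}^{\infty} \frac{4\,(-1)^{i}}{2i + 1}
       = 4 \left( 1 - \frac{1}{3} + \frac{1}{5} - \frac{1}{7} + \cdots \right)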
In this particular algorithm the master splits the series into chunks which are sent out to each worker actor to be processed. When each worker has processed its chunk it sends a result back to the master which aggregates the total result.
In this particular algorithm the master splits the series into chunks which are
sent out to each worker actor to be processed. When each worker has processed
its chunk it sends a result back to the master which aggregates the total
result.
Tutorial source code
--------------------
If you don't want to type in the code and/or set up an SBT project then you can check out the full tutorial from the Akka GitHub repository. It is in the ``akka-tutorials/akka-tutorial-first`` module. You can also browse it online `here`__, with the actual source code `here`__.
__ https://github.com/jboner/akka/tree/master/akka-tutorials/akka-tutorial-first
__ https://github.com/jboner/akka/blob/master/akka-tutorials/akka-tutorial-first/src/main/scala/Pi.scala
To check out the code using Git invoke the following::
$ git clone git://github.com/jboner/akka.git
Then you can navigate down to the tutorial::
$ cd akka/akka-tutorials/akka-tutorial-first
Prerequisites
-------------
=============
This tutorial assumes that you have Java 1.6 or later installed on you machine and ``java`` on your ``PATH``. You also need to know how to run commands in a shell (ZSH, Bash, DOS etc.) and a decent text editor or IDE to type in the Scala code.
This tutorial assumes that you have Java 1.6 or later installed on your machine
and ``java`` on your ``PATH``. You also need to know how to run commands in a
shell (ZSH, Bash, DOS etc.) and have a decent text editor or IDE to type in the
Scala code.
You need to make sure that ``$JAVA_HOME`` environment variable is set to the root of the Java distribution. You also need to make sure that the ``$JAVA_HOME/bin`` is on your ``PATH``::
You need to make sure that ``$JAVA_HOME`` environment variable is set to the
root of the Java distribution. You also need to make sure that the
``$JAVA_HOME/bin`` is on your ``PATH``::
$ export JAVA_HOME=..root of java distribution..
$ export PATH=$PATH:$JAVA_HOME/bin
@ -60,10 +65,11 @@ You can test your installation by invoking ``java``::
Downloading and installing Akka
-------------------------------
===============================
To build and run the tutorial sample from the command line, you have to download
Akka. If you prefer to use SBT to build and run the sample then you can skipthis section and jump to the next one.
Akka. If you prefer to use SBT to build and run the sample then you can skip
this section and jump to the next one.
Let's get the ``akka-actors-2.0-SNAPSHOT.zip`` distribution of Akka from
http://akka.io/downloads/ which includes everything we need for this
@ -101,7 +107,8 @@ The only JAR we will need for this tutorial (apart from the
directory. This is a self-contained JAR with zero dependencies and contains
everything we need to write a system using Actors.
Akka is very modular and has many JARs for containing different features. The core distribution has seven modules:
Akka is very modular and has many JARs containing different features. The
core distribution has seven modules:
- ``akka-actor-2.0-SNAPSHOT.jar`` -- Standard Actors
- ``akka-typed-actor-2.0-SNAPSHOT.jar`` -- Typed Actors
@ -120,13 +127,19 @@ The Akka Microkernel distribution also includes these jars:
Downloading and installing Scala
--------------------------------
================================
To build and run the tutorial sample from the command line, you have to install the Scala distribution. If you prefer to use SBT to build and run the sample then you can skip this section and jump to the next one.
To build and run the tutorial sample from the command line, you have to install
the Scala distribution. If you prefer to use SBT to build and run the sample
then you can skip this section and jump to the next one.
Scala can be downloaded from `http://www.scala-lang.org/downloads <http://www.scala-lang.org/downloads>`_. Browse there and download the Scala 2.9.0 release. If you pick the ``tgz`` or ``zip`` distribution then just unzip it where you want it installed. If you pick the IzPack Installer then double click on it and follow the instructions.
Scala can be downloaded from http://www.scala-lang.org/downloads. Browse there
and download the Scala 2.9.0 release. If you pick the ``tgz`` or ``zip``
distribution then just unzip it where you want it installed. If you pick the
IzPack Installer then double click on it and follow the instructions.
You also need to make sure that the ``scala-2.9.0/bin`` (if that is the directory where you installed Scala) is on your ``PATH``::
You also need to make sure that the ``scala-2.9.0/bin`` (if that is the
directory where you installed Scala) is on your ``PATH``::
$ export PATH=$PATH:scala-2.9.0/bin
@ -135,131 +148,174 @@ You can test your installation by invoking scala::
$ scala -version
Scala code runner version 2.9.0.final -- Copyright 2002-2011, LAMP/EPFL
Looks like we are all good. Finally let's create a source file ``Pi.scala`` for the tutorial and put it in the root of the Akka distribution in the ``tutorial`` directory (you have to create it first).
Looks like we are all good. Finally let's create a source file ``Pi.scala`` for
the tutorial and put it in the root of the Akka distribution in the ``tutorial``
directory (you have to create it first).
Some tools require you to set the ``SCALA_HOME`` environment variable to the root of the Scala distribution, however Akka does not require that.
Some tools require you to set the ``SCALA_HOME`` environment variable to the
root of the Scala distribution, however Akka does not require that.
.. _getting-started-first-scala-download-sbt:
Downloading and installing SBT
------------------------------
==============================
SBT, short for 'Simple Build Tool' is an excellent build system written in Scala. It uses Scala to write the build scripts which gives you a lot of power. It has a plugin architecture with many plugins available, something that we will take advantage of soon. SBT is the preferred way of building software in Scala and is probably the easiest way of getting through this tutorial. If you want to use SBT for this tutorial then follow the following instructions, if not you can skip this section and the next.
SBT, short for 'Simple Build Tool', is an excellent build system written in
Scala. It uses Scala to write the build scripts which gives you a lot of
power. It has a plugin architecture with many plugins available, something that
we will take advantage of soon. SBT is the preferred way of building software in
Scala and is probably the easiest way of getting through this tutorial. If you
want to use SBT for this tutorial then follow these instructions; if not,
you can skip this section and the next.
To install SBT and create a project for this tutorial it is easiest to follow the instructions on `https://github.com/harrah/xsbt/wiki/Setup <https://github.com/harrah/xsbt/wiki/Setup>`_.
To install SBT and create a project for this tutorial it is easiest to follow
the instructions on https://github.com/harrah/xsbt/wiki/Setup.
Now we need to create our first Akka project. You could add the dependencies
manually to the build script, but the easiest way is to use Akka's SBT Plugin,
covered in the next section.
Now we need to create our first Akka project. You could add the dependencies manually to the build script, but the easiest way is to use Akka's SBT Plugin, covered in the next section.
Creating an Akka SBT project
----------------------------
============================
If you have not already done so, now is the time to create an SBT project for our tutorial. You do that by adding the following content to ``build.sbt`` file in the directory you want to create your project in::
If you have not already done so, now is the time to create an SBT project for
our tutorial. You do that by adding the following content to a ``build.sbt`` file
in the directory you want to create your project in::
name := "My Project"
version := "1.0"
scalaVersion := "2.9.0-1"
scalaVersion := "2.9.1"
resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"
libraryDependencies += "se.scalablesolutions.akka" % "akka-actor" % "1.2-SNAPSHOT"
libraryDependencies += "se.scalablesolutions.akka" % "akka-actor" % "2.0-SNAPSHOT"
Create a directory ``src/main/scala`` in which you will store the Scala source files.
Create a directory ``src/main/scala`` in which you will store the Scala source
files.
Not needed in this tutorial, but if you would like to use additional Akka modules beyond ``akka-actor``, you can add these as ``libraryDependencies`` in ``build.sbt``. Note that there must be a blank line between each. Here is an example adding ``akka-remote`` and ``akka-stm``::
Not needed in this tutorial, but if you would like to use additional Akka
modules beyond ``akka-actor``, you can add these as ``libraryDependencies`` in
``build.sbt``. Note that there must be a blank line between each. Here is an
example adding ``akka-remote`` and ``akka-stm``::
libraryDependencies += "se.scalablesolutions.akka" % "akka-actor" % "1.2-SNAPSHOT"
libraryDependencies += "se.scalablesolutions.akka" % "akka-actor" % "2.0-SNAPSHOT"
libraryDependencies += "se.scalablesolutions.akka" % "akka-remote" % "1.2-SNAPSHOT"
libraryDependencies += "se.scalablesolutions.akka" % "akka-remote" % "2.0-SNAPSHOT"
libraryDependencies += "se.scalablesolutions.akka" % "akka-stm" % "1.2-SNAPSHOT"
libraryDependencies += "se.scalablesolutions.akka" % "akka-stm" % "2.0-SNAPSHOT"
So, now we are all set.
SBT itself needs a whole bunch of dependencies but our project will only need one; ``akka-actor-2.0-SNAPSHOT.jar``. SBT will download that as well.
SBT itself needs a whole bunch of dependencies but our project will only need
one: ``akka-actor-2.0-SNAPSHOT.jar``. SBT will download that as well.
Start writing the code
----------------------
======================
Now it's about time to start hacking.
We start by creating a ``Pi.scala`` file and adding these import statements at the top of the file::
We start by creating a ``Pi.scala`` file and adding these import statements at
the top of the file:
package akka.tutorial.first.scala
.. includecode:: code/tutorials/first/Pi.scala#imports
import akka.actor.{Actor, PoisonPill}
import Actor._
import akka.routing.{Routing, CyclicIterator}
import Routing._
import akka.dispatch.Dispatchers
If you are using SBT in this tutorial then create the file in the
``src/main/scala`` directory.
import java.util.concurrent.CountDownLatch
If you are using the command line tools then create the file wherever you
want. I will create it in a directory called ``tutorial`` at the root of the
Akka distribution, e.g. in ``$AKKA_HOME/tutorial/Pi.scala``.
If you are using SBT in this tutorial then create the file in the ``src/main/scala`` directory.
If you are using the command line tools then create the file wherever you want. I will create it in a directory called ``tutorial`` at the root of the Akka distribution, e.g. in ``$AKKA_HOME/tutorial/Pi.scala``.
Creating the messages
---------------------
=====================
The design we are aiming for is to have one ``Master`` actor initiating the computation, creating a set of ``Worker`` actors. Then it splits up the work into discrete chunks, and sends these chunks to the different workers in a round-robin fashion. The master waits until all the workers have completed their work and sent back results for aggregation. When computation is completed the master prints out the result, shuts down all workers and then itself.
The design we are aiming for is to have one ``Master`` actor initiating the
computation, creating a set of ``Worker`` actors. Then it splits up the work
into discrete chunks, and sends these chunks to the different workers in a
round-robin fashion. The master waits until all the workers have completed their
work and sent back results for aggregation. When computation is completed the
master prints out the result, shuts down all workers and then itself.
With this in mind, let's now create the messages that we want to have flowing in the system. We need three different messages:
With this in mind, let's now create the messages that we want to have flowing in
the system. We need three different messages:
- ``Calculate`` -- sent to the ``Master`` actor to start the calculation
- ``Work`` -- sent from the ``Master`` actor to the ``Worker`` actors containing the work assignment
- ``Result`` -- sent from the ``Worker`` actors to the ``Master`` actor containing the result from the worker's calculation
- ``Work`` -- sent from the ``Master`` actor to the ``Worker`` actors containing
the work assignment
- ``Result`` -- sent from the ``Worker`` actors to the ``Master`` actor
containing the result from the worker's calculation
Messages sent to actors should always be immutable to avoid sharing mutable state. In scala we have 'case classes' which make excellent messages. So let's start by creating three messages as case classes. We also create a common base trait for our messages (that we define as being ``sealed`` in order to prevent creating messages outside our control)::
Messages sent to actors should always be immutable to avoid sharing mutable
state. In Scala we have 'case classes' which make excellent messages. So let's
start by creating three messages as case classes. We also create a common base
trait for our messages (that we define as being ``sealed`` in order to prevent
creating messages outside our control):
sealed trait PiMessage
.. includecode:: code/tutorials/first/Pi.scala#messages
case object Calculate extends PiMessage
case class Work(start: Int, nrOfElements: Int) extends PiMessage
case class Result(value: Double) extends PiMessage
Creating the worker
-------------------
===================
Now we can create the worker actor. This is done by mixing in the ``Actor`` trait and defining the ``receive`` method. The ``receive`` method defines our message handler. We expect it to be able to handle the ``Work`` message so we need to add a handler for this message::
Now we can create the worker actor. This is done by mixing in the ``Actor``
trait and defining the ``receive`` method. The ``receive`` method defines our
message handler. We expect it to be able to handle the ``Work`` message so we
need to add a handler for this message:
class Worker extends Actor {
def receive = {
case Work(start, nrOfElements) =>
self reply Result(calculatePiFor(start, nrOfElements)) // perform the work
}
}
.. includecode:: code/tutorials/first/Pi.scala#worker
:exclude: calculatePiFor
As you can see we have now created an ``Actor`` with a ``receive`` method as a handler for the ``Work`` message. In this handler we invoke the ``calculatePiFor(..)`` method, wrap the result in a ``Result`` message and send it back to the original sender using ``self.reply``. In Akka the sender reference is implicitly passed along with the message so that the receiver can always reply or store away the sender reference for future use.
As you can see we have now created an ``Actor`` with a ``receive`` method as a
handler for the ``Work`` message. In this handler we invoke the
``calculatePiFor(..)`` method, wrap the result in a ``Result`` message and send
it back to the original sender using ``self.reply``. In Akka the sender
reference is implicitly passed along with the message so that the receiver can
always reply or store away the sender reference for future use.
The only thing missing in our ``Worker`` actor is the implementation on the ``calculatePiFor(..)`` method. While there are many ways we can implement this algorithm in Scala, in this introductory tutorial we have chosen an imperative style using a for comprehension and an accumulator::
The only thing missing in our ``Worker`` actor is the implementation of the
``calculatePiFor(..)`` method. While there are many ways we can implement this
algorithm in Scala, in this introductory tutorial we have chosen an imperative
style using a for comprehension and an accumulator:
.. includecode:: code/tutorials/first/Pi.scala#calculatePiFor
def calculatePiFor(start: Int, nrOfElements: Int): Double = {
var acc = 0.0
for (i <- start until (start + nrOfElements))
acc += 4.0 * (1 - (i % 2) * 2) / (2 * i + 1)
acc
}
Creating the master
-------------------
===================
The master actor is a little bit more involved. In its constructor we need to create the workers (the ``Worker`` actors) and start them. We will also wrap them in a load-balancing router to make it easier to spread out the work evenly between the workers. Let's do that first::
The master actor is a little bit more involved. In its constructor we need to
create the workers (the ``Worker`` actors) and start them. We will also wrap
them in a load-balancing router to make it easier to spread out the work evenly
between the workers. Let's do that first:
// create the workers
val workers = Vector.fill(nrOfWorkers)(actorOf[Worker])
.. includecode:: code/tutorials/first/Pi.scala#create-workers
// wrap them with a load-balancing router
val router = Routing.loadBalancerActor(CyclicIterator(workers))
As you can see we are using the ``actorOf`` factory method to create actors, this method returns as an ``ActorRef`` which is a reference to our newly created actor. This method is available in the ``Actor`` object but is usually imported::
As you can see we are using the ``actorOf`` factory method to create actors.
This method returns an ``ActorRef`` which is a reference to our newly created
actor. This method is available in the ``Actor`` object but is usually
imported::
import akka.actor.Actor.actorOf
There are two versions of ``actorOf``; one of them taking a actor type and the other one an instance of an actor. The former one (``actorOf[MyActor]``) is used when the actor class has a no-argument constructor while the second one (``actorOf(new MyActor(..))``) is used when the actor class has a constructor that takes arguments. This is the only way to create an instance of an Actor and the ``actorOf`` method ensures this. The latter version is using call-by-name and lazily creates the actor within the scope of the ``actorOf`` method. The ``actorOf`` method instantiates the actor and returns, not an instance to the actor, but an instance to an ``ActorRef``. This reference is the handle through which you communicate with the actor. It is immutable, serializable and location-aware meaning that it "remembers" its original actor even if it is sent to other nodes across the network and can be seen as the equivalent to the Erlang actor's PID.
There are two versions of ``actorOf``: one of them takes an actor type and the
other one an instance of an actor. The former (``actorOf[MyActor]``) is used
when the actor class has a no-argument constructor while the latter
(``actorOf(new MyActor(..))``) is used when the actor class has a constructor
that takes arguments. This is the only way to create an instance of an Actor and
the ``actorOf`` method ensures this. The latter version uses call-by-name
and lazily creates the actor within the scope of the ``actorOf`` method. The
``actorOf`` method instantiates the actor and returns, not an instance of the
actor, but an ``ActorRef``. This reference is the handle through
which you communicate with the actor. It is immutable, serializable and
location-aware, meaning that it "remembers" its original actor even if it is sent
to other nodes across the network; it can be seen as the equivalent of the
Erlang actor's PID.
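As a condensed sketch of the two forms, reusing the ``Worker`` and ``Master`` classes from this tutorial (and assuming their constructor arguments are in scope)::

  import akka.actor.Actor.actorOf

  // no-argument constructor: pass the actor type
  val worker = actorOf[Worker]

  // constructor with arguments: pass a call-by-name block creating the actor
  val master = actorOf(new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch))

  // both calls return an ActorRef, the handle used to talk to the actor
  master ! Calculate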
The actor's life-cycle is:
@ -268,233 +324,103 @@ The actor's life-cycle is:
Once the actor has been stopped it is dead and can not be started again.
Now we have a router that is representing all our workers in a single abstraction. If you paid attention to the code above, you saw that we were using the ``nrOfWorkers`` variable. This variable and others we have to pass to the ``Master`` actor in its constructor. So now let's create the master actor. We have to pass in three integer variables:
Now we have a router that represents all our workers in a single
abstraction. If you paid attention to the code above, you saw that we were using
the ``nrOfWorkers`` variable. We have to pass this variable, and a few others, to
the ``Master`` actor in its constructor. So now let's create the master actor. We
have to pass in three integer variables:
- ``nrOfWorkers`` -- defining how many workers we should start up
- ``nrOfMessages`` -- defining how many number chunks to send out to the workers
- ``nrOfElements`` -- defining how big the number chunks sent to each worker should be
Here is the master actor::
Here is the master actor:
class Master(
nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch)
extends Actor {
var pi: Double = _
var nrOfResults: Int = _
var start: Long = _
// create the workers
val workers = Vector.fill(nrOfWorkers)(actorOf[Worker])
// wrap them with a load-balancing router
val router = Routing.loadBalancerActor(CyclicIterator(workers))
def receive = { ... }
override def preStart() {
start = System.currentTimeMillis
}
override def postStop() {
// tell the world that the calculation is complete
println(
"\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis"
.format(pi, (System.currentTimeMillis - start)))
latch.countDown()
}
}
.. includecode:: code/tutorials/first/Pi.scala#master
:exclude: handle-messages
A couple of things are worth explaining further.
First, we are passing in a ``java.util.concurrent.CountDownLatch`` to the ``Master`` actor. This latch is only used for plumbing (in this specific tutorial), to have a simple way of letting the outside world knowing when the master can deliver the result and shut down. In more idiomatic Akka code, as we will see in part two of this tutorial series, we would not use a latch but other abstractions and functions like ``Channel``, ``Future`` and ``?`` to achieve the same thing in a non-blocking way. But for simplicity let's stick to a ``CountDownLatch`` for now.
First, we are passing in a ``java.util.concurrent.CountDownLatch`` to the
``Master`` actor. This latch is only used for plumbing (in this specific
tutorial), to have a simple way of letting the outside world know when the
master can deliver the result and shut down. In more idiomatic Akka code, as we
will see in part two of this tutorial series, we would not use a latch but other
abstractions and functions like ``Channel``, ``Future`` and ``?`` to achieve the
same thing in a non-blocking way. But for simplicity let's stick to a
``CountDownLatch`` for now.
Second, we are adding a couple of life-cycle callback methods; ``preStart`` and ``postStop``. In the ``preStart`` callback we are recording the time when the actor is started and in the ``postStop`` callback we are printing out the result (the approximation of Pi) and the time it took to calculate it. In this call we also invoke ``latch.countDown`` to tell the outside world that we are done.
Second, we are adding a couple of life-cycle callback methods; ``preStart`` and
``postStop``. In the ``preStart`` callback we are recording the time when the
actor is started and in the ``postStop`` callback we are printing out the result
(the approximation of Pi) and the time it took to calculate it. In this call we
also invoke ``latch.countDown`` to tell the outside world that we are done.
But we are not done yet. We are missing the message handler for the ``Master`` actor. This message handler needs to be able to react to two different messages:
But we are not done yet. We are missing the message handler for the ``Master``
actor. This message handler needs to be able to react to two different messages:
- ``Calculate`` -- which should start the calculation
- ``Result`` -- which should aggregate the different results
The ``Calculate`` handler is sending out work to all the ``Worker`` actors and after doing that it also sends a ``Broadcast(PoisonPill)`` message to the router, which will send out the ``PoisonPill`` message to all the actors it is representing (in our case all the ``Worker`` actors). ``PoisonPill`` is a special kind of message that tells the receiver to shut itself down using the normal shutdown method; ``self.stop``. We also send a ``PoisonPill`` to the router itself (since it's also an actor that we want to shut down).
The ``Calculate`` handler is sending out work to all the ``Worker`` actors and
after doing that it also sends a ``Broadcast(PoisonPill)`` message to the
router, which will send out the ``PoisonPill`` message to all the actors it is
representing (in our case all the ``Worker`` actors). ``PoisonPill`` is a
special kind of message that tells the receiver to shut itself down using the
normal shutdown method; ``self.stop``. We also send a ``PoisonPill`` to the
router itself (since it's also an actor that we want to shut down).
The ``Result`` handler is simpler, here we get the value from the ``Result`` message and aggregate it to our ``pi`` member variable. We also keep track of how many results we have received back, and if that matches the number of tasks sent out, the ``Master`` actor considers itself done and shuts down.
The ``Result`` handler is simpler: here we get the value from the ``Result``
message and aggregate it into our ``pi`` member variable. We also keep track of
how many results we have received back, and if that matches the number of tasks
sent out, the ``Master`` actor considers itself done and shuts down.
Let's capture this in code::
Let's capture this in code:
// message handler
def receive = {
case Calculate =>
// schedule work
for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements)
.. includecode:: code/tutorials/first/Pi.scala#master-receive
// send a PoisonPill to all workers telling them to shut down themselves
router ! Broadcast(PoisonPill)
// send a PoisonPill to the router, telling him to shut himself down
router ! PoisonPill
case Result(value) =>
// handle result from the worker
pi += value
nrOfResults += 1
if (nrOfResults == nrOfMessages) self.stop()
}
Bootstrap the calculation
-------------------------
=========================
Now the only thing that is left to implement is the runner that should bootstrap and run the calculation for us. We do that by creating an object that we call ``Pi``, here we can extend the ``App`` trait in Scala, which means that we will be able to run this as an application directly from the command line.
Now the only thing that is left to implement is the runner that should bootstrap
and run the calculation for us. We do that by creating an object that we call
``Pi``. Here we extend the ``App`` trait in Scala, which means that we will
be able to run this as an application directly from the command line.
The ``Pi`` object is a perfect container module for our actors and messages, so let's put them all there. We also create a method ``calculate`` in which we start up the ``Master`` actor and wait for it to finish::
The ``Pi`` object is a perfect container module for our actors and messages, so
let's put them all there. We also create a method ``calculate`` in which we
start up the ``Master`` actor and wait for it to finish:
object Pi extends App {
calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000)
... // actors and messages
def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) {
// this latch is only plumbing to know when the calculation is completed
val latch = new CountDownLatch(1)
// create the master
val master = actorOf(
new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch))
// start the calculation
master ! Calculate
// wait for master to shut down
latch.await()
}
}
.. includecode:: code/tutorials/first/Pi.scala#app
:exclude: actors-and-messages
That's it. Now we are done.
But before we package it up and run it, let's take a look at the full code now, with package declaration, imports and all::
But before we package it up and run it, let's take a look at the full code now,
with package declaration, imports and all:
package akka.tutorial.first.scala
.. includecode:: code/tutorials/first/Pi.scala
import akka.actor.{Actor, PoisonPill}
import Actor._
import akka.routing.{Routing, CyclicIterator}
import Routing._
import java.util.concurrent.CountDownLatch
object Pi extends App {
calculate(nrOfWorkers = 4, nrOfElements = 10000, nrOfMessages = 10000)
// ====================
// ===== Messages =====
// ====================
sealed trait PiMessage
case object Calculate extends PiMessage
case class Work(start: Int, nrOfElements: Int) extends PiMessage
case class Result(value: Double) extends PiMessage
// ==================
// ===== Worker =====
// ==================
class Worker extends Actor {
// define the work
def calculatePiFor(start: Int, nrOfElements: Int): Double = {
var acc = 0.0
for (i <- start until (start + nrOfElements))
acc += 4.0 * (1 - (i % 2) * 2) / (2 * i + 1)
acc
}
def receive = {
case Work(start, nrOfElements) =>
self reply Result(calculatePiFor(start, nrOfElements)) // perform the work
}
}
// ==================
// ===== Master =====
// ==================
class Master(
nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch)
extends Actor {
var pi: Double = _
var nrOfResults: Int = _
var start: Long = _
// create the workers
val workers = Vector.fill(nrOfWorkers)(actorOf[Worker])
// wrap them with a load-balancing router
val router = Routing.loadBalancerActor(CyclicIterator(workers))
// message handler
def receive = {
case Calculate =>
// schedule work
//for (start <- 0 until nrOfMessages) router ! Work(start, nrOfElements)
for (i <- 0 until nrOfMessages) router ! Work(i * nrOfElements, nrOfElements)
// send a PoisonPill to all workers telling them to shut down themselves
router ! Broadcast(PoisonPill)
// send a PoisonPill to the router, telling him to shut himself down
router ! PoisonPill
case Result(value) =>
// handle result from the worker
pi += value
nrOfResults += 1
if (nrOfResults == nrOfMessages) self.stop()
}
override def preStart() {
start = System.currentTimeMillis
}
override def postStop() {
// tell the world that the calculation is complete
println(
"\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis"
.format(pi, (System.currentTimeMillis - start)))
latch.countDown()
}
}
// ==================
// ===== Run it =====
// ==================
def calculate(nrOfWorkers: Int, nrOfElements: Int, nrOfMessages: Int) {
// this latch is only plumbing to know when the calculation is completed
val latch = new CountDownLatch(1)
// create the master
val master = actorOf(
new Master(nrOfWorkers, nrOfMessages, nrOfElements, latch))
// start the calculation
master ! Calculate
// wait for master to shut down
latch.await()
}
}
Run it as a command line application
------------------------------------
====================================
If you have not typed in (or copied) the code for the tutorial as ``$AKKA_HOME/tutorial/Pi.scala`` then now is the time. When that's done open up a shell and step in to the Akka distribution (``cd $AKKA_HOME``).
If you have not typed in (or copied) the code for the tutorial as
``$AKKA_HOME/tutorial/Pi.scala`` then now is the time. When that's done, open up
a shell and step into the Akka distribution (``cd $AKKA_HOME``).
First we need to compile the source file. That is done with Scala's compiler ``scalac``. Our application depends on the ``akka-actor-2.0-SNAPSHOT.jar`` JAR file, so let's add that to the compiler classpath when we compile the source::
First we need to compile the source file. That is done with Scala's compiler
``scalac``. Our application depends on the ``akka-actor-2.0-SNAPSHOT.jar`` JAR
file, so let's add that to the compiler classpath when we compile the source::
$ scalac -cp lib/akka/akka-actor-2.0-SNAPSHOT.jar tutorial/Pi.scala
When we have compiled the source file we are ready to run the application. This is done with ``java`` but yet again we need to add the ``akka-actor-2.0-SNAPSHOT.jar`` JAR file to the classpath, and this time we also need to add the Scala runtime library ``scala-library.jar`` and the classes we compiled ourselves::
When we have compiled the source file we are ready to run the application. This
is done with ``java`` but yet again we need to add the
``akka-actor-2.0-SNAPSHOT.jar`` JAR file to the classpath, and this time we also
need to add the Scala runtime library ``scala-library.jar`` and the classes we
compiled ourselves::
$ java \
-cp lib/scala-library.jar:lib/akka/akka-actor-2.0-SNAPSHOT.jar:. \
@ -507,12 +433,16 @@ When we have compiled the source file we are ready to run the application. This
Yippee! It is working.
If you have not defined the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Cant load akka.conf`` warning. This is ok since it will then just use the defaults.
If you have not defined the ``AKKA_HOME`` environment variable then Akka can't
find the ``akka.conf`` configuration file and will print out a ``Cant load
akka.conf`` warning. This is ok since it will then just use the defaults.
Run it inside SBT
-----------------
=================
If you used SBT, then you can run the application directly inside SBT. First you need to compile the project::
If you used SBT, then you can run the application directly inside SBT. First you
need to compile the project::
$ sbt
> compile
@ -527,13 +457,22 @@ When this in done we can run our application directly inside SBT::
Yippee! It is working.
If you have not defined an the ``AKKA_HOME`` environment variable then Akka can't find the ``akka.conf`` configuration file and will print out a ``Cant load akka.conf`` warning. This is ok since it will then just use the defaults.
If you have not defined the ``AKKA_HOME`` environment variable then Akka
can't find the ``akka.conf`` configuration file and will print out a ``Cant
load akka.conf`` warning. This is ok since it will then just use the defaults.
Conclusion
----------
==========
We have learned how to create our first Akka project using Akka's actors to speed up a computation-intensive problem by scaling out on multi-core processors (also known as scaling up). We have also learned to compile and run an Akka project using either the tools on the command line or the SBT build system.
We have learned how to create our first Akka project using Akka's actors to
speed up a computation-intensive problem by scaling out on multi-core processors
(also known as scaling up). We have also learned to compile and run an Akka
project using either the tools on the command line or the SBT build system.
If you have a multi-core machine then I encourage you to try out different number of workers (number of working actors) by tweaking the ``nrOfWorkers`` variable to for example; 2, 4, 6, 8 etc. to see performance improvement by scaling up.
If you have a multi-core machine then I encourage you to try out different
numbers of workers (number of working actors) by tweaking the ``nrOfWorkers``
variable to, for example, 2, 4, 6, or 8, to see the performance improvement from
scaling up.
Happy hakking.

View file

@ -5,8 +5,9 @@ Getting Started
.. contents:: :local:
The best way to start learning Akka is to try the Getting Started Tutorial, which comes in several flavours
depending on you development environment preferences:
The best way to start learning Akka is to try the Getting Started Tutorial,
which comes in several flavours depending on your development environment
preferences:
- :ref:`getting-started-first-java` for Java development, either
@ -20,9 +21,10 @@ depending on you development environment preferences:
- :ref:`getting-started-first-scala-eclipse` for Scala development with Eclipse
The Getting Started Tutorial describes everything you need to get going, and you don't need to read the rest of
this page if you study the tutorial. For later look back reference this page describes the
essential parts for getting started with different development environments.
The Getting Started Tutorial describes everything you need to get going, and you
don't need to read the rest of this page if you study the tutorial. For later
reference, this page describes the essential parts for getting started
with different development environments.
Prerequisites
-------------
@ -33,9 +35,10 @@ later installed on you machine.
Download
--------
There are several ways to download Akka. You can download the full distribution with microkernel, which includes
all modules. You can download just the core distribution. Or you can use a build tool like Maven or SBT to download
dependencies from the Akka Maven repository.
There are several ways to download Akka. You can download the full distribution
with microkernel, which includes all modules. You can download just the core
distribution or just the actors distribution. Or you can use a build tool like
Maven or sbt to download dependencies from the Akka Maven repository.
Modules
-------
@ -54,13 +57,23 @@ Akka is very modular and has many JARs for containing different features.
- ``akka-spring-2.0-SNAPSHOT.jar`` -- Spring framework integration
- ``akka-kernel-2.0-SNAPSHOT.jar`` -- Akka microkernel for running a bare-bones mini application server (embeds Jetty etc.)
How to see the JARs dependencies of each Akka module is described in the :ref:`dependencies` section. Worth noting
is that ``akka-actor`` has zero external dependencies (apart from the ``scala-library.jar`` JAR).
How to see the JAR dependencies of each Akka module is described in the
:ref:`dependencies` section. Worth noting is that ``akka-actor`` has zero
external dependencies (apart from the ``scala-library.jar`` JAR).
Using a release distribution
----------------------------
Download the release you need, Akka core or Akka Modules, from `<http://akka.io/downloads>`_ and unzip it.
Download the release you need, Akka Actors, Akka Core, or Akka Microkernel, from
http://akka.io/downloads and unzip it.
Using a snapshot version
------------------------
The Akka nightly snapshots are published to
http://repo.typesafe.com/typesafe/maven-timestamps/ and are versioned with a
timestamp. You need to choose a timestamped version to work with and can decide
when to update to a newer version.
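For example, in an sbt build this would look roughly like the following (the resolver name and the timestamped version are placeholders; pick a real timestamped version from the repository listing)::

  resolvers += "Typesafe Timestamps" at "http://repo.typesafe.com/typesafe/maven-timestamps/"

  libraryDependencies += "se.scalablesolutions.akka" % "akka-actor" % "2.0-SOME-TIMESTAMP"

As elsewhere with ``.sbt`` files, keep a blank line between the two settings.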
Microkernel
^^^^^^^^^^^
@ -75,14 +88,17 @@ More information is available in the documentation of the :ref:`microkernel`.
Using a build tool
------------------
Akka can be used with build tools that support Maven repositories. The Akka Maven repository can be found at `<http://akka.io/repository>`_
and Typesafe provides `<http://repo.typesafe.com/typesafe/releases/>`_ that proxies several other repositories, including akka.io.
Akka can be used with build tools that support Maven repositories. The Akka
Maven repository can be found at http://akka.io/repository/ and Typesafe provides
http://repo.typesafe.com/typesafe/releases/, which proxies several other
repositories, including akka.io.
Using Akka with Maven
---------------------
Information about how to use Akka with Maven, including how to create an Akka Maven project from scratch,
can be found in the :ref:`getting-started-first-java`.
Information about how to use Akka with Maven, including how to create an Akka
Maven project from scratch, can be found in the
:ref:`getting-started-first-java`.
Summary of the essential parts for using Akka with Maven:
@ -106,6 +122,7 @@ Summary of the essential parts for using Akka with Maven:
<version>2.0-SNAPSHOT</version>
</dependency>
**Note**: for snapshot versions Akka uses specific timestamped versions.
Using Akka with SBT
@ -124,7 +141,7 @@ SBT installation instructions on `https://github.com/harrah/xsbt/wiki/Setup <htt
version := "1.0"
scalaVersion := "2.9.0-1"
scalaVersion := "2.9.1"
resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"

View file

@ -27,26 +27,52 @@ Download from http://akka.io/downloads/
Akka implements a unique hybrid
===============================
- :ref:`untyped-actors-java`, which gives you:
Actors
------
Actors give you:
- Simple and high-level abstractions for concurrency and parallelism.
- Asynchronous, non-blocking and highly performant event-driven programming model.
- Very lightweight event-driven processes (create ~6.5 million actors on 4GB RAM).
- :ref:`fault-tolerance-java` through supervisor hierarchies with "let-it-crash"
semantics. Excellent for writing highly fault-tolerant systems that never
stop, systems that self-heal.
See :ref:`actors-scala` and :ref:`untyped-actors-java`
- :ref:`stm-java` (STM). (Distributed transactions coming soon).
Fault Tolerance
---------------
- :ref:`transactors-java`: combine actors and STM into transactional
actors. Allows you to compose atomic message flows with automatic retry and
rollback.
Fault tolerance through supervisor hierarchies with "let-it-crash"
semantics. Excellent for writing highly fault-tolerant systems that never stop,
systems that self-heal.
- :ref:`remote-actors-java`: highly performant distributed actors with remote
supervision and error management.
See :ref:`fault-tolerance-scala` and :ref:`fault-tolerance-java`
- :ref:`java-api` and :ref:`scala-api`
Software Transactional Memory (STM)
-----------------------------------
See :ref:`stm-scala` and :ref:`stm-java`
Transactors
-----------
Transactors combine actors and STM into transactional actors. They allow you to
compose atomic message flows with automatic retry and rollback.
See :ref:`transactors-scala` and :ref:`transactors-java`
Remote Actors
-------------
Highly performant distributed actors with remote supervision and error
management.
See :ref:`remote-actors-scala` and :ref:`remote-actors-java`.
Scala and Java APIs
===================
Akka has both a :ref:`scala-api` and a :ref:`java-api`.
Akka can be used in two different ways
@ -58,5 +84,4 @@ Akka can be used in two different ways
- As a microkernel: stand-alone kernel, embedding a servlet container and all
the other modules.
See the :ref:`deployment-scenarios` for details.

View file

@ -46,11 +46,11 @@ Here's what some of the Akka users have to say about how they are using Akka:
http://stackoverflow.com/questions/4493001/good-use-case-for-akka
Cloudy Akka
-----------
Akka Atmos
----------
And that's all in the ApacheV2-licensed open source project. On top of that we
have a commercial product called Cloudy Akka which provides the following
have a commercial product called Akka Atmos which provides the following
features:
#. Management through Dashboard, JMX and REST

View file

@ -1,83 +1,102 @@
.. _actors-scala:
Actors (Scala)
==============
################
Actors (Scala)
################
.. sidebar:: Contents
.. contents:: :local:
Module stability: **SOLID**
The `Actor Model <http://en.wikipedia.org/wiki/Actor_model>`_ provides a higher level of abstraction for writing concurrent and distributed systems. It alleviates the developer from having to deal with explicit locking and thread management, making it easier to write correct concurrent and parallel systems. Actors were defined in the 1973 paper by Carl Hewitt but have been popularized by the Erlang language, and used for example at Ericsson with great success to build highly concurrent and reliable telecom systems.
The `Actor Model`_ provides a higher level of abstraction for writing concurrent
and distributed systems. It alleviates the developer from having to deal with
explicit locking and thread management, making it easier to write correct
concurrent and parallel systems. Actors were defined in the 1973 paper by Carl
Hewitt but have been popularized by the Erlang language, and used for example at
Ericsson with great success to build highly concurrent and reliable telecom
systems.
The API of Akkas Actors is similar to Scala Actors which has borrowed some of its syntax from Erlang.
The API of Akka's Actors is similar to Scala Actors which has borrowed some of
its syntax from Erlang.
.. _Actor Model: http://en.wikipedia.org/wiki/Actor_model
The Akka 0.9 release introduced a new concept; ActorRef, which requires some refactoring. If you are new to Akka just read along, but if you have used Akka 0.6.x, 0.7.x and 0.8.x then you might be helped by the :doc:`0.8.x => 0.9.x migration guide </project/migration-guide-0.8.x-0.9.x>`
Creating Actors
---------------
===============
Actors can be created either by:
* Extending the Actor class and implementing the receive method.
* Creating an anonymous actor using one of the actor methods.
Defining an Actor class
^^^^^^^^^^^^^^^^^^^^^^^
Actor classes are implemented by extending the Actor class and implementing the ``receive`` method. The ``receive`` method should define a series of case statements (which has the type ``PartialFunction[Any, Unit]``) that defines which messages your Actor can handle, using standard Scala pattern matching, along with the implementation of how the messages should be processed.
Defining an Actor class
-----------------------
Actor classes are implemented by extending the Actor class and implementing the
``receive`` method. The ``receive`` method should define a series of case
statements (which has the type ``PartialFunction[Any, Unit]``) that defines
which messages your Actor can handle, using standard Scala pattern matching,
along with the implementation of how the messages should be processed.
Here is an example:
.. code-block:: scala
.. includecode:: code/ActorDocSpec.scala
:include: imports,my-actor
import akka.actor.Actor
import akka.event.EventHandler
Please note that the Akka Actor ``receive`` message loop is exhaustive, which is
different compared to Erlang and Scala Actors. This means that you need to
provide a pattern match for all messages that it can accept and if you want to
be able to handle unknown messages then you need to have a default case as in
the example above.
class MyActor extends Actor {
def receive = {
case "test" => EventHandler.info(this, "received test")
case _ => EventHandler.info(this, "received unknown message")
}
}
Please note that the Akka Actor ``receive`` message loop is exhaustive, which is different compared to Erlang and Scala Actors. This means that you need to provide a pattern match for all messages that it can accept and if you want to be able to handle unknown messages then you need to have a default case as in the example above.
Creating Actors
^^^^^^^^^^^^^^^
---------------
.. code-block:: scala
val myActor = Actor.actorOf[MyActor]
.. includecode:: code/ActorDocSpec.scala#creating-actorOf
Normally you would want to import the ``actorOf`` method like this:
.. code-block:: scala
.. includecode:: code/ActorDocSpec.scala#creating-imported
import akka.actor.Actor._
to avoid prefixing it with ``Actor`` every time you use it.
val myActor = actorOf[MyActor]
The call to ``actorOf`` returns an instance of ``ActorRef``. This is a handle to
the ``Actor`` instance which you can use to interact with the ``Actor``. The
``ActorRef`` is immutable and has a one to one relationship with the Actor it
represents. The ``ActorRef`` is also serializable and network-aware. This means
that you can serialize it, send it over the wire and use it on a remote host and
it will still be representing the same Actor on the original node, across the
network.
To avoid prefixing it with ``Actor`` every time you use it.
The call to ``actorOf`` returns an instance of ``ActorRef``. This is a handle to the ``Actor`` instance which you can use to interact with the ``Actor``. The ``ActorRef`` is immutable and has a one to one relationship with the Actor it represents. The ``ActorRef`` is also serializable and network-aware. This means that you can serialize it, send it over the wire and use it on a remote host and it will still be representing the same Actor on the original node, across the network.
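As a small sketch of how this is used in practice (with the ``MyActor`` class from the example above, and following the tutorial code in not calling any explicit start method), all interaction goes through the returned reference rather than the actor instance itself::

  import akka.actor.Actor.actorOf
  import akka.actor.ActorRef

  // actorOf hands back an ActorRef, never the MyActor instance itself
  val myActor: ActorRef = actorOf[MyActor]

  // all communication goes through the reference, e.g. a fire-and-forget send
  myActor ! "test"

  // stop the actor through the same reference when it is no longer needed
  myActor.stop()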
Creating Actors with non-default constructor
--------------------------------------------
If your Actor has a constructor that takes parameters then you can't create it
using ``actorOf[TYPE]``. Instead you can use a variant of ``actorOf`` that takes
a call-by-name block in which you can create the Actor in any way you like.
Here is an example:
.. includecode:: code/ActorDocSpec.scala#creating-constructor
Running a block of code asynchronously
--------------------------------------
Here we create a light-weight actor-based thread, that can be used to spawn off
a task. Code blocks spawned up like this are always implicitly started, shut
down and made eligible for garbage collection. The actor that is created "under
the hood" is not reachable from the outside and there is no way of sending
messages to it. It being an actor is only an implementation detail. It will only
run the block in an event-based thread and exit once the block has run to
completion.
.. code-block:: scala
@ -85,10 +104,11 @@ Here we create a light-weight actor-based thread, that can be used to spawn off

  spawn {
    ... // do stuff
  }
Actor Internal API
==================

The :class:`Actor` trait defines only one abstract method, the above-mentioned
:meth:`receive`. In addition, it offers two convenience methods
:meth:`become`/:meth:`unbecome` for modifying the hotswap behavior stack as
described in :ref:`Actor.HotSwap` and the :obj:`self` reference to this actor's
@ -107,24 +127,22 @@ described in the following::
The implementations shown above are the defaults provided by the :class:`Actor`
trait.
Start Hook
----------

Right after starting the actor, its :meth:`preStart` method is invoked. This is
guaranteed to happen before the first message from external sources is queued
to the actor's mailbox.
::

  override def preStart {
    // e.g. send initial message to self
    self ! GetMeStarted
    // or do any other stuff, e.g. registering with other actors
    someService ! Register(self)
  }
Restart Hooks
-------------
A supervised actor, i.e. one which is linked to another actor with a fault
handling strategy, will be restarted in case an exception is thrown while
@ -151,27 +169,31 @@ sent to an actor while it is being restarted will be queued to its mailbox as
usual.
Stop Hook
---------
After stopping an actor, its :meth:`postStop` hook is called, which may be used
e.g. for deregistering this actor from other services. This hook is guaranteed
to run after message queuing has been disabled for this actor, i.e. sending
messages would fail with an :class:`IllegalActorStateException`.
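
As a minimal sketch, mirroring the start hook example above (``someService``
and ``Deregister`` are assumed names, not part of the Akka API)::

  override def postStop {
    // e.g. deregister from the services registered with in preStart
    someService ! Deregister(self)
  }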
Identifying Actors
==================
An actor is identified by its address. If no address is associated with an actor
then a unique identifier is used instead. The address of an actor can be
accessed using ``self.address``.
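
A small sketch of reading the address from within an actor, assuming the same
imports as the earlier ``MyActor`` example (the class name is illustrative):

.. code-block:: scala

  class WhoAmI extends Actor {
    def receive = {
      case "who are you?" => EventHandler.info(this, "my address is " + self.address)
    }
  }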
Messages and immutability
=========================
**IMPORTANT**: Messages can be any kind of object but have to be
immutable. Scala can't enforce immutability (yet) so this has to be by
convention. Primitives like String, Int, Boolean are always immutable. Apart
from these, the recommended approach is to use Scala case classes, which are
immutable (if you don't explicitly expose the state) and work great with
pattern matching at the receiver side.
Here is an example:
@ -183,10 +205,12 @@ Here is an example:
// create a new case class message
val message = Register(user)
Other good message types are ``scala.Tuple2``, ``scala.List``, ``scala.Map``
which are all immutable and great for pattern matching.
Send messages
=============
Messages are sent to an Actor through one of the following methods.
@ -197,8 +221,7 @@ Messages are sent to an Actor through one of the following methods.
.. note::
There used to be two more “bang” methods, which are now removed in Akka 2.0:
* ``!!`` was similar to the current ``(actor ? msg).as[T]``; deprecation
followed from the change of timeout handling described below.
@ -210,14 +233,14 @@ Messages are sent to an Actor through one of the following methods.
Message ordering is guaranteed on a per-sender basis.
Fire-forget
-----------
This is the preferred way of sending messages. No blocking waiting for a
message. This gives the best concurrency and scalability characteristics.
.. code-block:: scala
actor ! "Hello"
actor ! "hello"
If invoked from within an Actor, then the sending actor reference will be
implicitly passed along with the message and available to the receiving Actor
@ -230,14 +253,14 @@ sender passed along with the message and you will get an
IllegalActorStateException when calling ``self.reply(...)``.
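
A small sketch of fire-forget between two actors (the ``Pinger``/``Ponger``
names are illustrative):

.. code-block:: scala

  class Ponger extends Actor {
    def receive = {
      case "ping" => self.reply("pong") // possible because the sender was passed along
    }
  }

  class Pinger(ponger: ActorRef) extends Actor {
    def receive = {
      case "start" => ponger ! "ping" // fire-forget; this actor is implicitly the sender
      case "pong"  => EventHandler.info(this, "got a pong back")
    }
  }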
Send-And-Receive-Future
-----------------------
Using ``?`` will send a message to the receiving Actor asynchronously and
will return a :class:`Future`:
.. code-block:: scala
val future = actor ? "Hello"
val future = actor ? "hello"
The receiving actor should reply to this message, which will complete the
future with the reply message as value; if the actor throws an exception while
@ -260,7 +283,7 @@ See :ref:`futures-scala` for more information on how to await or query a
future.
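
A small sketch of working with the returned future, using the ``as[T]``
conversion that is also shown further below (timeout handling is simplified):

.. code-block:: scala

  val future = actor ? "hello"   // returns a Future
  val result = future.as[String] // Option[String]; None in case of a timeout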
Send-And-Receive-Eventually
---------------------------
The future returned from the ``?`` method can conveniently be passed around or
chained with further processing steps, but sometimes you just need the value,
@ -290,16 +313,20 @@ case of a timeout, :obj:`None` is returned.
for (x <- (actor ? msg).as[Int]) yield { 2 * x }
Forward message
---------------
You can forward a message from one actor to another. This means that the
original sender address/reference is maintained even though the message is going
through a 'mediator'. This can be useful when writing actors that work as
routers, load-balancers, replicators etc.
.. code-block:: scala
actor.forward(message)
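
A minimal sketch of such a 'mediator' actor (the ``next`` target is an assumed
constructor argument):

.. code-block:: scala

  class ForwardingActor(next: ActorRef) extends Actor {
    def receive = {
      case message => next forward message // the original sender stays intact
    }
  }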
Receive messages
================
An Actor has to implement the ``receive`` method to receive messages:
@ -307,9 +334,13 @@ An Actor has to implement the ``receive`` method to receive messages:
protected def receive: PartialFunction[Any, Unit]
Note: Akka has an alias to the ``PartialFunction[Any, Unit]`` type called
``Receive`` (``akka.actor.Actor.Receive``), so you can use this type instead for
clarity. But most often you don't need to spell it out.
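
For example (a sketch; the actor itself is illustrative):

.. code-block:: scala

  import akka.actor.Actor
  import akka.actor.Actor.Receive
  import akka.event.EventHandler

  class LoggingActor extends Actor {
    def receive: Receive = {
      case message => EventHandler.info(this, "got: " + message)
    }
  }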
This method should return a ``PartialFunction``, e.g. a match/case clause in
which the message can be matched against the different case clauses using Scala
pattern matching. Here is an example:
.. code-block:: scala
@ -323,94 +354,55 @@ This method should return a ``PartialFunction``, e.g. a match/case clause
}
}
Reply to messages
=================
Reply using the sender
----------------------
If you want to have a handle for replying to a message, you can use
``context.sender``, which gives you an ActorRef. You can reply by sending to
that ActorRef with ``context.sender ! Message``. You can also store the ActorRef
for replying later, or passing on to other actors. If there is no sender (a
message was sent without an actor or future context) then the context.sender
defaults to a 'dead-letter' actor ref.
.. code-block:: scala

  case request =>
    val result = process(request)
    context.sender ! result       // will have dead-letter actor as default
    context.sender tryTell result // will return Boolean whether reply succeeded
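
As mentioned above, the sender reference can also be stored away and used for
replying later. A minimal sketch (the messages and names are illustrative):

.. code-block:: scala

  class AskMeLater extends Actor {
    var asker: Option[ActorRef] = None

    def receive = {
      case "ask"    => asker = Some(context.sender)        // remember who asked
      case "answer" => asker foreach { _ ! "here you go" } // reply much later
    }
  }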
Reply using the reply method
----------------------------
If you want to send a message back to the original sender of the message you
just received then you can use the ``context.reply(..)`` method.
.. code-block:: scala

  case request =>
    val result = process(request)
    context.reply(result)
In this case the ``result`` will be sent back to the Actor that sent the
``request``. This is equivalent to using ``context.sender ! result``.
The ``reply`` method throws an ``IllegalStateException`` if unable to determine what to reply to, e.g. the sender is not an actor. You can also use the more forgiving ``tryReply`` method which returns ``true`` if reply was sent, and ``false`` if unable to determine what to reply to.
.. code-block:: scala

  case request =>
    val result = process(request)
    if (self.tryReply(result)) ... // success
    else ...                       // handle failure
Initial receive timeout
=======================
A timeout mechanism can be used to receive a message when no initial message is
received within a certain time. To receive this timeout you have to set the
``receiveTimeout`` property and declare a case handling the ``ReceiveTimeout``
object.
.. code-block:: scala

  context.receiveTimeout = Some(30000L) // 30 seconds

  def receive = {
    case "Hello" =>
@ -419,10 +411,12 @@ A timeout mechanism can be used to receive a message when no initial message is
throw new RuntimeException("received timeout")
}
This mechanism also works for hotswapped receive functions. Every time a
``HotSwap`` is sent, the receive timeout is reset and rescheduled.
Starting actors
===============
Actors are created & started by invoking the ``actorOf`` method.
@ -431,7 +425,9 @@ Actors are created & started by invoking the ``actorOf`` method.
val actor = actorOf[MyActor]
actor
When you create the ``Actor`` then it will automatically call the
``def preStart`` callback method on the ``Actor`` trait. This is an excellent
place to add initialization code for the actor.
.. code-block:: scala
@ -439,8 +435,9 @@ When you create the ``Actor`` then it will automatically call the ``def preStart

  override def preStart {
    ... // initialization code
  }
Stopping actors
===============
Actors are stopped by invoking the ``stop`` method.
@ -448,7 +445,8 @@ Actors are stopped by invoking the ``stop`` method.
actor.stop()
When stop is called then a call to the ``def postStop`` callback method will
take place. The ``Actor`` can use this callback to implement shutdown behavior.
.. code-block:: scala
@ -456,56 +454,55 @@ When stop is called then a call to the ``def postStop`` callback method will tak

  override def postStop {
    ... // clean up resources
  }
You can shut down all Actors in the system by invoking:
.. code-block:: scala
Actor.registry.shutdownAll()
PoisonPill
==========
You can also send an actor the ``akka.actor.PoisonPill`` message, which will
stop the actor when the message is processed.
If the sender is a ``Future`` (e.g. the message is sent with ``?``), the
``Future`` will be completed with an
``akka.actor.ActorKilledException("PoisonPill")``.
.. _Actor.HotSwap:
HotSwap
=======
Upgrade
-------
Akka supports hotswapping the Actor's message loop (e.g. its implementation) at
runtime. There are two ways you can do that:
* Send a ``HotSwap`` message to the Actor.
* Invoke the ``become`` method from within the Actor.
Both of these take an ``ActorRef => PartialFunction[Any, Unit]`` that implements
the new message handler. The hotswapped code is kept in a Stack which can be
pushed and popped.
To hotswap the Actor body using the ``HotSwap`` message:
.. code-block:: scala

  actor ! HotSwap( context => {
    case message => context reply "hotswapped body"
  })
Using the ``HotSwap`` message for hotswapping has its limitations. You cannot
replace it with any code that uses the Actor's ``self`` reference. If you need
to do that then the ``become`` method is better.
To hotswap the Actor using ``become``:
.. code-block:: scala

  def angry: Receive = {
    case "foo" => context reply "I am already angry?"
    case "bar" => become(happy)
  }

  def happy: Receive = {
    case "bar" => context reply "I am already happy :-)"
    case "foo" => become(angry)
  }
@ -514,7 +511,11 @@ To hotswap the Actor using ``become``:
case "bar" => become(happy)
}
The ``become`` method is useful for many different things, but a particularly
nice example is when it is used to implement a Finite State Machine (FSM):
`Dining Hakkers`_.

.. _Dining Hakkers: http://github.com/jboner/akka/blob/master/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala
Here is another cute little example of ``become`` and ``unbecome`` in action:
@ -542,18 +543,24 @@ Here is another little cute example of ``become`` and ``unbecome`` in action:
swap ! Swap // prints Hi
swap ! Swap // prints Ho
Encoding Scala Actors nested receives without accidentally leaking memory
-------------------------------------------------------------------------
See this `Unnested receive example <https://gist.github.com/797035>`_.
Downgrade
---------
Since the hotswapped code is pushed to a Stack you can downgrade the code as
well. There are two ways you can do that:
* Send the Actor a ``RevertHotswap`` message
* Invoke the ``unbecome`` method from within the Actor.
Both of these will pop the Stack and replace the Actor's implementation with the
``PartialFunction[Any, Unit]`` that is at the top of the Stack.
Revert the Actor body using the ``RevertHotSwap`` message:
@ -569,10 +576,12 @@ Revert the Actor body using the ``unbecome`` method:
case "revert" => unbecome()
}
Killing an Actor
================
You can kill an actor by sending a ``Kill`` message. This will restart the actor
through regular supervisor semantics.
Use it like this:
@ -581,49 +590,52 @@ Use it like this:
// kill the actor called 'victim'
victim ! Kill
Actor life-cycle
================
The actor has a well-defined non-circular life-cycle.
::

  NEW (newly created actor) - can't receive messages (yet)
  => STARTED (when 'start' is invoked) - can receive messages
  => SHUT DOWN (when 'exit' or 'stop' is invoked) - can't do anything
Actors and exceptions
=====================

It can happen that, while a message is being processed by an actor, some kind
of exception is thrown, e.g. a database exception.
What happens to the Message
---------------------------
If an exception is thrown while a message is being processed (i.e. taken off
its mailbox and handed over to the ``receive`` method), then this message will
be lost. It is important to understand that it is not put back in the mailbox.
So if you want to retry processing of a message, you need to deal with it
yourself by catching the exception and retrying your flow. Make sure that you
put a bound on the number of retries since you don't want a system to livelock
(consuming a lot of CPU cycles without making progress).
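
A sketch of such a bounded retry, where ``Work`` and ``process`` are
illustrative stand-ins for your own message type and logic:

.. code-block:: scala

  import akka.actor.Actor

  case class Work(payload: String)

  class RetryingActor extends Actor {
    val maxRetries = 3 // bound the retries to avoid livelocking the system

    def process(work: Work) { /* may throw, e.g. a database exception */ }

    def receive = {
      case work: Work => attempt(work, maxRetries)
    }

    private def attempt(work: Work, retriesLeft: Int) {
      try process(work)
      catch {
        // when the retries are exhausted the exception propagates as usual
        case e: Exception if retriesLeft > 0 => attempt(work, retriesLeft - 1)
      }
    }
  }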
What happens to the mailbox
---------------------------

If an exception is thrown while a message is being processed, nothing happens to
the mailbox. If the actor is restarted, the same mailbox will be there. So all
messages in that mailbox will be there as well.
What happens to the actor
-------------------------

If an exception is thrown and the actor is supervised, the actor object itself
is discarded and a new instance is created. This new instance will now be used
in the actor references to this actor (so this is done invisibly to the
developer).

If the actor is *not* supervised, but its lifeCycle is set to Permanent
(default), it will just keep on processing messages as if nothing had happened.

If the actor is *not* supervised, but its lifeCycle is set to Temporary, it will
be stopped immediately.
Extending Actors using PartialFunction chaining
===============================================
A bit advanced but very useful way of defining a base message handler and then
extending it, either through inheritance or delegation, is to use
``PartialFunction.orElse`` chaining.
In generic base Actor:
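
A rough sketch of what such a generic base actor and a subclass could look like
(the class and method names are illustrative, not necessarily those of the
original listing):

.. code-block:: scala

  import akka.actor.Actor
  import akka.actor.Actor.Receive
  import akka.event.EventHandler

  abstract class GenericActor extends Actor {
    // to be defined in the subclassing actor
    def specificMessageHandler: Receive

    // generic fallback behaviour shared by all subclasses
    def genericMessageHandler: Receive = {
      case unknown => EventHandler.info(this, "generic: " + unknown)
    }

    def receive = specificMessageHandler orElse genericMessageHandler
  }

  class SpecificActor extends GenericActor {
    def specificMessageHandler: Receive = {
      case "specific" => EventHandler.info(this, "handled specifically")
    }
  }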
View file
@ -0,0 +1,77 @@
package akka.docs.stm

import org.scalatest.{ BeforeAndAfterAll, WordSpec }
import org.scalatest.matchers.MustMatchers
import akka.testkit._
import akka.util.duration._

//#imports
import akka.actor.Actor
import akka.event.EventHandler
//#imports

//#my-actor
class MyActor extends Actor {
  def receive = {
    case "test" ⇒ EventHandler.info(this, "received test")
    case _      ⇒ EventHandler.info(this, "received unknown message")
  }
}
//#my-actor

class ActorDocSpec extends WordSpec with MustMatchers with TestKit {

  "creating actor with Actor.actorOf" in {
    //#creating-actorOf
    val myActor = Actor.actorOf[MyActor]
    //#creating-actorOf

    // testing the actor
    EventHandler.notify(TestEvent.Mute(EventFilter.custom {
      case e: EventHandler.Info ⇒ true
      case _                    ⇒ false
    }))
    EventHandler.addListener(testActor)
    val eventLevel = EventHandler.level
    EventHandler.level = EventHandler.InfoLevel

    myActor ! "test"
    expectMsgPF(1 second) { case EventHandler.Info(_, "received test") ⇒ true }

    myActor ! "unknown"
    expectMsgPF(1 second) { case EventHandler.Info(_, "received unknown message") ⇒ true }

    EventHandler.level = eventLevel
    EventHandler.removeListener(testActor)
    EventHandler.notify(TestEvent.UnMuteAll)

    myActor.stop()
  }

  "creating actor with imported Actor._" in {
    //#creating-imported
    import akka.actor.Actor._
    val myActor = actorOf[MyActor]
    //#creating-imported

    myActor.stop()
  }

  "creating actor with constructor" in {
    class MyActor(arg: String) extends Actor {
      def receive = { case _ ⇒ () }
    }

    import akka.actor.Actor.actorOf
    //#creating-constructor
    // allows passing in arguments to the MyActor constructor
    val myActor = actorOf(new MyActor("..."))
    //#creating-constructor

    myActor.stop()
  }
}
View file
@ -0,0 +1,27 @@
package akka.docs.stm

import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers

class StmDocSpec extends WordSpec with MustMatchers {

  "simple counter example" in {
    //#simple
    import akka.stm._

    val ref = Ref(0)

    def counter = atomic {
      ref alter (_ + 1)
    }

    counter
    // -> 1

    counter
    // -> 2
    //#simple

    ref.get must be === 2
  }
}
View file
@ -1,3 +1,5 @@
.. _remote-actors-scala:
Remote Actors (Scala)
=====================
View file
@ -1,54 +1,71 @@
.. _stm-scala:
#######################################
Software Transactional Memory (Scala)
#######################################
.. sidebar:: Contents

   .. contents:: :local:

Module stability: **SOLID**
Overview of STM
===============
An `STM <http://en.wikipedia.org/wiki/Software_transactional_memory>`_ turns the
Java heap into a transactional data set with begin/commit/rollback
semantics. Very much like a regular database. It implements the first three
letters in ACID; ACI:
* Atomic
* Consistent
* Isolated
Generally, the STM is not needed very often when working with Akka. Some
use-cases (that we can think of) are:
- When you really need composable message flows across many actors updating
their **internal local** state but need them to do that atomically in one big
transaction. Might not be often, but when you do need this then you are
screwed without it.
- When you want to share a datastructure across actors.
- When you need to use the persistence modules.
Akka's STM implements the concept in `Clojure's <http://clojure.org/>`_ STM view
on state in general. Please take the time to read `this excellent document
<http://clojure.org/state>`_ and view `this presentation
<http://www.infoq.com/presentations/Value-Identity-State-Rich-Hickey>`_ by Rich
Hickey (the genius behind Clojure), since it forms the basis of Akka's view on
STM and state in general.
The STM is based on Transactional References (referred to as Refs). Refs are
memory cells, holding an (arbitrary) immutable value, that implement CAS
(Compare-And-Swap) semantics and are managed and enforced by the STM for
coordinated changes across many Refs. They are implemented using the excellent
`Multiverse STM <http://multiverse.codehaus.org/overview.html>`_.
Working with immutable collections can sometimes give bad performance due to
extensive copying. Scala provides so-called persistent datastructures which
make working with immutable collections fast. They are immutable but with
constant time access and modification. They use structural sharing and an insert
or update does not ruin the old structure, hence “persistent”. This makes working
with immutable composite types fast. The persistent datastructures currently
consist of a Map and Vector.
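
A small illustration of the structural-sharing idea, using Scala's standard
immutable collections:

.. code-block:: scala

  val v1 = Vector(1, 2, 3)
  val v2 = v1 :+ 4         // v1 is untouched; v2 shares most of its structure with v1

  val m1 = Map("a" -> 1)
  val m2 = m1 + ("b" -> 2) // m1 is untouched as well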
Simple example
==============
Here is a simple example of an incremental counter using STM. This shows
creating a ``Ref``, a transactional reference, and then modifying it within a
transaction, which is delimited by ``atomic``.
.. includecode:: code/StmDocSpec.scala#simple
Ref
View file
@ -5,7 +5,7 @@ import Keys._
import com.typesafe.sbtmultijvm.MultiJvmPlugin
import MultiJvmPlugin.{ MultiJvm, extraOptions, jvmOptions, scalatestOptions }
import com.typesafe.sbtscalariform.ScalariformPlugin
import ScalariformPlugin.{ format, formatPreferences, formatSourceDirectories }
import java.lang.Boolean.getBoolean
object AkkaBuild extends Build {
@ -25,7 +25,7 @@ object AkkaBuild extends Build {
Unidoc.unidocExclude := Seq(samples.id, tutorials.id),
rstdocDirectory <<= baseDirectory / "akka-docs"
),
aggregate = Seq(actor, testkit, actorTests, stm, http, remote, slf4j, samples, tutorials, docs)
//aggregate = Seq(actor, testkit, actorTests, stm, http, slf4j, cluster, mailboxes, camel, camelTyped, samples, tutorials)
)
@ -299,6 +299,17 @@ object AkkaBuild extends Build {
settings = defaultSettings
)
  lazy val docs = Project(
    id = "akka-docs",
    base = file("akka-docs"),
    dependencies = Seq(actor, testkit, stm, http, remote, slf4j),
    settings = defaultSettings ++ Seq(
      unmanagedSourceDirectories in Test <<= baseDirectory { _ ** "code" get },
      libraryDependencies ++= Dependencies.docs,
      formatSourceDirectories in Test <<= unmanagedSourceDirectories in Test
    )
  )
// Settings
override lazy val settings = super.settings ++ buildSettings ++ Publish.versionSettings
@ -423,6 +434,8 @@ object Dependencies {
// TODO: resolve Jetty version conflict
// val sampleCamel = Seq(camelCore, camelSpring, commonsCodec, Runtime.camelJms, Runtime.activemq, Runtime.springJms,
// Test.junit, Test.scalatest, Test.logback)
val docs = Seq(Test.scalatest, Test.junit)
}
object Dependency {