Misc additions to, and rewrites and formatting of, the documentation.

Signed-off-by: Jonas Bonér <jonas@jonasboner.com>
Jonas Bonér 2011-12-15 14:26:17 +01:00
parent b0e630a239
commit ce296b0481
29 changed files with 270 additions and 223 deletions

View file

@ -39,7 +39,8 @@ akka {
# Timeout for ActorSystem.actorOf
creation-timeout = 20s
# frequency with which stopping actors are prodded in case they had to be removed from their parents
# frequency with which stopping actors are prodded in case they had to be
# removed from their parents
reaper-interval = 5s
# Default timeout for Future based invocations
@ -83,9 +84,9 @@ akka {
}
target {
# Alternatively to giving nr-of-instances you can specify the full paths of
# those actors which should be routed to. This setting takes precedence over
# nr-of-instances
# Alternatively to giving nr-of-instances you can specify the full
# paths of those actors which should be routed to. This setting takes
# precedence over nr-of-instances
paths = []
}
@ -94,8 +95,10 @@ akka {
default-dispatcher {
# Must be one of the following
# Dispatcher, (BalancingDispatcher, only valid when all actors using it are of the same type),
# A FQCN to a class inheriting MessageDispatcherConfigurator with a no-arg visible constructor
# Dispatcher, (BalancingDispatcher, only valid when all actors using it are of
# the same type),
# A FQCN to a class inheriting MessageDispatcherConfigurator with a no-arg
# visible constructor
type = "Dispatcher"
# Name used in log messages and thread names.
@ -129,22 +132,25 @@ akka {
# Specifies the bounded capacity of the task queue (< 1 == unbounded)
task-queue-size = -1
# Specifies which type of task queue will be used, can be "array" or "linked" (default)
# Specifies which type of task queue will be used, can be "array" or
# "linked" (default)
task-queue-type = "linked"
# Allow core threads to time out
allow-core-timeout = on
# Throughput defines the number of messages that are processed in a batch before the
# thread is returned to the pool. Set to 1 for as fair as possible.
# Throughput defines the number of messages that are processed in a batch
# before the thread is returned to the pool. Set to 1 for as fair as possible.
throughput = 5
# Throughput deadline for Dispatcher, set to 0 or negative for no deadline
throughput-deadline-time = 0ms
# If negative (or zero) then an unbounded mailbox is used (default)
# If positive then a bounded mailbox is used and the capacity is set using the property
# NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to deadlock, use with care
# If positive then a bounded mailbox is used and the capacity is set using the
# property
# NOTE: setting a mailbox to 'blocking' can be a bit dangerous, could lead to
# deadlock, use with care
# The following are only used for Dispatcher and only if mailbox-capacity > 0
mailbox-capacity = -1
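# For example, a (hypothetical) application.conf override using a bounded
# mailbox and single-message batching for maximum fairness:
#   akka.actor.default-dispatcher {
#     throughput = 1
#     mailbox-capacity = 1000
#   }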
@ -154,7 +160,8 @@ akka {
}
debug {
# enable function of Actor.loggable(), which is to log any received message at DEBUG level
# enable function of Actor.loggable(), which is to log any received message at
# DEBUG level
receive = off
# enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill and the like)
@ -170,8 +177,8 @@ akka {
event-stream = off
}
# Entries for pluggable serializers and their bindings. If a binding for a specific class is not found,
# then the default serializer (Java serialization) is used.
# Entries for pluggable serializers and their bindings. If a binding for a specific
# class is not found, then the default serializer (Java serialization) is used.
serializers {
# java = "akka.serialization.JavaSerializer"
# proto = "akka.testing.ProtobufSerializer"
@ -193,13 +200,16 @@ akka {
}
# Used to set the behavior of the scheduler.
# Changing the default values may change the system behavior drastically so make sure you know what you're doing!
# Changing the default values may change the system behavior drastically so make sure
# you know what you're doing!
#
scheduler {
# The HashedWheelTimer (HWT) implementation from Netty is used as the default scheduler in the system.
# The HashedWheelTimer (HWT) implementation from Netty is used as the default scheduler
# in the system.
# HWT does not execute the scheduled tasks at the exact time.
# It will, on every tick, check if there are any tasks behind the schedule and execute them.
# You can increase or decrease the accuracy of the execution timing by specifying smaller or larger tick duration.
# You can increase or decrease the accuracy of the execution timing by specifying smaller
# or larger tick duration.
# If you are scheduling a lot of tasks you should consider increasing the ticks per wheel.
# For more information see: http://www.jboss.org/netty/
tickDuration = 100ms
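# For example, a (hypothetical) application.conf override that trades some
# scheduling accuracy for lower timer overhead:
#   akka.scheduler.tickDuration = 200ms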

View file

@ -25,48 +25,60 @@ import akka.util.Duration
*/
trait Scheduler {
/**
* Schedules a message to be sent repeatedly with an initial delay and frequency.
* E.g. if you would like a message to be sent immediately and thereafter every 500ms you would set
* delay = Duration.Zero and frequency = Duration(500, TimeUnit.MILLISECONDS)
* Schedules a message to be sent repeatedly with an initial delay and
* frequency. E.g. if you would like a message to be sent immediately and
* thereafter every 500ms you would set delay = Duration.Zero and frequency
* = Duration(500, TimeUnit.MILLISECONDS)
*
* Java & Scala API
*/
def schedule(initialDelay: Duration, frequency: Duration, receiver: ActorRef, message: Any): Cancellable
def schedule(
initialDelay: Duration,
frequency: Duration,
receiver: ActorRef,
message: Any): Cancellable
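// Illustrative sketch only (not part of this trait): assuming an ActorSystem
// `system`, an ActorRef `receiver` and a `Tick` message are in scope, sending
// Tick immediately and then every 500ms could look like:
//   system.scheduler.schedule(
//     Duration.Zero, Duration(500, TimeUnit.MILLISECONDS), receiver, Tick)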
/**
* Schedules a function to be run repeatedly with an initial delay and a frequency.
* E.g. if you would like the function to be run after 2 seconds and thereafter every 100ms you would set
* delay = Duration(2, TimeUnit.SECONDS) and frequency = Duration(100, TimeUnit.MILLISECONDS)
* Schedules a function to be run repeatedly with an initial delay and a
* frequency. E.g. if you would like the function to be run after 2 seconds
* and thereafter every 100ms you would set delay = Duration(2, TimeUnit.SECONDS)
* and frequency = Duration(100, TimeUnit.MILLISECONDS)
*
* Scala API
*/
def schedule(initialDelay: Duration, frequency: Duration)(f: ⇒ Unit): Cancellable
def schedule(
initialDelay: Duration, frequency: Duration)(f: ⇒ Unit): Cancellable
/**
* Schedules a function to be run repeatedly with an initial delay and a frequency.
* E.g. if you would like the function to be run after 2 seconds and thereafter every 100ms you would set
* delay = Duration(2, TimeUnit.SECONDS) and frequency = Duration(100, TimeUnit.MILLISECONDS)
* Schedules a function to be run repeatedly with an initial delay and
* a frequency. E.g. if you would like the function to be run after 2
* seconds and thereafter every 100ms you would set delay = Duration(2,
* TimeUnit.SECONDS) and frequency = Duration(100, TimeUnit.MILLISECONDS)
*
* Java API
*/
def schedule(initialDelay: Duration, frequency: Duration, runnable: Runnable): Cancellable
def schedule(
initialDelay: Duration, frequency: Duration, runnable: Runnable): Cancellable
/**
* Schedules a Runnable to be run once with a delay, i.e. a time period that has to pass before the runnable is executed.
* Schedules a Runnable to be run once with a delay, i.e. a time period that
* has to pass before the runnable is executed.
*
* Java & Scala API
*/
def scheduleOnce(delay: Duration, runnable: Runnable): Cancellable
/**
* Schedules a message to be sent once with a delay, i.e. a time period that has to pass before the message is sent.
* Schedules a message to be sent once with a delay, i.e. a time period that has
* to pass before the message is sent.
*
* Java & Scala API
*/
def scheduleOnce(delay: Duration, receiver: ActorRef, message: Any): Cancellable
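// Illustrative sketch only: assuming `system`, `receiver` and a `Reminder`
// message are in scope, a single delayed send that can later be cancelled
// might look like:
//   val reminder = system.scheduler.scheduleOnce(
//     Duration(5, TimeUnit.SECONDS), receiver, Reminder)
//   // calling reminder.cancel() before it fires would stop the send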
/**
* Schedules a function to be run once with a delay, i.e. a time period that has to pass before the function is run.
* Schedules a function to be run once with a delay, i.e. a time period that has
* to pass before the function is run.
*
* Scala API
*/
@ -95,4 +107,4 @@ trait Cancellable {
*/
def isCancelled: Boolean
}
//#cancellable
//#cancellable

View file

@ -20,12 +20,3 @@ Compares:
- Request-reply
- Fire-forget with default dispatcher
- Fire-forget with Hawt dispatcher
Performance benchmark
---------------------
Benchmarking Akka against:
- Scala Library Actors
- Raw Java concurrency
- Jetlang (Java actors lib) `<http://github.com/jboner/akka-bench>`_

View file

@ -4,10 +4,8 @@ Additional Information
.. toctree::
:maxdepth: 2
articles
benchmarks
recipies
external-sample-projects
companies-using-akka
third-party-integrations
language-bindings

View file

@ -4,21 +4,14 @@ Other Language Bindings
JRuby
-----
High level concurrency using Akka actors and JRuby.
`<https://github.com/danielribeiro/RubyOnAkka>`_
If you are using STM with JRuby then you need to unwrap the Multiverse control flow exception as follows:
.. code-block:: ruby
begin
... atomic stuff
rescue NativeException => e
raise e.cause if e.cause.java_class.package.name.include? "org.multiverse"
end
Read more here: `<https://github.com/iconara/mikka>`_.
Groovy/Groovy++
---------------
`<https://gist.github.com/620439>`_
Read more here: `<https://gist.github.com/620439>`_.
Clojure
-------
Read more here: `<http://blog.darevay.com/2011/06/clojure-and-akka-a-match-made-in/>`_.

View file

@ -4,19 +4,14 @@ Third-party Integrations
The Play! Framework
-------------------
Dustin Whitney has done an Akka integration module for the `Play! framework <http://www.playframework.org/>`_.
Play 2.0 is based upon Akka. It does all of its eventing and threading using Akka actors and futures.
Detailed instructions here: `<http://github.com/dwhitney/akka/blob/master/README.textile>`_.
Read more here: `<http://www.playframework.org/2.0>`_.
There are three screencasts:
Scalatra
--------
- Using Play! with Akka STM: `<http://vimeo.com/10764693>`_
- Using Play! with Akka Actors: `<http://vimeo.com/10792173>`_
- Using Play! with Akka Remote Actors: `<http://vimeo.com/10793443>`_
Scalatra has Akka integration.
The Pinky REST/MVC Framework
----------------------------
Read more here: `<https://github.com/scalatra/scalatra/blob/feature/newakka/akka/src/main/scala/org/scalatra/akka/AkkaSupport.scala>`_
Peter Hausel has done an Akka integration module for the `Pinky framework <http://wiki.github.com/pk11/pinky/>`_.
Read more here: `<http://wiki.github.com/pk11/pinky/release-13>`_

View file

@ -1,12 +1,15 @@
.. _cluster:
#########
Cluster
#########
######################
Cluster Specification
######################
*This document describes the new clustering coming in Akka 2.1*
.. sidebar:: Contents
.. contents:: :local:
*This document describes the new clustering coming in Akka 2.1 (not 2.0)*
Intro
=====

View file

@ -65,7 +65,8 @@ object Pi extends App {
val workers = Vector.fill(nrOfWorkers)(actorOf(Props[Worker]))
// wrap them with a load-balancing router
val router = Routing.actorOf(RoutedProps().withRoundRobinRouter.withLocalConnections(workers), "pi")
val router = Routing.actorOf(
RoutedProps().withRoundRobinRouter.withLocalConnections(workers), "pi")
loadBalancerActor(CyclicIterator(workers))
//#create-workers

View file

@ -6,7 +6,6 @@ import org.scalatest.matchers.MustMatchers
//#imports
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
//#imports
class ConfigDocSpec extends WordSpec with MustMatchers {
@ -15,7 +14,7 @@ class ConfigDocSpec extends WordSpec with MustMatchers {
//#custom-config
val customConf = ConfigFactory.parseString("""
akka.actor.deployment {
/user/my-service {
/my-service {
router = round-robin
nr-of-instances = 3
}
@ -27,7 +26,5 @@ class ConfigDocSpec extends WordSpec with MustMatchers {
//#custom-config
system.shutdown()
}
}

View file

@ -57,46 +57,57 @@ Defining the configuration file
Each Akka module has a reference configuration file with the default values.
*akka-actor:*
akka-actor
~~~~~~~~~~
.. literalinclude:: ../../akka-actor/src/main/resources/reference.conf
:language: none
*akka-remote:*
akka-remote
~~~~~~~~~~~
.. literalinclude:: ../../akka-remote/src/main/resources/reference.conf
:language: none
*akka-testkit:*
akka-testkit
~~~~~~~~~~~~
.. literalinclude:: ../../akka-testkit/src/main/resources/reference.conf
:language: none
*akka-beanstalk-mailbox:*
akka-beanstalk-mailbox
~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../akka-durable-mailboxes/akka-beanstalk-mailbox/src/main/resources/reference.conf
:language: none
*akka-file-mailbox:*
akka-file-mailbox
~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../akka-durable-mailboxes/akka-file-mailbox/src/main/resources/reference.conf
:language: none
*akka-mongo-mailbox:*
akka-mongo-mailbox
~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../akka-durable-mailboxes/akka-mongo-mailbox/src/main/resources/reference.conf
:language: none
*akka-redis-mailbox:*
akka-redis-mailbox
~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../akka-durable-mailboxes/akka-redis-mailbox/src/main/resources/reference.conf
:language: none
*akka-zookeeper-mailbox:*
akka-zookeeper-mailbox
~~~~~~~~~~~~~~~~~~~~~~
.. literalinclude:: ../../akka-durable-mailboxes/akka-zookeeper-mailbox/src/main/resources/reference.conf
:language: none
Custom application.conf
-----------------------
A custom ``application.conf`` might look like this::
# In this file you can override any option defined in the reference files.

View file

@ -117,10 +117,9 @@ modules are:
- ``akka-kernel`` -- Akka microkernel for running a bare-bones mini application server
- ``akka-durable-mailboxes`` -- Durable mailboxes: file-based, MongoDB, Redis, Zookeeper
- ``akka-amqp`` -- AMQP integration
- ``akka-durable-mailboxes`` -- Durable mailboxes: file-based, MongoDB, Redis, Beanstalk and Zookeeper
.. - ``akka-amqp`` -- AMQP integration
.. - ``akka-stm-2.0-SNAPSHOT.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures
.. - ``akka-camel-2.0-SNAPSHOT.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world)
.. - ``akka-camel-typed-2.0-SNAPSHOT.jar`` -- Apache Camel Typed Actors integration

View file

@ -141,8 +141,7 @@ modules are:
- ``akka-durable-mailboxes`` -- Durable mailboxes: file-based, MongoDB, Redis, Zookeeper
- ``akka-amqp`` -- AMQP integration
.. - ``akka-amqp`` -- AMQP integration
.. - ``akka-stm-2.0-SNAPSHOT.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures
.. - ``akka-camel-2.0-SNAPSHOT.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world)
.. - ``akka-camel-typed-2.0-SNAPSHOT.jar`` -- Apache Camel Typed Actors integration

View file

@ -44,16 +44,12 @@ Modules
Akka is very modular and has many JARs for containing different features.
- ``akka-actor-2.0-SNAPSHOT.jar`` -- Standard Actors
- ``akka-typed-actor-2.0-SNAPSHOT.jar`` -- Typed Actors
- ``akka-actor-2.0-SNAPSHOT.jar`` -- Standard Actors, Typed Actors and much more
- ``akka-remote-2.0-SNAPSHOT.jar`` -- Remote Actors
- ``akka-stm-2.0-SNAPSHOT.jar`` -- STM (Software Transactional Memory), transactors and transactional datastructures
- ``akka-slf4j-2.0-SNAPSHOT.jar`` -- SLF4J Event Handler Listener
- ``akka-testkit-2.0-SNAPSHOT.jar`` -- Toolkit for testing Actors
- ``akka-camel-2.0-SNAPSHOT.jar`` -- Apache Camel Actors integration (it's the best way to have your Akka application communicate with the rest of the world)
- ``akka-camel-typed-2.0-SNAPSHOT.jar`` -- Apache Camel Typed Actors integration
- ``akka-spring-2.0-SNAPSHOT.jar`` -- Spring framework integration
- ``akka-kernel-2.0-SNAPSHOT.jar`` -- Akka microkernel for running a bare-bones mini application server
- ``akka-<storage-system>-mailbox-2.0-SNAPSHOT.jar`` -- Akka durable mailboxes
How to see the JAR dependencies of each Akka module is described in the
:ref:`dependencies` section. Worth noting is that ``akka-actor`` has zero

View file

@ -1,49 +1,59 @@
Examples of use-cases for Akka
==============================
.. _use-cases:
################################
Examples of use-cases for Akka
################################
We see Akka being adopted by many large organizations in a wide range of
industries: investment and merchant banking, retail and social media, simulation,
gaming and betting, automobile and traffic systems, health care, data analytics
and much more. Any system that needs high throughput and low latency is a good
candidate for using Akka.
There is a great discussion on use-cases for Akka with some good write-ups by production
users `here <http://stackoverflow.com/questions/4493001/good-use-case-for-akka/4494512#4494512>`_
Here are some of the areas where Akka is being deployed into production
-----------------------------------------------------------------------
=======================================================================
**Transaction processing (Online Gaming, Finance/Banking, Trading, Statistics, Betting, Social Media, Telecom)**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Transaction processing (Online Gaming, Finance/Banking, Trading, Statistics, Betting, Social Media, Telecom)
------------------------------------------------------------------------------------------------------------
Scale up, scale out, fault-tolerance / HA
**Service backend (any industry, any app)**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Service backend (any industry, any app)
---------------------------------------
Service REST, SOAP, Cometd, WebSockets etc
Act as message hub / integration layer
Scale up, scale out, fault-tolerance / HA
**Concurrency/parallelism (any app)**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Concurrency/parallelism (any app)
---------------------------------
Correct
Simple to work with and understand
Just add the jars to your existing JVM project (use Scala, Java, Groovy or JRuby)
**Simulation**
^^^^^^^^^^^^^^
Simulation
----------
Master/Worker, Compute Grid, MapReduce etc.
**Batch processing (any industry)**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Batch processing (any industry)
-------------------------------
Camel integration to hook up with batch data sources
Actors divide and conquer the batch workloads
**Communications Hub (Telecom, Web media, Mobile media)**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Communications Hub (Telecom, Web media, Mobile media)
-----------------------------------------------------
Scale up, scale out, fault-tolerance / HA
**Gaming and Betting (MOM, online gaming, betting)**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Gaming and Betting (MOM, online gaming, betting)
------------------------------------------------
Scale up, scale out, fault-tolerance / HA
**Business Intelligence/Data Mining/general purpose crunching**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Business Intelligence/Data Mining/general purpose crunching
-----------------------------------------------------------
Scale up, scale out, fault-tolerance / HA
**Complex Event Stream Processing**
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Complex Event Stream Processing
-------------------------------
Scale up, scale out, fault-tolerance / HA

View file

@ -5,21 +5,23 @@
What is Akka?
###############
.. sidebar:: Contents
**Simpler Scalability, Fault-Tolerance, Concurrency & Remoting through Actors**
.. contents:: :local:
**Scalable real-time transaction processing**
We believe that writing correct concurrent, fault-tolerant and scalable
applications is too hard. Most of the time it's because we are using the wrong
tools and the wrong level of abstraction. Akka is here to change that. Using the
Actor Model together with Software Transactional Memory we raise the abstraction
level and provide a better platform to build correct concurrent and scalable
applications. For fault-tolerance we adopt the Let it crash/Embrace failure
model which have been used with great success in the telecom industry to build
Actor Model we raise the abstraction level and provide a better platform to build
correct concurrent and scalable applications. For fault-tolerance we adopt the
"Let it crash" model which have been used with great success in the telecom industry to build
applications that self-heals, systems that never stop. Actors also provides the
abstraction for transparent distribution and the basis for truly scalable and
fault-tolerant applications. Akka is Open Source and available under the Apache
2 License.
fault-tolerant applications.
Akka is Open Source and available under the Apache 2 License.
Download from http://akka.io/downloads/
@ -43,18 +45,11 @@ Fault Tolerance
Fault tolerance through supervisor hierarchies with "let-it-crash"
semantics. Excellent for writing highly fault-tolerant systems that never stop,
systems that self-heal.
systems that self-heal. Supervisor hierarchies can span multiple JVMs to
provide truly fault-tolerant systems.
See :ref:`fault-tolerance-scala` and :ref:`fault-tolerance-java`
Transactors
-----------
Transactors combine actors and STM (Software Transactional Memory) into transactional actors.
It allows you to compose atomic message flows with automatic retry and rollback.
See :ref:`transactors-scala` and :ref:`transactors-java`
Remote Actors
-------------
@ -63,6 +58,14 @@ management.
See :ref:`remote-actors-scala` and :ref:`remote-actors-java`.
Transactors
-----------
Transactors combine actors and STM (Software Transactional Memory) into transactional actors.
They allow you to compose atomic message flows with automatic retry and rollback.
See :ref:`transactors-scala` and :ref:`transactors-java`
Scala and Java APIs
===================
@ -73,9 +76,37 @@ Akka has both a :ref:`scala-api` and a :ref:`java-api`.
Akka can be used in two different ways
======================================
- As a library: used by a web app, to be put into WEB-INF/lib or as a regular
- As a library: used by a web app, to be put into ``WEB-INF/lib`` or as a regular
JAR on your classpath.
- As a microkernel: stand-alone kernel to drop your application into.
See the :ref:`deployment-scenarios` for details.
Typesafe Stack
==============
Akka is now also part of the `Typesafe Stack <http://typesafe.com/stack>`_.
The Typesafe Stack is a modern software platform that makes it easy for developers
to build scalable software applications. It combines the Scala programming language,
Akka, the Play! web framework and robust developer tools in a simple package that
integrates seamlessly with existing Java infrastructure.
The Typesafe Stack is fully open source.
Typesafe Console
================
On top of the Typesafe Stack we also have a commercial product called Typesafe
Console, which provides the following features:
#. Management through Dashboard, JMX and REST
#. Dapper-style tracing of messages across components and remote nodes
#. Real-time statistics
#. Very low overhead monitoring agents (should always be on in production)
#. Consolidation of statistics and logging information to a single node
#. Storage of statistics data for later processing
#. Provisioning and rolling upgrades
Read more `here <http://typesafe.com/products/typesafe-subscription>`_.

View file

@ -4,6 +4,8 @@ Why Akka?
What features can the Akka platform offer, over the competition?
----------------------------------------------------------------
Akka provides scalable real-time transaction processing.
Akka is a unified runtime and programming model for:
- Scale up (Concurrency)
@ -25,39 +27,21 @@ even if you're only running it on one machine. Akka also supplies a wide array
of concurrency paradigms, allowing users to choose the right tool for the
job.
The integration possibilities for Akka Actors are immense through the Apache
Camel integration. We have Transactors for coordinated concurrent transactions,
as well as Agents and Dataflow concurrency.
What's a good use-case for Akka?
--------------------------------
(Web, Cloud, Application) Services - Actors lets you manage service failures
(Supervisors), load management (back-off strategies, timeouts and
processing-isolation), both horizontal and vertical scalability (add more cores
and/or add more machines). Think payment processing, invoicing, order matching,
datacrunching, messaging. Really any highly transactional systems like banking,
betting, games.
We see Akka being adopted by many large organizations in a wide range of
industries: investment and merchant banking, retail and social media, simulation,
gaming and betting, automobile and traffic systems, health care, data analytics
and much more. Any system that needs high throughput and low latency is a good
candidate for using Akka.
Actors let you manage service failures (Supervisors), load (back-off
strategies, timeouts and processing-isolation), and both horizontal and
vertical scalability (add more cores and/or more machines).
Here's what some of the Akka users have to say about how they are using Akka:
http://stackoverflow.com/questions/4493001/good-use-case-for-akka
Akka Atmos
----------
And that's all in the ApacheV2-licensed open source project. On top of that we
have a commercial product called Akka Atmos which provides the following
features:
#. Management through Dashboard, JMX and REST
#. Dapper-style tracing of messages across components and remote nodes
#. A configurable alert system
#. Real-time statistics
#. Very low overhead monitoring agents (should always be on in production)
#. Consolidation of statistics and logging information to a single node
#. Storage of statistics data for later processing
#. Provisioning and rolling upgrades
Read more `here <http://typesafe.com/products/typesafe-subscription>`_.
All this in the ApacheV2-licensed open source project.

View file

@ -12,8 +12,6 @@ Java API
scheduler
futures
dataflow
transactors
fault-tolerance
dispatchers
routing
guice-integration

View file

@ -6,3 +6,6 @@
#######
The Akka Camel module has not been migrated to Akka 2.0-SNAPSHOT yet.
It might not make it into Akka 2.0 final but will hopefully be
re-introduced in an upcoming release.

View file

@ -19,13 +19,13 @@ resides on crashes, then when you restart the node, the actor will be able to
continue processing as if nothing had happened; with all pending messages still
in its mailbox.
None of these mailboxes implements transactions for current message. It's possible
None of these mailboxes implements transactions for the current message. It is possible,
if the actor crashes after receiving a message, but before completing processing of
it, that the message could be lost.
it, that the message could be lost.
.. warning:: **IMPORTANT**
None of these mailboxes work with blocking message send, e.g. the message
None of these mailboxes work with blocking message send, i.e. the message
send operations that rely on futures (``?`` or ``ask``). If the node
has crashed and then restarted, the thread that was blocked waiting for the
reply is gone and there is no way we can deliver the message.
@ -42,11 +42,15 @@ We'll walk through each one of these in detail in the sections below.
You can easily implement your own mailbox. Look at the existing implementations for inspiration.
Soon Akka will also have:
We are also discussing adding some of these durable mailboxes:
- ``AmqpBasedMailbox`` -- AMQP based mailbox (default RabbitMQ)
- ``JmsBasedMailbox`` -- JMS based mailbox (default ActiveMQ)
- ``CassandraBasedMailbox`` -- Cassandra based mailbox
- ``CamelBasedMailbox`` -- Camel based mailbox
- ``SqlBasedMailbox`` -- SQL based mailbox for general RDBMS (Postgres, MySQL, Oracle etc.)
Let us know if you have a wish for a certain priority order.
.. _DurableMailbox.General:
@ -57,7 +61,7 @@ The durable mailboxes and their configuration options reside in the
``akka.actor.mailbox`` package.
You configure durable mailboxes through the dispatcher. The
actor is oblivious to which type of mailbox it is using.
actor is oblivious to which type of mailbox it is using.
Here is an example in Scala:
.. includecode:: code/akka/docs/actor/mailbox/DurableMailboxDocSpec.scala
@ -186,13 +190,13 @@ MongoDB-based Durable Mailboxes
===============================
This mailbox is backed by `MongoDB <http://mongodb.org>`_.
MongoDB is a fast, lightweight and scalable document-oriented database. It contains a number of
MongoDB is a fast, lightweight and scalable document-oriented database. It contains a number of
features conducive to a fast, reliable & durable queueing mechanism, which the Akka Mailbox takes advantage of.
Akka's implementations of MongoDB mailboxes are built on top of the purely asynchronous MongoDB driver
(often known as `Hammersmith <http://github.com/bwmcadams/hammersmith>`_ and ``com.mongodb.async``)
and as such are purely callback based with a Netty network layer. This makes them extremely fast &
lightweight versus building on other MongoDB implementations such as
Akka's implementations of MongoDB mailboxes are built on top of the purely asynchronous MongoDB driver
(often known as `Hammersmith <http://github.com/bwmcadams/hammersmith>`_ and ``com.mongodb.async``)
and as such are purely callback based with a Netty network layer. This makes them extremely fast &
lightweight versus building on other MongoDB implementations such as
`mongo-java-driver <http://github.com/mongodb/mongo-java-driver>`_ and `Casbah <http://github.com/mongodb/casbah>`_.
You configure durable mailboxes through the dispatcher, as described in
@ -206,15 +210,15 @@ Java::
akka.actor.mailbox.DurableMailboxType.mongoDurableMailboxType()
You will need to configure the URI for the MongoDB server, using the URI Format specified in the
You will need to configure the URI for the MongoDB server, using the URI Format specified in the
`MongoDB Documentation <http://www.mongodb.org/display/DOCS/Connections>`_. This is done in
the ``akka.actor.mailbox.mongodb`` section in the :ref:`configuration`.
.. literalinclude:: ../../akka-durable-mailboxes/akka-mongo-mailbox/src/main/resources/reference.conf
:language: none
You must specify a hostname (and optionally port) and at *least* a Database name. If you specify a
collection name, it will be used as a 'prefix' for the collections Akka creates to store mailbox messages.
You must specify a hostname (and optionally port) and at *least* a Database name. If you specify a
collection name, it will be used as a 'prefix' for the collections Akka creates to store mailbox messages.
Otherwise, collections will be prefixed with ``mailbox.``
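For example, pointing the mailbox at a hypothetical database ``mydb`` with the
collection prefix ``mymailbox`` on the default MongoDB port could look like this
in ``application.conf``::
  akka.actor.mailbox.mongodb.uri = "mongodb://localhost:27017/mydb.mymailbox"
(The database name and collection prefix above are placeholders, not defaults.)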
It is also possible to configure the timeout thresholds for Read and Write operations in the ``timeout`` block.

View file

@ -26,11 +26,11 @@ command (on a unix-based system):
bin/akka sample.kernel.hello.HelloKernel
Use Ctrl-C to interrupt and exit the microkernel.
Use ``Ctrl-C`` to interrupt and exit the microkernel.
On a Windows machine you can also use the ``bin/akka.bat`` script.
The code for the Hello Kernel example (see the HelloKernel class for an example
The code for the Hello Kernel example (see the ``HelloKernel`` class for an example
of creating a Bootable):
.. includecode:: ../../akka-samples/akka-sample-hello-kernel/src/main/scala/sample/kernel/hello/HelloKernel.scala

View file

@ -5,4 +5,7 @@
Spring Integration
####################
The Akka Spring module has not been migrated to Akka 2.0-SNAPSHOT yet.
The Akka Spring module has not been migrated to Akka 2.0-SNAPSHOT yet.
It might not make it into Akka 2.0 final but will hopefully be
introduced in an upcoming release.

View file

@ -1,9 +1,10 @@
.. _support:
`Support <http://typesafe.com>`__
`Commercial Support <http://typesafe.com>`__
============================================
`Typesafe <http://typesafe.com>`_
Commercial support is provided by `Typesafe <http://typesafe.com>`_.
Akka is now part of the `Typesafe Stack <http://typesafe.com/stack>`_.
`Mailing List <http://groups.google.com/group/akka-user>`_
==========================================================

View file

@ -10,11 +10,7 @@ Akka Snapshot
=============
Automatically published Scaladoc API for the latest SNAPSHOT version of Akka can
be found here:
- Akka - http://akka.io/api/akka/snapshot
- Akka Modules - http://akka.io/api/akka-modules/snapshot
be found here: http://akka.io/api/akka/snapshot
Release Versions

View file

@ -12,8 +12,6 @@ Scala API
scheduler
futures
dataflow
agents
transactors
fault-tolerance
dispatchers
routing

View file

@ -10,7 +10,8 @@ akka {
mailbox {
mongodb {
# Any specified collection name will be used as a prefix for collections that use durable mongo mailboxes
# Any specified collection name will be used as a prefix for
# collections that use durable mongo mailboxes.
# Follow Mongo URI Spec - http://www.mongodb.org/display/DOCS/Connections
uri = "mongodb://localhost/akka.mailbox"

View file

@ -13,20 +13,22 @@ akka {
default {
# if this is set to a valid remote address, the named actor will be deployed at that node
# e.g. "akka://sys@host:port"
# if this is set to a valid remote address, the named actor will be deployed
# at that node e.g. "akka://sys@host:port"
remote = ""
target {
# A list of hostnames and ports for instantiating the children of a non-direct router
# A list of hostnames and ports for instantiating the children of a
# non-direct router
# The format should be on "akka://sys@host:port", where:
# - sys is the remote actor system name
# - hostname can be either hostname or IP address the remote actor should connect to
# - hostname can be either hostname or IP address the remote actor
# should connect to
# - port should be the port for the remote server on the other node
# The number of actor instances to be spawned is still taken from the nr-of-instances
# setting as for local routers; the instances will be distributed round-robin among the
# given nodes.
# The number of actor instances to be spawned is still taken from the
# nr-of-instances setting as for local routers; the instances will be
# distributed round-robin among the given nodes.
nodes = []
}
@ -53,9 +55,10 @@ akka {
failure-detector {
# defines the failure detector threshold
# A low threshold is prone to generate many wrong suspicions but ensures a
# quick detection in the event of a real crash. Conversely, a high threshold
# generates fewer mistakes but needs more time to detect actual crashes
# A low threshold is prone to generate many wrong suspicions but ensures
# a quick detection in the event of a real crash. Conversely, a high
# threshold generates fewer mistakes but needs more time to detect
# actual crashes
threshold = 8
max-sample-size = 1000
@ -73,10 +76,12 @@ akka {
}
server {
# The hostname or ip to bind the remoting to, InetAddress.getLocalHost.getHostAddress is used if empty
# The hostname or ip to bind the remoting to,
# InetAddress.getLocalHost.getHostAddress is used if empty
hostname = ""
# The default remote server port clients should connect to. Default is 2552 (AKKA)
# The default remote server port clients should connect to.
# Default is 2552 (AKKA)
port = 2552
# Increase this if you want to be able to send messages with large payloads
@ -85,10 +90,12 @@ akka {
# Timeout duration
connection-timeout = 120s
# Should the remote server require that it peers share the same secure-cookie (defined in the 'remote' section)?
# Should the remote server require that its peers share the same secure-cookie
# (defined in the 'remote' section)?
require-cookie = off
# Enable untrusted mode for full security of server managed actors, allows untrusted clients to connect.
# Enable untrusted mode for full security of server managed actors, allowing
# untrusted clients to connect.
untrusted-mode = off
# Sets the size of the connection backlog
@ -97,11 +104,13 @@ akka {
client {
buffering {
# Should message buffering on remote client error be used (buffer flushed on successful reconnect)
# Should message buffering on remote client error be used (buffer flushed
# on successful reconnect)
retry-message-send-on-failure = off
# If negative (or zero) then an unbounded mailbox is used (default)
# If positive then a bounded mailbox is used and the capacity is set using the property
# If positive then a bounded mailbox is used and the capacity is set using
# the property
capacity = -1
}
reconnect-delay = 5s

View file

@ -7,10 +7,14 @@
akka {
test {
# factor by which to scale timeouts during tests, e.g. to account for shared build system load
# factor by which to scale timeouts during tests, e.g. to account for shared
# build system load
timefactor = 1.0
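# For example, a (hypothetical) override for a heavily loaded CI machine,
# which would triple all testkit timeouts:
#   timefactor = 3.0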
# duration of EventFilter.intercept waits after the block is finished until all required messages are received
# duration that EventFilter.intercept waits after the block is finished until
# all required messages are received
filter-leeway = 3s
# duration to wait in expectMsg and friends outside of within() block by default
single-expect-default = 3s
}

View file

@ -79,12 +79,11 @@ public class Pi {
public void onReceive(Object message) {
if (message instanceof Work) {
Work work = (Work) message;
double result = calculatePiFor(work.getStart(), work.getNrOfElements());
getSender().tell(new Result(result));
} else throw new IllegalArgumentException("Unknown message [" + message + "]");
} else {
throw new IllegalArgumentException("Unknown message [" + message + "]");
}
}
}
//#worker
@ -108,9 +107,9 @@ public class Pi {
this.latch = latch;
//#create-router
router = this.getContext().actorOf(new Props().withCreator(
Worker.class).withRouter(new RoundRobinRouter(nrOfWorkers)),
"pi");
router = this.getContext().actorOf(
new Props(Worker.class).withRouter(new RoundRobinRouter(nrOfWorkers)),
"pi");
//#create-router
}
@ -139,8 +138,8 @@ public class Pi {
@Override
public void postStop() {
System.out.println(String.format(
"\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis",
pi, (System.currentTimeMillis() - start)));
"\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis",
pi, (System.currentTimeMillis() - start)));
latch.countDown();
}
}

View file

@ -42,7 +42,8 @@ object Pi extends App {
//#worker
//#master
class Master(nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch)
class Master(
nrOfWorkers: Int, nrOfMessages: Int, nrOfElements: Int, latch: CountDownLatch)
extends Actor {
var pi: Double = _
@ -50,7 +51,8 @@ object Pi extends App {
var start: Long = _
//#create-router
val router = context.actorOf(Props[Worker].withRouter(RoundRobinRouter(nrOfWorkers)), "pi")
val router = context.actorOf(
Props[Worker].withRouter(RoundRobinRouter(nrOfWorkers)), "pi")
//#create-router
//#master-receive
@ -72,9 +74,8 @@ object Pi extends App {
}
override def postStop() {
println(
"\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis"
.format(pi, (System.currentTimeMillis - start)))
println("\n\tPi estimate: \t\t%s\n\tCalculation time: \t%s millis"
.format(pi, (System.currentTimeMillis - start)))
latch.countDown()
}
}