Merge branch 'master' into wip-2053d-actorbased-remote-drewhk
Conflicts:
    akka-docs/rst/java/code/docs/serialization/SerializationDocTestBase.java
    akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala
    akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala
    akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala
commit 55be17419e
213 changed files with 5004 additions and 1940 deletions
@@ -234,8 +234,8 @@ If the current actor behavior does not match a received message,
 :meth:`unhandled` is called, which by default publishes an
 ``akka.actor.UnhandledMessage(message, sender, recipient)`` on the actor
 system’s event stream (set configuration item
-``akka.event-handler-startup-timeout`` to ``true`` to have them converted into
-actual Debug messages)
+``akka.actor.debug.unhandled`` to ``on`` to have them converted into
+actual Debug messages).

 In addition, it offers:

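For context on the corrected setting: unhandled messages can also be observed
directly by subscribing to the event stream; a minimal sketch (actor and
system names are illustrative, not from the commit):

.. code-block:: scala

   import akka.actor.{ Actor, ActorSystem, Props, UnhandledMessage }

   class Listener extends Actor {
     def receive = {
       case UnhandledMessage(msg, snd, rcp) ⇒
         println(s"unhandled: $msg sent by $snd to $rcp")
     }
   }

   val system = ActorSystem("demo")
   val listener = system.actorOf(Props[Listener], "unhandled-listener")
   system.eventStream.subscribe(listener, classOf[UnhandledMessage])
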
@@ -384,6 +384,8 @@ URI options

 The following URI options are supported:

+.. tabularcolumns:: |l|l|l|L|
+
 +--------------+----------+---------+-------------------------------------------+
 | Name         | Type     | Default | Description                               |
 +==============+==========+=========+===========================================+

@@ -189,6 +189,15 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit {
     }
     //#fsm-code-elided

+    "demonstrate NullFunction" in {
+      class A extends Actor with FSM[Int, Null] {
+        val SomeState = 0
+        //#NullFunction
+        when(SomeState)(FSM.NullFunction)
+        //#NullFunction
+      }
+    }
+
     "batch correctly" in {
       val buncher = system.actorOf(Props(new Buncher))
       buncher ! SetTarget(testActor)

@@ -417,7 +417,7 @@ class FutureDocSpec extends AkkaSpec {
     val delayed = after(200 millis, using = system.scheduler)(Future.failed(
       new IllegalStateException("OHNOES")))
     val future = Future { Thread.sleep(1000); "foo" }
-    val result = future either delayed
+    val result = Future firstCompletedOf Seq(future, delayed)
     //#after
     intercept[IllegalStateException] { Await.result(result, 2 second) }
   }

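The removed ``either`` combinator gives way to the standard library's
``Future.firstCompletedOf``; a minimal sketch of the resulting timeout pattern
(durations and the failure message are illustrative):

.. code-block:: scala

   import scala.concurrent.Future
   import scala.concurrent.duration._
   import akka.actor.ActorSystem
   import akka.pattern.after

   val system = ActorSystem("demo")
   import system.dispatcher // implicit ExecutionContext

   // a future that fails after 200 millis, acting as a timeout signal
   val timeout = after(200.millis, using = system.scheduler)(
     Future.failed(new IllegalStateException("timed out")))
   val work = Future { Thread.sleep(1000); "foo" }
   // completes with whichever of the two finishes first
   val result = Future.firstCompletedOf(Seq(work, timeout))
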
@@ -2,19 +2,6 @@
  * Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
  */

-//#extract-transport
-package object akka {
-  // needs to be inside the akka package because accessing unsupported API !
-  def transportOf(system: actor.ExtendedActorSystem): remote.RemoteTransport =
-    system.provider match {
-      case r: remote.RemoteActorRefProvider ⇒ r.transport
-      case _ ⇒
-        throw new UnsupportedOperationException(
-          "this method requires the RemoteActorRefProvider to be configured")
-    }
-}
-//#extract-transport
-
 package docs.serialization {

   import org.scalatest.matchers.MustMatchers

@@ -216,7 +203,7 @@ package docs.serialization {
   object ExternalAddress extends ExtensionKey[ExternalAddressExt]

   class ExternalAddressExt(system: ExtendedActorSystem) extends Extension {
-    def addressForAkka: Address = akka.transportOf(system).defaultAddress
+    def addressForAkka: Address = system.provider.getDefaultAddress
   }

   def serializeAkkaDefault(ref: ActorRef): String =

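The replacement line uses the new public ``getDefaultAddress`` instead of the
removed ``akka`` package bridge; a sketch of the extension in use (system name
is illustrative):

.. code-block:: scala

   import akka.actor._

   class ExternalAddressExt(system: ExtendedActorSystem) extends Extension {
     def addressForAkka: Address = system.provider.getDefaultAddress
   }
   object ExternalAddress extends ExtensionKey[ExternalAddressExt]

   val system = ActorSystem("demo")
   // the provider’s default address (includes host/port when remoting is on)
   val addr: Address = ExternalAddress(system).addressForAkka
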
@@ -110,11 +110,11 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
     fsm.setState(stateName = 1)
     assert(fsm.stateName == 1)

-    assert(fsm.timerActive_?("test") == false)
+    assert(fsm.isTimerActive("test") == false)
     fsm.setTimer("test", 12, 10 millis, true)
-    assert(fsm.timerActive_?("test") == true)
+    assert(fsm.isTimerActive("test") == true)
     fsm.cancelTimer("test")
-    assert(fsm.timerActive_?("test") == false)
+    assert(fsm.isTimerActive("test") == false)
     //#test-fsm-ref
   }

@@ -232,7 +232,7 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
     //#test-probe-forward
   }

-  "demonstrate " in {
+  "demonstrate calling thread dispatcher" in {
     //#calling-thread-dispatcher
     import akka.testkit.CallingThreadDispatcher
     val ref = system.actorOf(Props[MyActor].withDispatcher(CallingThreadDispatcher.Id))

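The renamed test exercises ``CallingThreadDispatcher``, which runs message
processing on the thread that sends the message; a minimal sketch (the
``Echo`` actor is illustrative):

.. code-block:: scala

   import akka.actor.{ Actor, ActorSystem, Props }
   import akka.testkit.CallingThreadDispatcher

   class Echo extends Actor {
     def receive = { case msg ⇒ println(s"got $msg") }
   }

   val system = ActorSystem("demo")
   val ref = system.actorOf(
     Props[Echo].withDispatcher(CallingThreadDispatcher.Id))
   ref ! "hello" // processed synchronously, before this send returns
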
@@ -6,8 +6,8 @@ package docs.zeromq
 import language.postfixOps

 import scala.concurrent.duration._
-import scala.collection.immutable
 import akka.actor.{ Actor, Props }
+import akka.util.ByteString
 import akka.testkit._
 import akka.zeromq.{ ZeroMQVersion, ZeroMQExtension, SocketType, Bind }
 import java.text.SimpleDateFormat

@@ -29,7 +29,8 @@ object ZeromqDocSpec {

   class HealthProbe extends Actor {

-    val pubSocket = ZeroMQExtension(context.system).newSocket(SocketType.Pub, Bind("tcp://127.0.0.1:1235"))
+    val pubSocket = ZeroMQExtension(context.system).newSocket(SocketType.Pub,
+      Bind("tcp://127.0.0.1:1235"))
     val memory = ManagementFactory.getMemoryMXBean
     val os = ManagementFactory.getOperatingSystemMXBean
     val ser = SerializationExtension(context.system)

@@ -52,12 +53,12 @@ object ZeromqDocSpec {
       val heapPayload = ser.serialize(Heap(timestamp, currentHeap.getUsed,
         currentHeap.getMax)).get
       // the first frame is the topic, second is the message
-      pubSocket ! ZMQMessage(immutable.Seq(Frame("health.heap"), Frame(heapPayload)))
+      pubSocket ! ZMQMessage(ByteString("health.heap"), ByteString(heapPayload))

       // use akka SerializationExtension to convert to bytes
       val loadPayload = ser.serialize(Load(timestamp, os.getSystemLoadAverage)).get
       // the first frame is the topic, second is the message
-      pubSocket ! ZMQMessage(immutable.Seq(Frame("health.load"), Frame(loadPayload)))
+      pubSocket ! ZMQMessage(ByteString("health.load"), ByteString(loadPayload))
     }
   }
   //#health

@@ -72,14 +73,14 @@ object ZeromqDocSpec {

     def receive = {
       // the first frame is the topic, second is the message
-      case m: ZMQMessage if m.firstFrameAsString == "health.heap" ⇒
-        val Heap(timestamp, used, max) = ser.deserialize(m.payload(1),
+      case m: ZMQMessage if m.frames(0).utf8String == "health.heap" ⇒
+        val Heap(timestamp, used, max) = ser.deserialize(m.frames(1).toArray,
           classOf[Heap]).get
         log.info("Used heap {} bytes, at {}", used,
           timestampFormat.format(new Date(timestamp)))

-      case m: ZMQMessage if m.firstFrameAsString == "health.load" ⇒
-        val Load(timestamp, loadAverage) = ser.deserialize(m.payload(1),
+      case m: ZMQMessage if m.frames(0).utf8String == "health.load" ⇒
+        val Load(timestamp, loadAverage) = ser.deserialize(m.frames(1).toArray,
           classOf[Load]).get
         log.info("Load average {}, at {}", loadAverage,
           timestampFormat.format(new Date(timestamp)))

@@ -97,9 +98,8 @@ object ZeromqDocSpec {

     def receive = {
       // the first frame is the topic, second is the message
-      case m: ZMQMessage if m.firstFrameAsString == "health.heap" ⇒
-        val Heap(timestamp, used, max) = ser.deserialize(m.payload(1),
-          classOf[Heap]).get
+      case m: ZMQMessage if m.frames(0).utf8String == "health.heap" ⇒
+        val Heap(timestamp, used, max) = ser.deserialize(m.frames(1).toArray, classOf[Heap]).get
         if ((used.toDouble / max) > 0.9) count += 1
         else count = 0
         if (count > 10) log.warning("Need more memory, using {} %",

@@ -146,7 +146,7 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") {

     val payload = Array.empty[Byte]
     //#pub-topic
-    pubSocket ! ZMQMessage(Frame("foo.bar"), Frame(payload))
+    pubSocket ! ZMQMessage(ByteString("foo.bar"), ByteString(payload))
     //#pub-topic

     system.stop(subSocket)

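The ZeroMQ hunks above all track one API change: ``ZMQMessage`` now carries
plain ``akka.util.ByteString`` frames instead of ``Frame`` objects, and
accessors such as ``firstFrameAsString`` and ``payload`` give way to
``frames``. A condensed sketch of the new shape (topic and payload are
illustrative):

.. code-block:: scala

   import akka.util.ByteString
   import akka.zeromq.ZMQMessage

   // publishing: first frame is the topic, second the payload
   val msg = ZMQMessage(ByteString("health.heap"), ByteString(Array[Byte](1, 2, 3)))

   // receiving: frames are ByteStrings, so decode explicitly
   val topic: String = msg.frames(0).utf8String
   val payload: Array[Byte] = msg.frames(1).toArray
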
@@ -187,8 +187,9 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") {

   def checkZeroMQInstallation() = try {
     ZeroMQExtension(system).version match {
-      case ZeroMQVersion(2, 1, _) ⇒ Unit
-      case version                ⇒ pending
+      case ZeroMQVersion(2, x, _) if x >= 1 ⇒ Unit
+      case ZeroMQVersion(y, _, _) if y >= 3 ⇒ Unit
+      case version                          ⇒ pending
     }
   } catch {
     case e: LinkageError ⇒ pending

@@ -24,9 +24,6 @@ sample as it is easy to follow the log output to understand what is happening in

 fault-tolerance-sample

-.. includecode:: code/docs/actor/FaultHandlingDocSample.scala#all
-   :exclude: imports,messages,dummydb
-
 Creating a Supervisor Strategy
 ------------------------------

@@ -179,6 +179,18 @@ demonstrated below:
 The :class:`Event(msg: Any, data: D)` case class is parameterized with the data
 type held by the FSM for convenient pattern matching.

+.. warning::
+
+   It is required that you define handlers for each of the possible FSM states,
+   otherwise there will be failures when trying to switch to undeclared states.
+
+It is recommended practice to declare the states as objects extending a
+sealed trait and then verify that there is a ``when`` clause for each of the
+states. If you want to leave the handling of a state “unhandled” (more below),
+it still needs to be declared like this:
+
+.. includecode:: code/docs/actor/FSMDocSpec.scala#NullFunction
+
 Defining the Initial State
 --------------------------

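The practice recommended by the new warning, sketched end to end (state names
and the trigger message are illustrative, not from the commit):

.. code-block:: scala

   import akka.actor.{ Actor, FSM }

   sealed trait State
   case object Idle extends State
   case object Active extends State

   class Example extends Actor with FSM[State, Unit] {
     startWith(Idle, ())
     when(Idle) {
       case Event("start", _) ⇒ goto(Active)
     }
     // declared so the FSM may switch to it, but intentionally left unhandled
     when(Active)(FSM.NullFunction)
     initialize
   }
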
@@ -359,7 +371,7 @@ which is guaranteed to work immediately, meaning that the scheduled message
 will not be processed after this call even if the timer already fired and
 queued it. The status of any timer may be inquired with

-:func:`timerActive_?(name)`
+:func:`isTimerActive(name)`

 These named timers complement state timeouts because they are not affected by
 intervening reception of other messages.

@@ -138,9 +138,9 @@ Receiving messages from the ``IOManager``:
 IO.Iteratee
 ^^^^^^^^^^^

-Included with Akka's IO support is a basic implementation of ``Iteratee``\s. ``Iteratee``\s are an effective way of handling a stream of data without needing to wait for all the data to arrive. This is especially useful when dealing with non blocking IO since we will usually receive data in chunks which may not include enough information to process, or it may contain much more data then we currently need.
+Included with Akka's IO support is a basic implementation of ``Iteratee``\s. ``Iteratee``\s are an effective way of handling a stream of data without needing to wait for all the data to arrive. This is especially useful when dealing with non blocking IO since we will usually receive data in chunks which may not include enough information to process, or it may contain much more data than we currently need.

-This ``Iteratee`` implementation is much more basic then what is usually found. There is only support for ``ByteString`` input, and enumerators aren't used. The reason for this limited implementation is to reduce the amount of explicit type signatures needed and to keep things simple. It is important to note that Akka's ``Iteratee``\s are completely optional, incoming data can be handled in any way, including other ``Iteratee`` libraries.
+This ``Iteratee`` implementation is much more basic than what is usually found. There is only support for ``ByteString`` input, and enumerators aren't used. The reason for this limited implementation is to reduce the amount of explicit type signatures needed and to keep things simple. It is important to note that Akka's ``Iteratee``\s are completely optional, incoming data can be handled in any way, including other ``Iteratee`` libraries.

 ``Iteratee``\s work by processing the data that it is given and returning either the result (with any unused input) or a continuation if more input is needed. They are monadic, so methods like ``flatMap`` can be used to pass the result of an ``Iteratee`` to another.

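The monadic composition described in the last paragraph looks like this in
practice, using the module's ``IO.take`` combinator (the one-byte
length-prefix framing is an illustrative protocol, not from the docs):

.. code-block:: scala

   import akka.actor.IO
   import akka.util.ByteString

   // read one length byte, then that many payload bytes
   val readFrame: IO.Iteratee[ByteString] =
     for {
       header ← IO.take(1)
       body ← IO.take(header.head & 0xff)
     } yield body
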
@@ -204,7 +204,7 @@ Following the path we read in the query (if it exists):
 .. includecode:: code/docs/io/HTTPServer.scala
    :include: read-query

-It is much simpler then reading the path since we aren't doing any parsing of the query since there is no standard format of the query string.
+It is much simpler than reading the path since we aren't doing any parsing of the query since there is no standard format of the query string.

 Both the path and query used the ``readUriPart`` ``Iteratee``, which is next:

@@ -19,11 +19,7 @@ Put your application jar in the ``deploy`` directory to have it automatically
 loaded.

 To start the kernel use the scripts in the ``bin`` directory, passing the boot
-classes for your application.
-
-There is a simple example of an application setup for running with the
-microkernel included in the akka download. This can be run with the following
-command (on a unix-based system):
+classes for your application. Example command (on a unix-based system):

 .. code-block:: none

@@ -129,6 +129,15 @@ actor systems has to have a JAR containing the class.
 most cases is not serializable. It is best to create a factory method in the
 companion object of the actor’s class.

+.. note::
+
+   You can use asterisks as wildcard matches for the actor paths, so you could specify:
+   ``/*/sampleActor`` and that would match all ``sampleActor`` on that level in the hierarchy.
+   You can also use wildcard in the last position to match all actors at a certain level:
+   ``/someParent/*``. Non-wildcard matches always have higher priority to match than wildcards, so:
+   ``/foo/bar`` is considered **more specific** than ``/foo/*`` and only the highest priority match is used.
+   Please note that it **cannot** be used to partially match section, like this: ``/foo*/bar``, ``/f*o/bar`` etc.
+
 .. warning::

    *Caveat:* Remote deployment ties both systems together in a tight fashion,

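A sketch of a deployment section exercising the wildcard rules from the new
note (system name, host and port are illustrative), parsed here with the
Typesafe Config API:

.. code-block:: scala

   import com.typesafe.config.ConfigFactory

   val deployment = ConfigFactory.parseString("""
     akka.actor.deployment {
       # matches every sampleActor one level below the user guardian
       "/*/sampleActor" {
         remote = "akka://sampleActorSystem@127.0.0.1:2553"
       }
     }
     """)
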
@@ -66,7 +66,7 @@ In addition to being able to supply looked-up remote actors as routees, you can
 make the router deploy its created children on a set of remote hosts; this will
 be done in round-robin fashion. In order to do that, wrap the router
 configuration in a :class:`RemoteRouterConfig`, attaching the remote addresses of
-the nodes to deploy to. Naturally, this requires your to include the
+the nodes to deploy to. Naturally, this requires you to include the
 ``akka-remote`` module on your classpath:

 .. includecode:: code/docs/routing/RouterViaProgramExample.scala#remoteRoutees

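The referenced ``remoteRoutees`` include has roughly this shape (host names,
ports and the ``Echo`` actor are illustrative; assumes the 2.1-era
``akka.remote.routing.RemoteRouterConfig``):

.. code-block:: scala

   import akka.actor.{ Actor, ActorSystem, Address, Props }
   import akka.remote.routing.RemoteRouterConfig
   import akka.routing.RoundRobinRouter

   class Echo extends Actor {
     def receive = { case msg ⇒ sender ! msg }
   }

   val system = ActorSystem("demo")
   val addresses = Seq(
     Address("akka", "remotesys", "otherhost", 1234),
     Address("akka", "remotesys", "anotherhost", 1234))
   // children are deployed round-robin across the given nodes
   val routerRemote = system.actorOf(Props[Echo].withRouter(
     RemoteRouterConfig(RoundRobinRouter(5), addresses)))
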
@@ -430,7 +430,7 @@ Configured Custom Router

 It is possible to define configuration properties for custom routers. In the ``router`` property of the deployment
 configuration you define the fully qualified class name of the router class. The router class must extend
-``akka.routing.RouterConfig`` and and have constructor with ``com.typesafe.config.Config`` parameter.
+``akka.routing.RouterConfig`` and have constructor with one ``com.typesafe.config.Config`` parameter.
 The deployment section of the configuration is passed to the constructor.

 Custom Resizer

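A sketch of a matching deployment section (router class name and actor path
are hypothetical); the whole ``/myrouter`` block is what gets passed to that
``Config`` constructor:

.. code-block:: scala

   import com.typesafe.config.{ Config, ConfigFactory }

   val deployment: Config = ConfigFactory.parseString("""
     akka.actor.deployment {
       /myrouter {
         # fully qualified name of a class extending akka.routing.RouterConfig
         router = "docs.routing.MyCustomRouter"
         nr-of-instances = 5
       }
     }
     """)
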
@@ -138,24 +138,12 @@ concrete address handy you can create a dummy one for the right protocol using
 ``Address(protocol, "", "", 0)`` (assuming that the actual transport used is as
 lenient as Akka’s RemoteActorRefProvider).

-There is a possible simplification available if you are just using the default
-:class:`NettyRemoteTransport` with the :meth:`RemoteActorRefProvider`, which is
-enabled by the fact that this combination has just a single remote address.
-This approach relies on internal API, which means that it is not guaranteed to
-be supported in future versions. To make this caveat more obvious, some bridge
-code in the ``akka`` package is required to make it work:
-
-.. includecode:: code/docs/serialization/SerializationDocSpec.scala
-   :include: extract-transport
-
-And with this, the address extraction goes like this:
+There is also a default remote address which is the one used by cluster support
+(and typical systems have just this one); you can get it like this:

 .. includecode:: code/docs/serialization/SerializationDocSpec.scala
    :include: external-address-default

-This solution has to be adapted once other providers are used (like the planned
-extensions for clustering).
-
 Deep serialization of Actors
 ----------------------------

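Combined with the ``ExternalAddress`` extension shown earlier, the default
address can be baked into a serialized ref; a minimal sketch (assumes
``ActorPath.toStringWithAddress``, available in this version):

.. code-block:: scala

   import akka.actor.{ ActorRef, ActorSystem, Address }

   // render a ref so that a remote system can resolve it
   def serializeAkkaDefault(system: ActorSystem, ref: ActorRef): String =
     ref.path.toStringWithAddress(ExternalAddress(system).addressForAkka)
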