Reformating configuration and examples for PDF (Scala, and leftovers). See #2413
parent 309bb53d98
commit 08ef942242
46 changed files with 330 additions and 191 deletions
@@ -40,7 +40,8 @@ class CustomRouteSpec extends AkkaSpec {
val target = system.actorOf(Props.empty)
val router = system.actorOf(Props.empty.withRouter(new MyRouter(target)))
val route = ExtractRoute(router)
val r = Await.result(router.ask(CurrentRoutees)(1 second).mapTo[RouterRoutees], 1 second)
val r = Await.result(router.ask(CurrentRoutees)(1 second).
mapTo[RouterRoutees], 1 second)
r.routees.size must be(1)
route(testActor -> "hallo") must be(Seq(Destination(testActor, target)))
route(testActor -> 12) must be(Seq(Destination(testActor, r.routees.head)))
@@ -626,8 +626,10 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
//#crRoute
def createRoute(routeeProvider: RouteeProvider): Route = {
val democratActor = routeeProvider.context.actorOf(Props(new DemocratActor()), "d")
val republicanActor = routeeProvider.context.actorOf(Props(new RepublicanActor()), "r")
val democratActor =
routeeProvider.context.actorOf(Props(new DemocratActor()), "d")
val republicanActor =
routeeProvider.context.actorOf(Props(new RepublicanActor()), "r")
val routees = Vector[ActorRef](democratActor, republicanActor)

//#crRegisterRoutees
@@ -205,9 +205,9 @@ akka {
# Min number of threads to cap factor-based parallelism number to
parallelism-min = 8

# The parallelism factor is used to determine thread pool size using the following formula:
# ceil(available processors * factor). Resulting size is then bounded by the parallelism-min
# and parallelism-max values.
# The parallelism factor is used to determine thread pool size using the
# following formula: ceil(available processors * factor). Resulting size
# is then bounded by the parallelism-min and parallelism-max values.
parallelism-factor = 3.0

# Max number of threads to cap factor-based parallelism number to
@@ -222,22 +222,25 @@ akka {
# Min number of threads to cap factor-based core number to
core-pool-size-min = 8

# The core pool size factor is used to determine thread pool core size using the following formula:
# ceil(available processors * factor). Resulting size is then bounded by the core-pool-size-min
# and core-pool-size-max values.
# The core pool size factor is used to determine thread pool core size
# using the following formula: ceil(available processors * factor).
# Resulting size is then bounded by the core-pool-size-min and
# core-pool-size-max values.
core-pool-size-factor = 3.0

# Max number of threads to cap factor-based number to
core-pool-size-max = 64

# Minimum number of threads to cap factor-based max number to (if using a bounded task queue)
# Minimum number of threads to cap factor-based max number to (if using
# a bounded task queue)
max-pool-size-min = 8

# Max no of threads (if using a bounded task queue) is determined by calculating:
# ceil(available processors * factor)
# Max no of threads (if using a bounded task queue) is determined by
# calculating: ceil(available processors * factor)
max-pool-size-factor = 3.0

# Max number of threads to cap factor-based max number to (if using a bounded task queue)
# Max number of threads to cap factor-based max number to (if using a
# bounded task queue)
max-pool-size-max = 64

# Specifies the bounded capacity of the task queue (< 1 == unbounded)
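The factor-based sizing described in the config comments above can be reproduced in a few lines of Scala. This is a hedged sketch of the documented rule (ceil(available processors * factor), clamped between the min and max settings), not code from this commit; the example numbers are assumptions:

    // Sketch of the documented sizing rule for the dispatcher thread pools.
    val availableProcessors = Runtime.getRuntime.availableProcessors
    val factor = 3.0 // parallelism-factor / core-pool-size-factor
    val min = 8      // parallelism-min / core-pool-size-min
    val max = 64     // parallelism-max / core-pool-size-max
    // ceil(available processors * factor), then bounded by min and max
    val poolSize = math.min(math.max(math.ceil(availableProcessors * factor).toInt, min), max)
    // e.g. 8 cores * 3.0 = 24 threads, which lies between 8 and 64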
@@ -323,10 +326,10 @@ akka {
bytes = "akka.serialization.ByteArraySerializer"
}

# Class to Serializer binding. You only need to specify the name of an interface
# or abstract base class of the messages. In case of ambiguity it is using
# the most specific configured class, or giving a warning and choosing the
# “first” one.
# Class to Serializer binding. You only need to specify the name of an
# interface or abstract base class of the messages. In case of ambiguity it
# is using the most specific configured class, or giving a warning and
# choosing the “first” one.
#
# To disable one of the default serializers, assign its class to "none", like
# "java.io.Serializable" = none
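As a side note on the binding comment above, a hedged Scala sketch of how a configured serializer is typically looked up at runtime; `system` and `myMessage` are placeholders, not part of this commit:

    import akka.serialization.SerializationExtension

    val serialization = SerializationExtension(system) // assumes an ActorSystem in scope
    // resolves the most specific serializer configured in serialization-bindings
    val serializer = serialization.findSerializerFor(myMessage)
    val bytes = serializer.toBinary(myMessage)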
@@ -49,7 +49,8 @@ trait Scheduler {
* Scala API
*/
def schedule(
initialDelay: FiniteDuration, interval: FiniteDuration)(f: ⇒ Unit)(implicit executor: ExecutionContext): Cancellable
initialDelay: FiniteDuration,
interval: FiniteDuration)(f: ⇒ Unit)(implicit executor: ExecutionContext): Cancellable

/**
* Schedules a function to be run repeatedly with an initial delay and
@@ -60,7 +61,9 @@ trait Scheduler {
* Java API
*/
def schedule(
initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable
initialDelay: FiniteDuration,
interval: FiniteDuration,
runnable: Runnable)(implicit executor: ExecutionContext): Cancellable

/**
* Schedules a Runnable to be run once with a delay, i.e. a time period that
@@ -68,7 +71,9 @@ trait Scheduler {
*
* Java & Scala API
*/
def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable
def scheduleOnce(
delay: FiniteDuration,
runnable: Runnable)(implicit executor: ExecutionContext): Cancellable

/**
* Schedules a message to be sent once with a delay, i.e. a time period that has
@@ -76,7 +81,10 @@ trait Scheduler {
*
* Java & Scala API
*/
def scheduleOnce(delay: FiniteDuration, receiver: ActorRef, message: Any)(implicit executor: ExecutionContext): Cancellable
def scheduleOnce(
delay: FiniteDuration,
receiver: ActorRef,
message: Any)(implicit executor: ExecutionContext): Cancellable

/**
* Schedules a function to be run once with a delay, i.e. a time period that has
@@ -84,7 +92,8 @@ trait Scheduler {
*
* Scala API
*/
def scheduleOnce(delay: FiniteDuration)(f: ⇒ Unit)(implicit executor: ExecutionContext): Cancellable
def scheduleOnce(
delay: FiniteDuration)(f: ⇒ Unit)(implicit executor: ExecutionContext): Cancellable
}
//#scheduler
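For reference, a minimal usage sketch of the Scala API whose signatures are reformatted above, assuming an ActorSystem named `system` is in scope; this is an illustration, not part of the diff:

    import scala.concurrent.util.duration._
    import system.dispatcher // implicit ExecutionContext for the scheduled task

    // run once after 50 milliseconds
    val once = system.scheduler.scheduleOnce(50 milliseconds) { println("tick") }

    // run repeatedly: first after 1 second, then every 5 seconds
    val repeated = system.scheduler.schedule(1 second, 5 seconds) { println("tock") }

    repeated.cancel() // both calls return a Cancellable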
@@ -8,7 +8,8 @@
akka {

cluster {
# Initial contact points of the cluster. Nodes to join at startup if auto-join = on.
# Initial contact points of the cluster. The nodes to join at startup if
# auto-join = on.
# Comma separated full URIs defined by a string on the form of
# "akka://system@hostname:port"
# Leave as empty if the node should be a singleton cluster.
@@ -21,16 +22,17 @@ akka {
# If seed-nodes is empty it will join itself and become a single node cluster.
auto-join = on

# Should the 'leader' in the cluster be allowed to automatically mark unreachable
# nodes as DOWN?
# Using auto-down implies that two separate clusters will automatically be formed
# in case of network partition.
# Should the 'leader' in the cluster be allowed to automatically mark
# unreachable nodes as DOWN?
# Using auto-down implies that two separate clusters will automatically be
# formed in case of network partition.
auto-down = off

# Enable or disable JMX MBeans for management of the cluster
jmx.enabled = on

# how long should the node wait before starting the periodic tasks maintenance tasks?
# how long should the node wait before starting the periodic tasks
# maintenance tasks?
periodic-tasks-initial-delay = 1s

# how often should the node send out gossip information?
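A hedged sketch of the programmatic alternative to the `seed-nodes`/`auto-join` settings above, i.e. pointing a node at a contact point from code; the host and port values are placeholders and this snippet is not part of the commit:

    import akka.actor.{ ActorSystem, Address }
    import akka.cluster.Cluster

    val system = ActorSystem("system")
    // same information as a seed-nodes entry, e.g. "akka://system@hostname:2552"
    val contactPoint = Address("akka", "system", "hostname", 2552)
    Cluster(system).join(contactPoint)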
@@ -39,8 +41,8 @@ akka {
# how often should the leader perform maintenance tasks?
leader-actions-interval = 1s

# how often should the node move nodes, marked as unreachable by the failure detector,
# out of the membership ring?
# how often should the node move nodes, marked as unreachable by the failure
# detector, out of the membership ring?
unreachable-nodes-reaper-interval = 1s

# How often the current internal stats should be published.
@@ -56,7 +58,7 @@ akka {
# If specified you need to define the settings of the actual dispatcher.
use-dispatcher = ""

# Gossip to random node with newer or older state information, if any with some
# Gossip to random node with newer or older state information, if any with
# this probability. Otherwise Gossip to any random live node.
# Probability value is between 0.0 and 1.0. 0.0 means never, 1.0 means always.
gossip-different-view-probability = 0.8
@@ -111,15 +113,16 @@ akka {
# How often a node publishes metrics information.
gossip-interval = 3s

# How quickly the exponential weighting of past data is decayed compared to new data.
# How quickly the exponential weighting of past data is decayed compared to
# new data.
# If set to 0 data streaming over time will be turned off.
# Set higher to increase the bias toward newer values
rate-of-decay = 10
}

# If the tick-duration of the default scheduler is longer than the tick-duration
# configured here a dedicated scheduler will be used for periodic tasks of the cluster,
# otherwise the default scheduler is used.
# If the tick-duration of the default scheduler is longer than the
# tick-duration configured here a dedicated scheduler will be used for
# periodic tasks of the cluster, otherwise the default scheduler is used.
# See akka.scheduler settings for more details about the HashedWheelTimer.
scheduler {
tick-duration = 33ms
@@ -32,7 +32,7 @@ Please follow these guidelines when creating public commits and writing commit m

Example::

Completed replication over BookKeeper based transaction log with configurable actor snapshotting every X message. Fixes #XXX
Completed replication over BookKeeper based transaction log. Fixes #XXX

* Details 1
* Details 2
@@ -33,18 +33,21 @@ multi-JVM testing (Simplified for clarity):
lazy val remoteTests = Project(
id = "akka-remote-tests",
base = file("akka-remote-tests"),
dependencies = Seq(remote, actorTests % "test->test", testkit % "test->test"),
dependencies = Seq(remote, actorTests % "test->test",
testkit % "test->test"),
settings = defaultSettings ++ Seq(
// disable parallel tests
parallelExecution in Test := false,
extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src =>
(name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq
(name: String) => (src ** (name + ".conf")).get.
headOption.map("-Dakka.config=" + _.absolutePath).toSeq
},
test in Test <<= (test in Test) dependsOn (test in MultiJvm)
)
) configs (MultiJvm)

lazy val buildSettings = Defaults.defaultSettings ++ SbtMultiJvm.multiJvmSettings ++ Seq(
lazy val buildSettings = Defaults.defaultSettings ++
SbtMultiJvm.multiJvmSettings ++ Seq(
organization := "com.typesafe.akka",
version := "@version@",
scalaVersion := "@scalaVersion@",
@@ -28,13 +28,15 @@ public class ActivationTestBase {
Camel camel = CamelExtension.get(system);
// get a future reference to the activation of the endpoint of the Consumer Actor
Timeout timeout = new Timeout(Duration.create(10, SECONDS));
Future<ActorRef> activationFuture = camel.activationFutureFor(producer, timeout, system.dispatcher());
Future<ActorRef> activationFuture = camel.activationFutureFor(producer,
timeout, system.dispatcher());
//#CamelActivation
//#CamelDeactivation
// ..
system.stop(producer);
// get a future reference to the deactivation of the endpoint of the Consumer Actor
Future<ActorRef> deactivationFuture = camel.deactivationFutureFor(producer, timeout, system.dispatcher());
Future<ActorRef> deactivationFuture = camel.deactivationFutureFor(producer,
timeout, system.dispatcher());
//#CamelDeactivation
system.shutdown();
}
@@ -23,7 +23,8 @@ public class CamelExtensionTestBase {
ActorSystem system = ActorSystem.create("some-system");
Camel camel = CamelExtension.get(system);
CamelContext camelContext = camel.context();
// camelContext.addComponent("activemq", ActiveMQComponent.activeMQComponent("vm://localhost?broker.persistent=false"))
// camelContext.addComponent("activemq", ActiveMQComponent.activeMQComponent(
// "vm://localhost?broker.persistent=false"));
//#CamelExtensionAddComponent
system.shutdown();
}
@@ -8,7 +8,8 @@ import scala.concurrent.util.FiniteDuration;
import java.util.concurrent.TimeUnit;

public class Consumer4 extends UntypedConsumerActor {
private final static FiniteDuration timeout = Duration.create(500, TimeUnit.MILLISECONDS);
private final static FiniteDuration timeout =
Duration.create(500, TimeUnit.MILLISECONDS);

@Override
public FiniteDuration replyTimeout() {
@@ -12,12 +12,15 @@ import scala.Option;
public class ErrorThrowingConsumer extends UntypedConsumerActor{
private String uri;

private static Mapper<RouteDefinition, ProcessorDefinition<?>> mapper = new Mapper<RouteDefinition, ProcessorDefinition<?>>() {
public ProcessorDefinition<?> apply(RouteDefinition rd) {
// Catch any exception and handle it by returning the exception message as response
return rd.onException(Exception.class).handled(true).transform(Builder.exceptionMessage()).end();
}
};
private static Mapper<RouteDefinition, ProcessorDefinition<?>> mapper =
new Mapper<RouteDefinition, ProcessorDefinition<?>>() {
public ProcessorDefinition<?> apply(RouteDefinition rd) {
// Catch any exception and handle it by returning the exception message
// as response
return rd.onException(Exception.class).handled(true).
transform(Builder.exceptionMessage()).end();
}
};

public ErrorThrowingConsumer(String uri){
this.uri = uri;
@@ -37,7 +40,8 @@ public class ErrorThrowingConsumer extends UntypedConsumerActor{
}

@Override
public Mapper<RouteDefinition, ProcessorDefinition<?>> getRouteDefinitionHandler() {
public Mapper<RouteDefinition,
ProcessorDefinition<?>> getRouteDefinitionHandler() {
return mapper;
}
@@ -39,7 +39,8 @@ public class ProducerTestBase {
ActorRef producer = system.actorOf(props,"jmsproducer");
Map<String,Object> headers = new HashMap<String, Object>();
headers.put(CamelMessage.MessageExchangeId(),"123");
producer.tell(new CamelMessage("<order amount=\"100\" currency=\"PLN\" itemId=\"12345\"/>",headers), null);
producer.tell(new CamelMessage("<order amount=\"100\" currency=\"PLN\" " +
"itemId=\"12345\"/>",headers), null);
//#Correlate
system.stop(producer);
system.shutdown();
@@ -6,7 +6,8 @@ public class HttpSample {
public static void main(String[] args) {
//#HttpExample
// Create the actors. this can be done in a Boot class so you can
// run the example in the MicroKernel. just add the below three lines to your boot class.
// run the example in the MicroKernel. just add the three lines to below
// your boot class.
ActorSystem system = ActorSystem.create("some-system");
final ActorRef httpTransformer = system.actorOf(new Props(HttpTransformer.class));
@@ -11,7 +11,8 @@ public class HttpTransformer extends UntypedActor{
public void onReceive(Object message) {
if (message instanceof CamelMessage) {
CamelMessage camelMessage = (CamelMessage) message;
CamelMessage replacedMessage = camelMessage.mapBody(new Mapper<Object, String>(){
CamelMessage replacedMessage =
camelMessage.mapBody(new Mapper<Object, String>(){
@Override
public String apply(Object body) {
String text = new String((byte[])body);
@@ -19,7 +19,8 @@ public class Consumer3 extends UntypedConsumerActor{
public void onReceive(Object message) {
if (message instanceof CamelMessage) {
CamelMessage camelMessage = (CamelMessage) message;
transformer.forward(camelMessage.getBodyAs(String.class, getCamelContext()),getContext());
transformer.forward(camelMessage.getBodyAs(String.class, getCamelContext()),
getContext());
} else
unhandled(message);
}
@@ -9,7 +9,8 @@ public class CustomRouteBuilder extends RouteBuilder{
public void configure() throws Exception {
from("direct:welcome").process(new Processor(){
public void process(Exchange exchange) throws Exception {
exchange.getOut().setBody(String.format("Welcome %s",exchange.getIn().getBody()));
exchange.getOut().setBody(String.format("Welcome %s",
exchange.getIn().getBody()));
}
});
}
@@ -7,7 +7,8 @@ public class CustomRouteSample {
public static void main(String[] args) {
try {
//#CustomRouteExample
// the below lines can be added to a Boot class, so that you can run the example from a MicroKernel
// the below lines can be added to a Boot class, so that you can run the
// example from a MicroKernel
ActorSystem system = ActorSystem.create("some-system");
final ActorRef producer = system.actorOf(new Props(Producer1.class));
final ActorRef mediator = system.actorOf(new Props(new UntypedActorFactory() {
@@ -15,14 +15,16 @@ public class Transformer extends UntypedActor {

public void onReceive(Object message) {
if (message instanceof CamelMessage) {
// example: transform message body "foo" to "- foo -" and forward result to producer
// example: transform message body "foo" to "- foo -" and forward result
// to producer
CamelMessage camelMessage = (CamelMessage) message;
CamelMessage transformedMessage = camelMessage.mapBody(new Mapper<String, String>(){
@Override
public String apply(String body) {
return String.format("- %s -",body);
}
});
CamelMessage transformedMessage =
camelMessage.mapBody(new Mapper<String, String>(){
@Override
public String apply(String body) {
return String.format("- %s -",body);
}
});
producer.forward(transformedMessage, getContext());
} else
unhandled(message);
@@ -57,9 +57,9 @@ public class CustomRouterDocTestBase {
public void demonstrateDispatchers() {
//#dispatchers
final ActorRef router = system.actorOf(new Props(MyActor.class)
// “head” router runs on "head" dispatcher
// “head” router will run on "head" dispatcher
.withRouter(new RoundRobinRouter(5).withDispatcher("head"))
// MyActor “workers” run on "workers" dispatcher
// MyActor “workers” will run on "workers" dispatcher
.withDispatcher("workers"));
//#dispatchers
}
@@ -104,7 +104,6 @@ public class CustomRouterDocTestBase {
}

//#crMessages

//#CustomRouter
static
//#CustomRouter
@@ -46,7 +46,8 @@ public class RemoteDeploymentDocTestBase {
addr = AddressFromURIString.parse("akka://sys@host:1234"); // the same
//#make-address
//#deploy
ActorRef ref = system.actorOf(new Props(SampleActor.class).withDeploy(new Deploy(new RemoteScope(addr))));
ActorRef ref = system.actorOf(new Props(SampleActor.class).withDeploy(
new Deploy(new RemoteScope(addr))));
//#deploy
assert ref.path().address().equals(addr);
}
@@ -58,10 +58,14 @@ import scala.concurrent.util.duration._
class MyMailboxType(systemSettings: ActorSystem.Settings, config: Config)
extends MailboxType {

override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = (owner zip system) headOption match {
case Some((o, s: ExtendedActorSystem)) ⇒ new MyMessageQueue(o, s)
case _ ⇒ throw new IllegalArgumentException("requires an owner (i.e. does not work with BalancingDispatcher)")
}
override def create(owner: Option[ActorRef],
system: Option[ActorSystem]): MessageQueue =
(owner zip system) headOption match {
case Some((o, s: ExtendedActorSystem)) ⇒ new MyMessageQueue(o, s)
case _ ⇒
throw new IllegalArgumentException("requires an owner " +
"(i.e. does not work with BalancingDispatcher)")
}
}

class MyMessageQueue(_owner: ActorRef, _system: ExtendedActorSystem)
@@ -72,10 +76,11 @@ class MyMessageQueue(_owner: ActorRef, _system: ExtendedActorSystem)
// three parameters below
val breaker = CircuitBreaker(system.scheduler, 5, 30.seconds, 1.minute)

def enqueue(receiver: ActorRef, envelope: Envelope): Unit = breaker.withSyncCircuitBreaker {
val data: Array[Byte] = serialize(envelope)
storage.push(data)
}
def enqueue(receiver: ActorRef, envelope: Envelope): Unit =
breaker.withSyncCircuitBreaker {
val data: Array[Byte] = serialize(envelope)
storage.push(data)
}

def dequeue(): Envelope = breaker.withSyncCircuitBreaker {
val data: Option[Array[Byte]] = storage.pull()
@@ -66,11 +66,12 @@ Java:
::

// Use this Actors' Dispatcher as ExecutionContext
getContext().system().scheduler().scheduleOnce(Duration.parse("10 seconds", getSelf(),
new Reconnect(), getContext().getDispatcher());
getContext().system().scheduler().scheduleOnce(Duration.parse("10 seconds",
getSelf(), new Reconnect(), getContext().getDispatcher());

// Use ActorSystem's default Dispatcher as ExecutionContext
system.scheduler().scheduleOnce(Duration.create(50, TimeUnit.MILLISECONDS), new Runnable() {
system.scheduler().scheduleOnce(Duration.create(50, TimeUnit.MILLISECONDS),
new Runnable() {
@Override
public void run() {
testActor.tell(System.currentTimeMillis());
@@ -98,7 +99,8 @@ v2.0::
v2.1::

val failedFilter = future1.filter(_ % 2 == 1).recover {
case m: NoSuchElementException => //When filter fails, it will have a java.util.NoSuchElementException
// When filter fails, it will have a java.util.NoSuchElementException
case m: NoSuchElementException =>
}
@@ -112,10 +114,10 @@ v2.0::
ExecutionContextExecutorService ec =
ExecutionContexts.fromExecutorService(yourExecutorServiceGoesHere);

//Use ec with your Futures
// Use ec with your Futures
Future<String> f1 = Futures.successful("foo", ec);

// Then you shut the ec down somewhere at the end of your program/application.
// Then you shut the ec down somewhere at the end of your application.
ec.shutdown();

v2.1::
@@ -127,7 +129,7 @@ v2.1::
//No need to pass the ExecutionContext here
Future<String> f1 = Futures.successful("foo");

// Then you shut the ExecutorService down somewhere at the end of your program/application.
// Then you shut the ExecutorService down somewhere at the end of your application.
yourExecutorServiceGoesHere.shutdown();

v2.0::
@@ -166,7 +168,8 @@ v2.0::
v2.1::

final ExecutionContext ec = system.dispatcher();
Future<String> future1 = Futures.successful("value").andThen(new OnComplete<String>() {
Future<String> future1 = Futures.successful("value").andThen(
new OnComplete<String>() {
public void onComplete(Throwable failure, String result) {
if (failure != null)
sendToIssueTracker(failure);
@@ -210,7 +213,8 @@ v2.0 Java::

v2.1 Java::

ActorRef router2 = system.actorOf(new Props().withRouter(RoundRobinRouter.create(routees)));
ActorRef router2 = system.actorOf(new Props().withRouter(
RoundRobinRouter.create(routees)));

Props: Function-based creation
==============================
@@ -337,7 +341,8 @@ v2.0::
val newRoutees = routeeProvider.createRoutees(props, requestedCapacity, Nil)
routeeProvider.registerRoutees(newRoutees)
} else if (requestedCapacity < 0) {
val (keep, abandon) = currentRoutees.splitAt(currentRoutees.length + requestedCapacity)
val (keep, abandon) = currentRoutees.splitAt(currentRoutees.length +
requestedCapacity)
routeeProvider.unregisterRoutees(abandon)
delayedStop(routeeProvider.context.system.scheduler, abandon)(
routeeProvider.context.dispatcher)
@@ -379,7 +384,7 @@ v2.0::
v2.1::

final FiniteDuration d = Duration.create("1 second");
final Timeout t = new Timeout(d); // always required finite duration, now also in type
final Timeout t = new Timeout(d); // always required finite duration, now enforced

Package Name Changes in Remoting
================================
@@ -235,7 +235,8 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
"creating actor with Props" in {
//#creating-props
import akka.actor.Props
val myActor = system.actorOf(Props[MyActor].withDispatcher("my-dispatcher"), name = "myactor2")
val myActor = system.actorOf(Props[MyActor].withDispatcher("my-dispatcher"),
name = "myactor2")
//#creating-props

system.stop(myActor)
@@ -354,7 +355,8 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
Await.result(stopped, 6 seconds)
// the actor has been stopped
} catch {
case e: akka.pattern.AskTimeoutException ⇒ // the actor wasn't stopped within 5 seconds
// the actor wasn't stopped within 5 seconds
case e: akka.pattern.AskTimeoutException ⇒
}
//#gracefulStop
}
@@ -87,7 +87,8 @@ class Worker extends Actor with ActorLogging {
case _: CounterService.ServiceUnavailable ⇒ Stop
}

// The sender of the initial Start message will continuously be notified about progress
// The sender of the initial Start message will continuously be notified
// about progress
var progressListener: Option[ActorRef] = None
val counterService = context.actorOf(Props[CounterService], name = "counter")
val totalCount = 51
@@ -133,9 +134,10 @@ class CounterService extends Actor {

// Restart the storage child when StorageException is thrown.
// After 3 restarts within 5 seconds it will be stopped.
override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 5 seconds) {
case _: Storage.StorageException ⇒ Restart
}
override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 3,
withinTimeRange = 5 seconds) {
case _: Storage.StorageException ⇒ Restart
}

val key = self.path.name
var storage: Option[ActorRef] = None
@@ -194,14 +196,15 @@ class CounterService extends Actor {
}

def forwardOrPlaceInBacklog(msg: Any) {
// We need the initial value from storage before we can start delegate to the counter.
// Before that we place the messages in a backlog, to be sent to the counter when
// it is initialized.
// We need the initial value from storage before we can start delegate to
// the counter. Before that we place the messages in a backlog, to be sent
// to the counter when it is initialized.
counter match {
case Some(c) ⇒ c forward msg
case None ⇒
if (backlog.size >= MaxBacklog)
throw new ServiceUnavailable("CounterService not available, lack of initial value")
throw new ServiceUnavailable(
"CounterService not available, lack of initial value")
backlog = backlog :+ (sender, msg)
}
}
@@ -281,7 +284,8 @@ object DummyDB {

@throws(classOf[StorageException])
def save(key: String, value: Long): Unit = synchronized {
if (11 <= value && value <= 14) throw new StorageException("Simulated store failure " + value)
if (11 <= value && value <= 14)
throw new StorageException("Simulated store failure " + value)
db += (key -> value)
}
@@ -24,12 +24,13 @@ object FaultHandlingDocSpec {
import akka.actor.SupervisorStrategy._
import scala.concurrent.util.duration._

override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case _: ArithmeticException ⇒ Resume
case _: NullPointerException ⇒ Restart
case _: IllegalArgumentException ⇒ Stop
case _: Exception ⇒ Escalate
}
override val supervisorStrategy =
OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case _: ArithmeticException ⇒ Resume
case _: NullPointerException ⇒ Restart
case _: IllegalArgumentException ⇒ Stop
case _: Exception ⇒ Escalate
}
//#strategy

def receive = {
@@ -45,12 +46,13 @@ object FaultHandlingDocSpec {
import akka.actor.SupervisorStrategy._
import scala.concurrent.util.duration._

override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case _: ArithmeticException ⇒ Resume
case _: NullPointerException ⇒ Restart
case _: IllegalArgumentException ⇒ Stop
case _: Exception ⇒ Escalate
}
override val supervisorStrategy =
OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case _: ArithmeticException ⇒ Resume
case _: NullPointerException ⇒ Restart
case _: IllegalArgumentException ⇒ Stop
case _: Exception ⇒ Escalate
}
//#strategy2

def receive = {
@@ -55,7 +55,8 @@ trait Foo {

trait Bar {
import TypedActor.dispatcher //So we have an implicit dispatcher for our Promise
def doBar(str: String): Future[String] = Promise.successful(str.toUpperCase).future
def doBar(str: String): Future[String] =
Promise.successful(str.toUpperCase).future
}

class FooBar extends Foo with Bar
@@ -106,7 +107,8 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
//#typed-actor-create1
//#typed-actor-create2
val otherSquarer: Squarer =
TypedActor(system).typedActorOf(TypedProps(classOf[Squarer], new SquarerImpl("foo")), "name")
TypedActor(system).typedActorOf(TypedProps(classOf[Squarer],
new SquarerImpl("foo")), "name")
//#typed-actor-create2

//#typed-actor-calls
@@ -157,7 +159,8 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {
try {
//#typed-actor-hierarchy
//Inside your Typed Actor
val childSquarer: Squarer = TypedActor(TypedActor.context).typedActorOf(TypedProps[SquarerImpl]())
val childSquarer: Squarer =
TypedActor(TypedActor.context).typedActorOf(TypedProps[SquarerImpl]())
//Use "childSquarer" as a Squarer
//#typed-actor-hierarchy
} catch {
@@ -167,7 +170,8 @@ class TypedActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) {

"supercharge" in {
//#typed-actor-supercharge-usage
val awesomeFooBar: Foo with Bar = TypedActor(system).typedActorOf(TypedProps[FooBar]())
val awesomeFooBar: Foo with Bar =
TypedActor(system).typedActorOf(TypedProps[FooBar]())

awesomeFooBar.doFoo(10)
val f = awesomeFooBar.doBar("yes")
@@ -25,7 +25,8 @@ object CustomRoute {
}
}

class CustomRouteBuilder(system: ActorSystem, responder: ActorRef) extends RouteBuilder {
class CustomRouteBuilder(system: ActorSystem, responder: ActorRef)
extends RouteBuilder {
def configure {
from("jetty:http://localhost:8877/camel/custom").to(responder)
}
@@ -48,7 +49,8 @@ object CustomRoute {
def receive = {
case msg: CamelMessage ⇒ throw new Exception("error: %s" format msg.body)
}
override def onRouteDefinition = (rd) ⇒ rd.onException(classOf[Exception]).handled(true).transform(Builder.exceptionMessage).end
override def onRouteDefinition = (rd) ⇒ rd.onException(classOf[Exception]).
handled(true).transform(Builder.exceptionMessage).end

final override def preRestart(reason: Throwable, message: Option[Any]) {
sender ! Failure(reason)
@@ -19,8 +19,10 @@ object CustomRouteExample {

class Transformer(producer: ActorRef) extends Actor {
def receive = {
// example: transform message body "foo" to "- foo -" and forward result to producer
case msg: CamelMessage ⇒ producer.forward(msg.mapBody((body: String) ⇒ "- %s -" format body))
// example: transform message body "foo" to "- foo -" and forward result
// to producer
case msg: CamelMessage ⇒
producer.forward(msg.mapBody((body: String) ⇒ "- %s -" format body))
}
}
@@ -38,7 +40,8 @@ object CustomRouteExample {
})
}
}
// the below lines can be added to a Boot class, so that you can run the example from a MicroKernel
// the below lines can be added to a Boot class, so that you can run the
// example from a MicroKernel
val system = ActorSystem("some-system")
val producer = system.actorOf(Props[Producer1])
val mediator = system.actorOf(Props(new Transformer(producer)))
@@ -21,7 +21,8 @@ object HttpExample {
def endpointUri = "jetty://http://akka.io/?bridgeEndpoint=true"

override def transformOutgoingMessage(msg: Any) = msg match {
case msg: CamelMessage ⇒ msg.copy(headers = msg.headers ++ msg.headers(Set(Exchange.HTTP_PATH)))
case msg: CamelMessage ⇒ msg.copy(headers = msg.headers ++
msg.headers(Set(Exchange.HTTP_PATH)))
}

override def routeResponse(msg: Any) { transformer forward msg }
@@ -29,13 +30,17 @@ object HttpExample {

class HttpTransformer extends Actor {
def receive = {
case msg: CamelMessage ⇒ sender ! (msg.mapBody { body: Array[Byte] ⇒ new String(body).replaceAll("Akka ", "AKKA ") })
case msg: Failure ⇒ sender ! msg
case msg: CamelMessage ⇒
sender ! (msg.mapBody { body: Array[Byte] ⇒
new String(body).replaceAll("Akka ", "AKKA ")
})
case msg: Failure ⇒ sender ! msg
}
}

// Create the actors. this can be done in a Boot class so you can
// run the example in the MicroKernel. just add the below three lines to your boot class.
// run the example in the MicroKernel. just add the three lines below to
// your boot class.
val system = ActorSystem("some-system")
val httpTransformer = system.actorOf(Props[HttpTransformer])
val httpProducer = system.actorOf(Props(new HttpProducer(httpTransformer)))
@@ -72,7 +72,8 @@ object Introduction {
val system = ActorSystem("some-system")
val camel = CamelExtension(system)
val camelContext = camel.context
// camelContext.addComponent("activemq", ActiveMQComponent.activeMQComponent("vm://localhost?broker.persistent=false"))
// camelContext.addComponent("activemq", ActiveMQComponent.activeMQComponent(
// "vm://localhost?broker.persistent=false"))
//#CamelExtensionAddComponent
}
{
@@ -92,12 +93,14 @@ object Introduction {
val camel = CamelExtension(system)
val actorRef = system.actorOf(Props[MyEndpoint])
// get a future reference to the activation of the endpoint of the Consumer Actor
val activationFuture = camel.activationFutureFor(actorRef)(timeout = 10 seconds, executor = system.dispatcher)
val activationFuture = camel.activationFutureFor(actorRef)(timeout = 10 seconds,
executor = system.dispatcher)
//#CamelActivation
//#CamelDeactivation
system.stop(actorRef)
// get a future reference to the deactivation of the endpoint of the Consumer Actor
val deactivationFuture = camel.deactivationFutureFor(actorRef)(timeout = 10 seconds, executor = system.dispatcher)
val deactivationFuture = camel.deactivationFutureFor(actorRef)(timeout = 10 seconds,
executor = system.dispatcher)
//#CamelDeactivation
}
@@ -45,7 +45,8 @@ object Producers {
}
val system = ActorSystem("some-system")
val receiver = system.actorOf(Props[ResponseReceiver])
val forwardResponse = system.actorOf(Props(new Forwarder("http://localhost:8080/news/akka", receiver)))
val forwardResponse = system.actorOf(Props(
new Forwarder("http://localhost:8080/news/akka", receiver)))
// the Forwarder sends out a request to the web page and forwards the response to
// the ResponseReceiver
forwardResponse ! "some request"
@@ -151,7 +151,8 @@ object DispatcherDocSpec {
def this(settings: ActorSystem.Settings, config: Config) = this()

// The create method is called to create the MessageQueue
final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue =
final override def create(owner: Option[ActorRef],
system: Option[ActorSystem]): MessageQueue =
new QueueBasedMessageQueue with UnboundedMessageQueueSemantics {
final val queue = new ConcurrentLinkedQueue[Envelope]()
}
@@ -20,7 +20,9 @@ import akka.testkit.AkkaSpec
//#extension
class SettingsImpl(config: Config) extends Extension {
val DbUri: String = config.getString("myapp.db.uri")
val CircuitBreakerTimeout: Duration = Duration(config.getMilliseconds("myapp.circuit-breaker.timeout"), TimeUnit.MILLISECONDS)
val CircuitBreakerTimeout: Duration =
Duration(config.getMilliseconds("myapp.circuit-breaker.timeout"),
TimeUnit.MILLISECONDS)
}
//#extension
@@ -29,7 +31,8 @@ object Settings extends ExtensionId[SettingsImpl] with ExtensionIdProvider {

override def lookup = Settings

override def createExtension(system: ExtendedActorSystem) = new SettingsImpl(system.settings.config)
override def createExtension(system: ExtendedActorSystem) =
new SettingsImpl(system.settings.config)
}
//#extensionid
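For context, a short sketch of how such an ExtensionId is typically used from application code; a hedged illustration assuming an ActorSystem named `system`, not part of the diff:

    // Settings(system) materializes the SettingsImpl once per ActorSystem
    val settings = Settings(system)
    val uri = settings.DbUri                      // read from "myapp.db.uri"
    val timeout = settings.CircuitBreakerTimeout  // read from "myapp.circuit-breaker.timeout"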
@@ -151,7 +151,8 @@ class FutureDocSpec extends AkkaSpec {
result must be(4)

val failedFilter = future1.filter(_ % 2 == 1).recover {
case m: NoSuchElementException ⇒ 0 //When filter fails, it will have a java.util.NoSuchElementException
// When filter fails, it will have a java.util.NoSuchElementException
case m: NoSuchElementException ⇒ 0
}
val result2 = Await.result(failedFilter, 1 second)
result2 must be(0) //Can only be 0 when there was a MatchError
@@ -258,7 +259,8 @@ class FutureDocSpec extends AkkaSpec {

"demonstrate usage of fold" in {
//#fold
val futures = for (i ← 1 to 1000) yield Future(i * 2) // Create a sequence of Futures
// Create a sequence of Futures
val futures = for (i ← 1 to 1000) yield Future(i * 2)
val futureSum = Future.fold(futures)(0)(_ + _)
Await.result(futureSum, 1 second) must be(1001000)
//#fold
@@ -266,7 +268,8 @@ class FutureDocSpec extends AkkaSpec {

"demonstrate usage of reduce" in {
//#reduce
val futures = for (i ← 1 to 1000) yield Future(i * 2) // Create a sequence of Futures
// Create a sequence of Futures
val futures = for (i ← 1 to 1000) yield Future(i * 2)
val futureSum = Future.reduce(futures)(_ + _)
Await.result(futureSum, 1 second) must be(1001000)
//#reduce
@@ -290,8 +293,9 @@ class FutureDocSpec extends AkkaSpec {
val msg1 = -1
//#try-recover
val future = akka.pattern.ask(actor, msg1) recoverWith {
case e: ArithmeticException ⇒ Future.successful(0)
case foo: IllegalArgumentException ⇒ Future.failed[Int](new IllegalStateException("All br0ken!"))
case e: ArithmeticException ⇒ Future.successful(0)
case foo: IllegalArgumentException ⇒
Future.failed[Int](new IllegalStateException("All br0ken!"))
}
//#try-recover
Await.result(future, 1 second) must be(0)
@@ -50,10 +50,16 @@ object HttpServer {
val rsp = request match {
case Request("GET", "ping" :: Nil, _, _, headers, _) ⇒
OKResponse(ByteString("<p>pong</p>"),
request.headers.exists { case Header(n, v) ⇒ n.toLowerCase == "connection" && v.toLowerCase == "keep-alive" })
request.headers.exists {
case Header(n, v) ⇒
n.toLowerCase == "connection" && v.toLowerCase == "keep-alive"
})
case req ⇒
OKResponse(ByteString("<p>" + req.toString + "</p>"),
request.headers.exists { case Header(n, v) ⇒ n.toLowerCase == "connection" && v.toLowerCase == "keep-alive" })
request.headers.exists {
case Header(n, v) ⇒
n.toLowerCase == "connection" && v.toLowerCase == "keep-alive"
})
}
socket write OKResponse.bytes(rsp).compact
if (!rsp.keepAlive) socket.close()
@@ -64,7 +70,8 @@ object HttpServer {
//#actor-companion

//#request-class
case class Request(meth: String, path: List[String], query: Option[String], httpver: String, headers: List[Header], body: Option[ByteString])
case class Request(meth: String, path: List[String], query: Option[String],
httpver: String, headers: List[Header], body: Option[ByteString])
case class Header(name: String, value: String)
//#request-class
@@ -118,13 +125,15 @@ object HttpIteratees {

//#read-path
def readPath = {
def step(segments: List[String]): IO.Iteratee[List[String]] = IO peek 1 flatMap {
case PATH ⇒ IO drop 1 flatMap (_ ⇒ readUriPart(pathchar) flatMap (segment ⇒ step(segment :: segments)))
case _ ⇒ segments match {
case "" :: rest ⇒ IO Done rest.reverse
case _ ⇒ IO Done segments.reverse
def step(segments: List[String]): IO.Iteratee[List[String]] =
IO peek 1 flatMap {
case PATH ⇒ IO drop 1 flatMap (_ ⇒ readUriPart(pathchar) flatMap (
segment ⇒ step(segment :: segments)))
case _ ⇒ segments match {
case "" :: rest ⇒ IO Done rest.reverse
case _ ⇒ IO Done segments.reverse
}
}
}
step(Nil)
}
//#read-path
@@ -140,14 +149,17 @@ object HttpIteratees {
val alpha = Set.empty ++ ('a' to 'z') ++ ('A' to 'Z') map (_.toByte)
val digit = Set.empty ++ ('0' to '9') map (_.toByte)
val hexdigit = digit ++ (Set.empty ++ ('a' to 'f') ++ ('A' to 'F') map (_.toByte))
val subdelim = Set('!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=') map (_.toByte)
val subdelim = Set('!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=') map
(_.toByte)
val pathchar = alpha ++ digit ++ subdelim ++ (Set(':', '@') map (_.toByte))
val querychar = pathchar ++ (Set('/', '?') map (_.toByte))

def readUriPart(allowed: Set[Byte]): IO.Iteratee[String] = for {
str ← IO takeWhile allowed map ascii
pchar ← IO peek 1 map (_ == PERCENT)
all ← if (pchar) readPChar flatMap (ch ⇒ readUriPart(allowed) map (str + ch + _)) else IO Done str
all ← if (pchar) readPChar flatMap (ch ⇒ readUriPart(allowed) map
(str + ch + _))
else IO Done str
} yield all

def readPChar = IO take 3 map {
@@ -173,15 +185,18 @@ object HttpIteratees {
value ← IO takeUntil CRLF flatMap readMultiLineValue
} yield Header(ascii(name), ascii(value))

def readMultiLineValue(initial: ByteString): IO.Iteratee[ByteString] = IO peek 1 flatMap {
case SP ⇒ IO takeUntil CRLF flatMap (bytes ⇒ readMultiLineValue(initial ++ bytes))
case _ ⇒ IO Done initial
}
def readMultiLineValue(initial: ByteString): IO.Iteratee[ByteString] =
IO peek 1 flatMap {
case SP ⇒ IO takeUntil CRLF flatMap (
bytes ⇒ readMultiLineValue(initial ++ bytes))
case _ ⇒ IO Done initial
}
//#read-headers

//#read-body
def readBody(headers: List[Header]) =
if (headers.exists(header ⇒ header.name == "Content-Length" || header.name == "Transfer-Encoding"))
if (headers.exists(header ⇒ header.name == "Content-Length" ||
header.name == "Transfer-Encoding"))
IO.takeAll map (Some(_))
else
IO Done None
@@ -210,7 +225,8 @@ object OKResponse {
date ++= ByteString(new java.util.Date().toString) ++= CRLF ++=
server ++= CRLF ++=
contentLength ++= ByteString(rsp.body.length.toString) ++= CRLF ++=
connection ++= (if (rsp.keepAlive) keepAlive else close) ++= CRLF ++= CRLF ++= rsp.body result
connection ++= (if (rsp.keepAlive) keepAlive else close) ++= CRLF ++=
CRLF ++= rsp.body result
}

}
@@ -32,7 +32,8 @@ class RemoteDeploymentDocSpec extends AkkaSpec("""

"demonstrate programmatic deployment" in {
//#deploy
val ref = system.actorOf(Props[SampleActor].withDeploy(Deploy(scope = RemoteScope(address))))
val ref = system.actorOf(Props[SampleActor].
withDeploy(Deploy(scope = RemoteScope(address))))
//#deploy
ref.path.address must be(address)
ref ! "test"
@@ -22,8 +22,10 @@ class RouterDocSpec extends AkkaSpec {

//#dispatchers
val router: ActorRef = system.actorOf(Props[MyActor]
.withRouter(RoundRobinRouter(5, routerDispatcher = "router")) // “head” will run on "router" dispatcher
.withDispatcher("workers")) // MyActor workers will run on "workers" dispatcher
// “head” will run on "router" dispatcher
.withRouter(RoundRobinRouter(5, routerDispatcher = "router"))
// MyActor workers will run on "workers" dispatcher
.withDispatcher("workers"))
//#dispatchers

}
@@ -66,8 +66,8 @@ class ParentActor extends Actor {
//#randomRouter
case "smr" ⇒
//#smallestMailboxRouter
val smallestMailboxRouter =
context.actorOf(Props[PrintlnActor].withRouter(SmallestMailboxRouter(5)), "router")
val smallestMailboxRouter = context.actorOf(Props[PrintlnActor].
withRouter(SmallestMailboxRouter(5)), "router")
1 to 10 foreach {
i ⇒ smallestMailboxRouter ! i
}
@@ -40,7 +40,8 @@ class TestKitUsageSpec
val randomTail = Random.nextInt(10)
val headList = Seq().padTo(randomHead, "0")
val tailList = Seq().padTo(randomTail, "1")
val seqRef = system.actorOf(Props(new SequencingActor(testActor, headList, tailList)))
val seqRef =
system.actorOf(Props(new SequencingActor(testActor, headList, tailList)))

override def afterAll {
system.shutdown()
@@ -49,7 +49,8 @@ object ZeromqDocSpec {
val timestamp = System.currentTimeMillis

// use akka SerializationExtension to convert to bytes
val heapPayload = ser.serialize(Heap(timestamp, currentHeap.getUsed, currentHeap.getMax)).get
val heapPayload = ser.serialize(Heap(timestamp, currentHeap.getUsed,
currentHeap.getMax)).get
// the first frame is the topic, second is the message
pubSocket ! ZMQMessage(Seq(Frame("health.heap"), Frame(heapPayload)))
@@ -64,19 +65,24 @@ object ZeromqDocSpec {
//#logger
class Logger extends Actor with ActorLogging {

ZeroMQExtension(context.system).newSocket(SocketType.Sub, Listener(self), Connect("tcp://127.0.0.1:1235"), Subscribe("health"))
ZeroMQExtension(context.system).newSocket(SocketType.Sub, Listener(self),
Connect("tcp://127.0.0.1:1235"), Subscribe("health"))
val ser = SerializationExtension(context.system)
val timestampFormat = new SimpleDateFormat("HH:mm:ss.SSS")

def receive = {
// the first frame is the topic, second is the message
case m: ZMQMessage if m.firstFrameAsString == "health.heap" ⇒
val Heap(timestamp, used, max) = ser.deserialize(m.payload(1), classOf[Heap]).get
log.info("Used heap {} bytes, at {}", used, timestampFormat.format(new Date(timestamp)))
val Heap(timestamp, used, max) = ser.deserialize(m.payload(1),
classOf[Heap]).get
log.info("Used heap {} bytes, at {}", used,
timestampFormat.format(new Date(timestamp)))

case m: ZMQMessage if m.firstFrameAsString == "health.load" ⇒
val Load(timestamp, loadAverage) = ser.deserialize(m.payload(1), classOf[Load]).get
log.info("Load average {}, at {}", loadAverage, timestampFormat.format(new Date(timestamp)))
val Load(timestamp, loadAverage) = ser.deserialize(m.payload(1),
classOf[Load]).get
log.info("Load average {}, at {}", loadAverage,
timestampFormat.format(new Date(timestamp)))
}
}
//#logger
@@ -84,17 +90,20 @@ object ZeromqDocSpec {
//#alerter
class HeapAlerter extends Actor with ActorLogging {

ZeroMQExtension(context.system).newSocket(SocketType.Sub, Listener(self), Connect("tcp://127.0.0.1:1235"), Subscribe("health.heap"))
ZeroMQExtension(context.system).newSocket(SocketType.Sub,
Listener(self), Connect("tcp://127.0.0.1:1235"), Subscribe("health.heap"))
val ser = SerializationExtension(context.system)
var count = 0

def receive = {
// the first frame is the topic, second is the message
case m: ZMQMessage if m.firstFrameAsString == "health.heap" ⇒
val Heap(timestamp, used, max) = ser.deserialize(m.payload(1), classOf[Heap]).get
val Heap(timestamp, used, max) = ser.deserialize(m.payload(1),
classOf[Heap]).get
if ((used.toDouble / max) > 0.9) count += 1
else count = 0
if (count > 10) log.warning("Need more memory, using {} %", (100.0 * used / max))
if (count > 10) log.warning("Need more memory, using {} %",
(100.0 * used / max))
}
}
//#alerter
@@ -109,7 +118,8 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") {

//#pub-socket
import akka.zeromq.ZeroMQExtension
val pubSocket = ZeroMQExtension(system).newSocket(SocketType.Pub, Bind("tcp://127.0.0.1:21231"))
val pubSocket = ZeroMQExtension(system).newSocket(SocketType.Pub,
Bind("tcp://127.0.0.1:21231"))
//#pub-socket

//#sub-socket
@@ -121,11 +131,13 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") {
case _ ⇒ //...
}
}))
val subSocket = ZeroMQExtension(system).newSocket(SocketType.Sub, Listener(listener), Connect("tcp://127.0.0.1:21231"), SubscribeAll)
val subSocket = ZeroMQExtension(system).newSocket(SocketType.Sub,
Listener(listener), Connect("tcp://127.0.0.1:21231"), SubscribeAll)
//#sub-socket

//#sub-topic-socket
val subTopicSocket = ZeroMQExtension(system).newSocket(SocketType.Sub, Listener(listener), Connect("tcp://127.0.0.1:21231"), Subscribe("foo.bar"))
val subTopicSocket = ZeroMQExtension(system).newSocket(SocketType.Sub,
Listener(listener), Connect("tcp://127.0.0.1:21231"), Subscribe("foo.bar"))
//#sub-topic-socket

//#unsub-topic-socket
@@ -93,7 +93,7 @@ by Actors:
akka {
actor {
debug {
# enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill and the like)
# enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill et.c.)
autoreceive = on
}
}
@@ -148,7 +148,8 @@ If you want to see all messages that are sent through remoting at DEBUG log leve

akka {
remote {
# If this is "on", Akka will log all outbound messages at DEBUG level, if off then they are not logged
# If this is "on", Akka will log all outbound messages at DEBUG level,
# if off then they are not logged
log-sent-messages = on
}
}
@@ -160,7 +161,8 @@ If you want to see all messages that are received through remoting at DEBUG log

akka {
remote {
# If this is "on", Akka will log all inbound messages at DEBUG level, if off then they are not logged
# If this is "on", Akka will log all inbound messages at DEBUG level,
# if off then they are not logged
log-received-messages = on
}
}
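A hedged sketch of enabling these flags programmatically when constructing an ActorSystem; the config keys are copied from the snippets above, while the fallback pattern is standard Typesafe Config usage and not part of the diff:

    import akka.actor.ActorSystem
    import com.typesafe.config.ConfigFactory

    // overrides layered on top of the regular application.conf / reference.conf
    val debugRemoting = ConfigFactory.parseString("""
      akka.remote.log-sent-messages = on
      akka.remote.log-received-messages = on
    """).withFallback(ConfigFactory.load())

    val system = ActorSystem("debugged", debugRemoting)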
@@ -419,7 +419,13 @@ implementation called :class:`TestProbe`. The functionality is best explained
using a small example:

.. includecode:: code/docs/testkit/TestkitDocSpec.scala
:include: imports-test-probe,my-double-echo,test-probe
:include: imports-test-probe

.. includecode:: code/docs/testkit/TestkitDocSpec.scala
:include: my-double-echo

.. includecode:: code/docs/testkit/TestkitDocSpec.scala
:include: test-probe

Here a the system under test is simulated by :class:`MyDoubleEcho`, which is
supposed to mirror its input to two outputs. Attaching two test probes enables
@@ -458,7 +464,10 @@ concerning volume and timing of the message flow while still keeping the
network functioning:

.. includecode:: code/docs/testkit/TestkitDocSpec.scala
:include: test-probe-forward-actors,test-probe-forward
:include: test-probe-forward-actors

.. includecode:: code/docs/testkit/TestkitDocSpec.scala
:include: test-probe-forward

The ``dest`` actor will receive the same message invocation as if no test probe
had intervened.
@@ -39,7 +39,8 @@ class StatsService extends Actor {
case StatsJob(text) if text != "" ⇒
val words = text.split(" ")
val replyTo = sender // important to not close over sender
val aggregator = context.actorOf(Props(new StatsAggregator(words.size, replyTo)))
val aggregator = context.actorOf(Props(
new StatsAggregator(words.size, replyTo)))
words foreach { word ⇒
workerRouter.tell(
ConsistentHashableEnvelope(word, word), aggregator)
@@ -111,7 +112,8 @@ class StatsFacade extends Actor with ActorLogging {
if (leaderAddress == cluster.selfAddress) {
if (!currentMasterCreatedByMe) {
log.info("Creating new statsService master at [{}]", leaderAddress)
currentMaster = Some(context.actorOf(Props[StatsService], name = "statsService"))
currentMaster = Some(context.actorOf(Props[StatsService],
name = "statsService"))
currentMasterCreatedByMe = true
}
} else {
@@ -26,7 +26,8 @@ class SimpleCalculatorActor extends Actor {

class CalculatorApplication extends Bootable {
//#setup
val system = ActorSystem("CalculatorApplication", ConfigFactory.load.getConfig("calculator"))
val system = ActorSystem("CalculatorApplication",
ConfigFactory.load.getConfig("calculator"))
val actor = system.actorOf(Props[SimpleCalculatorActor], "simpleCalculator")
//#setup
@@ -14,9 +14,11 @@ import akka.actor._

class CreationApplication extends Bootable {
//#setup
val system = ActorSystem("RemoteCreation", ConfigFactory.load.getConfig("remotecreation"))
val system =
ActorSystem("RemoteCreation", ConfigFactory.load.getConfig("remotecreation"))
val localActor = system.actorOf(Props[CreationActor], "creationActor")
val remoteActor = system.actorOf(Props[AdvancedCalculatorActor], "advancedCalculator")
val remoteActor =
system.actorOf(Props[AdvancedCalculatorActor], "advancedCalculator")

def doSomething(op: MathOp) = {
localActor ! (remoteActor, op)
@@ -36,8 +38,10 @@ class CreationActor extends Actor {
def receive = {
case (actor: ActorRef, op: MathOp) ⇒ actor ! op
case result: MathResult ⇒ result match {
case MultiplicationResult(n1, n2, r) ⇒ println("Mul result: %d * %d = %d".format(n1, n2, r))
case DivisionResult(n1, n2, r) ⇒ println("Div result: %.0f / %d = %.2f".format(n1, n2, r))
case MultiplicationResult(n1, n2, r) ⇒
println("Mul result: %d * %d = %d".format(n1, n2, r))
case DivisionResult(n1, n2, r) ⇒
println("Div result: %.0f / %d = %.2f".format(n1, n2, r))
}
}
}
@@ -48,8 +52,10 @@ object CreationApp {
val app = new CreationApplication
println("Started Creation Application")
while (true) {
if (Random.nextInt(100) % 2 == 0) app.doSomething(Multiply(Random.nextInt(20), Random.nextInt(20)))
else app.doSomething(Divide(Random.nextInt(10000), (Random.nextInt(99) + 1)))
if (Random.nextInt(100) % 2 == 0)
app.doSomething(Multiply(Random.nextInt(20), Random.nextInt(20)))
else
app.doSomething(Divide(Random.nextInt(10000), (Random.nextInt(99) + 1)))

Thread.sleep(200)
}
@@ -16,9 +16,11 @@ import akka.actor.{ ActorRef, Props, Actor, ActorSystem }

class LookupApplication extends Bootable {
//#setup
val system = ActorSystem("LookupApplication", ConfigFactory.load.getConfig("remotelookup"))
val system =
ActorSystem("LookupApplication", ConfigFactory.load.getConfig("remotelookup"))
val actor = system.actorOf(Props[LookupActor], "lookupActor")
val remoteActor = system.actorFor("akka://CalculatorApplication@127.0.0.1:2552/user/simpleCalculator")
val remoteActor = system.actorFor(
"akka://CalculatorApplication@127.0.0.1:2552/user/simpleCalculator")

def doSomething(op: MathOp) = {
actor ! (remoteActor, op)
@@ -38,8 +40,10 @@ class LookupActor extends Actor {
def receive = {
case (actor: ActorRef, op: MathOp) ⇒ actor ! op
case result: MathResult ⇒ result match {
case AddResult(n1, n2, r) ⇒ println("Add result: %d + %d = %d".format(n1, n2, r))
case SubtractResult(n1, n2, r) ⇒ println("Sub result: %d - %d = %d".format(n1, n2, r))
case AddResult(n1, n2, r) ⇒
println("Add result: %d + %d = %d".format(n1, n2, r))
case SubtractResult(n1, n2, r) ⇒
println("Sub result: %d - %d = %d".format(n1, n2, r))
}
}
}
@@ -50,8 +54,10 @@ object LookupApp {
val app = new LookupApplication
println("Started Lookup Application")
while (true) {
if (Random.nextInt(100) % 2 == 0) app.doSomething(Add(Random.nextInt(100), Random.nextInt(100)))
else app.doSomething(Subtract(Random.nextInt(100), Random.nextInt(100)))
if (Random.nextInt(100) % 2 == 0)
app.doSomething(Add(Random.nextInt(100), Random.nextInt(100)))
else
app.doSomething(Subtract(Random.nextInt(100), Random.nextInt(100)))

Thread.sleep(200)
}