Merge pull request #1915 from krasserm/wip-3704-persistence-improvements-part-1-krassserm

!per #3704 Persistence improvement (part 1)
This commit is contained in:
Patrik Nordwall 2014-01-17 06:37:52 -08:00
commit 8d2bc2bc40
55 changed files with 3474 additions and 2191 deletions

View file

@@ -19,6 +19,7 @@ prior deprecation.
 .. toctree::
    :maxdepth: 1

+   ../scala/persistence
    ../dev/multi-node-testing

 Another reason for marking a module as experimental is that it's too early

View file

@@ -139,7 +139,7 @@ public class PersistenceDocTest {
         if (message instanceof Persistent) {
             Persistent p = (Persistent)message;
             Persistent out = p.withPayload("done " + p.payload());
-            channel.tell(Deliver.create(out, destination), getSelf());
+            channel.tell(Deliver.create(out, destination.path()), getSelf());
         }
     }
 }
@@ -174,24 +174,35 @@ public class PersistenceDocTest {
             .withRedeliverInterval(Duration.create(30, TimeUnit.SECONDS))
             .withRedeliverMax(15)));
         //#channel-custom-settings

+        //#channel-custom-listener
+        class MyListener extends UntypedActor {
+            @Override
+            public void onReceive(Object message) throws Exception {
+                if (message instanceof RedeliverFailure) {
+                    Iterable<ConfirmablePersistent> messages =
+                        ((RedeliverFailure)message).getMessages();
+                    // ...
+                }
+            }
+        }
+
+        final ActorRef myListener = getContext().actorOf(Props.create(MyListener.class));
+        getContext().actorOf(Channel.props(
+            ChannelSettings.create().withRedeliverFailureListener(null)));
+        //#channel-custom-listener
     }

     public void onReceive(Object message) throws Exception {
         if (message instanceof Persistent) {
             Persistent p = (Persistent)message;
             Persistent out = p.withPayload("done " + p.payload());
-            channel.tell(Deliver.create(out, destination), getSelf());
+            channel.tell(Deliver.create(out, destination.path()), getSelf());
             //#channel-example-reply
-            channel.tell(Deliver.create(out, getSender()), getSelf());
+            channel.tell(Deliver.create(out, getSender().path()), getSelf());
             //#channel-example-reply
-            //#resolve-destination
-            channel.tell(Deliver.create(out, getSender(), Resolve.destination()), getSelf());
-            //#resolve-destination
-            //#resolve-sender
-            channel.tell(Deliver.create(out, destination, Resolve.sender()), getSender());
-            //#resolve-sender
         }
     }
 }
@@ -292,9 +303,13 @@ public class PersistenceDocTest {
             .withRedeliverInterval(Duration.create(30, TimeUnit.SECONDS))
             .withRedeliverMax(15)), "myPersistentChannel");

-        channel.tell(Deliver.create(Persistent.create("example"), destination), getSelf());
+        channel.tell(Deliver.create(Persistent.create("example"), destination.path()), getSelf());
         //#persistent-channel-example
+        //#persistent-channel-watermarks
+        PersistentChannelSettings.create()
+            .withPendingConfirmationsMax(10000)
+            .withPendingConfirmationsMin(2000);
+        //#persistent-channel-watermarks
         //#persistent-channel-reply
         PersistentChannelSettings.create().withReplyPersistent(true);
         //#persistent-channel-reply
@@ -318,7 +333,7 @@ public class PersistenceDocTest {
         // ...
         // reliably deliver events
         channel.tell(Deliver.create(Persistent.create(
-            event, getCurrentPersistentMessage()), destination), getSelf());
+            event, getCurrentPersistentMessage()), destination.path()), getSelf());
     }

     public void onReceiveReplay(Object msg) {
@@ -339,4 +354,30 @@ public class PersistenceDocTest {
         }
         //#reliable-event-delivery
     };

+    static Object o9 = new Object() {
+        //#view
+        class MyView extends UntypedView {
+            @Override
+            public String processorId() {
+                return "some-processor-id";
+            }
+
+            @Override
+            public void onReceive(Object message) throws Exception {
+                if (message instanceof Persistent) {
+                    // ...
+                }
+            }
+        }
+        //#view
+
+        public void usage() {
+            final ActorSystem system = ActorSystem.create("example");
+            //#view-update
+            final ActorRef view = system.actorOf(Props.create(MyView.class));
+            view.tell(Update.create(true), null);
+            //#view-update
+        }
+    };
 }

View file

@@ -77,22 +77,32 @@ public class PersistencePluginDocTest {
 class MyAsyncJournal extends AsyncWriteJournal {
     @Override
-    public Future<Long> doReplayAsync(String processorId, long fromSequenceNr, long toSequenceNr, Procedure<PersistentRepr> replayCallback) {
-        return null;
-    }
-
-    @Override
-    public Future<Void> doWriteAsync(Iterable<PersistentRepr> persistentBatch) {
-        return null;
-    }
-
-    @Override
-    public Future<Void> doDeleteAsync(String processorId, long fromSequenceNr, long toSequenceNr, boolean permanent) {
-        return null;
-    }
-
-    @Override
-    public Future<Void> doConfirmAsync(String processorId, long sequenceNr, String channelId) {
+    public Future<Void> doAsyncWriteMessages(Iterable<PersistentRepr> messages) {
+        return null;
+    }
+
+    @Override
+    public Future<Void> doAsyncWriteConfirmations(Iterable<PersistentConfirmation> confirmations) {
+        return null;
+    }
+
+    @Override
+    public Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> messageIds, boolean permanent) {
+        return null;
+    }
+
+    @Override
+    public Future<Void> doAsyncDeleteMessagesTo(String processorId, long toSequenceNr, boolean permanent) {
+        return null;
+    }
+
+    @Override
+    public Future<Void> doAsyncReplayMessages(String processorId, long fromSequenceNr, long toSequenceNr, long max, Procedure<PersistentRepr> replayCallback) {
+        return null;
+    }
+
+    @Override
+    public Future<Long> doAsyncReadHighestSequenceNr(String processorId, long fromSequenceNr) {
         return null;
     }
 }

View file

@@ -5,21 +5,13 @@ Persistence
 ###########

 Akka persistence enables stateful actors to persist their internal state so that it can be recovered when an actor
-is started, restarted by a supervisor or migrated in a cluster. It also allows stateful actors to recover from JVM
-crashes, for example. The key concept behind Akka persistence is that only changes to an actor's internal state are
-persisted but never its current state directly (except for optional snapshots). These changes are only ever appended
-to storage, nothing is ever mutated, which allows for very high transaction rates and efficient replication. Stateful
-actors are recovered by replaying stored changes to these actors from which they can rebuild internal state. This can
-be either the full history of changes or starting from a snapshot of internal actor state which can dramatically
-reduce recovery times. Akka persistence also provides point-to-point communication channels with at-least-once
-message delivery guarantees.
+is started, restarted after a JVM crash or by a supervisor, or migrated in a cluster. The key concept behind Akka
+persistence is that only changes to an actor's internal state are persisted but never its current state directly
+(except for optional snapshots). These changes are only ever appended to storage, nothing is ever mutated, which
+allows for very high transaction rates and efficient replication. Stateful actors are recovered by replaying stored
+changes to these actors from which they can rebuild internal state. This can be either the full history of changes
+or starting from a snapshot which can dramatically reduce recovery times. Akka persistence also provides point-to-point
+communication channels with at-least-once message delivery semantics.

-Storage backends for state changes and snapshots are pluggable in Akka persistence. Currently, these are written to
-the local filesystem. Distributed and replicated storage, with the possibility of scaling writes, will be available
-soon.
-
-Akka persistence is inspired by the `eventsourced`_ library. It follows the same concepts and architecture of
-`eventsourced`_ but significantly differs on API and implementation level.

 .. warning::
@@ -28,6 +20,9 @@ Akka persistence is inspired by the `eventsourced`_ library. It follows the same
    changes to a minimum the binary compatibility guarantee for maintenance releases does not apply to the
    contents of the ``akka.persistence`` package.

+Akka persistence is inspired by the `eventsourced`_ library. It follows the same concepts and architecture of
+`eventsourced`_ but significantly differs on API and implementation level.
+
 .. _eventsourced: https://github.com/eligosource/eventsourced

 Dependencies
@@ -48,16 +43,22 @@ Architecture
   before its ``onReceive`` method is called. When a processor is started or restarted, journaled messages are replayed
   to that processor, so that it can recover internal state from these messages.

-* *Channel*: Channels are used by processors to communicate with other actors. They prevent that replayed messages
-  are redundantly delivered to these actors and provide at-least-once message delivery guarantees, also in case of
-  sender and receiver JVM crashes.
+* *View*: A view is a persistent, stateful actor that receives journaled messages that have been written by another
+  processor. A view itself does not journal new messages, instead, it updates internal state only from a processor's
+  replicated message stream.
+
+* *Channel*: Channels are used by processors and views to communicate with other actors. They prevent that replayed
+  messages are redundantly delivered to these actors and provide at-least-once message delivery semantics, also in
+  case of sender and receiver JVM crashes.

 * *Journal*: A journal stores the sequence of messages sent to a processor. An application can control which messages
-  are stored and which are received by the processor without being journaled. The storage backend of a journal is
-  pluggable.
+  are journaled and which are received by the processor without being journaled. The storage backend of a journal is
+  pluggable. The default journal storage plugin writes to the local filesystem, replicated journals are available as
+  :ref:`community-projects-java`.

-* *Snapshot store*: A snapshot store persists snapshots of a processor's internal state. Snapshots are used for
-  optimizing recovery times. The storage backend of a snapshot store is pluggable.
+* *Snapshot store*: A snapshot store persists snapshots of a processor's or a view's internal state. Snapshots are
+  used for optimizing recovery times. The storage backend of a snapshot store is pluggable. The default snapshot
+  storage plugin writes to the local filesystem.

 * *Event sourcing*. Based on the building blocks described above, Akka persistence provides abstractions for the
   development of event sourced applications (see section :ref:`event-sourcing-java`)
@@ -75,10 +76,9 @@ A processor can be implemented by extending the abstract ``UntypedProcessor`` cl
 Processors only write messages of type ``Persistent`` to the journal, others are received without being persisted.
 When a processor's ``onReceive`` method is called with a ``Persistent`` message it can safely assume that this message
 has been successfully written to the journal. If a journal fails to write a ``Persistent`` message then the processor
-is stopped, by default. If an application wants that a processors continues to run on persistence failures it must
-handle ``PersistenceFailure`` messages. In this case, a processor may want to inform the sender about the failure,
-so that the sender can re-send the message, if needed, under the assumption that the journal recovered from a
-temporary failure.
+is stopped, by default. If a processor should continue running on persistence failures it must handle
+``PersistenceFailure`` messages. In this case, a processor may want to inform the sender about the failure,
+so that the sender can re-send the message, if needed.

 An ``UntypedProcessor`` itself is an ``Actor`` and can therefore be instantiated with ``actorOf``.
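For illustration only (not part of this commit): a processor that keeps running on persistence failures could handle ``PersistenceFailure`` along these lines; ``WriteFailed`` is a made-up application message class::

    public void onReceive(Object message) throws Exception {
        if (message instanceof PersistenceFailure) {
            PersistenceFailure failure = (PersistenceFailure) message;
            // journal write failed: inform the sender so it can re-send the command
            getSender().tell(new WriteFailed(failure.payload()), getSelf());
        } else if (message instanceof Persistent) {
            // message has been successfully written to the journal
        }
    }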
@@ -87,9 +87,9 @@ An ``UntypedProcessor`` itself is an ``Actor`` and can therefore be instantiated
 Recovery
 --------

-By default, a processor is automatically recovered on start and on restart by replaying persistent messages.
+By default, a processor is automatically recovered on start and on restart by replaying journaled messages.
 New messages sent to a processor during recovery do not interfere with replayed messages. New messages will
-only be received by that processor after recovery completes.
+only be received by a processor after recovery completes.

 Recovery customization
 ^^^^^^^^^^^^^^^^^^^^^^
@@ -137,7 +137,7 @@ that message as argument. An optional ``permanent`` parameter specifies whether
 deleted from the journal or only marked as deleted. In both cases, the message won't be replayed. Later extensions
 to Akka persistence will allow to replay messages that have been marked as deleted which can be useful for debugging
 purposes, for example. To delete all messages (journaled by a single processor) up to a specified sequence number,
-processors can call the ``deleteMessages`` method.
+processors should call the ``deleteMessages`` method.

 Identifiers
 -----------
@@ -150,41 +150,103 @@ method.
 Applications can customize a processor's id by specifying an actor name during processor creation as shown in
 section :ref:`processors-java`. This changes that processor's name in its actor hierarchy and hence influences only
-part of the processor id. To fully customize a processor's id, the ``processorId`` method should be overridden.
+part of the processor id. To fully customize a processor's id, the ``processorId`` method must be overridden.

 .. includecode:: code/docs/persistence/PersistenceDocTest.java#processor-id-override

+Overriding ``processorId`` is the recommended way to generate stable identifiers.
+
+.. _views-java:
+
+Views
+=====
+
+Views can be implemented by extending the ``UntypedView`` class and implementing the ``onReceive`` and the ``processorId``
+methods.
+
+.. includecode:: code/docs/persistence/PersistenceDocTest.java#view
+
+The ``processorId`` identifies the processor from which the view receives journaled messages. It is not necessary
+that the referenced processor is actually running. Views read messages from a processor's journal directly. When a
+processor is started later and begins to write new messages, the corresponding view is updated automatically, by
+default.
+
+Updates
+-------
+
+The default update interval of all views of an actor system is configurable:
+
+.. includecode:: ../scala/code/docs/persistence/PersistenceDocSpec.scala#auto-update-interval
+
+``UntypedView`` implementation classes may also override the ``autoUpdateInterval`` method to return a custom update
+interval for a specific view class or view instance. Applications may also trigger additional updates at
+any time by sending a view an ``Update`` message.
+
+.. includecode:: code/docs/persistence/PersistenceDocTest.java#view-update
+
+If the ``await`` parameter is set to ``true``, messages that follow the ``Update`` request are processed when the
+incremental message replay, triggered by that update request, has completed. If set to ``false`` (default), messages
+following the update request may interleave with the replayed message stream. Automated updates always run with
+``await = false``.
+
+Automated updates of all views of an actor system can be turned off by configuration:
+
+.. includecode:: ../scala/code/docs/persistence/PersistenceDocSpec.scala#auto-update
+
+Implementation classes may override the configured default value by overriding the ``autoUpdate`` method. To
+limit the number of replayed messages per update request, applications can configure a custom
+``akka.persistence.view.auto-update-replay-max`` value or override the ``autoUpdateReplayMax`` method. The number
+of replayed messages for manual updates can be limited with the ``replayMax`` parameter of the ``Update`` message.
+
+Recovery
+--------
+
+Initial recovery of views works in the very same way as for :ref:`processors-java` (i.e. by sending a ``Recover`` message
+to self). The maximum number of replayed messages during initial recovery is determined by ``autoUpdateReplayMax``.
+Further possibilities to customize initial recovery are explained in section :ref:`processors-java`.
+
+Identifiers
+-----------
+
+A view must have an identifier that doesn't change across different actor incarnations. It defaults to the
+``String`` representation of the actor path without the address part and can be obtained via the ``viewId``
+method.
+
+Applications can customize a view's id by specifying an actor name during view creation. This changes that view's
+name in its actor hierarchy and hence influences only part of the view id. To fully customize a view's id, the
+``viewId`` method must be overridden. Overriding ``viewId`` is the recommended way to generate stable identifiers.
+
+The ``viewId`` must differ from the referenced ``processorId``, unless :ref:`snapshots-java` of a view and its
+processor shall be shared (which is what applications usually do not want).
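As an aside (not part of this commit), a hedged sketch of a view in use; the processor id is made up and the two-argument ``Update.create(await, replayMax)`` factory is an assumption based on the ``replayMax`` parameter described above::

    class MyAccountView extends UntypedView {
        @Override
        public String processorId() {
            return "account-processor"; // made-up processor id
        }

        @Override
        public void onReceive(Object message) throws Exception {
            if (message instanceof Persistent) {
                // rebuild view state from the processor's journaled messages
            }
        }
    }

    // manual update: replay at most 100 new messages and process them
    // before any other new messages (await = true)
    view.tell(Update.create(true, 100L), null);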
 .. _channels-java:
 Channels
 ========

-.. warning::
-
-   There are further changes planned to the channel API that couldn't make it into the current milestone.
-   One example is to have only a single destination per channel to allow gap detection and more advanced
-   flow control.
-
-Channels are special actors that are used by processors to communicate with other actors (channel destinations).
+Channels are special actors that are used by processors or views to communicate with other actors (channel
+destinations). The following discusses channels in context of processors but this is also applicable to views.
 Channels prevent redundant delivery of replayed messages to destinations during processor recovery. A replayed
-message is retained by a channel if its previous delivery has been confirmed by a destination.
+message is retained by a channel if its delivery has been confirmed by a destination.

 .. includecode:: code/docs/persistence/PersistenceDocTest.java#channel-example

 A channel is ready to use once it has been created, no recovery or further activation is needed. A ``Deliver``
-request instructs a channel to send a ``Persistent`` message to a destination. Sender references are preserved
-by a channel, therefore, a destination can reply to the sender of a ``Deliver`` request.
+request instructs a channel to send a ``Persistent`` message to a destination. A destination is provided as
+``ActorPath`` and messages are sent by the channel via that path's ``ActorSelection``. Sender references are
+preserved by a channel, therefore, a destination can reply to the sender of a ``Deliver`` request.

-If a processor wants to reply to a ``Persistent`` message sender it should use the ``getSender()`` reference as
+If a processor wants to reply to a ``Persistent`` message sender it should use the ``getSender()`` path as
 channel destination.

 .. includecode:: code/docs/persistence/PersistenceDocTest.java#channel-example-reply

 Persistent messages delivered by a channel are of type ``ConfirmablePersistent``. ``ConfirmablePersistent`` extends
-``Persistent`` by adding the methods ``confirm`` method and ``redeliveries`` (see also :ref:`redelivery-java`). Channel
-destinations confirm the delivery of a ``ConfirmablePersistent`` message by calling ``confirm()`` an that message.
-This asynchronously writes a confirmation entry to the journal. Replayed messages internally contain these confirmation
-entries which allows a channel to decide if a message should be retained or not.
+``Persistent`` by adding the methods ``confirm`` and ``redeliveries`` (see also :ref:`redelivery-java`). A channel
+destination confirms the delivery of a ``ConfirmablePersistent`` message by calling ``confirm()`` on that message.
+This asynchronously writes a confirmation entry to the journal. Replayed messages internally contain confirmation
+entries which allow a channel to decide if it should retain these messages or not.

 A ``Processor`` can also be used as channel destination i.e. it can persist ``ConfirmablePersistent`` messages too.
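To make the confirmation protocol concrete, a minimal sketch of a channel destination (consistent with the API described above, but not taken from this commit)::

    class MyDestination extends UntypedActor {
        @Override
        public void onReceive(Object message) throws Exception {
            if (message instanceof ConfirmablePersistent) {
                ConfirmablePersistent p = (ConfirmablePersistent) message;
                // process p.payload() ...
                // asynchronously writes a confirmation entry to the journal
                p.confirm();
            }
        }
    }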
@@ -193,25 +255,23 @@ A ``Processor`` can also be used as channel destination i.e. it can persist ``Co
 Message re-delivery
 -------------------

-Channels re-deliver messages to destinations if they do not confirm their receipt within a configurable timeout.
+Channels re-deliver messages to destinations if they do not confirm delivery within a configurable timeout.
 This timeout can be specified as ``redeliverInterval`` when creating a channel, optionally together with the
-maximum number of re-deliveries a channel should attempt for each unconfirmed message.
+maximum number of re-deliveries a channel should attempt for each unconfirmed message. The number of re-delivery
+attempts can be obtained via the ``redeliveries`` method on ``ConfirmablePersistent``.

 .. includecode:: code/docs/persistence/PersistenceDocTest.java#channel-custom-settings

-Message re-delivery is done out of order with regards to normal delivery i.e. redelivered messages may arrive
-later than newer normally delivered messages. The number of re-delivery attempts can be obtained via the
-``redeliveries`` method on ``ConfirmablePersistent``.
-
-A channel keeps messages in memory until their successful delivery has been confirmed by their destination(s)
-or their maximum number of re-deliveries is reached. In the latter case, the application has to re-send the
-correspnding ``Deliver`` request to the channel so that the channel can start a new series of delivery attempts
-(starting again with a ``redeliveries`` count of ``0``).
+A channel keeps messages in memory until their successful delivery has been confirmed or the maximum number of
+re-deliveries is reached. To be notified about messages that have reached the maximum number of re-deliveries,
+applications can register a listener at channel creation.

-Re-sending ``Deliver`` requests is done automatically if the sending processor replays messages: only ``Deliver``
-requests of unconfirmed messages will be served again by the channel. A message replay can be enforced by an
-application by restarting the sending processor, for example. A replay will also take place if the whole
-application is restarted, either after normal termination or after a crash.
+.. includecode:: code/docs/persistence/PersistenceDocTest.java#channel-custom-listener
+
+A listener receives ``RedeliverFailure`` notifications containing all messages that could not be delivered. On
+receiving a ``RedeliverFailure`` message, an application may decide to restart the sending processor to enforce
+a re-send of these messages to the channel or confirm these messages to prevent further re-sends. The sending
+processor can also be restarted any time later to re-send unconfirmed messages.

 This combination of
@@ -220,7 +280,7 @@ This combination of
 * message re-deliveries by channels and
 * application-level confirmations (acknowledgements) by destinations

-enables channels to provide at-least-once message delivery guarantees. Possible duplicates can be detected by
+enables channels to provide at-least-once message delivery semantics. Possible duplicates can be detected by
 destinations by tracking message sequence numbers. Message sequence numbers are generated per sending processor.
 Depending on how a processor routes outbound messages to destinations, they may either see a contiguous message
 sequence or a sequence with gaps.
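A hedged sketch of duplicate detection by sequence number tracking, as suggested above (assumes a single sending processor; not part of this commit)::

    class DeduplicatingDestination extends UntypedActor {
        private long lastSequenceNr = 0L; // highest sequence number processed so far

        @Override
        public void onReceive(Object message) throws Exception {
            if (message instanceof ConfirmablePersistent) {
                ConfirmablePersistent p = (ConfirmablePersistent) message;
                if (p.sequenceNr() > lastSequenceNr) {
                    lastSequenceNr = p.sequenceNr();
                    // process p.payload() ...
                } // else: duplicate delivery of an already processed message
                p.confirm(); // confirm in both cases to stop re-delivery
            }
        }
    }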
@ -229,14 +289,13 @@ sequence or a sequence with gaps.
If a processor emits more than one outbound message per inbound ``Persistent`` message it **must** use a If a processor emits more than one outbound message per inbound ``Persistent`` message it **must** use a
separate channel for each outbound message to ensure that confirmations are uniquely identifiable, otherwise, separate channel for each outbound message to ensure that confirmations are uniquely identifiable, otherwise,
at-least-once message delivery is not guaranteed. This rule has been introduced to avoid writing additional at-least-once message delivery semantics do not apply. This rule has been introduced to avoid writing additional
outbound message identifiers to the journal which would decrease the overall throughput. It is furthermore outbound message identifiers to the journal which would decrease the overall throughput. It is furthermore
recommended to collapse multiple outbound messages to the same destination into a single outbound message, recommended to collapse multiple outbound messages to the same destination into a single outbound message,
otherwise, if sent via multiple channels, their ordering is not defined. These restrictions are likely to be otherwise, if sent via multiple channels, their ordering is not defined.
removed in the final release.
Whenever an application wants to have more control how sequence numbers are assigned to messages it should use If an application wants to have more control how sequence numbers are assigned to messages it should use an
an application-specific sequence number generator and include the generated sequence numbers into the ``payload`` application-specific sequence number generator and include the generated sequence numbers into the ``payload``
of ``Persistent`` messages. of ``Persistent`` messages.
 Persistent channels
@@ -246,60 +305,45 @@ Channels created with ``Channel.props`` do not persist messages. These channels
 with a sending processor that takes care of persistence, hence, channel-specific persistence is not necessary in
 this case. They are referred to as transient channels in the following.

-Applications may also use transient channels standalone (i.e. without a sending processor) if re-delivery attempts
-to destinations are required but message loss in case of a sender JVM crash is not an issue. If applications want to
-use standalone channels but message loss is not acceptable, they should use persistent channels. A persistent channel
-can be created with ``PersistentChannel.props`` and configured with a ``PersistentChannelSettings`` object.
+Persistent channels are like transient channels but additionally persist messages before delivering them. Messages
+that have been persisted by a persistent channel are deleted when destinations confirm their delivery. A persistent
+channel can be created with ``PersistentChannel.props`` and configured with a ``PersistentChannelSettings`` object.

 .. includecode:: code/docs/persistence/PersistenceDocTest.java#persistent-channel-example

-A persistent channel is like a transient channel that additionally persists ``Deliver`` requests before serving it.
-Hence, it can recover from sender JVM crashes and provide the same message re-delivery semantics as a transient
-channel in combination with an application-defined processor.
+A persistent channel is useful for delivery of messages to slow destinations or destinations that are unavailable
+for a long time. It can constrain the number of pending confirmations based on the ``pendingConfirmationsMax``
+and ``pendingConfirmationsMin`` parameters of ``PersistentChannelSettings``.

-By default, a persistent channel doesn't reply whether a ``Persistent`` message, sent with ``Deliver``, has been
-successfully persisted or not. This can be enabled by creating the channel with the ``replyPersistent`` configuration
-parameter set to ``true``:
+.. includecode:: code/docs/persistence/PersistenceDocTest.java#persistent-channel-watermarks
+
+It suspends delivery when the number of pending confirmations reaches ``pendingConfirmationsMax`` and resumes
+delivery again when this number falls below ``pendingConfirmationsMin``. This prevents both flooding destinations
+with more messages than they can process and unlimited memory consumption by the channel. A persistent channel
+continues to persist new messages even when message delivery is temporarily suspended.
+
+Standalone usage
+----------------
+
+Applications may also use channels standalone. Transient channels can be used standalone if re-delivery attempts
+to destinations are required but message loss in case of a sender JVM crash is not an issue. If message loss in
+case of a sender JVM crash is an issue, persistent channels should be used. In this case, applications may want to
+receive replies from the channel whether messages have been successfully persisted or not. This can be enabled by
+creating the channel with the ``replyPersistent`` configuration parameter set to ``true``:

 .. includecode:: code/docs/persistence/PersistenceDocTest.java#persistent-channel-reply

-With this setting, either the successfully persisted message is replied to the sender or a ``PersistenceFailure``.
-In case of a persistence failure, the sender should re-send the message.
+With this setting, either the successfully persisted message is replied to the sender or a ``PersistenceFailure``
+message. In the latter case, the sender should re-send the message.

-Using a persistent channel in combination with an application-defined processor can make sense if destinations are
-unavailable for a long time and an application doesn't want to buffer all messages in memory (but write them to the
-journal only). In this case, delivery can be disabled by sending the channel a ``DisableDelivery`` message (to
-stop delivery and persist-only) and re-enabled again by sending it an ``EnableDelivery`` message. A disabled channel
-that receives an ``EnableDelivery`` message, processes all persisted, unconfirmed ``Deliver`` requests again before
-serving new ones.
-
-Sender resolution
------------------
-
-``ActorRef`` s of ``Persistent`` message senders are also stored in the journal. Consequently, they may become invalid if
-an application is restarted and messages are replayed. For example, the stored ``ActorRef`` may then reference
-a previous incarnation of a sender and a new incarnation of that sender cannot receive a reply from a processor.
-This may be acceptable for many applications but others may require that a new sender incarnation receives the
-reply (to reliably resume a conversation between actors after a JVM crash, for example). Here, a channel may
-assist in resolving new sender incarnations by specifying a third ``Deliver`` argument:
-
-* ``Resolve.destination()`` if the sender of a persistent message is used as channel destination
-
-  .. includecode:: code/docs/persistence/PersistenceDocTest.java#resolve-destination
-
-* ``Resolve.sender()`` if the sender of a persistent message is forwarded to a destination.
-
-  .. includecode:: code/docs/persistence/PersistenceDocTest.java#resolve-sender
-
-Default is ``Resolve.off()`` which means no resolution. Find out more in the ``Deliver`` API docs.
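In connection with ``replyPersistent = true`` above, a sender might handle the channel's replies roughly as follows (illustrative sketch, not from this commit)::

    public void onReceive(Object message) throws Exception {
        if (message instanceof Persistent) {
            // the message has been successfully persisted by the channel
        } else if (message instanceof PersistenceFailure) {
            // persistence failed: re-send the corresponding Deliver request
        }
    }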
 Identifiers
 -----------

-In the same way as :ref:`processors`, channels also have an identifier that defaults to a channel's path. A channel
-identifier can therefore be customized by using a custom actor name at channel creation. This changes that channel's
-name in its actor hierarchy and hence influences only part of the channel identifier. To fully customize a channel
-identifier, it should be provided as argument ``Channel.props(String)`` or ``PersistentChannel.props(String)``.
+In the same way as :ref:`processors-java` and :ref:`views-java`, channels also have an identifier that defaults to a
+channel's path. A channel identifier can therefore be customized by using a custom actor name at channel creation. This
+changes that channel's name in its actor hierarchy and hence influences only part of the channel identifier. To fully
+customize a channel identifier, it should be provided as argument to ``Channel.props(String)`` or
+``PersistentChannel.props(String)`` (recommended to generate stable identifiers).

 .. includecode:: code/docs/persistence/PersistenceDocTest.java#channel-id-override
@@ -326,16 +370,18 @@ Sequence number
 The sequence number of a ``Persistent`` message can be obtained via its ``sequenceNr`` method. Persistent
 messages are assigned sequence numbers on a per-processor basis (or per channel basis if used
-standalone). A sequence starts at ``1L`` and doesn't contain gaps unless a processor deletes a message.
+standalone). A sequence starts at ``1L`` and doesn't contain gaps unless a processor deletes messages.

 .. _snapshots-java:

 Snapshots
 =========

-Snapshots can dramatically reduce recovery times. Processors can save snapshots of internal state by calling the
-``saveSnapshot`` method on ``Processor``. If saving of a snapshot succeeds, the processor will receive a
-``SaveSnapshotSuccess`` message, otherwise a ``SaveSnapshotFailure`` message.
+Snapshots can dramatically reduce recovery times of processors and views. The following discusses snapshots
+in context of processors but this is also applicable to views.
+
+Processors can save snapshots of internal state by calling the ``saveSnapshot`` method. If saving of a snapshot
+succeeds, the processor receives a ``SaveSnapshotSuccess`` message, otherwise a ``SaveSnapshotFailure`` message.

 .. includecode:: code/docs/persistence/PersistenceDocTest.java#save-snapshot
@@ -359,9 +405,9 @@ saved snapshot matches the specified ``SnapshotSelectionCriteria`` will replay a
 Snapshot deletion
 -----------------

-A processor can delete a single snapshot by calling the ``deleteSnapshot`` method with the sequence number and the
-timestamp of the snapshot as argument. To bulk-delete snapshots that match a specified ``SnapshotSelectionCriteria``
-argument, processors can call the ``deleteSnapshots`` method.
+A processor can delete individual snapshots by calling the ``deleteSnapshot`` method with the sequence number and the
+timestamp of a snapshot as argument. To bulk-delete snapshots matching ``SnapshotSelectionCriteria``, processors should
+use the ``deleteSnapshots`` method.

 .. _event-sourcing-java:
@@ -389,7 +435,7 @@ Akka persistence supports event sourcing with the abstract ``UntypedEventsourced
 event sourcing as a pattern on top of command sourcing). A processor that extends this abstract class does not handle
 ``Persistent`` messages directly but uses the ``persist`` method to persist and handle events. The behavior of an
 ``UntypedEventsourcedProcessor`` is defined by implementing ``onReceiveReplay`` and ``onReceiveCommand``. This is
-best explained with an example (which is also part of ``akka-sample-persistence``).
+demonstrated in the following example.

 .. includecode:: ../../../akka-samples/akka-sample-persistence/src/main/java/sample/persistence/japi/EventsourcedExample.java#eventsourced-example
@@ -402,28 +448,25 @@ a command is handled by generating two events which are then persisted and handl
 ``persist`` with an event (or a sequence of events) as first argument and an event handler as second argument.

 The ``persist`` method persists events asynchronously and the event handler is executed for successfully persisted
-events. Successfully persisted events are internally sent back to the processor as separate messages which trigger
-the event handler execution. An event handler may therefore close over processor state and mutate it. The sender
-of a persisted event is the sender of the corresponding command. This allows event handlers to reply to the sender
-of a command (not shown).
+events. Successfully persisted events are internally sent back to the processor as individual messages that trigger
+event handler executions. An event handler may close over processor state and mutate it. The sender of a persisted
+event is the sender of the corresponding command. This allows event handlers to reply to the sender of a command
+(not shown).

 The main responsibility of an event handler is changing processor state using event data and notifying others
 about successful state changes by publishing events.

-When persisting events with ``persist`` it is guaranteed that the processor will not receive new commands between
+When persisting events with ``persist`` it is guaranteed that the processor will not receive further commands between
 the ``persist`` call and the execution(s) of the associated event handler. This also holds for multiple ``persist``
-calls in context of a single command.
+calls in context of a single command. The example also shows how to switch between different command handlers
+with ``getContext().become()`` and ``getContext().unbecome()``.

-The example also demonstrates how to change the processor's default behavior, defined by ``onReceiveCommand``, to
-another behavior, defined by ``otherCommandHandler``, and back using ``getContext().become()`` and
-``getContext().unbecome()``. See also the API docs of ``persist`` for further details.
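A hedged sketch of the ``persist`` call pattern described above; ``MsgReceived`` and ``state`` are made-up application types::

    public void onReceiveCommand(Object command) {
        if (command instanceof String) {
            persist(new MsgReceived((String) command), new Procedure<MsgReceived>() {
                public void apply(MsgReceived event) {
                    // runs only after the event was successfully persisted;
                    // may close over and mutate processor state
                    state.update(event);
                    getSender().tell(event, getSelf()); // reply to the command sender
                }
            });
        }
    }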
 Reliable event delivery
 -----------------------

-Sending events from an event handler to another actor directly doesn't guarantee delivery of these events. To
-guarantee at-least-once delivery, :ref:`channels-java` must be used. In this case, also replayed events (received by
-``receiveReplay``) must be sent to a channel, as shown in the following example:
+Sending events from an event handler to another actor has at-most-once delivery semantics. For at-least-once delivery,
+:ref:`channels-java` must be used. In this case, also replayed events (received by ``onReceiveReplay``) must be sent to
+a channel, as shown in the following example:

 .. includecode:: code/docs/persistence/PersistenceDocTest.java#reliable-event-delivery
@@ -438,29 +481,33 @@ To optimize throughput, an ``UntypedProcessor`` internally batches received ``Pe
 writing them to the journal (as a single batch). The batch size dynamically grows from 1 under low and moderate loads
 to a configurable maximum size (default is ``200``) under high load.

-.. includecode:: ../scala/code/docs/persistence/PersistencePluginDocSpec.scala#max-batch-size
+.. includecode:: ../scala/code/docs/persistence/PersistencePluginDocSpec.scala#max-message-batch-size

 A new batch write is triggered by a processor as soon as a batch reaches the maximum size or if the journal completed
-writing the previous batch. Batch writes are never timer-based which keeps latencies as low as possible.
+writing the previous batch. Batch writes are never timer-based which keeps latencies at a minimum.

 Applications that want to have more explicit control over batch writes and batch sizes can send processors
 ``PersistentBatch`` messages.

 .. includecode:: code/docs/persistence/PersistenceDocTest.java#batch-write

-``Persistent`` messages contained in a ``PersistentBatch`` message are always written atomically, even if the batch
-size is greater than ``max-batch-size``. Also, a ``PersistentBatch`` is written isolated from other batches.
+``Persistent`` messages contained in a ``PersistentBatch`` are always written atomically, even if the batch
+size is greater than ``max-message-batch-size``. Also, a ``PersistentBatch`` is written isolated from other batches.
 ``Persistent`` messages contained in a ``PersistentBatch`` are received individually by a processor.

 ``PersistentBatch`` messages, for example, are used internally by an ``UntypedEventsourcedProcessor`` to ensure atomic
-writes of events. All events that are persisted in context of a single command are written as single batch to the
-journal (even if ``persist`` is called multiple times per command). The recovery of an ``UntypedEventsourcedProcessor``
-will therefore never be done partially i.e. with only a subset of events persisted by a single command.
+writes of events. All events that are persisted in context of a single command are written as a single batch to the
+journal (even if ``persist`` is called multiple times per command). The recovery of an ``UntypedEventsourcedProcessor``
+will therefore never be done partially (with only a subset of events persisted by a single command).
+
+Confirmation and deletion operations performed by :ref:`channels-java` are also batched. The maximum confirmation
+and deletion batch sizes are configurable with ``akka.persistence.journal.max-confirmation-batch-size`` and
+``akka.persistence.journal.max-deletion-batch-size``, respectively.
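For orientation, the three batch-size settings mentioned above side by side (values quoted from the reference configuration of this era, so treat them as assumptions)::

    akka.persistence.journal.max-message-batch-size = 200
    akka.persistence.journal.max-confirmation-batch-size = 10000
    akka.persistence.journal.max-deletion-batch-size = 10000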
 Storage plugins
 ===============

-Storage backends for journals and snapshot stores are plugins in akka-persistence. The default journal plugin
+Storage backends for journals and snapshot stores are pluggable in Akka persistence. The default journal plugin
 writes messages to LevelDB (see :ref:`local-leveldb-journal-java`). The default snapshot store plugin writes snapshots
 as individual files to the local filesystem (see :ref:`local-snapshot-store-java`). Applications can provide their own
 plugins by implementing a plugin API and activate them by configuration. Plugin development requires the following
@@ -472,19 +519,19 @@ Journal plugin API
 ------------------

 A journal plugin either extends ``SyncWriteJournal`` or ``AsyncWriteJournal``. ``SyncWriteJournal`` is an
-actor that should be extended when the storage backend API only supports synchronous, blocking writes. The
-methods to be implemented in this case are:
+actor that should be extended when the storage backend API only supports synchronous, blocking writes. In this
+case, the methods to be implemented are:

 .. includecode:: ../../../akka-persistence/src/main/java/akka/persistence/journal/japi/SyncWritePlugin.java#sync-write-plugin-api

 ``AsyncWriteJournal`` is an actor that should be extended if the storage backend API supports asynchronous,
-non-blocking writes. The methods to be implemented in that case are:
+non-blocking writes. In this case, the methods to be implemented are:

 .. includecode:: ../../../akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncWritePlugin.java#async-write-plugin-api

-Message replays are always asynchronous, therefore, any journal plugin must implement:
+Message replays and sequence number recovery are always asynchronous, therefore, any journal plugin must implement:

-.. includecode:: ../../../akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncReplayPlugin.java#async-replay-plugin-api
+.. includecode:: ../../../akka-persistence/src/main/java/akka/persistence/journal/japi/AsyncRecoveryPlugin.java#async-replay-plugin-api

 A journal plugin can be activated with the following minimal configuration:
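The referenced configuration block lies outside this hunk; such an activation section typically looks like the following sketch, where the plugin key and class name are placeholders::

    # path to the journal plugin to be used
    akka.persistence.journal.plugin = "my-journal"

    # my custom journal plugin
    my-journal {
      # fully qualified name of the journal plugin actor class
      class = "docs.persistence.MyJournal"
      # dispatcher the plugin actor runs on
      plugin-dispatcher = "akka.actor.default-dispatcher"
    }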
@@ -530,15 +577,15 @@ Shared LevelDB journal
 ----------------------

 A LevelDB instance can also be shared by multiple actor systems (on the same or on different nodes). This, for
-example, allows processors to failover to a backup node, assuming that the node, where the shared instance is
-runnning, is accessible from the backup node.
+example, allows processors to failover to a backup node and continue using the shared journal instance from the
+backup node.

 .. warning::

    A shared LevelDB instance is a single point of failure and should therefore only be used for testing
-   purposes.
+   purposes. Highly-available, replicated journals are available as :ref:`community-projects-java`.

-A shared LevelDB instance can be created by instantiating the ``SharedLeveldbStore`` actor.
+A shared LevelDB instance is started by instantiating the ``SharedLeveldbStore`` actor.

 .. includecode:: code/docs/persistence/PersistencePluginDocTest.java#shared-store-creation
@@ -565,12 +612,21 @@ i.e. only the first injection is used.
 Local snapshot store
 --------------------

-The default snapshot store plugin is ``akka.persistence.snapshot-store.local`` which writes snapshot files to
+The default snapshot store plugin is ``akka.persistence.snapshot-store.local``. It writes snapshot files to
 the local filesystem. The default storage location is a directory named ``snapshots`` in the current working
 directory. This can be changed by configuration where the specified path can be relative or absolute:

 .. includecode:: ../scala/code/docs/persistence/PersistencePluginDocSpec.scala#snapshot-config

+.. _community-projects-java:
+
+Community plugins
+-----------------
+
+* `Replicated journal backed by Apache Cassandra <https://github.com/krasserm/akka-persistence-cassandra/>`_.
+* `Replicated journal backed by Apache HBase <https://github.com/ktoso/akka-persistence-hbase/>`_.
+* `Replicated journal backed by MongoDB <https://github.com/ddevore/akka-persistence-mongo/>`_.
+
 Custom serialization
 ====================
@@ -584,8 +640,7 @@ it must add
 .. includecode:: ../scala/code/docs/persistence/PersistenceSerializerDocSpec.scala#custom-serializer-config

-to the application configuration. If not specified, a default serializer is used, which is the ``JavaSerializer``
-in this example.
+to the application configuration. If not specified, a default serializer is used.

 Testing
 =======
@@ -599,5 +654,4 @@ or
 .. includecode:: ../scala/code/docs/persistence/PersistencePluginDocSpec.scala#shared-store-native-config

-in your Akka configuration. The latter setting applies if you're using a :ref:`shared-leveldb-journal-java`. The LevelDB
-Java port is for testing purposes only.
+in your Akka configuration. The LevelDB Java port is for testing purposes only.

View file

@@ -7,10 +7,20 @@ package docs.persistence
 import scala.concurrent.duration._
 import scala.language.postfixOps

-import akka.actor.ActorSystem
+import akka.actor.{ Actor, ActorSystem }
 import akka.persistence._

 trait PersistenceDocSpec {
+  val config =
+    """
+      //#auto-update-interval
+      akka.persistence.view.auto-update-interval = 5s
+      //#auto-update-interval
+      //#auto-update
+      akka.persistence.view.auto-update = off
+      //#auto-update
+    """
+
   val system: ActorSystem

   import system._
@@ -110,7 +120,7 @@ trait PersistenceDocSpec {
     def receive = {
       case p @ Persistent(payload, _) =>
-        channel ! Deliver(p.withPayload(s"processed ${payload}"), destination)
+        channel ! Deliver(p.withPayload(s"processed ${payload}"), destination.path)
     }
   }
@@ -124,8 +134,6 @@ trait PersistenceDocSpec {
   //#channel-example

 class MyProcessor2 extends Processor {
-  import akka.persistence.Resolve
-
   val destination = context.actorOf(Props[MyDestination])
   val channel =
     //#channel-id-override
@@ -141,15 +149,21 @@ trait PersistenceDocSpec {
   def receive = {
     case p @ Persistent(payload, _) =>
       //#channel-example-reply
-      channel ! Deliver(p.withPayload(s"processed ${payload}"), sender)
+      channel ! Deliver(p.withPayload(s"processed ${payload}"), sender.path)
       //#channel-example-reply
-      //#resolve-destination
-      channel ! Deliver(p, sender, Resolve.Destination)
-      //#resolve-destination
-      //#resolve-sender
-      channel forward Deliver(p, destination, Resolve.Sender)
-      //#resolve-sender
   }

+  //#channel-custom-listener
+  class MyListener extends Actor {
+    def receive = {
+      case RedeliverFailure(messages) => // ...
+    }
+  }
+
+  val myListener = context.actorOf(Props[MyListener])
+  val myChannel = context.actorOf(Channel.props(
+    ChannelSettings(redeliverFailureListener = Some(myListener))))
+  //#channel-custom-listener
 }

 class MyProcessor3 extends Processor {
@@ -254,9 +268,13 @@ trait PersistenceDocSpec {
       PersistentChannelSettings(redeliverInterval = 30 seconds, redeliverMax = 15)),
       name = "myPersistentChannel")

-    channel ! Deliver(Persistent("example"), destination)
+    channel ! Deliver(Persistent("example"), destination.path)
     //#persistent-channel-example
+    //#persistent-channel-watermarks
+    PersistentChannelSettings(
+      pendingConfirmationsMax = 10000,
+      pendingConfirmationsMin = 2000)
+    //#persistent-channel-watermarks
     //#persistent-channel-reply
     PersistentChannelSettings(replyPersistent = true)
     //#persistent-channel-reply
@@ -274,7 +292,7 @@ trait PersistenceDocSpec {
       // update state
       // ...
       // reliably deliver events
-      channel ! Deliver(Persistent(event), destination)
+      channel ! Deliver(Persistent(event), destination.path)
     }

     def receiveReplay: Receive = {
@@ -290,4 +308,22 @@ trait PersistenceDocSpec {
     }
     //#reliable-event-delivery
   }

+  new AnyRef {
+    import akka.actor.Props
+
+    //#view
+    class MyView extends View {
+      def processorId: String = "some-processor-id"
+
+      def receive: Actor.Receive = {
+        case Persistent(payload, sequenceNr) => // ...
+      }
+    }
+    //#view
+
+    //#view-update
+    val view = system.actorOf(Props[MyView])
+    view ! Update(await = true)
+    //#view-update
+  }
 }
View file
@ -23,9 +23,9 @@ import akka.persistence.snapshot._
object PersistencePluginDocSpec { object PersistencePluginDocSpec {
val config = val config =
""" """
//#max-batch-size //#max-message-batch-size
akka.persistence.journal.max-batch-size = 200 akka.persistence.journal.max-message-batch-size = 200
//#max-batch-size //#max-message-batch-size
//#journal-config //#journal-config
akka.persistence.journal.leveldb.dir = "target/journal" akka.persistence.journal.leveldb.dir = "target/journal"
//#journal-config //#journal-config
@ -119,10 +119,12 @@ trait SharedLeveldbPluginDocSpec {
} }
class MyJournal extends AsyncWriteJournal { class MyJournal extends AsyncWriteJournal {
def writeAsync(persistentBatch: Seq[PersistentRepr]): Future[Unit] = ??? def asyncWriteMessages(messages: Seq[PersistentRepr]): Future[Unit] = ???
def deleteAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean): Future[Unit] = ??? def asyncWriteConfirmations(confirmations: Seq[PersistentConfirmation]): Future[Unit] = ???
def confirmAsync(processorId: String, sequenceNr: Long, channelId: String): Future[Unit] = ??? def asyncDeleteMessages(messageIds: Seq[PersistentId], permanent: Boolean): Future[Unit] = ???
def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: (PersistentRepr) => Unit): Future[Long] = ??? def asyncDeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Future[Unit] = ???
def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: (PersistentRepr) => Unit): Future[Unit] = ???
def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] = ???
} }
class MySnapshotStore extends SnapshotStore { class MySnapshotStore extends SnapshotStore {
View file
@ -5,21 +5,13 @@ Persistence
########### ###########
Akka persistence enables stateful actors to persist their internal state so that it can be recovered when an actor Akka persistence enables stateful actors to persist their internal state so that it can be recovered when an actor
is started, restarted by a supervisor or migrated in a cluster. It also allows stateful actors to recover from JVM is started, restarted after a JVM crash or by a supervisor, or migrated in a cluster. The key concept behind Akka
crashes, for example. The key concept behind Akka persistence is that only changes to an actor's internal state are persistence is that only changes to an actor's internal state are persisted but never its current state directly
persisted but never its current state directly (except for optional snapshots). These changes are only ever appended (except for optional snapshots). These changes are only ever appended to storage, nothing is ever mutated, which
to storage, nothing is ever mutated, which allows for very high transaction rates and efficient replication. Stateful allows for very high transaction rates and efficient replication. Stateful actors are recovered by replaying stored
actors are recovered by replaying stored changes to these actors from which they can rebuild internal state. This can changes to these actors from which they can rebuild internal state. This can be either the full history of changes
be either the full history of changes or starting from a snapshot of internal actor state which can dramatically or starting from a snapshot which can dramatically reduce recovery times. Akka persistence also provides point-to-point
reduce recovery times. Akka persistence also provides point-to-point communication channels with at-least-once communication channels with at-least-once message delivery semantics.
message delivery guarantees.
Storage backends for state changes and snapshots are pluggable in Akka persistence. Currently, these are written to
the local filesystem. Distributed and replicated storage, with the possibility of scaling writes, will be available
soon.
Akka persistence is inspired by the `eventsourced`_ library. It follows the same concepts and architecture of
`eventsourced`_ but significantly differs on API and implementation level.
.. warning:: .. warning::
@ -28,6 +20,9 @@ Akka persistence is inspired by the `eventsourced`_ library. It follows the same
changes to a minimum the binary compatibility guarantee for maintenance releases does not apply to the changes to a minimum the binary compatibility guarantee for maintenance releases does not apply to the
contents of the ``akka.persistence`` package. contents of the ``akka.persistence`` package.
Akka persistence is inspired by and the official replacement of the `eventsourced`_ library. It follows the same
concepts and architecture as `eventsourced`_ but differs significantly at the API and implementation level.
.. _eventsourced: https://github.com/eligosource/eventsourced .. _eventsourced: https://github.com/eligosource/eventsourced
Dependencies Dependencies
@ -44,16 +39,22 @@ Architecture
before its ``receive`` method is called. When a processor is started or restarted, journaled messages are replayed before its ``receive`` method is called. When a processor is started or restarted, journaled messages are replayed
to that processor, so that it can recover internal state from these messages. to that processor, so that it can recover internal state from these messages.
* *Channel*: Channels are used by processors to communicate with other actors. They prevent that replayed messages * *View*: A view is a persistent, stateful actor that receives journaled messages that have been written by another
are redundantly delivered to these actors and provide at-least-once message delivery guarantees, also in case of processor. A view itself does not journal new messages, instead, it updates internal state only from a processor's
sender and receiver JVM crashes. replicated message stream.
* *Channel*: Channels are used by processors and views to communicate with other actors. They prevent redundant
delivery of replayed messages to these actors and provide at-least-once message delivery semantics, also in
case of sender and receiver JVM crashes.
* *Journal*: A journal stores the sequence of messages sent to a processor. An application can control which messages * *Journal*: A journal stores the sequence of messages sent to a processor. An application can control which messages
are stored and which are received by the processor without being journaled. The storage backend of a journal is are journaled and which are received by the processor without being journaled. The storage backend of a journal is
pluggable. pluggable. The default journal storage plugin writes to the local filesystem, replicated journals are available as
:ref:`community-projects`.
* *Snapshot store*: A snapshot store persists snapshots of a processor's internal state. Snapshots are used for * *Snapshot store*: A snapshot store persists snapshots of a processor's or a view's internal state. Snapshots are
optimizing recovery times. The storage backend of a snapshot store is pluggable. used for optimizing recovery times. The storage backend of a snapshot store is pluggable. The default snapshot
storage plugin writes to the local filesystem.
* *Event sourcing*. Based on the building blocks described above, Akka persistence provides abstractions for the * *Event sourcing*. Based on the building blocks described above, Akka persistence provides abstractions for the
development of event sourced applications (see section :ref:`event-sourcing`) development of event sourced applications (see section :ref:`event-sourcing`)
@ -70,10 +71,9 @@ A processor can be implemented by extending the ``Processor`` trait and implemen
Processors only write messages of type ``Persistent`` to the journal, others are received without being persisted. Processors only write messages of type ``Persistent`` to the journal, others are received without being persisted.
When a processor's ``receive`` method is called with a ``Persistent`` message it can safely assume that this message When a processor's ``receive`` method is called with a ``Persistent`` message it can safely assume that this message
has been successfully written to the journal. If a journal fails to write a ``Persistent`` message then the processor has been successfully written to the journal. If a journal fails to write a ``Persistent`` message then the processor
is stopped, by default. If an application wants that a processors continues to run on persistence failures it must is stopped, by default. If a processor should continue running on persistence failures it must handle
handle ``PersistenceFailure`` messages. In this case, a processor may want to inform the sender about the failure, ``PersistenceFailure`` messages. In this case, a processor may want to inform the sender about the failure,
so that the sender can re-send the message, if needed, under the assumption that the journal recovered from a so that the sender can re-send the message, if needed.
temporary failure.
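A minimal sketch of such a failure-aware processor, assuming the ``PersistenceFailure(payload, sequenceNr, cause)``
extractor; the reply message is illustrative only::

    class FailureAwareProcessor extends Processor {
      def receive = {
        case p @ Persistent(payload, _) =>
          // message has been successfully written to the journal
        case PersistenceFailure(payload, sequenceNr, cause) =>
          // journal failed to write the message: inform the sender
          // so that it can re-send the message if needed
          sender() ! s"not persisted: ${payload}"
      }
    }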
A ``Processor`` itself is an ``Actor`` and can therefore be instantiated with ``actorOf``. A ``Processor`` itself is an ``Actor`` and can therefore be instantiated with ``actorOf``.
@ -84,7 +84,7 @@ Recovery
By default, a processor is automatically recovered on start and on restart by replaying journaled messages. By default, a processor is automatically recovered on start and on restart by replaying journaled messages.
New messages sent to a processor during recovery do not interfere with replayed messages. New messages will New messages sent to a processor during recovery do not interfere with replayed messages. New messages will
only be received by that processor after recovery completes. only be received by a processor after recovery completes.
Recovery customization Recovery customization
^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^
@ -132,7 +132,7 @@ that message as argument. An optional ``permanent`` parameter specifies whether
deleted from the journal or only marked as deleted. In both cases, the message won't be replayed. Later extensions deleted from the journal or only marked as deleted. In both cases, the message won't be replayed. Later extensions
to Akka persistence will allow to replay messages that have been marked as deleted which can be useful for debugging to Akka persistence will allow to replay messages that have been marked as deleted which can be useful for debugging
purposes, for example. To delete all messages (journaled by a single processor) up to a specified sequence number, purposes, for example. To delete all messages (journaled by a single processor) up to a specified sequence number,
processors can call the ``deleteMessages`` method. processors should call the ``deleteMessages`` method.
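For example, a processor might trim its journal on an application-defined trigger; the following sketch assumes a
hypothetical ``"trim"`` trigger message and uses ``lastSequenceNr``, the highest sequence number written by this
processor::

    class TrimmingProcessor extends Processor {
      def receive = {
        case p @ Persistent(payload, _) =>
          // handle journaled message ...
        case "trim" => // hypothetical application-defined trigger
          // mark all messages up to the last written one as deleted
          deleteMessages(lastSequenceNr)
      }
    }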
Identifiers Identifiers
----------- -----------
@ -145,41 +145,103 @@ method.
Applications can customize a processor's id by specifying an actor name during processor creation as shown in Applications can customize a processor's id by specifying an actor name during processor creation as shown in
section :ref:`processors`. This changes that processor's name in its actor hierarchy and hence influences only section :ref:`processors`. This changes that processor's name in its actor hierarchy and hence influences only
part of the processor id. To fully customize a processor's id, the ``processorId`` method should be overridden. part of the processor id. To fully customize a processor's id, the ``processorId`` method must be overridden.
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#processor-id-override .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#processor-id-override
Overriding ``processorId`` is the recommended way to generate stable identifiers.
.. _views:
Views
=====
Views can be implemented by extending the ``View`` trait and implementing the ``receive`` and the ``processorId``
methods.
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#view
The ``processorId`` identifies the processor from which the view receives journaled messages. It is not necessary
that the referenced processor is actually running. Views read messages from a processor's journal directly. When a
processor is started later and begins to write new messages, the corresponding view is updated automatically, by
default.
Updates
-------
The default update interval of all views of an actor system is configurable:
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#auto-update-interval
``View`` implementation classes may also override the ``autoUpdateInterval`` method to return a custom update
interval for a specific view class or view instance. Applications may also trigger additional updates at
any time by sending a view an ``Update`` message.
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#view-update
If the ``await`` parameter is set to ``true``, messages that follow the ``Update`` request are processed when the
incremental message replay, triggered by that update request, has completed. If set to ``false`` (default), messages
following the update request may interleave with the replayed message stream. Automated updates always run with
``await = false``.
Automated updates of all views of an actor system can be turned off by configuration:
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#auto-update
Implementation classes may override the configured default value by overriding the ``autoUpdate`` method. To
limit the number of replayed messages per update request, applications can configure a custom
``akka.persistence.view.auto-update-replay-max`` value or override the ``autoUpdateReplayMax`` method. The number
of replayed messages for manual updates can be limited with the ``replayMax`` parameter of the ``Update`` message.
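Putting these options together, a view that customizes its update behavior might look like the following sketch
(interval and limits are arbitrary example values)::

    import scala.concurrent.duration._

    class MyTunedView extends View {
      def processorId: String = "some-processor-id"
      // poll the journal every second instead of the configured default
      override def autoUpdateInterval: FiniteDuration = 1.second
      // replay at most 100 messages per incremental update
      override def autoUpdateReplayMax: Long = 100L
      def receive: Actor.Receive = {
        case Persistent(payload, sequenceNr) => // update internal state ...
      }
    }

    // trigger an additional, bounded update manually
    view ! Update(await = true, replayMax = 50L)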
Recovery
--------
Initial recovery of views works in the very same way as for :ref:`processors` (i.e. by sending a ``Recover`` message
to self). The maximum number of replayed messages during initial recovery is determined by ``autoUpdateReplayMax``.
Further possibilities to customize initial recovery are explained in section :ref:`processors`.
Identifiers
-----------
A view must have an identifier that doesn't change across different actor incarnations. It defaults to the
``String`` representation of the actor path without the address part and can be obtained via the ``viewId``
method.
Applications can customize a view's id by specifying an actor name during view creation. This changes that view's
name in its actor hierarchy and hence influences only part of the view id. To fully customize a view's id, the
``viewId`` method must be overridden. Overriding ``viewId`` is the recommended way to generate stable identifiers.
The ``viewId`` must differ from the referenced ``processorId``, unless :ref:`snapshots` of a view and its
processor shall be shared (which is usually not what applications want).
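A sketch of a view with fully customized, stable identifiers::

    class AccountView extends View {
      override def viewId: String = "account-view"  // stable across incarnations
      def processorId: String = "account-processor" // must differ from viewId
      def receive: Actor.Receive = {
        case Persistent(payload, _) => // ...
      }
    }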
.. _channels: .. _channels:
Channels Channels
======== ========
.. warning:: Channels are special actors that are used by processors or views to communicate with other actors (channel
destinations). The following discusses channels in context of processors but this is also applicable to views.
There are further changes planned to the channel API that couldn't make it into the current milestone.
One example is to have only a single destination per channel to allow gap detection and more advanced
flow control.
Channels are special actors that are used by processors to communicate with other actors (channel destinations).
Channels prevent redundant delivery of replayed messages to destinations during processor recovery. A replayed Channels prevent redundant delivery of replayed messages to destinations during processor recovery. A replayed
message is retained by a channel if its previous delivery has been confirmed by a destination. message is retained by a channel if its delivery has been confirmed by a destination.
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-example .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-example
A channel is ready to use once it has been created, no recovery or further activation is needed. A ``Deliver`` A channel is ready to use once it has been created, no recovery or further activation is needed. A ``Deliver``
request instructs a channel to send a ``Persistent`` message to a destination. Sender references are preserved request instructs a channel to send a ``Persistent`` message to a destination. A destination is provided as
by a channel, therefore, a destination can reply to the sender of a ``Deliver`` request. ``ActorPath`` and messages are sent by the channel via that path's ``ActorSelection``. Sender references are
preserved by a channel, therefore, a destination can reply to the sender of a ``Deliver`` request.
If a processor wants to reply to a ``Persistent`` message sender it should use the ``sender`` reference as channel If a processor wants to reply to a ``Persistent`` message sender it should use the ``sender`` path as channel
destination. destination.
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-example-reply .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-example-reply
Persistent messages delivered by a channel are of type ``ConfirmablePersistent``. ``ConfirmablePersistent`` extends Persistent messages delivered by a channel are of type ``ConfirmablePersistent``. ``ConfirmablePersistent`` extends
``Persistent`` by adding the methods ``confirm`` method and ``redeliveries`` (see also :ref:`redelivery`). Channel ``Persistent`` by adding the methods ``confirm`` and ``redeliveries`` (see also :ref:`redelivery`). A channel
destinations confirm the delivery of a ``ConfirmablePersistent`` message by calling ``confirm()`` an that message. destination confirms the delivery of a ``ConfirmablePersistent`` message by calling ``confirm()`` on that message.
This asynchronously writes a confirmation entry to the journal. Replayed messages internally contain these confirmation This asynchronously writes a confirmation entry to the journal. Replayed messages internally contain confirmation
entries which allows a channel to decide if a message should be retained or not. entries which allow a channel to decide whether it should retain these messages or not.
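A channel destination that processes and then confirms every delivered message could be sketched as follows,
assuming the ``ConfirmablePersistent(payload, sequenceNr, redeliveries)`` extractor::

    class ConfirmingDestination extends Actor {
      def receive = {
        case p @ ConfirmablePersistent(payload, sequenceNr, redeliveries) =>
          // process payload ...
          // then asynchronously write a confirmation entry to the journal
          p.confirm()
      }
    }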
A ``Processor`` can also be used as a channel destination, i.e. it can persist ``ConfirmablePersistent`` messages too. A ``Processor`` can also be used as a channel destination, i.e. it can persist ``ConfirmablePersistent`` messages too.
@ -188,25 +250,23 @@ A ``Processor`` can also be used as channel destination i.e. it can persist ``Co
Message re-delivery Message re-delivery
------------------- -------------------
Channels re-deliver messages to destinations if they do not confirm their receipt within a configurable timeout. Channels re-deliver messages to destinations if they do not confirm delivery within a configurable timeout.
This timeout can be specified as ``redeliverInterval`` when creating a channel, optionally together with the This timeout can be specified as ``redeliverInterval`` when creating a channel, optionally together with the
maximum number of re-deliveries a channel should attempt for each unconfirmed message. maximum number of re-deliveries a channel should attempt for each unconfirmed message. The number of re-delivery
attempts can be obtained via the ``redeliveries`` method on ``ConfirmablePersistent`` or by pattern matching.
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-custom-settings .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-custom-settings
Message re-delivery is done out of order with regards to normal delivery i.e. redelivered messages may arrive A channel keeps messages in memory until their successful delivery has been confirmed or the maximum number of
later than newer normally delivered messages. The number of re-delivery attempts can be obtained via the re-deliveries is reached. To be notified about messages that have reached the maximum number of re-deliveries,
``redeliveries`` method on ``ConfirmablePersistent`` or by pattern matching. applications can register a listener at channel creation.
A channel keeps messages in memory until their successful delivery has been confirmed by their destination(s) .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-custom-listener
or their maximum number of re-deliveries is reached. In the latter case, the application has to re-send the
corresponding ``Deliver`` request to the channel so that the channel can start a new series of delivery attempts
(starting again with a ``redeliveries`` count of ``0``).
Re-sending ``Deliver`` requests is done automatically if the sending processor replays messages: only ``Deliver`` A listener receives ``RedeliverFailure`` notifications containing all messages that could not be delivered. On
requests of unconfirmed messages will be served again by the channel. A message replay can be enforced by an receiving a ``RedeliverFailure`` message, an application may decide to restart the sending processor to enforce
application by restarting the sending processor, for example. A replay will also take place if the whole a re-send of these messages to the channel or confirm these messages to prevent further re-sends. The sending
application is restarted, either after normal termination or after a crash. processor can also be restarted any time later to re-send unconfirmed messages.
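For example, a listener that gives up on messages that reached the maximum number of re-deliveries by confirming
them, so that the channel stops re-sending them, might be sketched as::

    class GiveUpListener extends Actor {
      def receive = {
        case RedeliverFailure(messages) =>
          // confirming the messages prevents further re-sends by the channel
          messages.foreach(_.confirm())
      }
    }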
This combination of This combination of
@ -215,7 +275,7 @@ This combination of
* message re-deliveries by channels and * message re-deliveries by channels and
* application-level confirmations (acknowledgements) by destinations * application-level confirmations (acknowledgements) by destinations
enables channels to provide at-least-once message delivery guarantees. Possible duplicates can be detected by enables channels to provide at-least-once message delivery semantics. Possible duplicates can be detected by
destinations by tracking message sequence numbers. Message sequence numbers are generated per sending processor. destinations by tracking message sequence numbers. Message sequence numbers are generated per sending processor.
Depending on how a processor routes outbound messages to destinations, they may either see a contiguous message Depending on how a processor routes outbound messages to destinations, they may either see a contiguous message
sequence or a sequence with gaps. sequence or a sequence with gaps.
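A destination could detect duplicates by tracking the highest sequence number seen so far; the following sketch
assumes messages from a single sending processor, so sequence numbers are totally ordered::

    class DedupingDestination extends Actor {
      var lastSeqNr = 0L // highest sequence number processed so far

      def receive = {
        case p @ ConfirmablePersistent(payload, sequenceNr, _) =>
          if (sequenceNr > lastSeqNr) {
            lastSeqNr = sequenceNr
            // process payload ...
          } // else: duplicate delivery, skip processing
          p.confirm()
      }
    }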
@ -224,14 +284,13 @@ sequence or a sequence with gaps.
If a processor emits more than one outbound message per inbound ``Persistent`` message it **must** use a If a processor emits more than one outbound message per inbound ``Persistent`` message it **must** use a
separate channel for each outbound message to ensure that confirmations are uniquely identifiable, otherwise, separate channel for each outbound message to ensure that confirmations are uniquely identifiable, otherwise,
at-least-once message delivery is not guaranteed. This rule has been introduced to avoid writing additional at-least-once message delivery semantics do not apply. This rule has been introduced to avoid writing additional
outbound message identifiers to the journal which would decrease the overall throughput. It is furthermore outbound message identifiers to the journal which would decrease the overall throughput. It is furthermore
recommended to collapse multiple outbound messages to the same destination into a single outbound message, recommended to collapse multiple outbound messages to the same destination into a single outbound message,
otherwise, if sent via multiple channels, their ordering is not defined. These restrictions are likely to be otherwise, if sent via multiple channels, their ordering is not defined.
removed in the final release.
Whenever an application wants to have more control how sequence numbers are assigned to messages it should use If an application wants to have more control over how sequence numbers are assigned to messages it should use an
an application-specific sequence number generator and include the generated sequence numbers into the ``payload`` application-specific sequence number generator and include the generated sequence numbers into the ``payload``
of ``Persistent`` messages. of ``Persistent`` messages.
Persistent channels Persistent channels
@ -241,60 +300,45 @@ Channels created with ``Channel.props`` do not persist messages. These channels
with a sending processor that takes care of persistence, hence, channel-specific persistence is not necessary in with a sending processor that takes care of persistence, hence, channel-specific persistence is not necessary in
this case. They are referred to as transient channels in the following. this case. They are referred to as transient channels in the following.
Applications may also use transient channels standalone (i.e. without a sending processor) if re-delivery attempts Persistent channels are like transient channels but additionally persist messages before delivering them. Messages
to destinations are required but message loss in case of a sender JVM crash is not an issue. If applications want to that have been persisted by a persistent channel are deleted when destinations confirm their delivery. A persistent
use standalone channels but message loss is not acceptable, they should use persistent channels. A persistent channel channel can be created with ``PersistentChannel.props`` and configured with a ``PersistentChannelSettings`` object.
can be created with ``PersistentChannel.props`` and configured with a ``PersistentChannelSettings`` object.
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#persistent-channel-example .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#persistent-channel-example
A persistent channel is like a transient channel that additionally persists ``Deliver`` requests before serving it. A persistent channel is useful for delivery of messages to slow destinations or destinations that are unavailable
Hence, it can recover from sender JVM crashes and provide the same message re-delivery semantics as a transient for a long time. It can constrain the number of pending confirmations based on the ``pendingConfirmationsMax``
channel in combination with an application-defined processor. and ``pendingConfirmationsMin`` parameters of ``PersistentChannelSettings``.
By default, a persistent channel doesn't reply whether a ``Persistent`` message, sent with ``Deliver``, has been .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#persistent-channel-watermarks
successfully persisted or not. This can be enabled by creating the channel with the ``replyPersistent`` configuration
parameter set to ``true``: It suspends delivery when the number of pending confirmations reaches ``pendingConfirmationsMax`` and resumes
delivery again when this number falls below ``pendingConfirmationsMin``. This prevents both flooding destinations
with more messages than they can process and unlimited memory consumption by the channel. A persistent channel
continues to persist new messages even when message delivery is temporarily suspended.
Standalone usage
----------------
Applications may also use channels standalone. Transient channels can be used standalone if re-delivery attempts
to destinations are required but message loss in case of a sender JVM crash is not an issue. If message loss in
case of a sender JVM crash is an issue, persistent channels should be used. In this case, applications may want to
receive replies from the channel whether messages have been successfully persisted or not. This can be enabled by
creating the channel with the ``replyPersistent`` configuration parameter set to ``true``:
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#persistent-channel-reply .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#persistent-channel-reply
With this setting, either the successfully persisted message is replied to the sender or a ``PersistenceFailure``. With this setting, either the successfully persisted message is replied to the sender or a ``PersistenceFailure``
In case of a persistence failure, the sender should re-send the message. message. In the latter case, the sender should re-send the message.
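The sender of ``Deliver`` requests could then handle the channel's replies as follows (sketch; ``DeliverySender``
and its constructor parameters are illustrative only, and the ``PersistenceFailure(payload, sequenceNr, cause)``
extractor is assumed)::

    class DeliverySender(channel: ActorRef, destination: ActorRef) extends Actor {
      def receive = {
        case payload: String =>
          channel ! Deliver(Persistent(payload), destination.path)
        case p @ Persistent(payload, _) =>
          // message has been successfully persisted by the channel
        case PersistenceFailure(payload, _, _) =>
          // persistence failed: re-send the message
          channel ! Deliver(Persistent(payload), destination.path)
      }
    }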
Using a persistent channel in combination with an application-defined processor can make sense if destinations are
unavailable for a long time and an application doesn't want to buffer all messages in memory (but write them to the
journal only). In this case, delivery can be disabled by sending the channel a ``DisableDelivery`` message (to
stop delivery and persist-only) and re-enabled again by sending it an ``EnableDelivery`` message. A disabled channel
that receives an ``EnableDelivery`` message, processes all persisted, unconfirmed ``Deliver`` requests again before
serving new ones.
Sender resolution
-----------------
``ActorRef`` s of ``Persistent`` message senders are also stored in the journal. Consequently, they may become invalid if
an application is restarted and messages are replayed. For example, the stored ``ActorRef`` may then reference
a previous incarnation of a sender and a new incarnation of that sender cannot receive a reply from a processor.
This may be acceptable for many applications but others may require that a new sender incarnation receives the
reply (to reliably resume a conversation between actors after a JVM crash, for example). Here, a channel may
assist in resolving new sender incarnations by specifying a third ``Deliver`` argument:
* ``Resolve.Destination`` if the sender of a persistent message is used as channel destination
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#resolve-destination
* ``Resolve.Sender`` if the sender of a persistent message is forwarded to a destination.
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#resolve-sender
Default is ``Resolve.Off`` which means no resolution. Find out more in the ``Deliver`` API docs.
Identifiers Identifiers
----------- -----------
In the same way as :ref:`processors`, channels also have an identifier that defaults to a channel's path. A channel In the same way as :ref:`processors` and :ref:`views`, channels also have an identifier that defaults to a channel's
identifier can therefore be customized by using a custom actor name at channel creation. This changes that channel's path. A channel identifier can therefore be customized by using a custom actor name at channel creation. This changes
name in its actor hierarchy and hence influences only part of the channel identifier. To fully customize a channel that channel's name in its actor hierarchy and hence influences only part of the channel identifier. To fully customize
identifier, it should be provided as argument to ``Channel.props(String)`` or ``PersistentChannel.props(String)``. a channel identifier, it should be provided as argument to ``Channel.props(String)`` or ``PersistentChannel.props(String)``
(recommended to generate stable identifiers).
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-id-override .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#channel-id-override
@ -313,7 +357,7 @@ method or by pattern matching
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#payload-pattern-matching .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#payload-pattern-matching
Inside processors, new persistent messages are derived from the current persistent message before sending them via a Inside processors, new persistent messages are derived from the current persistent message before sending them via a
channel, either by calling ``p.withPayload(...)`` or ``Persistent.create(...)`` where the latter uses the channel, either by calling ``p.withPayload(...)`` or ``Persistent(...)`` where the latter uses the
implicit ``currentPersistentMessage`` made available by ``Processor``. implicit ``currentPersistentMessage`` made available by ``Processor``.
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#current-message .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#current-message
@ -333,16 +377,18 @@ method or by pattern matching
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#sequence-nr-pattern-matching .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#sequence-nr-pattern-matching
Persistent messages are assigned sequence numbers on a per-processor basis (or per-channel basis if used Persistent messages are assigned sequence numbers on a per-processor basis (or per-channel basis if used
standalone). A sequence starts at ``1L`` and doesn't contain gaps unless a processor deletes a message. standalone). A sequence starts at ``1L`` and doesn't contain gaps unless a processor deletes messages.
.. _snapshots: .. _snapshots:
Snapshots Snapshots
========= =========
Snapshots can dramatically reduce recovery times. Processors can save snapshots of internal state by calling the Snapshots can dramatically reduce recovery times of processors and views. The following discusses snapshots
``saveSnapshot`` method on ``Processor``. If saving of a snapshot succeeds, the processor will receive a in context of processors but this is also applicable to views.
``SaveSnapshotSuccess`` message, otherwise a ``SaveSnapshotFailure`` message
Processors can save snapshots of internal state by calling the ``saveSnapshot`` method. If saving of a snapshot
succeeds, the processor receives a ``SaveSnapshotSuccess`` message, otherwise a ``SaveSnapshotFailure`` message.
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#save-snapshot .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#save-snapshot
@ -370,9 +416,9 @@ saved snapshot matches the specified ``SnapshotSelectionCriteria`` will replay a
Snapshot deletion Snapshot deletion
----------------- -----------------
A processor can delete a single snapshot by calling the ``deleteSnapshot`` method with the sequence number and the A processor can delete individual snapshots by calling the ``deleteSnapshot`` method with the sequence number and the
timestamp of the snapshot as argument. To bulk-delete snapshots that match a specified ``SnapshotSelectionCriteria`` timestamp of a snapshot as argument. To bulk-delete snapshots matching ``SnapshotSelectionCriteria``, processors should
argument, processors can call the ``deleteSnapshots`` method. use the ``deleteSnapshots`` method.
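For example, a processor could clean up snapshots in reaction to save notifications; this is only an illustrative
sketch, assuming the metadata carried by ``SaveSnapshotSuccess`` and ``SaveSnapshotFailure``::

    class SnapshotCleanupProcessor extends Processor {
      def receive = {
        case SaveSnapshotSuccess(metadata) =>
          // bulk-delete all snapshots taken before the one just saved
          deleteSnapshots(SnapshotSelectionCriteria(maxSequenceNr = metadata.sequenceNr - 1))
        case SaveSnapshotFailure(metadata, cause) =>
          // delete the single (possibly partially written) snapshot
          deleteSnapshot(metadata.sequenceNr, metadata.timestamp)
        case p @ Persistent(payload, _) => // ...
      }
    }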
.. _event-sourcing: .. _event-sourcing:
@ -399,8 +445,7 @@ also process commands that do not change application state, such as query comman
Akka persistence supports event sourcing with the ``EventsourcedProcessor`` trait (which implements event sourcing Akka persistence supports event sourcing with the ``EventsourcedProcessor`` trait (which implements event sourcing
as a pattern on top of command sourcing). A processor that extends this trait does not handle ``Persistent`` messages as a pattern on top of command sourcing). A processor that extends this trait does not handle ``Persistent`` messages
directly but uses the ``persist`` method to persist and handle events. The behavior of an ``EventsourcedProcessor`` directly but uses the ``persist`` method to persist and handle events. The behavior of an ``EventsourcedProcessor``
is defined by implementing ``receiveReplay`` and ``receiveCommand``. This is best explained with an example (which is defined by implementing ``receiveReplay`` and ``receiveCommand``. This is demonstrated in the following example.
is also part of ``akka-sample-persistence``).
.. includecode:: ../../../akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/EventsourcedExample.scala#eventsourced-example .. includecode:: ../../../akka-samples/akka-sample-persistence/src/main/scala/sample/persistence/EventsourcedExample.scala#eventsourced-example
@ -413,28 +458,25 @@ a command is handled by generating two events which are then persisted and handl
``persist`` with an event (or a sequence of events) as first argument and an event handler as second argument. ``persist`` with an event (or a sequence of events) as first argument and an event handler as second argument.
The ``persist`` method persists events asynchronously and the event handler is executed for successfully persisted The ``persist`` method persists events asynchronously and the event handler is executed for successfully persisted
events. Successfully persisted events are internally sent back to the processor as separate messages which trigger events. Successfully persisted events are internally sent back to the processor as individual messages that trigger
the event handler execution. An event handler may therefore close over processor state and mutate it. The sender event handler executions. An event handler may close over processor state and mutate it. The sender of a persisted
of a persisted event is the sender of the corresponding command. This allows event handlers to reply to the sender event is the sender of the corresponding command. This allows event handlers to reply to the sender of a command
of a command (not shown). (not shown).
The main responsibility of an event handler is changing processor state using event data and notifying others The main responsibility of an event handler is changing processor state using event data and notifying others
about successful state changes by publishing events. about successful state changes by publishing events.
When persisting events with ``persist`` it is guaranteed that the processor will not receive new commands between When persisting events with ``persist`` it is guaranteed that the processor will not receive further commands between
the ``persist`` call and the execution(s) of the associated event handler. This also holds for multiple ``persist`` the ``persist`` call and the execution(s) of the associated event handler. This also holds for multiple ``persist``
calls in context of a single command. calls in context of a single command. The example also shows how to switch between different command handlers
with ``context.become()`` and ``context.unbecome()``.
The example also demonstrates how to change the processor's default behavior, defined by ``receiveCommand``, to
another behavior, defined by ``otherCommandHandler``, and back using ``context.become()`` and ``context.unbecome()``.
See also the API docs of ``persist`` for further details.
Reliable event delivery Reliable event delivery
----------------------- -----------------------
Sending events from an event handler to another actor directly doesn't guarantee delivery of these events. To Sending events from an event handler to another actor has at-most-once delivery semantics. For at-least-once delivery,
guarantee at-least-once delivery, :ref:`channels` must be used. In this case, also replayed events (received by :ref:`channels` must be used. In this case, also replayed events (received by ``receiveReplay``) must be sent to a
``receiveReplay``) must be sent to a channel, as shown in the following example: channel, as shown in the following example:
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#reliable-event-delivery .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#reliable-event-delivery
@ -449,29 +491,33 @@ To optimize throughput, a ``Processor`` internally batches received ``Persistent
writing them to the journal (as a single batch). The batch size dynamically grows from 1 under low and moderate loads writing them to the journal (as a single batch). The batch size dynamically grows from 1 under low and moderate loads
to a configurable maximum size (default is ``200``) under high load. to a configurable maximum size (default is ``200``) under high load.
.. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#max-batch-size .. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#max-message-batch-size
A new batch write is triggered by a processor as soon as a batch reaches the maximum size or if the journal completed A new batch write is triggered by a processor as soon as a batch reaches the maximum size or if the journal completed
writing the previous batch. Batch writes are never timer-based which keeps latencies as low as possible. writing the previous batch. Batch writes are never timer-based which keeps latencies at a minimum.
Applications that want to have more explicit control over batch writes and batch sizes can send processors Applications that want to have more explicit control over batch writes and batch sizes can send processors
``PersistentBatch`` messages. ``PersistentBatch`` messages.
.. includecode:: code/docs/persistence/PersistenceDocSpec.scala#batch-write .. includecode:: code/docs/persistence/PersistenceDocSpec.scala#batch-write
``Persistent`` messages contained in a ``PersistentBatch`` message are always written atomically, even if the batch ``Persistent`` messages contained in a ``PersistentBatch`` are always written atomically, even if the batch
size is greater than ``max-batch-size``. Also, a ``PersistentBatch`` is written isolated from other batches. size is greater than ``max-message-batch-size``. Also, a ``PersistentBatch`` is written isolated from other batches.
``Persistent`` messages contained in a ``PersistentBatch`` are received individually by a processor. ``Persistent`` messages contained in a ``PersistentBatch`` are received individually by a processor.
``PersistentBatch`` messages, for example, are used internally by an ``EventsourcedProcessor`` to ensure atomic ``PersistentBatch`` messages, for example, are used internally by an ``EventsourcedProcessor`` to ensure atomic
writes of events. All events that are persisted in context of a single command are written as single batch to the writes of events. All events that are persisted in context of a single command are written as a single batch to the
journal (even if ``persist`` is called multiple times per command). The recovery of an ``EventsourcedProcessor`` journal (even if ``persist`` is called multiple times per command). The recovery of an ``EventsourcedProcessor``
will therefore never be done partially i.e. with only a subset of events persisted by a single command. will therefore never be done partially (with only a subset of events persisted by a single command).
Confirmation and deletion operations performed by :ref:`channels` are also batched. The maximum confirmation
and deletion batch sizes are configurable with ``akka.persistence.journal.max-confirmation-batch-size`` and
``akka.persistence.journal.max-deletion-batch-size``, respectively.
Storage plugins Storage plugins
=============== ===============
Storage backends for journals and snapshot stores are plugins in akka-persistence. The default journal plugin Storage backends for journals and snapshot stores are pluggable in Akka persistence. The default journal plugin
writes messages to LevelDB (see :ref:`local-leveldb-journal`). The default snapshot store plugin writes snapshots writes messages to LevelDB (see :ref:`local-leveldb-journal`). The default snapshot store plugin writes snapshots
as individual files to the local filesystem (see :ref:`local-snapshot-store`). Applications can provide their own as individual files to the local filesystem (see :ref:`local-snapshot-store`). Applications can provide their own
plugins by implementing a plugin API and activate them by configuration. Plugin development requires the following plugins by implementing a plugin API and activate them by configuration. Plugin development requires the following
@ -483,19 +529,19 @@ Journal plugin API
------------------ ------------------
A journal plugin either extends ``SyncWriteJournal`` or ``AsyncWriteJournal``. ``SyncWriteJournal`` is an A journal plugin either extends ``SyncWriteJournal`` or ``AsyncWriteJournal``. ``SyncWriteJournal`` is an
actor that should be extended when the storage backend API only supports synchronous, blocking writes. The actor that should be extended when the storage backend API only supports synchronous, blocking writes. In this
methods to be implemented in this case are: case, the methods to be implemented are:
.. includecode:: ../../../akka-persistence/src/main/scala/akka/persistence/journal/SyncWriteJournal.scala#journal-plugin-api .. includecode:: ../../../akka-persistence/src/main/scala/akka/persistence/journal/SyncWriteJournal.scala#journal-plugin-api
``AsyncWriteJournal`` is an actor that should be extended if the storage backend API supports asynchronous, ``AsyncWriteJournal`` is an actor that should be extended if the storage backend API supports asynchronous,
non-blocking writes. The methods to be implemented in that case are: non-blocking writes. In this case, the methods to be implemented are:
.. includecode:: ../../../akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala#journal-plugin-api .. includecode:: ../../../akka-persistence/src/main/scala/akka/persistence/journal/AsyncWriteJournal.scala#journal-plugin-api
Message replays are always asynchronous, therefore, any journal plugin must implement: Message replays and sequence number recovery are always asynchronous, therefore, any journal plugin must implement:
.. includecode:: ../../../akka-persistence/src/main/scala/akka/persistence/journal/AsyncReplay.scala#journal-plugin-api .. includecode:: ../../../akka-persistence/src/main/scala/akka/persistence/journal/AsyncRecovery.scala#journal-plugin-api
A journal plugin can be activated with the following minimal configuration: A journal plugin can be activated with the following minimal configuration:
@ -542,15 +588,15 @@ Shared LevelDB journal
---------------------- ----------------------
A LevelDB instance can also be shared by multiple actor systems (on the same or on different nodes). This, for A LevelDB instance can also be shared by multiple actor systems (on the same or on different nodes). This, for
example, allows processors to fail over to a backup node, assuming that the node, where the shared instance is example, allows processors to fail over to a backup node and continue using the shared journal instance from the
running, is accessible from the backup node. backup node.
.. warning:: .. warning::
A shared LevelDB instance is a single point of failure and should therefore only be used for testing A shared LevelDB instance is a single point of failure and should therefore only be used for testing
purposes. purposes. Highly-available, replicated journals are available as :ref:`community-projects`.
A shared LevelDB instance can be created by instantiating the ``SharedLeveldbStore`` actor. A shared LevelDB instance is started by instantiating the ``SharedLeveldbStore`` actor.
.. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#shared-store-creation .. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#shared-store-creation
@ -577,18 +623,20 @@ i.e. only the first injection is used.
Local snapshot store Local snapshot store
-------------------- --------------------
The default snapshot store plugin is ``akka.persistence.snapshot-store.local`` which writes snapshot files to The default snapshot store plugin is ``akka.persistence.snapshot-store.local``. It writes snapshot files to
the local filesystem. The default storage location is a directory named ``snapshots`` in the current working the local filesystem. The default storage location is a directory named ``snapshots`` in the current working
directory. This can be changed by configuration where the specified path can be relative or absolute: directory. This can be changed by configuration where the specified path can be relative or absolute:
.. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#snapshot-config .. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#snapshot-config
Planned plugins .. _community-projects:
---------------
* Shared snapshot store (SPOF, for testing purposes) Community plugins
* HA snapshot store backed by a distributed file system -----------------
* HA journal backed by a distributed (NoSQL) data store
* `Replicated journal backed by Apache Cassandra <https://github.com/krasserm/akka-persistence-cassandra/>`_.
* `Replicated journal backed by Apache HBase <https://github.com/ktoso/akka-persistence-hbase/>`_.
* `Replicated journal backed by MongoDB <https://github.com/ddevore/akka-persistence-mongo/>`_.
Custom serialization Custom serialization
==================== ====================
@ -603,8 +651,7 @@ it must add
.. includecode:: code/docs/persistence/PersistenceSerializerDocSpec.scala#custom-serializer-config .. includecode:: code/docs/persistence/PersistenceSerializerDocSpec.scala#custom-serializer-config
to the application configuration. If not specified, a default serializer is used, which is the ``JavaSerializer`` to the application configuration. If not specified, a default serializer is used.
in this example.
Testing Testing
======= =======
@ -618,8 +665,7 @@ or
.. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#shared-store-native-config .. includecode:: code/docs/persistence/PersistencePluginDocSpec.scala#shared-store-native-config
in your Akka configuration. The latter setting applies if you're using a :ref:`shared-leveldb-journal`. The LevelDB in your Akka configuration. The LevelDB Java port is for testing purposes only.
Java port is for testing purposes only.
Miscellaneous Miscellaneous
============= =============
View file
@ -9,16 +9,14 @@ import scala.concurrent.Future;
import akka.japi.Procedure; import akka.japi.Procedure;
import akka.persistence.PersistentRepr; import akka.persistence.PersistentRepr;
interface AsyncReplayPlugin { interface AsyncRecoveryPlugin {
//#async-replay-plugin-api //#async-replay-plugin-api
/** /**
* Java API, Plugin API: asynchronously replays persistent messages. * Java API, Plugin API: asynchronously replays persistent messages.
* Implementations replay a message by calling `replayCallback`. The returned * Implementations replay a message by calling `replayCallback`. The returned
* future must be completed when all messages (matching the sequence number * future must be completed when all messages (matching the sequence number
* bounds) have been replayed. The future `Long` value must be the highest * bounds) have been replayed. The future must be completed with a failure if
* stored sequence number in the journal for the specified processor. The * any of the persistent messages could not be replayed.
* future must be completed with a failure if any of the persistent messages
* could not be replayed.
* *
* The `replayCallback` must also be called with messages that have been marked * The `replayCallback` must also be called with messages that have been marked
* as deleted. In this case a replayed message's `deleted` method must return * as deleted. In this case a replayed message's `deleted` method must return
@ -30,9 +28,20 @@ interface AsyncReplayPlugin {
* @param processorId processor id. * @param processorId processor id.
* @param fromSequenceNr sequence number where replay should start (inclusive). * @param fromSequenceNr sequence number where replay should start (inclusive).
* @param toSequenceNr sequence number where replay should end (inclusive). * @param toSequenceNr sequence number where replay should end (inclusive).
* @param max maximum number of messages to be replayed.
* @param replayCallback called to replay a single message. Can be called from any * @param replayCallback called to replay a single message. Can be called from any
* thread. * thread.
*/ */
Future<Long> doReplayAsync(String processorId, long fromSequenceNr, long toSequenceNr, Procedure<PersistentRepr> replayCallback); Future<Void> doAsyncReplayMessages(String processorId, long fromSequenceNr, long toSequenceNr, long max, Procedure<PersistentRepr> replayCallback);
/**
* Java API, Plugin API: asynchronously reads the highest stored sequence number
* for the given `processorId`.
*
* @param processorId processor id.
* @param fromSequenceNr hint where to start searching for the highest sequence
* number.
*/
Future<Long> doAsyncReadHighestSequenceNr(String processorId, long fromSequenceNr);
//#async-replay-plugin-api //#async-replay-plugin-api
} }
View file
@ -6,31 +6,37 @@ package akka.persistence.journal.japi;
import scala.concurrent.Future; import scala.concurrent.Future;
import akka.persistence.PersistentRepr; import akka.persistence.*;
interface AsyncWritePlugin { interface AsyncWritePlugin {
//#async-write-plugin-api //#async-write-plugin-api
/** /**
* Java API, Plugin API: asynchronously writes a batch of persistent messages to the * Java API, Plugin API: asynchronously writes a batch of persistent messages to the
* journal. The batch write must be atomic i.e. either all persistent messages in the * journal. The batch write must be atomic i.e. either all persistent messages in the
* batch are written or none. * batch are written or none.
*/ */
Future<Void> doWriteAsync(Iterable<PersistentRepr> persistentBatch); Future<Void> doAsyncWriteMessages(Iterable<PersistentRepr> messages);
/** /**
* Java API, Plugin API: asynchronously deletes all persistent messages within the * Java API, Plugin API: asynchronously writes a batch of delivery confirmations to
* range from `fromSequenceNr` to `toSequenceNr`. If `permanent` is set to `false`, * the journal.
* the persistent messages are marked as deleted, otherwise they are permanently */
* deleted. Future<Void> doAsyncWriteConfirmations(Iterable<PersistentConfirmation> confirmations);
/**
* Java API, Plugin API: asynchronously deletes messages identified by `messageIds`
* from the journal. If `permanent` is set to `false`, the persistent messages are
* marked as deleted, otherwise they are permanently deleted.
*/
Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> messageIds, boolean permanent);
/**
* Java API, Plugin API: asynchronously deletes all persistent messages up to
* `toSequenceNr`. If `permanent` is set to `false`, the persistent messages are
* marked as deleted, otherwise they are permanently deleted.
* *
* @see AsyncReplayPlugin * @see AsyncRecoveryPlugin
*/ */
Future<Void> doDeleteAsync(String processorId, long fromSequenceNr, long toSequenceNr, boolean permanent); Future<Void> doAsyncDeleteMessagesTo(String processorId, long toSequenceNr, boolean permanent);
/**
* Java API, Plugin API: asynchronously writes a delivery confirmation to the
* journal.
*/
Future<Void> doConfirmAsync(String processorId, long sequenceNr, String channelId);
//#async-write-plugin-api //#async-write-plugin-api
View file

@ -4,7 +4,7 @@
package akka.persistence.journal.japi; package akka.persistence.journal.japi;
import akka.persistence.PersistentRepr; import akka.persistence.*;
interface SyncWritePlugin { interface SyncWritePlugin {
//#sync-write-plugin-api //#sync-write-plugin-api
@ -13,21 +13,28 @@ interface SyncWritePlugin {
* journal. The batch write must be atomic i.e. either all persistent messages in the * journal. The batch write must be atomic i.e. either all persistent messages in the
* batch are written or none. * batch are written or none.
*/ */
void doWrite(Iterable<PersistentRepr> persistentBatch); void doWriteMessages(Iterable<PersistentRepr> messages);
/** /**
* Java API, Plugin API: synchronously deletes all persistent messages within the * Java API, Plugin API: synchronously writes a batch of delivery confirmations to
* range from `fromSequenceNr` to `toSequenceNr`. If `permanent` is set to `false`, * the journal.
* the persistent messages are marked as deleted, otherwise they are permanently */
* deleted. void doWriteConfirmations(Iterable<PersistentConfirmation> confirmations);
/**
* Java API, Plugin API: synchronously deletes messages identified by `messageIds`
* from the journal. If `permanent` is set to `false`, the persistent messages are
* marked as deleted, otherwise they are permanently deleted.
*/
void doDeleteMessages(Iterable<PersistentId> messageIds, boolean permanent);
/**
* Java API, Plugin API: synchronously deletes all persistent messages up to
* `toSequenceNr`. If `permanent` is set to `false`, the persistent messages are
* marked as deleted, otherwise they are permanently deleted.
* *
* @see AsyncReplayPlugin * @see AsyncRecoveryPlugin
*/ */
void doDelete(String processorId, long fromSequenceNr, long toSequenceNr, boolean permanent); void doDeleteMessagesTo(String processorId, long toSequenceNr, boolean permanent);
/**
* Java API, Plugin API: synchronously writes a delivery confirmation to the journal.
*/
void doConfirm(String processorId, long sequenceNr, String channelId) throws Exception;
//#sync-write-plugin-api //#sync-write-plugin-api
} }
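The comment above requires the batch write to be atomic. One way a synchronous, hypothetical store could honor that: stage the complete batch first and publish it with a single reference swap, so a failure while staging leaves the previous state untouched.
// Hypothetical synchronous store honoring the all-or-nothing batch contract.
class InMemorySyncStore {
  @volatile private var store = Map.empty[(String, Long), Any]
  def doWriteMessages(batch: Seq[(String, Long, Any)]): Unit = {
    // Build the updated map completely before publishing it; either the
    // whole batch becomes visible or none of it does.
    val updated = batch.foldLeft(store) { case (acc, (pid, snr, payload)) =>
      acc + ((pid, snr) -> payload)
    }
    store = updated
  }
}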

View file

@ -14,11 +14,10 @@ message PersistentMessage {
optional int64 sequenceNr = 2; optional int64 sequenceNr = 2;
optional string processorId = 3; optional string processorId = 3;
optional bool deleted = 4; optional bool deleted = 4;
optional bool resolved = 5;
optional int32 redeliveries = 6; optional int32 redeliveries = 6;
repeated string confirms = 7; repeated string confirms = 7;
optional bool confirmable = 8; optional bool confirmable = 8;
optional ConfirmMessage confirmMessage = 9; optional DeliveredMessage confirmMessage = 9;
optional string confirmTarget = 10; optional string confirmTarget = 10;
optional string sender = 11; optional string sender = 11;
} }
@ -29,22 +28,15 @@ message PersistentPayload {
optional bytes payloadManifest = 3; optional bytes payloadManifest = 3;
} }
message ConfirmMessage { message DeliveredMessage {
optional string processorId = 1; optional string processorId = 1;
optional int64 messageSequenceNr = 2; optional string channelId = 2;
optional string channelId = 3; optional int64 persistentSequenceNr = 3;
optional int64 wrapperSequenceNr = 4; optional int64 deliverySequenceNr = 4;
optional string channelEndpoint = 5; optional string channel = 5;
} }
message DeliverMessage { message DeliverMessage {
enum ResolveStrategy {
Off = 1;
Sender = 2;
Destination = 3;
}
optional PersistentMessage persistent = 1; optional PersistentMessage persistent = 1;
optional string destination = 2; optional string destination = 2;
optional ResolveStrategy resolve = 3;
} }

View file

@ -30,7 +30,13 @@ akka {
# Only applies to internally created batches by processors that receive # Only applies to internally created batches by processors that receive
# persistent messages individually. Application-defined batches, even if # persistent messages individually. Application-defined batches, even if
# larger than this setting, are always written as a single isolated batch. # larger than this setting, are always written as a single isolated batch.
max-batch-size = 200 max-message-batch-size = 200
# Maximum size of a confirmation batch written to the journal.
max-confirmation-batch-size = 10000
# Maximum size of a deletion batch written to the journal.
max-deletion-batch-size = 10000
# Path to the journal plugin to be used # Path to the journal plugin to be used
plugin = "akka.persistence.journal.leveldb" plugin = "akka.persistence.journal.leveldb"
@ -61,7 +67,7 @@ akka {
dir = "journal" dir = "journal"
# Use fsync on write # Use fsync on write
fsync = off fsync = on
# Verify checksum on read. # Verify checksum on read.
checksum = off checksum = off
@ -91,7 +97,7 @@ akka {
dir = "journal" dir = "journal"
# Use fsync on write # Use fsync on write
fsync = off fsync = on
# Verify checksum on read. # Verify checksum on read.
checksum = off checksum = off
@ -124,6 +130,19 @@ akka {
} }
} }
view {
# Automated incremental view update.
auto-update = on
# Interval between incremental updates
auto-update-interval = 5s
# Maximum number of messages to replay per incremental view update. Set to
# -1 for no upper limit.
auto-update-replay-max = -1
}
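A hedged sketch of how an application could override the new keys above from code, using the standard Typesafe Config API; the values shown are arbitrary examples.
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
// Override a few of the new persistence settings; keys as in reference.conf above.
val config = ConfigFactory.parseString("""
  akka.persistence.journal.max-message-batch-size = 400
  akka.persistence.view.auto-update-interval = 1s
""").withFallback(ConfigFactory.load())
val system = ActorSystem("example", config)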
dispatchers { dispatchers {
default-plugin-dispatcher { default-plugin-dispatcher {
type = PinnedDispatcher type = PinnedDispatcher

View file

@ -4,64 +4,67 @@
package akka.persistence package akka.persistence
import java.lang.{ Iterable JIterable }
import scala.collection.immutable import scala.collection.immutable
import scala.collection.JavaConverters._
import scala.concurrent.duration._ import scala.concurrent.duration._
import scala.language.postfixOps import scala.language.postfixOps
import akka.actor._ import akka.actor._
import akka.dispatch.Envelope
import akka.persistence.JournalProtocol.Confirm
import akka.persistence.serialization.Message import akka.persistence.serialization.Message
import akka.persistence.JournalProtocol._
/** /**
* A [[Channel]] configuration object. * A [[Channel]] configuration object.
* *
* @param redeliverMax maximum number of redeliveries (default is 5). * @param redeliverMax Maximum number of redelivery attempts.
* @param redeliverInterval interval between redeliveries (default is 5 seconds). * @param redeliverInterval Interval between redelivery attempts.
* @param redeliverFailureListener Receiver of [[RedeliverFailure]] notifications which are sent when the number
* of redeliveries reaches `redeliverMax` for a sequence of messages. To enforce
* a redelivery of these messages, the listener has to restart the sending processor.
* Alternatively, it can also confirm these messages, preventing further redeliveries.
*/ */
@SerialVersionUID(1L) @SerialVersionUID(1L)
class ChannelSettings( case class ChannelSettings(
val redeliverMax: Int, val redeliverMax: Int = 5,
val redeliverInterval: FiniteDuration) extends Serializable { val redeliverInterval: FiniteDuration = 5.seconds,
val redeliverFailureListener: Option[ActorRef] = None) {
/** /**
* Java API. * Java API.
*/ */
def withRedeliverMax(redeliverMax: Int): ChannelSettings = def withRedeliverMax(redeliverMax: Int): ChannelSettings =
update(redeliverMax = redeliverMax) copy(redeliverMax = redeliverMax)
/** /**
* Java API. * Java API.
*/ */
def withRedeliverInterval(redeliverInterval: FiniteDuration): ChannelSettings = def withRedeliverInterval(redeliverInterval: FiniteDuration): ChannelSettings =
update(redeliverInterval = redeliverInterval) copy(redeliverInterval = redeliverInterval)
private def update(
redeliverMax: Int = redeliverMax,
redeliverInterval: FiniteDuration = redeliverInterval): ChannelSettings =
new ChannelSettings(redeliverMax, redeliverInterval)
}
object ChannelSettings {
def apply(
redeliverMax: Int = 5,
redeliverInterval: FiniteDuration = 5 seconds): ChannelSettings =
new ChannelSettings(redeliverMax, redeliverInterval)
/** /**
* Java API. * Java API.
*/ */
def create() = apply() def withRedeliverFailureListener(redeliverFailureListener: ActorRef): ChannelSettings =
copy(redeliverFailureListener = Option(redeliverFailureListener))
}
object ChannelSettings {
/**
* Java API.
*/
def create() = ChannelSettings.apply()
} }
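Since ChannelSettings is now a case class with defaults, the two construction styles below should be equivalent; a small usage sketch:
import scala.concurrent.duration._
// Scala: named parameters on the case class.
val settings = ChannelSettings(redeliverMax = 15, redeliverInterval = 30.seconds)
// Java-style chaining via the with* methods (also usable from Scala).
val same = ChannelSettings()
  .withRedeliverMax(15)
  .withRedeliverInterval(30.seconds)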
/** /**
* A channel is used by [[Processor]]s for sending [[Persistent]] messages to destinations. The main * A channel is used by [[Processor]]s (and [[View]]s) for sending [[Persistent]] messages to destinations.
* responsibility of a channel is to prevent redundant delivery of replayed messages to destinations * The main responsibility of a channel is to prevent redundant delivery of replayed messages to destinations
* when a processor is recovered. * when a processor is recovered.
* *
* A channel is instructed to deliver a persistent message to a `destination` with the [[Deliver]] * A channel is instructed to deliver a persistent message to a destination with the [[Deliver]] command. A
* command. * destination is provided as `ActorPath` and messages are sent via that path's `ActorSelection`.
* *
* {{{ * {{{
* class ForwardExample extends Processor { * class ForwardExample extends Processor {
@ -71,7 +74,7 @@ object ChannelSettings {
* def receive = { * def receive = {
* case m @ Persistent(payload, _) => * case m @ Persistent(payload, _) =>
* // forward modified message to destination * // forward modified message to destination
* channel forward Deliver(m.withPayload(s"fw: ${payload}"), destination) * channel forward Deliver(m.withPayload(s"fw: ${payload}"), destination.path)
* } * }
* } * }
* }}} * }}}
@ -86,7 +89,7 @@ object ChannelSettings {
* def receive = { * def receive = {
* case m @ Persistent(payload, _) => * case m @ Persistent(payload, _) =>
* // reply modified message to sender * // reply modified message to sender
* channel ! Deliver(m.withPayload(s"re: ${payload}"), sender) * channel ! Deliver(m.withPayload(s"re: ${payload}"), sender.path)
* } * }
* } * }
* }}} * }}}
@ -105,39 +108,38 @@ object ChannelSettings {
* }}} * }}}
* *
* If a destination does not confirm the receipt of a `ConfirmablePersistent` message, it will be redelivered * If a destination does not confirm the receipt of a `ConfirmablePersistent` message, it will be redelivered
* by the channel according to the parameters in [[ChannelSettings]]. Message redelivery is done out of order * by the channel according to the parameters in [[ChannelSettings]]. Redelivered messages have a `redeliveries`
* with regards to normal delivery i.e. redelivered messages may arrive later than newer normally delivered * value greater than zero.
* messages. Redelivered messages have a `redeliveries` value greater than zero.
* *
* If the maximum number of redeliveries for a certain message is reached and there is still no confirmation * If the maximum number of redeliveries is reached for certain messages, they are removed from the channel and
* from the destination, then this message is removed from the channel. In order to deliver that message to * a `redeliverFailureListener` (if specified, see [[ChannelSettings]]) is notified about these messages with a
* the destination again, the processor must replay its stored messages to the channel (during start or restart). * [[RedeliverFailure]] message. Besides other application-specific tasks, this listener can restart the sending
* Replayed, unconfirmed messages are then processed and delivered by the channel again. These messages are now * processor to enforce a redelivery of these messages or confirm these messages to prevent further redeliveries.
* duplicates (with a `redeliveries` counter starting from zero). Duplicates can be detected by destinations
* by tracking message sequence numbers.
* *
* @see [[Deliver]] * @see [[Deliver]]
*/ */
final class Channel private[akka] (_channelId: Option[String], channelSettings: ChannelSettings) extends Actor { final class Channel private[akka] (_channelId: Option[String], channelSettings: ChannelSettings) extends Actor {
import channelSettings._
private val id = _channelId match { private val id = _channelId match {
case Some(cid) cid case Some(cid) cid
case None Persistence(context.system).channelId(self) case None Persistence(context.system).channelId(self)
} }
private val journal = Persistence(context.system).journalFor(id) private val journal = Persistence(context.system).confirmationBatchingJournalForChannel(id)
private val delivery = context.actorOf(Props(classOf[ReliableDelivery], channelSettings))
private val reliableDelivery = context.actorOf(Props(classOf[ReliableDelivery], channelSettings))
private val resolvedDelivery = context.actorOf(Props(classOf[ResolvedDelivery], reliableDelivery))
def receive = { def receive = {
case d @ Deliver(persistent: PersistentRepr, _, _) case d @ Deliver(persistent: PersistentRepr, _)
if (!persistent.confirms.contains(id)) resolvedDelivery forward d.copy(prepareDelivery(persistent)) if (!persistent.confirms.contains(id)) delivery forward d.copy(prepareDelivery(persistent))
case d: RedeliverFailure redeliverFailureListener.foreach(_ ! d)
case d: Delivered delivery forward d
} }
private def prepareDelivery(persistent: PersistentRepr): PersistentRepr = private def prepareDelivery(persistent: PersistentRepr): PersistentRepr =
ConfirmablePersistentImpl(persistent, ConfirmablePersistentImpl(persistent,
confirmTarget = journal, confirmTarget = journal,
confirmMessage = Confirm(persistent.processorId, persistent.sequenceNr, id)) confirmMessage = DeliveredByChannel(persistent.processorId, id, persistent.sequenceNr, channel = self))
} }
object Channel { object Channel {
@ -178,189 +180,144 @@ object Channel {
} }
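A sketch of typical channel usage after this change, assuming the Channel.props factories of this series: the destination is now addressed by ActorPath rather than ActorRef.
import akka.actor._
import akka.persistence._
class ForwardingProcessor(destination: ActorRef) extends Processor {
  val channel = context.actorOf(Channel.props("example-channel"))
  def receive = {
    case p @ Persistent(payload, _) =>
      // destination is given as a path; the channel delivers via ActorSelection
      channel ! Deliver(p.withPayload(s"out: $payload"), destination.path)
  }
}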
/** /**
* Instructs a [[Channel]] or [[PersistentChannel]] to deliver `persistent` message to * Instructs a [[Channel]] or [[PersistentChannel]] to deliver a `persistent` message to
* destination `destination`. The `resolve` parameter can be: * a `destination`.
*
* - `Resolve.Destination`: will resolve a new destination reference from the specified
* `destination`s path. The `persistent` message will be sent to the newly resolved
* destination.
* - `Resolve.Sender`: will resolve a new sender reference from this `Deliver` message's
* `sender` path. The `persistent` message will be sent to the specified `destination`
* using the newly resolved sender.
* - `Resolve.Off`: will not do any resolution (default).
*
* Resolving an actor reference means first obtaining an `ActorSelection` from the path of
* the reference to be resolved and then obtaining a new actor reference via an `Identify`
* - `ActorIdentity` conversation. Actor reference resolution does not change the original
* order of messages.
*
* Resolving actor references may become necessary when using the stored sender references
* of replayed messages. A stored sender reference may become invalid (for example, it may
* reference a previous sender incarnation, after a JVM restart). Depending on how a processor
* uses sender references, two resolution strategies are relevant.
*
* - `Resolve.Sender` when a processor forwards a replayed message to a destination.
*
* {{{
* channel forward Deliver(message, destination, Resolve.Sender)
* }}}
*
* - `Resolve.Destination` when a processor replies to the sender of a replayed message. In
* this case the sender is used as channel destination.
*
* {{{
* channel ! Deliver(message, sender, Resolve.Destination)
* }}}
*
* A destination or sender reference will only be resolved by a channel if
*
* - the `resolve` parameter is set to `Resolve.Destination` or `Resolve.Channel`
* - the message is replayed
* - the message is not retained by the channel and
* - there was no previous successful resolve action for that message
* *
* @param persistent persistent message. * @param persistent persistent message.
* @param destination persistent message destination. * @param destination persistent message destination.
* @param resolve resolve strategy.
*/ */
@SerialVersionUID(1L) @SerialVersionUID(1L)
case class Deliver(persistent: Persistent, destination: ActorRef, resolve: Resolve.ResolveStrategy = Resolve.Off) extends Message case class Deliver(persistent: Persistent, destination: ActorPath) extends Message
object Deliver { object Deliver {
/** /**
* Java API. * Java API.
*/ */
def create(persistent: Persistent, destination: ActorRef) = Deliver(persistent, destination) def create(persistent: Persistent, destination: ActorPath) = Deliver(persistent, destination)
/**
* Java API.
*/
def create(persistent: Persistent, destination: ActorRef, resolve: Resolve.ResolveStrategy) = Deliver(persistent, destination, resolve)
} }
/** /**
* Actor reference resolution strategy. * Plugin API: confirmation message generated by receivers of [[ConfirmablePersistent]] messages
* * by calling `ConfirmablePersistent.confirm()`.
* @see [[Deliver]]
*/ */
object Resolve { trait Delivered extends Message {
sealed abstract class ResolveStrategy def channelId: String
def persistentSequenceNr: Long
def deliverySequenceNr: Long
def channel: ActorRef
/** /**
* No resolution. * INTERNAL API.
*/ */
@SerialVersionUID(1L) private[persistence] def update(deliverySequenceNr: Long = deliverySequenceNr, channel: ActorRef = channel): Delivered
case object Off extends ResolveStrategy
/**
* [[Channel]] should resolve the `sender` of a [[Deliver]] message.
*/
@SerialVersionUID(1L)
case object Sender extends ResolveStrategy
/**
* [[Channel]] should resolve the `destination` of a [[Deliver]] message.
*/
@SerialVersionUID(1L)
case object Destination extends ResolveStrategy
/**
* Java API.
*/
def off() = Off
/**
* Java API.
*/
def sender() = Sender
/**
* Java API.
*/
def destination() = Destination
} }
/** /**
* Resolves actor references as specified by [[Deliver]] requests and then delegates delivery * Plugin API.
* to `next`.
*/ */
private class ResolvedDelivery(next: ActorRef) extends Actor with Stash { case class DeliveredByChannel(
private var currentResolution: Envelope = _ processorId: String,
channelId: String,
persistentSequenceNr: Long,
deliverySequenceNr: Long = 0L,
channel: ActorRef = null) extends Delivered with PersistentConfirmation {
private val delivering: Receive = { def sequenceNr: Long = persistentSequenceNr
case d @ Deliver(persistent: PersistentRepr, destination, resolve) def update(deliverySequenceNr: Long, channel: ActorRef): DeliveredByChannel =
resolve match { copy(deliverySequenceNr = deliverySequenceNr, channel = channel)
case Resolve.Sender if !persistent.resolved }
context.actorSelection(sender.path) ! Identify(1)
context.become(resolving, discardOld = false) /**
currentResolution = Envelope(d, sender, context.system) * INTERNAL API.
case Resolve.Destination if !persistent.resolved */
context.actorSelection(destination.path) ! Identify(1) private[persistence] class DeliveredByChannelBatching(journal: ActorRef, settings: PersistenceSettings) extends Actor {
context.become(resolving, discardOld = false) private val publish = settings.internal.publishConfirmations
currentResolution = Envelope(d, sender, context.system) private val batchMax = settings.journal.maxConfirmationBatchSize
case _ next forward d
private var batching = false
private var batch = Vector.empty[DeliveredByChannel]
def receive = {
case WriteConfirmationsSuccess(confirmations)
if (batch.isEmpty) batching = false else journalBatch()
confirmations.foreach { c
val dbc = c.asInstanceOf[DeliveredByChannel]
if (dbc.channel != null) dbc.channel ! c
if (publish) context.system.eventStream.publish(c)
} }
unstash() case WriteConfirmationsFailure(_)
if (batch.isEmpty) batching = false else journalBatch()
case d: DeliveredByChannel
addToBatch(d)
if (!batching || maxBatchSizeReached) journalBatch()
case m journal forward m
} }
private val resolving: Receive = { def addToBatch(pc: DeliveredByChannel): Unit =
case ActorIdentity(1, resolvedOption) batch = batch :+ pc
val Envelope(d: Deliver, sender) = currentResolution
if (d.resolve == Resolve.Sender) {
next tell (d, resolvedOption.getOrElse(sender))
} else if (d.resolve == Resolve.Destination) {
next tell (d.copy(destination = resolvedOption.getOrElse(d.destination)), sender)
}
context.unbecome()
unstash()
case _: Deliver stash()
}
def receive = delivering def maxBatchSizeReached: Boolean =
batch.length >= batchMax
def journalBatch(): Unit = {
journal ! WriteConfirmations(batch, self)
batch = Vector.empty
batching = true
}
}
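The batching layer above allows at most one confirmation write in flight and flushes whatever accumulated when that write completes. A stripped-down sketch of the same write-behind pattern; Flush and Done are hypothetical stand-ins for the journal protocol messages.
import akka.actor.{ Actor, ActorRef }
object Batcher {
  case object Done
  case class Flush(items: Vector[Any])
}
class Batcher(sink: ActorRef, batchMax: Int) extends Actor {
  import Batcher._
  private var writing = false
  private var batch = Vector.empty[Any]
  def receive = {
    case Done => // previous write finished
      if (batch.isEmpty) writing = false else flush()
    case item =>
      batch :+= item
      if (!writing || batch.length >= batchMax) flush()
  }
  private def flush(): Unit = {
    sink ! Flush(batch)
    batch = Vector.empty
    writing = true
  }
}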
/**
* Notification message to inform channel listeners about messages that have reached the maximum
* number of redeliveries.
*/
case class RedeliverFailure(messages: immutable.Seq[ConfirmablePersistent]) {
/**
* Java API.
*/
def getMessages: JIterable[ConfirmablePersistent] = messages.asJava
} }
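A sketch of a redeliverFailureListener reacting to the notification above; here it simply confirms the failed messages to stop further redelivery, the alternative to restarting the sending processor.
import akka.actor.Actor
import akka.persistence.{ ConfirmablePersistent, RedeliverFailure }
class GiveUpListener extends Actor {
  def receive = {
    // Confirm the abandoned messages so the channel stops redelivering them.
    case RedeliverFailure(messages) => messages.foreach(_.confirm())
  }
}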
/** /**
* Reliably deliver messages contained in [[Deliver]] requests to their destinations. Unconfirmed * Reliably deliver messages contained in [[Deliver]] requests to their destinations. Unconfirmed
* messages are redelivered according to the parameters in [[ChannelSettings]]. * messages are redelivered according to the parameters in [[ChannelSettings]].
*/ */
private class ReliableDelivery(channelSettings: ChannelSettings) extends Actor { private class ReliableDelivery(redeliverSettings: ChannelSettings) extends Actor {
import channelSettings._ import redeliverSettings._
import ReliableDelivery._ import ReliableDelivery._
private val redelivery = context.actorOf(Props(classOf[Redelivery], channelSettings)) private val redelivery = context.actorOf(Props(classOf[Redelivery], redeliverSettings))
private var attempts: DeliveryAttempts = Map.empty private var deliveryAttempts: DeliveryAttempts = immutable.SortedMap.empty
private var sequenceNr: Long = 0L private var deliverySequenceNr: Long = 0L
def receive = { def receive = {
case d @ Deliver(persistent: PersistentRepr, destination, _) case d @ Deliver(persistent: ConfirmablePersistentImpl, destination)
val dsnr = nextSequenceNr() val dsnr = nextDeliverySequenceNr()
val psnr = persistent.sequenceNr val psnr = persistent.sequenceNr
val confirm = persistent.confirmMessage.copy(channelEndpoint = self) val confirm = persistent.confirmMessage.update(deliverySequenceNr = dsnr)
val updated = persistent.update(confirmMessage = confirm, sequenceNr = if (psnr == 0) dsnr else psnr) val updated = persistent.update(confirmMessage = confirm, sequenceNr = if (psnr == 0) dsnr else psnr)
destination forward updated context.actorSelection(destination).tell(updated, sender)
attempts += ((updated.processorId, updated.sequenceNr) -> DeliveryAttempt(updated, destination, sender, dsnr)) deliveryAttempts += (dsnr -> DeliveryAttempt(updated, destination, sender))
case c @ Confirm(processorId, messageSequenceNr, _, _, _) case d: Delivered
attempts -= ((processorId, messageSequenceNr)) deliveryAttempts -= d.deliverySequenceNr
redelivery forward d
case Redeliver case Redeliver
val limit = System.nanoTime - redeliverInterval.toNanos val limit = System.nanoTime - redeliverInterval.toNanos
val (older, younger) = attempts.partition { case (_, a) a.timestamp < limit } val (older, younger) = deliveryAttempts.span { case (_, a) a.timestamp < limit }
redelivery ! Redeliver(older, redeliverMax) redelivery ! Redeliver(older, redeliverMax)
attempts = younger deliveryAttempts = younger
} }
private def nextSequenceNr(): Long = { private def nextDeliverySequenceNr(): Long = {
sequenceNr += 1 deliverySequenceNr += 1
sequenceNr deliverySequenceNr
} }
} }
private object ReliableDelivery { private object ReliableDelivery {
type DeliveryAttempts = immutable.Map[(String, Long), DeliveryAttempt] type DeliveryAttempts = immutable.SortedMap[Long, DeliveryAttempt]
type FailedAttempts = Vector[ConfirmablePersistentImpl]
case class DeliveryAttempt(persistent: PersistentRepr, destination: ActorRef, sender: ActorRef, deliverySequenceNr: Long, timestamp: Long = System.nanoTime) {
def withChannelEndpoint(channelEndpoint: ActorRef) =
copy(persistent.update(confirmMessage = persistent.confirmMessage.copy(channelEndpoint = channelEndpoint)))
case class DeliveryAttempt(persistent: ConfirmablePersistentImpl, destination: ActorPath, sender: ActorRef, timestamp: Long = System.nanoTime) {
def incrementRedeliveryCount = def incrementRedeliveryCount =
copy(persistent.update(redeliveries = persistent.redeliveries + 1)) copy(persistent.update(redeliveries = persistent.redeliveries + 1))
} }
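Attempts are now ordered by delivery sequence number, and their timestamps grow with that key, so a single span call splits the SortedMap into redelivery candidates and still-fresh attempts; a small demonstration:
import scala.collection.immutable.SortedMap
case class Attempt(timestamp: Long)
val attempts = SortedMap(1L -> Attempt(100), 2L -> Attempt(200), 3L -> Attempt(300))
val limit = 250L
// span preserves key order and stops at the first non-matching entry.
val (older, younger) = attempts.span { case (_, a) => a.timestamp < limit }
// older == entries 1 and 2 (redelivery candidates), younger == entry 3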
@ -371,40 +328,42 @@ private object ReliableDelivery {
/** /**
* Redelivery process used by [[ReliableDelivery]]. * Redelivery process used by [[ReliableDelivery]].
*/ */
private class Redelivery(channelSettings: ChannelSettings) extends Actor { private class Redelivery(redeliverSettings: ChannelSettings) extends Actor {
import context.dispatcher import context.dispatcher
import channelSettings._ import redeliverSettings._
import ReliableDelivery._ import ReliableDelivery._
private var attempts: DeliveryAttempts = Map.empty private var redeliveryAttempts: DeliveryAttempts = immutable.SortedMap.empty
private var schedule: Cancellable = _ private var redeliverySchedule: Cancellable = _
def receive = { def receive = {
case Redeliver(as, max) case Redeliver(as, max)
attempts ++= as.map { case (k, a) (k, a.withChannelEndpoint(self)) } val (attempts, failed) = (redeliveryAttempts ++ as).foldLeft[(DeliveryAttempts, FailedAttempts)]((immutable.SortedMap.empty, Vector.empty)) {
attempts = attempts.foldLeft[DeliveryAttempts](Map.empty) { case ((attempts, failed), (k, attempt))
case (acc, (k, attempt)) val persistent = attempt.persistent
// drop redelivery attempts that exceed redeliveryMax if (persistent.redeliveries >= redeliverMax) {
if (attempt.persistent.redeliveries >= redeliverMax) acc (attempts, failed :+ persistent)
// increase redelivery count of attempt } else {
else acc + (k -> attempt.incrementRedeliveryCount) val updated = attempt.incrementRedeliveryCount
context.actorSelection(updated.destination).tell(updated.persistent, updated.sender)
(attempts.updated(k, updated), failed)
}
} }
redeliver(attempts) redeliveryAttempts = attempts
scheduleRedelivery() scheduleRedelivery()
case c @ Confirm(processorId, messageSequenceNr, _, _, _) failed.headOption.foreach(_.confirmMessage.channel ! RedeliverFailure(failed))
attempts -= ((processorId, messageSequenceNr)) case c: Delivered
redeliveryAttempts -= c.deliverySequenceNr
} }
override def preStart(): Unit = override def preStart(): Unit =
scheduleRedelivery() scheduleRedelivery()
override def postStop(): Unit = override def postStop(): Unit =
schedule.cancel() redeliverySchedule.cancel()
private def scheduleRedelivery(): Unit = private def scheduleRedelivery(): Unit =
schedule = context.system.scheduler.scheduleOnce(redeliverInterval, context.parent, Redeliver) redeliverySchedule = context.system.scheduler.scheduleOnce(redeliverInterval, context.parent, Redeliver)
private def redeliver(attempts: DeliveryAttempts): Unit =
attempts.values.toSeq.sortBy(_.deliverySequenceNr).foreach(ad ad.destination tell (ad.persistent, ad.sender))
} }

View file

@ -17,10 +17,6 @@ import akka.persistence.JournalProtocol._
* Event sourcing mixin for a [[Processor]]. * Event sourcing mixin for a [[Processor]].
*/ */
private[persistence] trait Eventsourced extends Processor { private[persistence] trait Eventsourced extends Processor {
private trait State {
def aroundReceive(receive: Receive, message: Any): Unit
}
/** /**
* Processor recovery state. Waits for recovery completion and then changes to * Processor recovery state. Waits for recovery completion and then changes to
* `processingCommands` * `processingCommands`
@ -31,8 +27,9 @@ private[persistence] trait Eventsourced extends Processor {
def aroundReceive(receive: Receive, message: Any) { def aroundReceive(receive: Receive, message: Any) {
Eventsourced.super.aroundReceive(receive, message) Eventsourced.super.aroundReceive(receive, message)
message match { message match {
case _: ReplaySuccess | _: ReplayFailure currentState = processingCommands case _: ReadHighestSequenceNrSuccess | _: ReadHighestSequenceNrFailure
case _ currentState = processingCommands
case _
} }
} }
} }
@ -48,7 +45,7 @@ private[persistence] trait Eventsourced extends Processor {
override def toString: String = "processing commands" override def toString: String = "processing commands"
def aroundReceive(receive: Receive, message: Any) { def aroundReceive(receive: Receive, message: Any) {
Eventsourced.super.aroundReceive(receive, LoopSuccess(message)) Eventsourced.super.aroundReceive(receive, LoopMessageSuccess(message))
if (!persistInvocations.isEmpty) { if (!persistInvocations.isEmpty) {
currentState = persistingEvents currentState = persistingEvents
Eventsourced.super.aroundReceive(receive, PersistentBatch(persistentEventBatch.reverse)) Eventsourced.super.aroundReceive(receive, PersistentBatch(persistentEventBatch.reverse))
@ -75,15 +72,15 @@ private[persistence] trait Eventsourced extends Processor {
case p: PersistentRepr case p: PersistentRepr
deleteMessage(p.sequenceNr, true) deleteMessage(p.sequenceNr, true)
throw new UnsupportedOperationException("Persistent commands not supported") throw new UnsupportedOperationException("Persistent commands not supported")
case WriteSuccess(p) case WriteMessageSuccess(p)
withCurrentPersistent(p)(p persistInvocations.head._2(p.payload)) withCurrentPersistent(p)(p persistInvocations.head._2(p.payload))
onWriteComplete() onWriteComplete()
case e @ WriteFailure(p, _) case e @ WriteMessageFailure(p, _)
Eventsourced.super.aroundReceive(receive, message) // stops actor by default Eventsourced.super.aroundReceive(receive, message) // stops actor by default
onWriteComplete() onWriteComplete()
case s @ WriteBatchSuccess Eventsourced.super.aroundReceive(receive, s) case s @ WriteMessagesSuccess Eventsourced.super.aroundReceive(receive, s)
case f: WriteBatchFailure Eventsourced.super.aroundReceive(receive, f) case f: WriteMessagesFailure Eventsourced.super.aroundReceive(receive, f)
case other processorStash.stash() case other processorStash.stash()
} }
def onWriteComplete(): Unit = { def onWriteComplete(): Unit = {

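The state machine above backs the user-facing event sourcing API; a hedged usage sketch, assuming the EventsourcedProcessor trait of this series with receiveRecover, receiveCommand and persist:
import akka.persistence.EventsourcedProcessor
class Counter extends EventsourcedProcessor {
  private var count = 0
  // Replayed events restore the state.
  def receiveRecover: Receive = {
    case n: Int => count += n
  }
  // Commands persist events; the handler runs after the event was written.
  def receiveCommand: Receive = {
    case "increment" => persist(1) { n => count += n }
    case "count"     => sender ! count
  }
}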
View file

@ -13,117 +13,155 @@ import akka.persistence.serialization.Message
/** /**
* INTERNAL API. * INTERNAL API.
* *
* Defines messages exchanged between processors, channels and a journal. * Messages exchanged between processors, views, channels and a journal.
*/ */
private[persistence] object JournalProtocol { private[persistence] object JournalProtocol {
/** /**
* Instructs a journal to delete all persistent messages with sequence numbers in * Request to delete messages identified by `messageIds`. If `permanent` is set to `false`,
* the range from `fromSequenceNr` to `toSequenceNr` (both inclusive). If `permanent` * the persistent messages are marked as deleted, otherwise they are permanently deleted.
* is set to `false`, the persistent messages are marked as deleted in the journal,
* otherwise they are permanently deleted from the journal.
*/ */
case class Delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean) case class DeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean, requestor: Option[ActorRef] = None)
/** /**
* Message sent after confirming the receipt of a [[ConfirmablePersistent]] message. * Reply message to a successful [[DeleteMessages]] request.
*/
case class DeleteMessagesSuccess(messageIds: immutable.Seq[PersistentId])
/**
* Reply message to a failed [[DeleteMessages]] request.
*/
case class DeleteMessagesFailure(cause: Throwable)
/**
* Request to delete all persistent messages with sequence numbers up to `toSequenceNr`
* (inclusive). If `permanent` is set to `false`, the persistent messages are marked
* as deleted in the journal, otherwise they are permanently deleted from the journal.
*/
case class DeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean)
/**
* Request to write delivery confirmations.
*/
case class WriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation], requestor: ActorRef)
/**
* Reply message to a successful [[WriteConfirmations]] request.
*/
case class WriteConfirmationsSuccess(confirmations: immutable.Seq[PersistentConfirmation])
/**
* Reply message to a failed [[WriteConfirmations]] request.
*/
case class WriteConfirmationsFailure(cause: Throwable)
/**
* Request to write messages.
* *
* @param processorId id of the processor that sent the message corresponding to * @param messages messages to be written.
* this confirmation to a channel. * @param processor write requestor.
* @param messageSequenceNr sequence number of the sent message.
* @param channelId id of the channel that delivered the message corresponding to
* this confirmation.
* @param wrapperSequenceNr sequence number of the message stored by a persistent
* channel. This message contains the [[Deliver]] request
* with the message identified by `processorId` and
* `messageSequenceNumber`.
* @param channelEndpoint actor reference that sent the message corresponding to
* this confirmation. This is a child actor of the sending
* [[Channel]] or [[PersistentChannel]].
*/ */
case class Confirm(processorId: String, messageSequenceNr: Long, channelId: String, wrapperSequenceNr: Long = 0L, channelEndpoint: ActorRef = null) extends Message case class WriteMessages(messages: immutable.Seq[PersistentRepr], processor: ActorRef)
/** /**
* Instructs a journal to persist a sequence of messages. * Reply message to a successful [[WriteMessages]] request. This reply is sent to the requestor
* * before all subsequent [[WriteMessageSuccess]] replies.
* @param persistentBatch batch of messages to be persisted.
* @param processor requesting processor.
*/ */
case class WriteBatch(persistentBatch: immutable.Seq[PersistentRepr], processor: ActorRef) case object WriteMessagesSuccess
/** /**
* Reply message to a processor if a batch write succeeded. This message is received before * Reply message to a failed [[WriteMessages]] request. This reply is sent to the requestor
* all subsequent [[WriteSuccess]] messages. * before all subsequent [[WriteMessageFailure]] replies.
*/
case object WriteBatchSuccess
/**
* Reply message to a processor if a batch write failed. This message is received before
* all subsequent [[WriteFailure]] messages.
* *
* @param cause failure cause. * @param cause failure cause.
*/ */
case class WriteBatchFailure(cause: Throwable) case class WriteMessagesFailure(cause: Throwable)
/** /**
* Reply message to a processor that `persistent` message has been successfully journaled. * Reply message to a successful [[WriteMessages]] request. For each contained [[PersistentRepr]] message
* in the request, a separate reply is sent to the requestor.
* *
* @param persistent persistent message. * @param persistent successfully written message.
*/ */
case class WriteSuccess(persistent: PersistentRepr) case class WriteMessageSuccess(persistent: PersistentRepr)
/** /**
* Reply message to a processor that `persistent` message could not be journaled. * Reply message to a failed [[WriteMessages]] request. For each contained [[PersistentRepr]] message
* in the request, a separate reply is sent to the requestor.
* *
* @param persistent persistent message. * @param message message that failed to be written.
* @param cause failure cause. * @param cause failure cause.
*/ */
case class WriteFailure(persistent: PersistentRepr, cause: Throwable) case class WriteMessageFailure(message: PersistentRepr, cause: Throwable)
/** /**
* Instructs a journal to loop a `message` back to `processor`, without persisting the * Request to loop a `message` back to `processor`, without persisting the message. Looping of messages
* message. Looping of messages through a journal is required to preserve message order * through a journal is required to preserve message order with persistent messages.
* with persistent messages.
* *
* @param message message to be looped through the journal. * @param message message to be looped through the journal.
* @param processor requesting processor. * @param processor loop requestor.
*/ */
case class Loop(message: Any, processor: ActorRef) case class LoopMessage(message: Any, processor: ActorRef)
/** /**
* Reply message to a processor that a `message` has been looped through the journal. * Reply message to a [[LoopMessage]] request.
* *
* @param message looped message. * @param message looped message.
*/ */
case class LoopSuccess(message: Any) case class LoopMessageSuccess(message: Any)
/** /**
* Instructs a journal to replay messages to `processor`. * Request to replay messages to `processor`.
* *
* @param fromSequenceNr sequence number where replay should start. * @param fromSequenceNr sequence number where replay should start (inclusive).
* @param toSequenceNr sequence number where replay should end (inclusive). * @param toSequenceNr sequence number where replay should end (inclusive).
* @param max maximum number of messages to be replayed.
* @param processorId requesting processor id.
* @param processor requesting processor.
* @param replayDeleted `true` if messages marked as deleted shall be replayed.
*/
case class ReplayMessages(fromSequenceNr: Long, toSequenceNr: Long, max: Long, processorId: String, processor: ActorRef, replayDeleted: Boolean = false)
/**
* Reply message to a [[ReplayMessages]] request. A separate reply is sent to the requestor for each
* replayed message.
*
* @param persistent replayed message.
*/
case class ReplayedMessage(persistent: PersistentRepr)
/**
* Reply message to a successful [[ReplayMessages]] request. This reply is sent to the requestor
* after all [[ReplayedMessage]] have been sent (if any).
*/
case object ReplayMessagesSuccess
/**
* Reply message to a failed [[ReplayMessages]] request. This reply is sent to the requestor
* if a replay could not be successfully completed.
*/
case class ReplayMessagesFailure(cause: Throwable)
/**
* Request to read the highest stored sequence number of a given processor.
*
* @param fromSequenceNr optional hint where to start searching for the maximum sequence number.
* @param processorId requesting processor id. * @param processorId requesting processor id.
* @param processor requesting processor. * @param processor requesting processor.
*/ */
case class Replay(fromSequenceNr: Long, toSequenceNr: Long, processorId: String, processor: ActorRef) case class ReadHighestSequenceNr(fromSequenceNr: Long = 1L, processorId: String, processor: ActorRef)
/** /**
* Reply message to a processor that `persistent` message has been replayed. * Reply message to a successful [[ReadHighestSequenceNr]] request.
* *
* @param persistent persistent message. * @param highestSequenceNr read highest sequence number.
*/ */
case class Replayed(persistent: PersistentRepr) case class ReadHighestSequenceNrSuccess(highestSequenceNr: Long)
/** /**
* Reply message to a processor that all `persistent` messages have been replayed. * Reply message to a failed [[ReadHighestSequenceNr]] request.
* *
* @param maxSequenceNr the highest stored sequence number (for a processor). * @param cause failure cause.
*/ */
case class ReplaySuccess(maxSequenceNr: Long) case class ReadHighestSequenceNrFailure(cause: Throwable)
/**
* Reply message to a processor that not all `persistent` messages could have been
* replayed.
*/
case class ReplayFailure(cause: Throwable)
} }
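A hedged sketch of a write requestor handling the reply sequence defined above (one batch-level reply, then one reply per message). JournalProtocol is private[persistence], so the sketch lives in that package and is purely illustrative.
package akka.persistence
import akka.actor.Actor
import akka.persistence.JournalProtocol._
class WriteRequestor extends Actor {
  def receive = {
    case WriteMessagesSuccess          => // batch accepted; per-message replies follow
    case WriteMessageSuccess(p)        => println(s"written: ${p.sequenceNr}")
    case WriteMessageFailure(p, cause) => println(s"failed ${p.sequenceNr}: ${cause.getMessage}")
    case WriteMessagesFailure(cause)   => println(s"batch failed: ${cause.getMessage}")
  }
}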

View file

@ -4,9 +4,62 @@
package akka.persistence package akka.persistence
import scala.concurrent.duration._
import com.typesafe.config.Config
import akka.actor._ import akka.actor._
import akka.dispatch.Dispatchers import akka.dispatch.Dispatchers
import akka.persistence.journal.AsyncWriteJournal import akka.persistence.journal.AsyncWriteJournal
import akka.util.Helpers.ConfigOps
/**
* Persistence configuration.
*/
final class PersistenceSettings(config: Config) {
object journal {
val maxMessageBatchSize: Int =
config.getInt("journal.max-message-batch-size")
val maxConfirmationBatchSize: Int =
config.getInt("journal.max-confirmation-batch-size")
val maxDeletionBatchSize: Int =
config.getInt("journal.max-deletion-batch-size")
}
object view {
val autoUpdate: Boolean =
config.getBoolean("view.auto-update")
val autoUpdateInterval: FiniteDuration =
config.getMillisDuration("view.auto-update-interval")
val autoUpdateReplayMax: Long =
posMax(config.getLong("view.auto-update-replay-max"))
private def posMax(v: Long) =
if (v < 0) Long.MaxValue else v
}
/**
* INTERNAL API.
*
* These config options are only used internally for testing
* purposes and are therefore not defined in reference.conf
*/
private[persistence] object internal {
val publishPluginCommands: Boolean = {
val path = "publish-plugin-commands"
config.hasPath(path) && config.getBoolean(path)
}
val publishConfirmations: Boolean = {
val path = "publish-confirmations"
config.hasPath(path) && config.getBoolean(path)
}
}
}
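The parsed values are reachable through the persistence extension, which exposes this class as a settings val (added further below); a small access sketch:
import akka.actor.ActorSystem
import akka.persistence.Persistence
val system = ActorSystem("example")
val settings = Persistence(system).settings
// Values parsed from the reference.conf keys introduced in this commit.
println(settings.journal.maxMessageBatchSize) // journal.max-message-batch-size
println(settings.view.autoUpdateInterval)     // view.auto-update-interval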
/** /**
* Persistence extension. * Persistence extension.
@ -27,27 +80,34 @@ object Persistence extends ExtensionId[Persistence] with ExtensionIdProvider {
*/ */
class Persistence(val system: ExtendedActorSystem) extends Extension { class Persistence(val system: ExtendedActorSystem) extends Extension {
private val DefaultPluginDispatcherId = "akka.persistence.dispatchers.default-plugin-dispatcher" private val DefaultPluginDispatcherId = "akka.persistence.dispatchers.default-plugin-dispatcher"
private val config = system.settings.config.getConfig("akka.persistence") private val config = system.settings.config.getConfig("akka.persistence")
private val snapshotStore = createPlugin("snapshot-store", _ DefaultPluginDispatcherId)
private val journal = createPlugin("journal", clazz
if (classOf[AsyncWriteJournal].isAssignableFrom(clazz)) Dispatchers.DefaultDispatcherId else DefaultPluginDispatcherId)
/** val settings = new PersistenceSettings(config)
* INTERNAL API.
*/ private val snapshotStore = createPlugin("snapshot-store") { _
private[persistence] val publishPluginCommands: Boolean = { DefaultPluginDispatcherId
val path = "publish-plugin-commands"
// this config option is only used internally (for testing
// purposes) and is therefore not defined in reference.conf
config.hasPath(path) && config.getBoolean(path)
} }
private val journal = createPlugin("journal") { clazz
if (classOf[AsyncWriteJournal].isAssignableFrom(clazz)) Dispatchers.DefaultDispatcherId
else DefaultPluginDispatcherId
}
private val confirmationBatchLayer = system.asInstanceOf[ActorSystemImpl]
.systemActorOf(Props(classOf[DeliveredByChannelBatching], journal, settings), "confirmation-batch-layer")
private val deletionBatchLayer = system.asInstanceOf[ActorSystemImpl]
.systemActorOf(Props(classOf[DeliveredByPersistentChannelBatching], journal, settings), "deletion-batch-layer")
/** /**
* INTERNAL API. * Creates a canonical processor id from a processor actor ref.
*/ */
private[persistence] val maxBatchSize: Int = def processorId(processor: ActorRef): String = id(processor)
config.getInt("journal.max-batch-size")
/**
* Creates a canonical channel id from a channel actor ref.
*/
def channelId(channel: ActorRef): String = id(channel)
/** /**
* Returns a snapshot store for a processor identified by `processorId`. * Returns a snapshot store for a processor identified by `processorId`.
@ -68,16 +128,18 @@ class Persistence(val system: ExtendedActorSystem) extends Extension {
} }
/** /**
* Creates a canonical processor id from a processor actor ref. * INTERNAL API.
*/ */
def processorId(processor: ActorRef): String = id(processor) private[persistence] def confirmationBatchingJournalForChannel(channelId: String): ActorRef =
confirmationBatchLayer
/** /**
* Creates a canonical channel id from a channel actor ref. * INTERNAL API.
*/ */
def channelId(channel: ActorRef): String = id(channel) private[persistence] def deletionBatchingJournalForChannel(channelId: String): ActorRef =
deletionBatchLayer
private def createPlugin(pluginType: String, dispatcherSelector: Class[_] String) = { private def createPlugin(pluginType: String)(dispatcherSelector: Class[_] String) = {
val pluginConfigPath = config.getString(s"${pluginType}.plugin") val pluginConfigPath = config.getString(s"${pluginType}.plugin")
val pluginConfig = system.settings.config.getConfig(pluginConfigPath) val pluginConfig = system.settings.config.getConfig(pluginConfigPath)
val pluginClassName = pluginConfig.getString("class") val pluginClassName = pluginConfig.getString("class")

View file

@ -12,7 +12,6 @@ import scala.collection.immutable
import akka.actor.{ ActorContext, ActorRef } import akka.actor.{ ActorContext, ActorRef }
import akka.japi.Util.immutableSeq import akka.japi.Util.immutableSeq
import akka.pattern.PromiseActorRef import akka.pattern.PromiseActorRef
import akka.persistence.JournalProtocol.Confirm
import akka.persistence.serialization.Message import akka.persistence.serialization.Message
/** /**
@ -115,14 +114,43 @@ case class PersistentBatch(persistentBatch: immutable.Seq[Persistent]) extends M
persistentBatch.toList.asInstanceOf[List[PersistentRepr]] persistentBatch.toList.asInstanceOf[List[PersistentRepr]]
} }
/**
* Plugin API: confirmation entry written by journal plugins.
*/
trait PersistentConfirmation {
def processorId: String
def channelId: String
def sequenceNr: Long
}
/**
* Plugin API: persistent message identifier.
*/
trait PersistentId {
/**
* Id of processor that journals a persistent message
*/
def processorId: String
/**
* A persistent message's sequence number.
*/
def sequenceNr: Long
}
/**
* INTERNAL API.
*/
private[persistence] case class PersistentIdImpl(processorId: String, sequenceNr: Long) extends PersistentId
/** /**
* Plugin API: representation of a persistent message in the journal plugin API. * Plugin API: representation of a persistent message in the journal plugin API.
* *
* @see[[SyncWriteJournal]] * @see [[journal.SyncWriteJournal]]
* @see[[AsyncWriteJournal]] * @see [[journal.AsyncWriteJournal]]
* @see[[AsyncReplay]] * @see [[journal.AsyncRecovery]]
*/ */
trait PersistentRepr extends Persistent with Message { trait PersistentRepr extends Persistent with PersistentId with Message {
import scala.collection.JavaConverters._ import scala.collection.JavaConverters._
/** /**
@ -130,28 +158,11 @@ trait PersistentRepr extends Persistent with Message {
*/ */
def payload: Any def payload: Any
/**
* This persistent message's sequence number.
*/
def sequenceNr: Long
/**
* Id of processor that journals the message
*/
def processorId: String
/** /**
* `true` if this message is marked as deleted. * `true` if this message is marked as deleted.
*/ */
def deleted: Boolean def deleted: Boolean
/**
* `true` by default, `false` for replayed messages. Set to `true` by a channel if this
* message is replayed and its sender reference was resolved. Channels use this field to
* avoid redundant sender reference resolutions.
*/
def resolved: Boolean
/** /**
* Number of redeliveries. Only greater than zero if message has been redelivered by a [[Channel]] * Number of redeliveries. Only greater than zero if message has been redelivered by a [[Channel]]
* or [[PersistentChannel]]. * or [[PersistentChannel]].
@ -178,7 +189,7 @@ trait PersistentRepr extends Persistent with Message {
/** /**
* Delivery confirmation message. * Delivery confirmation message.
*/ */
def confirmMessage: Confirm def confirmMessage: Delivered
/** /**
* Delivery confirmation message. * Delivery confirmation message.
@ -202,16 +213,15 @@ trait PersistentRepr extends Persistent with Message {
prepareWrite(if (sender.isInstanceOf[PromiseActorRef]) context.system.deadLetters else sender) prepareWrite(if (sender.isInstanceOf[PromiseActorRef]) context.system.deadLetters else sender)
/** /**
* INTERNAL API. * Creates a new copy of this [[PersistentRepr]].
*/ */
private[persistence] def update( def update(
sequenceNr: Long = sequenceNr, sequenceNr: Long = sequenceNr,
processorId: String = processorId, processorId: String = processorId,
deleted: Boolean = deleted, deleted: Boolean = deleted,
resolved: Boolean = resolved,
redeliveries: Int = redeliveries, redeliveries: Int = redeliveries,
confirms: immutable.Seq[String] = confirms, confirms: immutable.Seq[String] = confirms,
confirmMessage: Confirm = confirmMessage, confirmMessage: Delivered = confirmMessage,
confirmTarget: ActorRef = confirmTarget, confirmTarget: ActorRef = confirmTarget,
sender: ActorRef = sender): PersistentRepr sender: ActorRef = sender): PersistentRepr
} }
@ -230,14 +240,13 @@ object PersistentRepr {
sequenceNr: Long = 0L, sequenceNr: Long = 0L,
processorId: String = PersistentRepr.Undefined, processorId: String = PersistentRepr.Undefined,
deleted: Boolean = false, deleted: Boolean = false,
resolved: Boolean = true,
redeliveries: Int = 0, redeliveries: Int = 0,
confirms: immutable.Seq[String] = Nil, confirms: immutable.Seq[String] = Nil,
confirmable: Boolean = false, confirmable: Boolean = false,
confirmMessage: Confirm = null, confirmMessage: Delivered = null,
confirmTarget: ActorRef = null, confirmTarget: ActorRef = null,
sender: ActorRef = null) = sender: ActorRef = null) =
if (confirmable) ConfirmablePersistentImpl(payload, sequenceNr, processorId, deleted, resolved, redeliveries, confirms, confirmMessage, confirmTarget, sender) if (confirmable) ConfirmablePersistentImpl(payload, sequenceNr, processorId, deleted, redeliveries, confirms, confirmMessage, confirmTarget, sender)
else PersistentImpl(payload, sequenceNr, processorId, deleted, confirms, sender) else PersistentImpl(payload, sequenceNr, processorId, deleted, confirms, sender)
/** /**
@ -275,18 +284,16 @@ private[persistence] case class PersistentImpl(
sequenceNr: Long, sequenceNr: Long,
processorId: String, processorId: String,
deleted: Boolean, deleted: Boolean,
resolved: Boolean,
redeliveries: Int, redeliveries: Int,
confirms: immutable.Seq[String], confirms: immutable.Seq[String],
confirmMessage: Confirm, confirmMessage: Delivered,
confirmTarget: ActorRef, confirmTarget: ActorRef,
sender: ActorRef) = sender: ActorRef) =
copy(sequenceNr = sequenceNr, processorId = processorId, deleted = deleted, confirms = confirms, sender = sender) copy(sequenceNr = sequenceNr, processorId = processorId, deleted = deleted, confirms = confirms, sender = sender)
val resolved: Boolean = false
val redeliveries: Int = 0 val redeliveries: Int = 0
val confirmable: Boolean = false val confirmable: Boolean = false
val confirmMessage: Confirm = null val confirmMessage: Delivered = null
val confirmTarget: ActorRef = null val confirmTarget: ActorRef = null
} }
@ -298,10 +305,9 @@ private[persistence] case class ConfirmablePersistentImpl(
sequenceNr: Long, sequenceNr: Long,
processorId: String, processorId: String,
deleted: Boolean, deleted: Boolean,
resolved: Boolean,
redeliveries: Int, redeliveries: Int,
confirms: immutable.Seq[String], confirms: immutable.Seq[String],
confirmMessage: Confirm, confirmMessage: Delivered,
confirmTarget: ActorRef, confirmTarget: ActorRef,
sender: ActorRef) extends ConfirmablePersistent with PersistentRepr { sender: ActorRef) extends ConfirmablePersistent with PersistentRepr {
@ -314,16 +320,16 @@ private[persistence] case class ConfirmablePersistentImpl(
def confirmable = true def confirmable = true
def prepareWrite(sender: ActorRef) = def prepareWrite(sender: ActorRef) =
copy(sender = sender, resolved = false, confirmMessage = null, confirmTarget = null) copy(sender = sender, confirmMessage = null, confirmTarget = null)
def update(sequenceNr: Long, processorId: String, deleted: Boolean, resolved: Boolean, redeliveries: Int, confirms: immutable.Seq[String], confirmMessage: Confirm, confirmTarget: ActorRef, sender: ActorRef) = def update(sequenceNr: Long, processorId: String, deleted: Boolean, redeliveries: Int, confirms: immutable.Seq[String], confirmMessage: Delivered, confirmTarget: ActorRef, sender: ActorRef) =
copy(sequenceNr = sequenceNr, processorId = processorId, deleted = deleted, resolved = resolved, redeliveries = redeliveries, confirms = confirms, confirmMessage = confirmMessage, confirmTarget = confirmTarget, sender = sender) copy(sequenceNr = sequenceNr, processorId = processorId, deleted = deleted, redeliveries = redeliveries, confirms = confirms, confirmMessage = confirmMessage, confirmTarget = confirmTarget, sender = sender)
} }
/** /**
* INTERNAL API. * INTERNAL API.
*/ */
private[persistence] object ConfirmablePersistentImpl { private[persistence] object ConfirmablePersistentImpl {
def apply(persistent: PersistentRepr, confirmMessage: Confirm, confirmTarget: ActorRef = null): ConfirmablePersistentImpl = def apply(persistent: PersistentRepr, confirmMessage: Delivered, confirmTarget: ActorRef = null): ConfirmablePersistentImpl =
ConfirmablePersistentImpl(persistent.payload, persistent.sequenceNr, persistent.processorId, persistent.deleted, persistent.resolved, persistent.redeliveries, persistent.confirms, confirmMessage, confirmTarget, persistent.sender) ConfirmablePersistentImpl(persistent.payload, persistent.sequenceNr, persistent.processorId, persistent.deleted, persistent.redeliveries, persistent.confirms, confirmMessage, confirmTarget, persistent.sender)
} }

View file

@ -9,82 +9,120 @@ import scala.language.postfixOps
import akka.AkkaException import akka.AkkaException
import akka.actor._ import akka.actor._
import akka.persistence.JournalProtocol._
import akka.persistence.JournalProtocol.Confirm
/** /**
* A [[PersistentChannel]] configuration object. * A [[PersistentChannel]] configuration object.
* *
* @param redeliverMax maximum number of redeliveries (default is 5). * @param redeliverMax Maximum number of redelivery attempts.
* @param redeliverInterval interval between redeliveries (default is 5 seconds). * @param redeliverInterval Interval between redelivery attempts.
* @param replyPersistent if `true` the sender will receive the successfully stored [[Persistent]] * @param redeliverFailureListener Receiver of [[RedeliverFailure]] notifications which are sent when the number
* message that has been submitted with a [[Deliver]] request, or a * of redeliveries reaches `redeliverMax` for a sequence of messages. To enforce
* [[PersistenceFailure]] message in case of a persistence failure. * a redelivery of these messages, the listener has to [[Reset]] the persistent
* channel. Alternatively, it can also confirm these messages, preventing further
* redeliveries.
* @param replyPersistent If `true` the sender will receive the successfully stored [[Persistent]] message that has
* been submitted with a [[Deliver]] request, or a [[PersistenceFailure]] message in case of
* a persistence failure.
* @param pendingConfirmationsMax Message delivery is suspended by a channel if the number of pending reaches the
* specified value and is resumed again if the number of pending confirmations falls
* below `pendingConfirmationsMin`.
* @param pendingConfirmationsMin Message delivery is resumed if the number of pending confirmations falls below
* this limit. It is suspended again if it reaches `pendingConfirmationsMax`.
* @param idleTimeout Maximum interval between read attempts made by a persistent channel. This setting applies,
* for example, after a journal failed to serve a read request. The next read request is then
* made after the configured timeout.
*/ */
class PersistentChannelSettings( @SerialVersionUID(1L)
redeliverMax: Int, case class PersistentChannelSettings(
redeliverInterval: FiniteDuration, val redeliverMax: Int = 5,
val replyPersistent: Boolean) extends ChannelSettings(redeliverMax, redeliverInterval) { val redeliverInterval: FiniteDuration = 5.seconds,
val redeliverFailureListener: Option[ActorRef] = None,
val replyPersistent: Boolean = false,
val pendingConfirmationsMax: Long = Long.MaxValue,
val pendingConfirmationsMin: Long = Long.MaxValue,
val idleTimeout: FiniteDuration = 1.minute) {
/** /**
* Java API. * Java API.
*/ */
override def withRedeliverMax(redeliverMax: Int): PersistentChannelSettings = def withRedeliverMax(redeliverMax: Int): PersistentChannelSettings =
updatePersistent(redeliverMax = redeliverMax) copy(redeliverMax = redeliverMax)
/** /**
* Java API. * Java API.
*/ */
override def withRedeliverInterval(redeliverInterval: FiniteDuration): PersistentChannelSettings = def withRedeliverInterval(redeliverInterval: FiniteDuration): PersistentChannelSettings =
updatePersistent(redeliverInterval = redeliverInterval) copy(redeliverInterval = redeliverInterval)
/** /**
* Java API. * Java API.
*/ */
def withReplyPersistent(replayPersistent: Boolean) = def withRedeliverFailureListener(redeliverFailureListener: ActorRef): PersistentChannelSettings =
updatePersistent(replyPersistent = replyPersistent) copy(redeliverFailureListener = Option(redeliverFailureListener))
private def updatePersistent( // compile error if method name is 'update' /**
redeliverMax: Int = redeliverMax, * Java API.
redeliverInterval: FiniteDuration = redeliverInterval, */
replyPersistent: Boolean = replyPersistent): PersistentChannelSettings = def withReplyPersistent(replyPersistent: Boolean): PersistentChannelSettings =
new PersistentChannelSettings(redeliverMax, redeliverInterval, replyPersistent) copy(replyPersistent = replyPersistent)
/**
* Java API.
*/
def withPendingConfirmationsMax(pendingConfirmationsMax: Long): PersistentChannelSettings =
copy(pendingConfirmationsMax = pendingConfirmationsMax)
/**
* Java API.
*/
def withPendingConfirmationsMin(pendingConfirmationsMin: Long): PersistentChannelSettings =
copy(pendingConfirmationsMin = pendingConfirmationsMin)
/**
* Converts this configuration object to [[ChannelSettings]].
*/
def toChannelSettings: ChannelSettings =
ChannelSettings(redeliverMax, redeliverInterval, redeliverFailureListener)
} }
object PersistentChannelSettings { object PersistentChannelSettings {
def apply(
redeliverMax: Int = 5,
redeliverInterval: FiniteDuration = 5 seconds,
replyPersistent: Boolean = false): PersistentChannelSettings =
new PersistentChannelSettings(redeliverMax, redeliverInterval, replyPersistent)
/** /**
* Java API. * Java API.
*/ */
def create() = apply() def create() = PersistentChannelSettings.apply()
} }
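A usage sketch combining the new watermark settings with Reset, assuming the PersistentChannel.props factory of this series: delivery is suspended once pendingConfirmationsMax confirmations are outstanding and resumed when the count falls below pendingConfirmationsMin.
import akka.actor.Actor
import akka.persistence._
class ChannelOwner extends Actor {
  val channel = context.actorOf(PersistentChannel.props(
    PersistentChannelSettings(
      pendingConfirmationsMax = 10000L,
      pendingConfirmationsMin = 2000L)))
  // Stand-in destination for the sketch.
  private val destination = context.system.deadLetters
  def receive = {
    case p: Persistent => channel ! Deliver(p, destination.path)
    case "redeliver"   => channel ! Reset // redeliver all unconfirmed messages
  }
}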
/**
* Resets a [[PersistentChannel]], forcing it to redeliver all unconfirmed persistent
* messages. This does not affect writing [[Deliver]] requests.
*/
case object Reset
/**
* Exception thrown by a [[PersistentChannel]] child actor to re-initiate delivery.
*/
class ResetException extends AkkaException("Channel reset on application request")
/**
* A [[PersistentChannel]] implements the same functionality as a [[Channel]] but additionally persists
* [[Deliver]] requests before they are served. Persistent channels are useful in combination with slow
* destinations or destinations that are unavailable for a long time. `Deliver` requests that have been
* persisted by a persistent channel are deleted when destinations confirm the receipt of the corresponding
* messages.
*
* The number of pending confirmations can be limited by a persistent channel based on the parameters of
* [[PersistentChannelSettings]]. It can suspend delivery when the number of pending confirmations reaches
* `pendingConfirmationsMax` and resume delivery again when this number falls below `pendingConfirmationsMin`.
* This prevents both flooding destinations with more messages than they can process and unlimited memory
* consumption by the channel. A persistent channel continues to persist [[Deliver]] requests even while
* message delivery is temporarily suspended.
*
* A persistent channel can also reply to [[Deliver]] senders, reporting whether or not a request was
* successfully persisted (see `replyPersistent` parameter in [[PersistentChannelSettings]]). In case of
* success, the channel replies with the contained [[Persistent]] message, otherwise with a
* [[PersistenceFailure]] message.
*/
final class PersistentChannel private[akka] (_channelId: Option[String], channelSettings: PersistentChannelSettings) extends Actor {
private val id = _channelId match {
@ -92,19 +130,17 @@ final class PersistentChannel private[akka] (_channelId: Option[String], channel
case None ⇒ Persistence(context.system).channelId(self)
}
private val requestReader = context.actorOf(Props(classOf[RequestReader], id, channelSettings))
private val requestWriter = context.actorOf(Props(classOf[RequestWriter], id, channelSettings, requestReader))
def receive = {
case d @ Deliver(persistent: PersistentRepr, destination) ⇒
// Persist the Deliver request by sending requestWriter a Persistent message
// with the Deliver request as payload. This persistent message is referred to
// as the wrapper message, whereas the persistent message contained in the Deliver
// request is referred to as wrapped message (see also class RequestWriter).
if (!persistent.confirms.contains(id)) requestWriter forward Persistent(d)
case Reset ⇒ requestReader ! Reset
}
}
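For orientation, a minimal standalone usage sketch (the `Destination` actor, the system and channel names, and the single-argument `props` factory are illustrative assumptions, not taken from this commit): the channel persists every `Deliver` request, the destination confirms processed messages so the persisted requests can be deleted, and `Reset` forces redelivery of everything still unconfirmed.

  import akka.actor._
  import akka.persistence._

  // Hypothetical destination: confirming a message lets the channel
  // delete the corresponding persisted Deliver request.
  class Destination extends Actor {
    def receive = {
      case p: ConfirmablePersistent ⇒
        // ... process p.payload ...
        p.confirm()
    }
  }

  object PersistentChannelExample extends App {
    val system = ActorSystem("example")
    val destination = system.actorOf(Props[Destination])

    val channel = system.actorOf(PersistentChannel.props(
      PersistentChannelSettings(
        pendingConfirmationsMax = 10000, // suspend delivery at this watermark
        pendingConfirmationsMin = 2000,  // resume delivery below this watermark
        replyPersistent = true)),        // reply to Deliver senders after writes
      "persistentChannel")

    channel ! Deliver(Persistent("example"), destination.path)
    // channel ! Reset // force redelivery of all unconfirmed messages
  }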
@ -145,70 +181,192 @@ object PersistentChannel {
}
/**
* Plugin API.
*/
case class DeliveredByPersistentChannel(
channelId: String,
persistentSequenceNr: Long,
deliverySequenceNr: Long = 0L,
channel: ActorRef = null) extends Delivered with PersistentId {
def processorId: String = channelId
def sequenceNr: Long = persistentSequenceNr
def update(deliverySequenceNr: Long, channel: ActorRef): DeliveredByPersistentChannel =
copy(deliverySequenceNr = deliverySequenceNr, channel = channel)
}
/**
* INTERNAL API.
*/
private[persistence] class DeliveredByPersistentChannelBatching(journal: ActorRef, settings: PersistenceSettings) extends Actor {
private val publish = settings.internal.publishConfirmations
private val batchMax = settings.journal.maxConfirmationBatchSize
private var batching = false
private var batch = Vector.empty[DeliveredByPersistentChannel]
def receive = {
case DeleteMessagesSuccess(messageIds) ⇒
if (batch.isEmpty) batching = false else journalBatch()
messageIds.foreach {
case c: DeliveredByPersistentChannel ⇒
c.channel ! c
if (publish) context.system.eventStream.publish(c)
}
case DeleteMessagesFailure(_) ⇒
if (batch.isEmpty) batching = false else journalBatch()
case d: DeliveredByPersistentChannel ⇒
addToBatch(d)
if (!batching || maxBatchSizeReached) journalBatch()
case m ⇒ journal forward m
}
def addToBatch(pc: DeliveredByPersistentChannel): Unit =
batch = batch :+ pc
def maxBatchSizeReached: Boolean =
batch.length >= batchMax
def journalBatch(): Unit = {
journal ! DeleteMessages(batch, true, Some(self))
batch = Vector.empty
batching = true
}
}
/**
* Writes [[Deliver]] requests to the journal.
*/
private class RequestWriter(channelId: String, channelSettings: PersistentChannelSettings, reader: ActorRef) extends Processor {
import RequestWriter._
import channelSettings._
private val cbJournal = extension.confirmationBatchingJournalForChannel(channelId)
override val processorId = channelId
def receive = {
case p @ Persistent(Deliver(wrapped: PersistentRepr, _), _) ⇒
if (!recoveryRunning && wrapped.processorId != PersistentRepr.Undefined)
// Write a delivery confirmation to the journal so that replayed Deliver
// requests from a sending processor are not persisted again. Replaying
// Deliver requests is now the responsibility of this processor.
cbJournal ! DeliveredByChannel(wrapped.processorId, channelId, wrapped.sequenceNr)
if (!recoveryRunning && replyPersistent)
sender ! wrapped
case p: PersistenceFailure ⇒
if (replyPersistent) sender ! p
}
override protected[akka] def aroundReceive(receive: Receive, message: Any): Unit = {
super.aroundReceive(receive, message)
message match {
case WriteMessagesSuccess | WriteMessagesFailure(_) ⇒
// activate reader after writes to reduce delivery latency
reader ! RequestsWritten
case _ ⇒
}
}
override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
self ! Recover(replayMax = 0L)
}
override def preStart(): Unit = {
self ! Recover(replayMax = 0L)
}
}
private object RequestWriter {
case object RequestsWritten
}
/**
* Reads [[Deliver]] requests from the journal and processes them. The number of `Deliver` requests
* processed per iteration depends on
*
* - `pendingConfirmationsMax` parameter in [[PersistentChannelSettings]]
* - `pendingConfirmationsMin` parameter in [[PersistentChannelSettings]] and the
* - current number of pending confirmations.
*
* @see [[PersistentChannel]]
*/
private class RequestReader(channelId: String, channelSettings: PersistentChannelSettings) extends Actor with Recovery {
import RequestWriter._
import channelSettings._
private val delivery = context.actorOf(Props(classOf[ReliableDelivery], channelSettings.toChannelSettings))
private val idle: State = new State {
override def toString: String = "idle"
def aroundReceive(receive: Receive, message: Any): Unit = message match {
case r: Recover ⇒ // ignore
case other ⇒ process(receive, other)
}
}
def receive = {
case p @ Persistent(d @ Deliver(wrapped: PersistentRepr, destination), snr) ⇒
val wrapper = p.asInstanceOf[PersistentRepr]
val prepared = prepareDelivery(wrapped, wrapper)
numReplayed += 1
numPending += 1
delivery forward d.copy(prepared)
case d: Delivered ⇒
delivery forward d
numPending = math.max(numPending - 1L, 0L)
if (numPending == pendingConfirmationsMin) onReadRequest()
case d @ RedeliverFailure(ms) ⇒
val numPendingPrev = numPending
numPending = math.max(numPending - ms.length, 0L)
if (numPendingPrev > pendingConfirmationsMin && numPending <= pendingConfirmationsMin) onReadRequest()
redeliverFailureListener.foreach(_.tell(d, context.parent))
case RequestsWritten | ReceiveTimeout ⇒
if (numPending <= pendingConfirmationsMin) onReadRequest()
case Reset ⇒ throw new ResetException
}
def onReplaySuccess(receive: Receive, await: Boolean): Unit = {
onReplayComplete()
if (numReplayed > 0 && numPending <= pendingConfirmationsMin) onReadRequest()
numReplayed = 0L
}
def onReplayFailure(receive: Receive, await: Boolean, cause: Throwable): Unit = {
onReplayComplete()
}
def processorId: String =
channelId
def snapshotterId: String =
s"${channelId}-reader"
private val dbJournal = extension.deletionBatchingJournalForChannel(channelId)
/**
* Number of delivery requests replayed (read) per iteration.
*/
private var numReplayed = 0L
/**
* Number of pending confirmations.
*/
private var numPending = 0L
context.setReceiveTimeout(channelSettings.idleTimeout)
private def onReplayComplete(): Unit = {
_currentState = idle
receiverStash.unstashAll()
}
private def onReadRequest(): Unit = if (_currentState == idle) {
_currentState = replayStarted(await = false)
dbJournal ! ReplayMessages(lastSequenceNr + 1L, Long.MaxValue, pendingConfirmationsMax - numPending, processorId, self)
}
/**
@ -220,12 +378,21 @@ private class ReliableStorage(channelId: String, channelSettings: PersistentChan
// otherwise, use sequence number of the wrapped message (that has been generated by
// the sending processor).
val sequenceNr = if (wrapped.sequenceNr == 0L) wrapper.sequenceNr else wrapped.sequenceNr
val updated = wrapped.update(sequenceNr = sequenceNr)
// include the wrapper sequence number in the Confirm message so that the wrapper can
// be deleted later when the confirmation arrives.
ConfirmablePersistentImpl(updated,
confirmTarget = dbJournal,
confirmMessage = DeliveredByPersistentChannel(channelId, sequenceNr, channel = self))
}
override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
try receiverStash.unstashAll() finally super.preRestart(reason, message)
}
override def preStart(): Unit = {
super.preStart()
self ! Recover(replayMax = 0L)
self ! RequestsWritten // considers savepoint loaded from snapshot (TODO)
}
}
View file
@ -4,8 +4,7 @@
package akka.persistence
import akka.AkkaException
import akka.actor._
import akka.dispatch._
@ -28,7 +27,6 @@ import akka.dispatch._
* processor ! "bar"
* }}}
*
* During start and restart, persistent messages are replayed to a processor so that it can recover internal
* state from these messages. New messages sent to a processor during recovery do not interfere with replayed
* messages, hence applications don't need to wait for a processor to complete its recovery.
@ -53,97 +51,41 @@ import akka.dispatch._
* @see [[Recover]]
* @see [[PersistentBatch]]
*/
trait Processor extends Actor with Recovery {
import JournalProtocol._
/**
* Processes the highest stored sequence number response from the journal and then switches
* to `processing` state.
*/
private val initializing = new State {
override def toString: String = "initializing"
def aroundReceive(receive: Receive, message: Any) = message match {
case ReadHighestSequenceNrSuccess(highest) ⇒
_currentState = processing
sequenceNr = highest
receiverStash.unstashAll()
case ReadHighestSequenceNrFailure(cause) ⇒
onRecoveryFailure(receive, cause)
case other ⇒
receiverStash.stash()
}
}
/**
* Journals and processes new messages, both persistent and transient.
*/
private val processing = new State {
override def toString: String = "processing"
private var batching = false
def aroundReceive(receive: Receive, message: Any) = message match {
case r: Recover ⇒ // ignore
case ReplayedMessage(p) ⇒ processPersistent(receive, p) // can occur after unstash from user stash
case WriteMessageSuccess(p) ⇒ processPersistent(receive, p)
case WriteMessageFailure(p, cause) ⇒
val notification = PersistenceFailure(p.payload, p.sequenceNr, cause)
if (receive.isDefinedAt(notification)) process(receive, notification)
else {
@ -152,8 +94,8 @@ trait Processor extends Actor with Stash with StashFactory {
"To avoid killing processors on persistence failure, a processor must handle PersistenceFailure messages."
throw new ActorKilledException(errorMsg)
}
case LoopMessageSuccess(m) ⇒ process(receive, m)
case WriteMessagesSuccess | WriteMessagesFailure(_) ⇒
if (processorBatch.isEmpty) batching = false else journalBatch()
case p: PersistentRepr ⇒
addToBatch(p)
@ -166,7 +108,7 @@ trait Processor extends Actor with Stash with StashFactory {
case m ⇒
// submit all batched messages before looping this message
if (processorBatch.isEmpty) batching = false else journalBatch()
journal forward LoopMessage(m, self)
}
def addToBatch(p: PersistentRepr): Unit =
@ -176,67 +118,49 @@ trait Processor extends Actor with Stash with StashFactory {
pb.persistentReprList.foreach(addToBatch)
def maxBatchSizeReached: Boolean =
processorBatch.length >= extension.settings.journal.maxMessageBatchSize
def journalBatch(): Unit = {
journal ! WriteMessages(processorBatch, self)
processorBatch = Vector.empty
batching = true
}
}
/**
* INTERNAL API.
*
* Switches to `initializing` state and requests the highest stored sequence number from the journal.
*/
private[persistence] def onReplaySuccess(receive: Receive, awaitReplay: Boolean): Unit = {
_currentState = initializing
journal ! ReadHighestSequenceNr(lastSequenceNr, processorId, self)
}
/**
* INTERNAL API.
*/
private[persistence] def onReplayFailure(receive: Receive, awaitReplay: Boolean, cause: Throwable): Unit =
onRecoveryFailure(receive, cause)
/**
* Invokes this processor's behavior with a `RecoveryFailure` message, if handled, otherwise throws a
* `RecoveryException`.
*/
private def onRecoveryFailure(receive: Receive, cause: Throwable): Unit = {
val notification = RecoveryFailure(cause)
if (receive.isDefinedAt(notification)) {
receive(notification)
} else {
val errorMsg = s"Recovery failure by journal (processor id = [${processorId}])"
throw new RecoveryException(errorMsg, cause)
}
}
private val _processorId = extension.processorId(self)
private var processorBatch = Vector.empty[PersistentRepr]
private var sequenceNr: Long = 0L
/**
* Processor id. Defaults to this processor's path and can be overridden.
@ -244,30 +168,21 @@ trait Processor extends Actor with Stash with StashFactory {
def processorId: String = _processorId
/**
* Returns `processorId`.
*/
def snapshotterId: String = processorId
/**
* Returns `true` if this processor is currently recovering.
*/
def recoveryRunning: Boolean =
_currentState != processing
/**
* Returns `true` if this processor has successfully finished recovery.
*/
def recoveryFinished: Boolean =
_currentState == processing
/**
* Marks a persistent message, identified by `sequenceNr`, as deleted. A message marked as deleted is
@ -289,23 +204,20 @@ trait Processor extends Actor with Stash with StashFactory {
* Processors that want to re-receive that persistent message during recovery should not call
* this method.
*
* @param sequenceNr sequence number of the persistent message to be deleted.
* @param permanent if `false`, the message is marked as deleted, otherwise it is permanently deleted.
*/
def deleteMessage(sequenceNr: Long, permanent: Boolean): Unit = {
journal ! DeleteMessages(List(PersistentIdImpl(processorId, sequenceNr)), permanent)
}
/**
* Permanently deletes all persistent messages with sequence numbers less than or equal `toSequenceNr`.
*
* @param toSequenceNr upper sequence number bound of persistent messages to be deleted.
*/
def deleteMessages(toSequenceNr: Long): Unit = {
deleteMessages(toSequenceNr, true)
}
/**
@ -313,59 +225,11 @@ trait Processor extends Actor with Stash with StashFactory {
* is set to `false`, the persistent messages are marked as deleted in the journal, otherwise
* they are permanently deleted from the journal.
*
* @param toSequenceNr upper sequence number bound of persistent messages to be deleted.
* @param permanent if `false`, the message is marked as deleted, otherwise it is permanently deleted.
*/
def deleteMessages(toSequenceNr: Long, permanent: Boolean): Unit = {
journal ! DeleteMessagesTo(processorId, toSequenceNr, permanent)
}
/**
@ -387,15 +251,15 @@ trait Processor extends Actor with Stash with StashFactory {
*/
final override protected[akka] def aroundPreRestart(reason: Throwable, message: Option[Any]): Unit = {
try {
receiverStash.prepend(processorBatch.map(p ⇒ Envelope(p, p.sender, context.system)))
receiverStash.unstashAll()
unstashAll(unstashFilterPredicate)
} finally {
message match {
case Some(WriteMessageSuccess(m)) ⇒ preRestartDefault(reason, Some(m))
case Some(LoopMessageSuccess(m)) ⇒ preRestartDefault(reason, Some(m))
case Some(ReplayedMessage(m)) ⇒ preRestartDefault(reason, Some(m))
case mo ⇒ preRestartDefault(reason, None)
}
}
}
@ -429,36 +293,44 @@ trait Processor extends Actor with Stash with StashFactory {
}
private def nextSequenceNr(): Long = {
sequenceNr += 1L
sequenceNr
}
private val unstashFilterPredicate: Any ⇒ Boolean = {
case _: WriteMessageSuccess ⇒ false
case _: ReplayedMessage ⇒ false
case _ ⇒ true
}
}
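To make the reworked write path concrete, a minimal processor sketch (the class, counter and payload handling are illustrative assumptions): persistent messages are journaled in batches via `WriteMessages` before the behavior is invoked, and a failed write surfaces as a `PersistenceFailure` message.

  class CounterProcessor extends Processor {
    var counter = 0L

    def receive = {
      case Persistent(payload, sequenceNr) ⇒
        counter += 1 // also invoked for messages replayed during recovery
      case PersistenceFailure(payload, sequenceNr, cause) ⇒
        // the journal failed to write `payload`; without this case the
        // processor would be stopped with an ActorKilledException
      case other ⇒ // transient message, not journaled
    }
  }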
/**
* Sent to a [[Processor]] if a journal fails to write a [[Persistent]] message. If
* not handled, an `akka.actor.ActorKilledException` is thrown by that processor.
*
* @param payload payload of the persistent message.
* @param sequenceNr sequence number of the persistent message.
* @param cause failure cause.
*/
@SerialVersionUID(1L)
case class PersistenceFailure(payload: Any, sequenceNr: Long, cause: Throwable)
/**
* Sent to a [[Processor]] if a journal fails to replay messages or fetch that processor's
* highest sequence number. If not handled, a [[RecoveryException]] is thrown by that
* processor.
*/
@SerialVersionUID(1L)
case class RecoveryFailure(cause: Throwable)
/**
* Thrown by a [[Processor]] if a journal fails to replay messages or fetch that processor's
* highest sequence number. This exception is only thrown if that processor doesn't handle
* [[RecoveryFailure]] messages.
*/
@SerialVersionUID(1L)
case class RecoveryException(message: String, cause: Throwable) extends AkkaException(message, cause)
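A sketch of the corresponding failure handling (class name and reaction are illustrative): a processor that handles `RecoveryFailure` itself keeps running instead of failing with `RecoveryException`.

  class ResilientProcessor extends Processor {
    def receive = {
      case RecoveryFailure(cause) ⇒
        // e.g. log and keep accepting transient messages; without this
        // case the processor would throw RecoveryException
      case Persistent(payload, _) ⇒
        // ... update state ...
    }
  }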
/**
* Java API: an actor that persists (journals) messages of type [[Persistent]]. Messages of other types
* are not persisted.
@ -513,9 +385,4 @@ case class PersistenceFailure(payload: Any, sequenceNr: Long, cause: Throwable)
* @see [[Recover]]
* @see [[PersistentBatch]]
*/
abstract class UntypedProcessor extends UntypedActor with Processor
View file
@ -1,71 +0,0 @@
/**
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.persistence
import akka.AkkaException
/**
* Instructs a processor to recover itself. Recovery will start from a snapshot if the processor has
* previously saved one or more snapshots and at least one of these snapshots matches the specified
* `fromSnapshot` criteria. Otherwise, recovery will start from scratch by replaying all journaled
* messages.
*
* If recovery starts from a snapshot, the processor is offered that snapshot with a [[SnapshotOffer]]
* message, followed by replayed messages, if any, that are younger than the snapshot, up to the
* specified upper sequence number bound (`toSequenceNr`).
*
* @param fromSnapshot criteria for selecting a saved snapshot from which recovery should start. Default
* is latest (= youngest) snapshot.
* @param toSequenceNr upper sequence number bound (inclusive) for recovery. Default is no upper bound.
*/
@SerialVersionUID(1L)
case class Recover(fromSnapshot: SnapshotSelectionCriteria = SnapshotSelectionCriteria.Latest, toSequenceNr: Long = Long.MaxValue)
object Recover {
/**
* Java API.
*
* @see [[Recover]]
*/
def create() = Recover()
/**
* Java API.
*
* @see [[Recover]]
*/
def create(toSequenceNr: Long) =
Recover(toSequenceNr = toSequenceNr)
/**
* Java API.
*
* @see [[Recover]]
*/
def create(fromSnapshot: SnapshotSelectionCriteria) =
Recover(fromSnapshot = fromSnapshot)
/**
* Java API.
*
* @see [[Recover]]
*/
def create(fromSnapshot: SnapshotSelectionCriteria, toSequenceNr: Long) =
Recover(fromSnapshot, toSequenceNr)
}
/**
* Sent to a [[Processor]] after failed recovery. If not handled, a
* [[RecoveryFailureException]] is thrown by that processor.
*/
@SerialVersionUID(1L)
case class RecoveryFailure(cause: Throwable)
/**
* Thrown by a [[Processor]] if a journal failed to replay all requested messages.
*/
@SerialVersionUID(1L)
case class RecoveryFailureException(message: String, cause: Throwable) extends AkkaException(message, cause)
View file
@ -0,0 +1,303 @@
/**
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.persistence
import akka.actor._
import akka.dispatch.Envelope
import akka.persistence.JournalProtocol._
import akka.persistence.SnapshotProtocol.LoadSnapshotResult
/**
* Recovery state machine that loads snapshots and replays messages.
*
* @see [[Processor]]
* @see [[View]]
*/
trait Recovery extends Actor with Snapshotter with Stash with StashFactory {
/**
* INTERNAL API.
*
* Recovery state.
*/
private[persistence] trait State {
def aroundReceive(receive: Receive, message: Any): Unit
protected def process(receive: Receive, message: Any) =
receive.applyOrElse(message, unhandled)
protected def processPersistent(receive: Receive, persistent: Persistent) =
withCurrentPersistent(persistent)(receive.applyOrElse(_, unhandled))
protected def updateLastSequenceNr(persistent: Persistent): Unit =
if (persistent.sequenceNr > _lastSequenceNr) _lastSequenceNr = persistent.sequenceNr
def updateLastSequenceNr(value: Long): Unit =
_lastSequenceNr = value
protected def withCurrentPersistent(persistent: Persistent)(body: Persistent ⇒ Unit): Unit = try {
_currentPersistent = persistent
updateLastSequenceNr(persistent)
body(persistent)
} finally _currentPersistent = null
protected def recordFailure(cause: Throwable): Unit = {
_recoveryFailureCause = cause
_recoveryFailureMessage = context.asInstanceOf[ActorCell].currentMessage
}
}
/**
* INTERNAL API.
*
* Initial state, waits for a `Recover` request, submits a `LoadSnapshot` request to the snapshot
* store and changes to `recoveryStarted` state.
*/
private[persistence] val recoveryPending = new State {
override def toString: String = "recovery pending"
def aroundReceive(receive: Receive, message: Any): Unit = message match {
case Recover(fromSnap, toSnr, replayMax) ⇒
_currentState = recoveryStarted(replayMax)
loadSnapshot(snapshotterId, fromSnap, toSnr)
case _ ⇒ receiverStash.stash()
}
}
/**
* INTERNAL API.
*
* Processes a loaded snapshot, if any. A loaded snapshot is offered with a `SnapshotOffer`
* message to the actor's current behavior. Then initiates a message replay, either starting
* from the loaded snapshot or from scratch, and switches to `replayStarted` state.
*
* @param replayMax maximum number of messages to replay.
*/
private[persistence] def recoveryStarted(replayMax: Long) = new State {
override def toString: String = s"recovery started (replayMax = [${replayMax}])"
def aroundReceive(receive: Receive, message: Any) = message match {
case r: Recover ⇒ // ignore
case LoadSnapshotResult(sso, toSnr) ⇒
sso.foreach {
case SelectedSnapshot(metadata, snapshot) ⇒
updateLastSequenceNr(metadata.sequenceNr)
process(receive, SnapshotOffer(metadata, snapshot))
}
_currentState = replayStarted(await = true)
journal ! ReplayMessages(lastSequenceNr + 1L, toSnr, replayMax, processorId, self)
case other ⇒ receiverStash.stash()
}
}
/**
* INTERNAL API.
*
* Processes replayed messages, if any. The actor's current behavior is invoked with the replayed
* [[Persistent]] messages. If processing of a replayed message fails, the exception is caught and
* stored to be thrown later, and the state is changed to `replayFailed`. If replay succeeds the
* `onReplaySuccess` method is called, otherwise `onReplayFailure`.
*
* @param await if `true` processing of further messages will be delayed until replay completes,
* otherwise, the actor's behavior is invoked immediately with these messages.
*/
private[persistence] def replayStarted(await: Boolean) = new State {
override def toString: String = s"replay started (await = [${await}])"
def aroundReceive(receive: Receive, message: Any) = message match {
case r: Recover ⇒ // ignore
case ReplayedMessage(p) ⇒ try { processPersistent(receive, p) } catch {
case t: Throwable ⇒
_currentState = replayFailed // delay throwing exception to prepareRestart
recordFailure(t)
}
case ReplayMessagesSuccess ⇒ onReplaySuccess(receive, await)
case ReplayMessagesFailure(cause) ⇒ onReplayFailure(receive, await, cause)
case other ⇒
if (await) receiverStash.stash() else process(receive, other)
}
}
/**
* INTERNAL API.
*
* Consumes remaining replayed messages and then changes to `prepareRestart`. The
* message that caused the exception during replay is re-added to the mailbox and
* re-received in `prepareRestart`.
*/
private[persistence] val replayFailed = new State {
override def toString: String = "replay failed"
def aroundReceive(receive: Receive, message: Any) = message match {
case ReplayMessagesFailure(_) ⇒
replayCompleted()
// journal couldn't tell the maximum stored sequence number, hence the next
// replay must be a full replay (up to the highest stored sequence number)
updateLastSequenceNr(Long.MaxValue)
case ReplayMessagesSuccess ⇒ replayCompleted()
case ReplayedMessage(p) ⇒ updateLastSequenceNr(p)
case r: Recover ⇒ // ignore
case _ ⇒ receiverStash.stash()
}
def replayCompleted(): Unit = {
_currentState = prepareRestart
mailbox.enqueueFirst(self, _recoveryFailureMessage)
}
}
/**
* INTERNAL API.
*
* Re-receives the replayed message that caused an exception and re-throws that exception.
*/
private[persistence] val prepareRestart = new State {
override def toString: String = "prepare restart"
def aroundReceive(receive: Receive, message: Any) = message match {
case ReplayedMessage(_) ⇒ throw _recoveryFailureCause
case _ ⇒ // ignore
}
}
private var _recoveryFailureCause: Throwable = _
private var _recoveryFailureMessage: Envelope = _
private var _lastSequenceNr: Long = 0L
private var _currentPersistent: Persistent = _
/**
* Id of the processor for which messages should be replayed.
*/
def processorId: String
/**
* Returns the current persistent message if there is any.
*/
implicit def currentPersistentMessage: Option[Persistent] = Option(_currentPersistent)
/**
* Java API: returns the current persistent message or `null` if there is none.
*/
def getCurrentPersistentMessage = currentPersistentMessage.getOrElse(null)
/**
* Highest received sequence number so far or `0L` if this actor hasn't received a persistent
* message yet. Usually equal to the sequence number of `currentPersistentMessage` (unless a
* receiver implementation is about to re-order persistent messages using `stash()` and `unstash()`).
*/
def lastSequenceNr: Long = _lastSequenceNr
/**
* Returns `lastSequenceNr`.
*/
def snapshotSequenceNr: Long = lastSequenceNr
/**
* INTERNAL API.
*/
private[persistence] var _currentState: State = recoveryPending
/**
* INTERNAL API.
*
* Called whenever a message replay succeeds.
*
* @param receive the actor's current behavior.
* @param awaitReplay `awaitReplay` value of the calling `replayStarted` state.
*/
private[persistence] def onReplaySuccess(receive: Receive, awaitReplay: Boolean): Unit
/**
* INTERNAL API.
*
* Called whenever a message replay fails.
*
* @param receive the actor's current behavior.
* @param awaitReplay `awaitReplay` value of the calling `replayStarted` state.
* @param cause failure cause.
*/
private[persistence] def onReplayFailure(receive: Receive, awaitReplay: Boolean, cause: Throwable): Unit
/**
* INTERNAL API.
*/
private[persistence] val extension = Persistence(context.system)
/**
* INTERNAL API.
*/
private[persistence] lazy val journal = extension.journalFor(processorId)
/**
* INTERNAL API.
*/
private[persistence] val receiverStash = createStash()
/**
* INTERNAL API.
*/
override protected[akka] def aroundReceive(receive: Receive, message: Any): Unit = {
_currentState.aroundReceive(receive, message)
}
}
/**
* Instructs a processor to recover itself. Recovery will start from a snapshot if the processor has
* previously saved one or more snapshots and at least one of these snapshots matches the specified
* `fromSnapshot` criteria. Otherwise, recovery will start from scratch by replaying all journaled
* messages.
*
* If recovery starts from a snapshot, the processor is offered that snapshot with a [[SnapshotOffer]]
* message, followed by replayed messages, if any, that are younger than the snapshot, up to the
* specified upper sequence number bound (`toSequenceNr`).
*
* @param fromSnapshot criteria for selecting a saved snapshot from which recovery should start. Default
* is latest (= youngest) snapshot.
* @param toSequenceNr upper sequence number bound (inclusive) for recovery. Default is no upper bound.
* @param replayMax maximum number of messages to replay. Default is no limit.
*/
@SerialVersionUID(1L)
case class Recover(fromSnapshot: SnapshotSelectionCriteria = SnapshotSelectionCriteria.Latest, toSequenceNr: Long = Long.MaxValue, replayMax: Long = Long.MaxValue)
object Recover {
/**
* Java API.
*
* @see [[Recover]]
*/
def create() = Recover()
/**
* Java API.
*
* @see [[Recover]]
*/
def create(toSequenceNr: Long) =
Recover(toSequenceNr = toSequenceNr)
/**
* Java API.
*
* @see [[Recover]]
*/
def create(fromSnapshot: SnapshotSelectionCriteria) =
Recover(fromSnapshot = fromSnapshot)
/**
* Java API.
*
* @see [[Recover]]
*/
def create(fromSnapshot: SnapshotSelectionCriteria, toSequenceNr: Long) =
Recover(fromSnapshot, toSequenceNr)
/**
* Java API.
*
* @see [[Recover]]
*/
def create(fromSnapshot: SnapshotSelectionCriteria, toSequenceNr: Long, replayMax: Long) =
Recover(fromSnapshot, toSequenceNr, replayMax)
}
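Recovery is driven entirely by the `Recover` message; the new `replayMax` parameter allows bounding it. A sketch (the processor class is an illustrative assumption) following the same pattern this commit uses in `RequestWriter` and `View`, where `preStart` sends a customized `Recover` instead of the default one:

  class BoundedRecoveryProcessor extends Processor {
    // Replay at most 100 messages, up to sequence number 457.
    override def preStart(): Unit = {
      self ! Recover(toSequenceNr = 457L, replayMax = 100L)
    }

    def receive = {
      case Persistent(payload, _) ⇒ // ... update state ...
    }
  }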
View file
@ -101,7 +101,7 @@ case class SelectedSnapshot(metadata: SnapshotMetadata, snapshot: Any)
object SelectedSnapshot {
/**
* Java API, Plugin API.
*/
def create(metadata: SnapshotMetadata, snapshot: Any): SelectedSnapshot =
SelectedSnapshot(metadata, snapshot)
View file
@ -0,0 +1,51 @@
/**
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.persistence
import akka.actor._
import akka.persistence.SnapshotProtocol._
/**
* Snapshot API on top of the internal snapshot protocol.
*/
trait Snapshotter extends Actor {
private lazy val snapshotStore = Persistence(context.system).snapshotStoreFor(snapshotterId)
/**
* Snapshotter id.
*/
def snapshotterId: String
/**
* Sequence number to use when taking a snapshot.
*/
def snapshotSequenceNr: Long
def loadSnapshot(processorId: String, criteria: SnapshotSelectionCriteria, toSequenceNr: Long) =
snapshotStore ! LoadSnapshot(processorId, criteria, toSequenceNr)
/**
* Saves a `snapshot` of this snapshotter's state. If saving succeeds, this snapshotter will receive a
* [[SaveSnapshotSuccess]] message, otherwise a [[SaveSnapshotFailure]] message.
*/
def saveSnapshot(snapshot: Any): Unit = {
snapshotStore ! SaveSnapshot(SnapshotMetadata(snapshotterId, snapshotSequenceNr), snapshot)
}
/**
* Deletes a snapshot identified by `sequenceNr` and `timestamp`.
*/
def deleteSnapshot(sequenceNr: Long, timestamp: Long): Unit = {
snapshotStore ! DeleteSnapshot(SnapshotMetadata(snapshotterId, sequenceNr, timestamp))
}
/**
* Deletes all snapshots matching `criteria`.
*/
def deleteSnapshots(criteria: SnapshotSelectionCriteria): Unit = {
snapshotStore ! DeleteSnapshots(snapshotterId, criteria)
}
}
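Since `Processor` mixes in `Recovery`, which extends `Snapshotter`, these methods are directly available to processors. A sketch (state handling and the `"snap"` trigger are illustrative assumptions):

  class SnapshotExample extends Processor {
    var state: List[String] = Nil

    def receive = {
      case Persistent(payload: String, _) ⇒ state = payload :: state
      case SnapshotOffer(metadata, offered) ⇒ state = offered.asInstanceOf[List[String]] // recovery starts here
      case SaveSnapshotSuccess(metadata) ⇒ // snapshot was stored
      case SaveSnapshotFailure(metadata, cause) ⇒ // snapshot could not be stored
      case "snap" ⇒ saveSnapshot(state)
    }
  }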
View file
@ -0,0 +1,200 @@
/**
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.persistence
import scala.concurrent.duration._
import akka.actor._
import akka.persistence.JournalProtocol._
/**
* Instructs a [[View]] to update itself. This will run a single incremental message replay with all
* messages from the corresponding processor's journal that have not yet been consumed by the view.
* To update a view with messages that have been written after handling this request, another `Update`
* request must be sent to the view.
*
* @param await if `true`, processing of further messages sent to the view will be delayed until the
* incremental message replay, triggered by this update request, completes. If `false`,
* any message sent to the view may interleave with the replayed [[Persistent]] message
* stream.
* @param replayMax maximum number of messages to replay when handling this update request. Defaults
* to `Long.MaxValue` (i.e. no limit).
*/
@SerialVersionUID(1L)
case class Update(await: Boolean = false, replayMax: Long = Long.MaxValue)
object Update {
/**
* Java API.
*/
def create() =
Update()
/**
* Java API.
*/
def create(await: Boolean) =
Update(await)
/**
* Java API.
*/
def create(await: Boolean, replayMax: Long) =
Update(await, replayMax)
}
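Usage sketch (the `view` reference is an assumption for illustration):

  // val view: ActorRef = system.actorOf(Props[MyView]) // created elsewhere

  // With await = true, messages sent to the view are stashed until the
  // incremental replay triggered by this request completes.
  view ! Update(await = true, replayMax = 100L)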
/**
* A view replicates the persistent message stream of a processor. Implementation classes receive the
* message stream as [[Persistent]] messages. These messages can be processed to update internal state
* in order to maintain an (eventually consistent) view of the state of the corresponding processor. A
* view can also run on a different node, provided that a replicated journal is used. Implementation
* classes reference a processor by implementing `processorId`.
*
* Views can also store snapshots of internal state by calling [[saveSnapshot]]. The snapshots of a view
* are independent of those of the referenced processor. During recovery, a saved snapshot is offered
* to the view with a [[SnapshotOffer]] message, followed by replayed messages, if any, that are younger
* than the snapshot. Default is to offer the latest saved snapshot.
*
* By default, a view automatically updates itself with an interval returned by `autoUpdateInterval`.
* This method can be overridden by implementation classes to define a view instance-specific update
* interval. The default update interval for all views of an actor system can be configured with the
* `akka.persistence.view.auto-update-interval` configuration key. Applications may trigger additional
* view updates by sending the view [[Update]] requests. See also methods
*
* - [[autoUpdate]] for turning automated updates on or off
* - [[autoUpdateReplayMax]] for limiting the number of replayed messages per view update cycle and
* - [[autoRecoveryReplayMax]] for limiting the number of replayed messages on initial view recovery
*
* Views can also use channels to communicate with destinations in the same way as processors can do.
*/
trait View extends Actor with Recovery {
import context.dispatcher
/**
* INTERNAL API.
*
* Extends the `replayStarted` state of [[Recovery]] with logic to handle [[Update]] requests
* sent by users.
*/
private[persistence] override def replayStarted(await: Boolean) = new State {
private var delegateAwaiting = await
private var delegate = View.super.replayStarted(await)
override def toString: String = delegate.toString
override def aroundReceive(receive: Receive, message: Any) = message match {
case Update(false, _) ⇒ // ignore
case u @ Update(true, _) if !delegateAwaiting ⇒
delegateAwaiting = true
delegate = View.super.replayStarted(await = true)
delegate.aroundReceive(receive, u)
case other ⇒
delegate.aroundReceive(receive, other)
}
}
/**
* When receiving an [[Update]] request, switches to `replayStarted` state and triggers
* an incremental message replay. Invokes the actor's current behavior for any other
* received message.
*/
private val idle: State = new State {
override def toString: String = "idle"
def aroundReceive(receive: Receive, message: Any): Unit = message match {
case r: Recover ⇒ // ignore
case Update(awaitUpdate, replayMax) ⇒
_currentState = replayStarted(await = awaitUpdate)
journal ! ReplayMessages(lastSequenceNr + 1L, Long.MaxValue, replayMax, processorId, self)
case other ⇒ process(receive, other)
}
}
/**
* INTERNAL API.
*/
private[persistence] def onReplaySuccess(receive: Receive, await: Boolean): Unit =
onReplayComplete(await)
/**
* INTERNAL API.
*/
private[persistence] def onReplayFailure(receive: Receive, await: Boolean, cause: Throwable): Unit =
onReplayComplete(await)
/**
* Switches to `idle` state and schedules the next update if `autoUpdate` returns `true`.
*/
private def onReplayComplete(await: Boolean): Unit = {
_currentState = idle
if (autoUpdate) schedule = Some(context.system.scheduler.scheduleOnce(autoUpdateInterval, self, Update(await = false)))
if (await) receiverStash.unstashAll()
}
private val _viewId = extension.processorId(self)
private val viewSettings = extension.settings.view
private var schedule: Option[Cancellable] = None
/**
* View id. Defaults to this view's path and can be overridden.
*/
def viewId: String = _viewId
/**
* Returns `viewId`.
*/
def snapshotterId: String = viewId
/**
* If `true`, this view automatically updates itself with an interval specified by `autoUpdateInterval`.
* If `false`, applications must explicitly update this view by sending [[Update]] requests. The default
* value can be configured with the `akka.persistence.view.auto-update` configuration key. This method
* can be overridden by implementation classes to return non-default values.
*/
def autoUpdate: Boolean =
viewSettings.autoUpdate
/**
* The interval for automated updates. The default value can be configured with the
* `akka.persistence.view.auto-update-interval` configuration key. This method can be
* overridden by implementation classes to return non-default values.
*/
def autoUpdateInterval: FiniteDuration =
viewSettings.autoUpdateInterval
/**
* The maximum number of messages to replay per update. The default value can be configured with the
* `akka.persistence.view.auto-update-replay-max` configuration key. This method can be overridden by
* implementation classes to return non-default values.
*/
def autoUpdateReplayMax: Long =
viewSettings.autoUpdateReplayMax
/**
* Triggers an initial recovery, starting from a snapshot, if any, and replaying at most `autoUpdateReplayMax`
* messages (following that snapshot).
*/
override def preStart(): Unit = {
super.preStart()
self ! Recover(replayMax = autoUpdateReplayMax)
}
override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
try receiverStash.unstashAll() finally super.preRestart(reason, message)
}
override def postStop(): Unit = {
schedule.foreach(_.cancel())
super.postStop()
}
}
/**
* Java API.
*
* @see [[View]]
*/
abstract class UntypedView extends UntypedActor with View
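A view implementation sketch (processor id, state, and snapshot handling are illustrative assumptions): it consumes the persistent message stream of the referenced processor and keeps a running count that is refreshed by automated or manual updates.

  class CountingView extends View {
    def processorId: String = "some-processor"

    var numReplicated = 0L

    def receive = {
      case Persistent(payload, sequenceNr) ⇒ numReplicated += 1
      case SnapshotOffer(metadata, snapshot) ⇒
        numReplicated = snapshot.asInstanceOf[Long] // view-private snapshot
    }
  }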
View file
@ -9,17 +9,16 @@ import scala.concurrent.Future
import akka.persistence.PersistentRepr
/**
* Asynchronous message replay and sequence number recovery interface.
*/
trait AsyncRecovery {
//#journal-plugin-api
/**
* Plugin API: asynchronously replays persistent messages. Implementations replay
* a message by calling `replayCallback`. The returned future must be completed
* when all messages (matching the sequence number bounds) have been replayed.
* The future must be completed with a failure if any of the persistent messages
* could not be replayed.
*
* The `replayCallback` must also be called with messages that have been marked
* as deleted. In this case a replayed message's `deleted` method must return
@ -31,12 +30,23 @@ trait AsyncReplay {
* @param processorId processor id.
* @param fromSequenceNr sequence number where replay should start (inclusive).
* @param toSequenceNr sequence number where replay should end (inclusive).
* @param max maximum number of messages to be replayed.
* @param replayCallback called to replay a single message. Can be called from any
* thread.
*
* @see [[AsyncWriteJournal]]
* @see [[SyncWriteJournal]]
*/
def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit): Future[Unit]
/**
* Plugin API: asynchronously reads the highest stored sequence number for the
* given `processorId`.
*
* @param processorId processor id.
* @param fromSequenceNr hint where to start searching for the highest sequence
* number.
*/
def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long]
//#journal-plugin-api
}
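A sketch of the split recovery interface against a naive in-memory store (the `messages` map and class name are assumptions for illustration; a real journal plugin would query its backing store asynchronously instead of completing the futures eagerly):

  import scala.concurrent.Future

  class InmemRecovery(messages: Map[String, Vector[PersistentRepr]]) extends AsyncRecovery {
    def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(
      replayCallback: PersistentRepr ⇒ Unit): Future[Unit] = Future.successful {
      messages.getOrElse(processorId, Vector.empty).iterator
        .filter(p ⇒ p.sequenceNr >= fromSequenceNr && p.sequenceNr <= toSequenceNr)
        .take(math.min(max, Int.MaxValue.toLong).toInt)
        .foreach(replayCallback) // replay in stored order
    }

    def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] =
      Future.successful(
        messages.getOrElse(processorId, Vector.empty)
          .foldLeft(0L)((highest, p) ⇒ math.max(highest, p.sequenceNr)))
  }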
View file
@@ -12,69 +12,73 @@ import scala.util._
 import akka.actor._
 import akka.pattern.pipe
 import akka.persistence._
-import akka.persistence.JournalProtocol._
 
 /**
  * Abstract journal, optimized for asynchronous, non-blocking writes.
  */
-trait AsyncWriteJournal extends Actor with AsyncReplay {
+trait AsyncWriteJournal extends Actor with AsyncRecovery {
+  import JournalProtocol._
   import AsyncWriteJournal._
   import context.dispatcher
 
   private val extension = Persistence(context.system)
+  private val publish = extension.settings.internal.publishPluginCommands
   private val resequencer = context.actorOf(Props[Resequencer])
   private var resequencerCounter = 1L
 
   def receive = {
-    case WriteBatch(persistentBatch, processor) ⇒
+    case WriteMessages(persistentBatch, processor) ⇒
       val cctr = resequencerCounter
       def resequence(f: PersistentRepr ⇒ Any) = persistentBatch.zipWithIndex.foreach {
         case (p, i) ⇒ resequencer ! Desequenced(f(p), cctr + i + 1, processor, p.sender)
       }
-      writeAsync(persistentBatch.map(_.prepareWrite())) onComplete {
+      asyncWriteMessages(persistentBatch.map(_.prepareWrite())) onComplete {
         case Success(_) ⇒
-          resequencer ! Desequenced(WriteBatchSuccess, cctr, processor, self)
-          resequence(WriteSuccess(_))
+          resequencer ! Desequenced(WriteMessagesSuccess, cctr, processor, self)
+          resequence(WriteMessageSuccess(_))
         case Failure(e) ⇒
-          resequencer ! Desequenced(WriteBatchFailure(e), cctr, processor, self)
-          resequence(WriteFailure(_, e))
+          resequencer ! Desequenced(WriteMessagesFailure(e), cctr, processor, self)
+          resequence(WriteMessageFailure(_, e))
       }
       resequencerCounter += persistentBatch.length + 1
-    case Replay(fromSequenceNr, toSequenceNr, processorId, processor) ⇒
+    case ReplayMessages(fromSequenceNr, toSequenceNr, max, processorId, processor, replayDeleted) ⇒
       // Send replayed messages and replay result to processor directly. No need
       // to resequence replayed messages relative to written and looped messages.
-      replayAsync(processorId, fromSequenceNr, toSequenceNr) { p ⇒
-        if (!p.deleted) processor.tell(Replayed(p), p.sender)
+      asyncReplayMessages(processorId, fromSequenceNr, toSequenceNr, max) { p ⇒
+        if (!p.deleted || replayDeleted) processor.tell(ReplayedMessage(p), p.sender)
       } map {
-        maxSnr ⇒ ReplaySuccess(maxSnr)
+        case _ ⇒ ReplayMessagesSuccess
       } recover {
-        case e ⇒ ReplayFailure(e)
+        case e ⇒ ReplayMessagesFailure(e)
       } pipeTo (processor)
-    case c @ Confirm(processorId, messageSequenceNr, channelId, wrapperSequenceNr, channelEndpoint) ⇒
-      val op = if (wrapperSequenceNr == 0L) {
-        // A wrapperSequenceNr == 0L means that the corresponding message was delivered by a
-        // transient channel. We can now write a delivery confirmation for this message.
-        confirmAsync(processorId, messageSequenceNr, channelId)
-      } else {
-        // A wrapperSequenceNr != 0L means that the corresponding message was delivered by a
-        // persistent channel. We can now safely delete the wrapper message (that contains the
-        // delivered message).
-        deleteAsync(channelId, wrapperSequenceNr, wrapperSequenceNr, true)
-      }
-      op onComplete {
-        case Success(_) ⇒
-          if (extension.publishPluginCommands) context.system.eventStream.publish(c)
-          if (channelEndpoint != null) channelEndpoint ! c
-        case Failure(e) ⇒ // TODO: publish failure to event stream
-      }
-    case d @ Delete(processorId, fromSequenceNr, toSequenceNr, permanent) ⇒
-      deleteAsync(processorId, fromSequenceNr, toSequenceNr, permanent) onComplete {
-        case Success(_) ⇒ if (extension.publishPluginCommands) context.system.eventStream.publish(d)
-        case Failure(e) ⇒ // TODO: publish failure to event stream
-      }
-    case Loop(message, processor) ⇒
-      resequencer ! Desequenced(LoopSuccess(message), resequencerCounter, processor, sender)
+    case ReadHighestSequenceNr(fromSequenceNr, processorId, processor) ⇒
+      // Send read highest sequence number to processor directly. No need
+      // to resequence the result relative to written and looped messages.
+      asyncReadHighestSequenceNr(processorId, fromSequenceNr).map {
+        highest ⇒ ReadHighestSequenceNrSuccess(highest)
+      } recover {
+        case e ⇒ ReadHighestSequenceNrFailure(e)
+      } pipeTo (processor)
+    case c @ WriteConfirmations(confirmationsBatch, requestor) ⇒
+      asyncWriteConfirmations(confirmationsBatch) onComplete {
+        case Success(_) ⇒ requestor ! WriteConfirmationsSuccess(confirmationsBatch)
+        case Failure(e) ⇒ requestor ! WriteConfirmationsFailure(e)
+      }
+    case d @ DeleteMessages(messageIds, permanent, requestorOption) ⇒
+      asyncDeleteMessages(messageIds, permanent) onComplete {
+        case Success(_) ⇒
+          requestorOption.foreach(_ ! DeleteMessagesSuccess(messageIds))
+          if (publish) context.system.eventStream.publish(d)
+        case Failure(e) ⇒
+      }
+    case d @ DeleteMessagesTo(processorId, toSequenceNr, permanent) ⇒
+      asyncDeleteMessagesTo(processorId, toSequenceNr, permanent) onComplete {
+        case Success(_) ⇒ if (publish) context.system.eventStream.publish(d)
+        case Failure(e) ⇒
+      }
+    case LoopMessage(message, processor) ⇒
+      resequencer ! Desequenced(LoopMessageSuccess(message), resequencerCounter, processor, sender)
       resequencerCounter += 1
   }
@@ -84,22 +88,26 @@ trait AsyncWriteJournal extends Actor with AsyncReplay {
    * The batch write must be atomic i.e. either all persistent messages in the batch
    * are written or none.
    */
-  def writeAsync(persistentBatch: immutable.Seq[PersistentRepr]): Future[Unit]
+  def asyncWriteMessages(messages: immutable.Seq[PersistentRepr]): Future[Unit]
 
   /**
-   * Plugin API: asynchronously deletes all persistent messages within the range from
-   * `fromSequenceNr` to `toSequenceNr` (both inclusive). If `permanent` is set to
-   * `false`, the persistent messages are marked as deleted, otherwise they are
-   * permanently deleted.
-   *
-   * @see [[AsyncReplay]]
+   * Plugin API: asynchronously writes a batch of delivery confirmations to the journal.
    */
-  def deleteAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean): Future[Unit]
+  def asyncWriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Future[Unit]
 
   /**
-   * Plugin API: asynchronously writes a delivery confirmation to the journal.
+   * Plugin API: asynchronously deletes messages identified by `messageIds` from the
+   * journal. If `permanent` is set to `false`, the persistent messages are marked as
+   * deleted, otherwise they are permanently deleted.
    */
-  def confirmAsync(processorId: String, sequenceNr: Long, channelId: String): Future[Unit]
+  def asyncDeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Future[Unit]
+
+  /**
+   * Plugin API: asynchronously deletes all persistent messages up to `toSequenceNr`
+   * (inclusive). If `permanent` is set to `false`, the persistent messages are marked
+   * as deleted, otherwise they are permanently deleted.
+   */
+  def asyncDeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Future[Unit]
   //#journal-plugin-api
 }
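Put together, an asynchronous plugin now implements the four write-side methods above plus the two recovery methods. A no-op skeleton with the renamed signatures (bodies are placeholders standing in for a real storage backend):

import scala.collection.immutable
import scala.concurrent.Future
import akka.persistence.{ PersistentConfirmation, PersistentId, PersistentRepr }
import akka.persistence.journal.AsyncWriteJournal

// Sketch only: compiles against the renamed API, persists nothing.
class NoopAsyncJournal extends AsyncWriteJournal {
  def asyncWriteMessages(messages: immutable.Seq[PersistentRepr]): Future[Unit] = Future.successful(())
  def asyncWriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Future[Unit] = Future.successful(())
  def asyncDeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Future[Unit] = Future.successful(())
  def asyncDeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Future[Unit] = Future.successful(())
  def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit): Future[Unit] = Future.successful(())
  def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] = Future.successful(0L)
}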

View file

@@ -37,21 +37,27 @@ private[persistence] trait AsyncWriteProxy extends AsyncWriteJournal with Stash
   implicit def timeout: Timeout
 
-  def writeAsync(persistentBatch: immutable.Seq[PersistentRepr]): Future[Unit] =
-    (store ? WriteBatch(persistentBatch)).mapTo[Unit]
+  def asyncWriteMessages(messages: immutable.Seq[PersistentRepr]): Future[Unit] =
+    (store ? WriteMessages(messages)).mapTo[Unit]
 
-  def deleteAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean): Future[Unit] =
-    (store ? Delete(processorId, fromSequenceNr, toSequenceNr, permanent)).mapTo[Unit]
+  def asyncWriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Future[Unit] =
+    (store ? WriteConfirmations(confirmations)).mapTo[Unit]
 
-  def confirmAsync(processorId: String, sequenceNr: Long, channelId: String): Future[Unit] =
-    (store ? Confirm(processorId, sequenceNr, channelId)).mapTo[Unit]
+  def asyncDeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Future[Unit] =
+    (store ? DeleteMessages(messageIds, permanent)).mapTo[Unit]
 
-  def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: (PersistentRepr) ⇒ Unit): Future[Long] = {
-    val replayCompletionPromise = Promise[Long]
+  def asyncDeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Future[Unit] =
+    (store ? DeleteMessagesTo(processorId, toSequenceNr, permanent)).mapTo[Unit]
+
+  def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: (PersistentRepr) ⇒ Unit): Future[Unit] = {
+    val replayCompletionPromise = Promise[Unit]
     val mediator = context.actorOf(Props(classOf[ReplayMediator], replayCallback, replayCompletionPromise, timeout.duration).withDeploy(Deploy.local))
-    store.tell(Replay(processorId, fromSequenceNr, toSequenceNr), mediator)
+    store.tell(ReplayMessages(processorId, fromSequenceNr, toSequenceNr, max), mediator)
     replayCompletionPromise.future
   }
+
+  def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] =
+    (store ? ReadHighestSequenceNr(processorId, fromSequenceNr)).mapTo[Long]
 }
 
 /**
@@ -66,22 +72,28 @@ private[persistence] object AsyncWriteProxy {
  */
 private[persistence] object AsyncWriteTarget {
   @SerialVersionUID(1L)
-  case class WriteBatch(pb: immutable.Seq[PersistentRepr])
+  case class WriteMessages(messages: immutable.Seq[PersistentRepr])
 
   @SerialVersionUID(1L)
-  case class Delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean)
+  case class WriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation])
 
   @SerialVersionUID(1L)
-  case class Confirm(processorId: String, sequenceNr: Long, channelId: String)
+  case class DeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean)
 
   @SerialVersionUID(1L)
-  case class Replay(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)
+  case class DeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean)
 
   @SerialVersionUID(1L)
-  case class ReplaySuccess(maxSequenceNr: Long)
+  case class ReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)
+
+  @SerialVersionUID(1L)
+  case object ReplaySuccess
 
   @SerialVersionUID(1L)
   case class ReplayFailure(cause: Throwable)
+
+  @SerialVersionUID(1L)
+  case class ReadHighestSequenceNr(processorId: String, fromSequenceNr: Long)
 }
 
 /**
@@ -90,15 +102,15 @@ private[persistence] object AsyncWriteTarget {
 @SerialVersionUID(1L)
 class AsyncReplayTimeoutException(msg: String) extends AkkaException(msg)
 
-private class ReplayMediator(replayCallback: PersistentRepr ⇒ Unit, replayCompletionPromise: Promise[Long], replayTimeout: Duration) extends Actor {
+private class ReplayMediator(replayCallback: PersistentRepr ⇒ Unit, replayCompletionPromise: Promise[Unit], replayTimeout: Duration) extends Actor {
   import AsyncWriteTarget._
 
   context.setReceiveTimeout(replayTimeout)
 
   def receive = {
     case p: PersistentRepr ⇒ replayCallback(p)
-    case ReplaySuccess(maxSnr) ⇒
-      replayCompletionPromise.success(maxSnr)
+    case ReplaySuccess ⇒
+      replayCompletionPromise.success(())
       context.stop(self)
     case ReplayFailure(cause) ⇒
       replayCompletionPromise.failure(cause)

View file

@@ -15,49 +15,58 @@ import akka.persistence._
 /**
  * Abstract journal, optimized for synchronous writes.
  */
-trait SyncWriteJournal extends Actor with AsyncReplay {
+trait SyncWriteJournal extends Actor with AsyncRecovery {
   import JournalProtocol._
   import context.dispatcher
 
   private val extension = Persistence(context.system)
+  private val publish = extension.settings.internal.publishPluginCommands
 
   final def receive = {
-    case WriteBatch(persistentBatch, processor) ⇒
-      Try(write(persistentBatch.map(_.prepareWrite()))) match {
+    case WriteMessages(persistentBatch, processor) ⇒
+      Try(writeMessages(persistentBatch.map(_.prepareWrite()))) match {
         case Success(_) ⇒
-          processor ! WriteBatchSuccess
-          persistentBatch.foreach(p ⇒ processor.tell(WriteSuccess(p), p.sender))
+          processor ! WriteMessagesSuccess
+          persistentBatch.foreach(p ⇒ processor.tell(WriteMessageSuccess(p), p.sender))
         case Failure(e) ⇒
-          processor ! WriteBatchFailure(e)
-          persistentBatch.foreach(p ⇒ processor tell (WriteFailure(p, e), p.sender))
+          processor ! WriteMessagesFailure(e)
+          persistentBatch.foreach(p ⇒ processor tell (WriteMessageFailure(p, e), p.sender))
           throw e
       }
-    case Replay(fromSequenceNr, toSequenceNr, processorId, processor) ⇒
-      replayAsync(processorId, fromSequenceNr, toSequenceNr) { p ⇒
-        if (!p.deleted) processor.tell(Replayed(p), p.sender)
+    case ReplayMessages(fromSequenceNr, toSequenceNr, max, processorId, processor, replayDeleted) ⇒
+      asyncReplayMessages(processorId, fromSequenceNr, toSequenceNr, max) { p ⇒
+        if (!p.deleted || replayDeleted) processor.tell(ReplayedMessage(p), p.sender)
       } map {
-        maxSnr ⇒ ReplaySuccess(maxSnr)
+        case _ ⇒ ReplayMessagesSuccess
       } recover {
-        case e ⇒ ReplayFailure(e)
+        case e ⇒ ReplayMessagesFailure(e)
       } pipeTo (processor)
-    case c @ Confirm(processorId, messageSequenceNr, channelId, wrapperSequenceNr, channelEndpoint) ⇒
-      if (wrapperSequenceNr == 0L) {
-        // A wrapperSequenceNr == 0L means that the corresponding message was delivered by a
-        // transient channel. We can now write a delivery confirmation for this message.
-        confirm(processorId, messageSequenceNr, channelId)
-      } else {
-        // A wrapperSequenceNr != 0L means that the corresponding message was delivered by a
-        // persistent channel. We can now safely delete the wrapper message (that contains the
-        // delivered message).
-        delete(channelId, wrapperSequenceNr, wrapperSequenceNr, true)
-      }
-      if (channelEndpoint != null) channelEndpoint ! c
-      if (extension.publishPluginCommands) context.system.eventStream.publish(c)
-    case d @ Delete(processorId, fromSequenceNr, toSequenceNr, permanent) ⇒
-      delete(processorId, fromSequenceNr, toSequenceNr, permanent)
-      if (extension.publishPluginCommands) context.system.eventStream.publish(d)
-    case Loop(message, processor) ⇒
-      processor forward LoopSuccess(message)
+    case ReadHighestSequenceNr(fromSequenceNr, processorId, processor) ⇒
+      asyncReadHighestSequenceNr(processorId, fromSequenceNr).map {
+        highest ⇒ ReadHighestSequenceNrSuccess(highest)
+      } recover {
+        case e ⇒ ReadHighestSequenceNrFailure(e)
+      } pipeTo (processor)
+    case WriteConfirmations(confirmationsBatch, requestor) ⇒
+      Try(writeConfirmations(confirmationsBatch)) match {
+        case Success(_) ⇒ requestor ! WriteConfirmationsSuccess(confirmationsBatch)
+        case Failure(e) ⇒ requestor ! WriteConfirmationsFailure(e)
+      }
+    case d @ DeleteMessages(messageIds, permanent, requestorOption) ⇒
+      Try(deleteMessages(messageIds, permanent)) match {
+        case Success(_) ⇒
+          requestorOption.foreach(_ ! DeleteMessagesSuccess(messageIds))
+          if (publish) context.system.eventStream.publish(d)
+        case Failure(e) ⇒
+          requestorOption.foreach(_ ! DeleteMessagesFailure(e))
+      }
+    case d @ DeleteMessagesTo(processorId, toSequenceNr, permanent) ⇒
+      Try(deleteMessagesTo(processorId, toSequenceNr, permanent)) match {
+        case Success(_) ⇒ if (publish) context.system.eventStream.publish(d)
+        case Failure(e) ⇒
+      }
+    case LoopMessage(message, processor) ⇒
+      processor forward LoopMessageSuccess(message)
   }
 
   //#journal-plugin-api
@@ -66,21 +75,25 @@ trait SyncWriteJournal extends Actor with AsyncReplay {
    * The batch write must be atomic i.e. either all persistent messages in the batch
    * are written or none.
    */
-  def write(persistentBatch: immutable.Seq[PersistentRepr]): Unit
+  def writeMessages(messages: immutable.Seq[PersistentRepr]): Unit
 
   /**
-   * Plugin API: synchronously deletes all persistent messages within the range from
-   * `fromSequenceNr` to `toSequenceNr` (both inclusive). If `permanent` is set to
-   * `false`, the persistent messages are marked as deleted, otherwise they are
-   * permanently deleted.
-   *
-   * @see [[AsyncReplay]]
+   * Plugin API: synchronously writes a batch of delivery confirmations to the journal.
   */
-  def delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean): Unit
+  def writeConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Unit
 
   /**
-   * Plugin API: synchronously writes a delivery confirmation to the journal.
+   * Plugin API: synchronously deletes messages identified by `messageIds` from the
+   * journal. If `permanent` is set to `false`, the persistent messages are marked as
+   * deleted, otherwise they are permanently deleted.
   */
-  def confirm(processorId: String, sequenceNr: Long, channelId: String): Unit
+  def deleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Unit
+
+  /**
+   * Plugin API: synchronously deletes all persistent messages up to `toSequenceNr`
+   * (inclusive). If `permanent` is set to `false`, the persistent messages are marked
+   * as deleted, otherwise they are permanently deleted.
+   */
+  def deleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Unit
  //#journal-plugin-api
 }
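The corresponding synchronous skeleton: the write side becomes plain `Unit` methods, while replay and highest-sequence-number recovery stay future-based through `AsyncRecovery`. Again a no-op sketch, not a real backend:

import scala.collection.immutable
import scala.concurrent.Future
import akka.persistence.{ PersistentConfirmation, PersistentId, PersistentRepr }
import akka.persistence.journal.SyncWriteJournal

// Sketch only: the minimal surface a synchronous plugin must implement.
class NoopSyncJournal extends SyncWriteJournal {
  def writeMessages(messages: immutable.Seq[PersistentRepr]): Unit = ()
  def writeConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Unit = ()
  def deleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Unit = ()
  def deleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Unit = ()
  def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit): Future[Unit] = Future.successful(())
  def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] = Future.successful(0L)
}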

View file

@@ -34,7 +34,7 @@ private[persistence] class InmemJournal extends AsyncWriteProxy {
  * INTERNAL API.
  */
 private[persistence] trait InmemMessages {
-  // processor id => persistent message
+  // processor id -> persistent message
   var messages = Map.empty[String, Vector[PersistentRepr]]
 
   def add(p: PersistentRepr) = messages = messages + (messages.get(p.processorId) match {
@@ -52,18 +52,21 @@ private[persistence] trait InmemMessages {
     case None ⇒ messages
   }
 
-  def read(pid: String, fromSnr: Long, toSnr: Long): immutable.Seq[PersistentRepr] = messages.get(pid) match {
-    case Some(ms) ⇒ ms.filter(m ⇒ m.sequenceNr >= fromSnr && m.sequenceNr <= toSnr)
+  def read(pid: String, fromSnr: Long, toSnr: Long, max: Long): immutable.Seq[PersistentRepr] = messages.get(pid) match {
+    case Some(ms) ⇒ ms.filter(m ⇒ m.sequenceNr >= fromSnr && m.sequenceNr <= toSnr).take(safeLongToInt(max))
     case None     ⇒ Nil
   }
 
-  def maxSequenceNr(pid: String): Long = {
+  def highestSequenceNr(pid: String): Long = {
     val snro = for {
       ms ← messages.get(pid)
       m ← ms.lastOption
     } yield m.sequenceNr
     snro.getOrElse(0L)
   }
+
+  private def safeLongToInt(l: Long): Int =
+    if (Int.MaxValue < l) Int.MaxValue else l.toInt
 }
 
 /**
@@ -73,16 +76,22 @@ private[persistence] class InmemStore extends Actor with InmemMessages {
   import AsyncWriteTarget._
 
   def receive = {
-    case WriteBatch(pb) ⇒
-      sender ! pb.foreach(add)
-    case Delete(pid, fsnr, tsnr, false) ⇒
-      sender ! (fsnr to tsnr foreach { snr ⇒ update(pid, snr)(_.update(deleted = true)) })
-    case Delete(pid, fsnr, tsnr, true) ⇒
-      sender ! (fsnr to tsnr foreach { snr ⇒ delete(pid, snr) })
-    case Confirm(pid, snr, cid) ⇒
-      sender ! update(pid, snr)(p ⇒ p.update(confirms = cid +: p.confirms))
-    case Replay(pid, fromSnr, toSnr) ⇒
-      read(pid, fromSnr, toSnr).foreach(sender ! _)
-      sender ! ReplaySuccess(maxSequenceNr(pid))
+    case WriteMessages(msgs) ⇒
+      sender ! msgs.foreach(add)
+    case WriteConfirmations(cnfs) ⇒
+      sender ! cnfs.foreach(cnf ⇒ update(cnf.processorId, cnf.sequenceNr)(p ⇒ p.update(confirms = cnf.channelId +: p.confirms)))
+    case DeleteMessages(msgIds, false) ⇒
+      sender ! msgIds.foreach(msgId ⇒ update(msgId.processorId, msgId.sequenceNr)(_.update(deleted = true)))
+    case DeleteMessages(msgIds, true) ⇒
+      sender ! msgIds.foreach(msgId ⇒ delete(msgId.processorId, msgId.sequenceNr))
+    case DeleteMessagesTo(pid, tsnr, false) ⇒
+      sender ! (1L to tsnr foreach { snr ⇒ update(pid, snr)(_.update(deleted = true)) })
+    case DeleteMessagesTo(pid, tsnr, true) ⇒
+      sender ! (1L to tsnr foreach { snr ⇒ delete(pid, snr) })
+    case ReplayMessages(pid, fromSnr, toSnr, max) ⇒
+      read(pid, fromSnr, toSnr, max).foreach(sender ! _)
+      sender ! ReplaySuccess
+    case ReadHighestSequenceNr(processorId, _) ⇒
+      sender ! highestSequenceNr(processorId)
   }
 }
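A note on the `safeLongToInt` clamp: replay limits arrive as `Long` (typically `Long.MaxValue` for "no limit"), but `Vector.take` expects an `Int`, and a naive `l.toInt` would wrap around. A quick illustration of why the clamp is needed:

// Long.MaxValue.toInt wraps to -1, and take(-1) would replay nothing;
// clamping to Int.MaxValue preserves the "no limit" intent.
def safeLongToInt(l: Long): Int =
  if (Int.MaxValue < l) Int.MaxValue else l.toInt

assert(Long.MaxValue.toInt == -1)
assert(safeLongToInt(3L) == 3)
assert(safeLongToInt(Long.MaxValue) == Int.MaxValue)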

View file

@@ -0,0 +1,27 @@
+/**
+ * Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
+ */
+package akka.persistence.journal.japi
+
+import scala.concurrent.Future
+
+import akka.actor.Actor
+import akka.japi.Procedure
+import akka.persistence.journal.{ AsyncRecovery ⇒ SAsyncReplay }
+import akka.persistence.PersistentRepr
+
+/**
+ * Java API: asynchronous message replay and sequence number recovery interface.
+ */
+abstract class AsyncRecovery extends SAsyncReplay with AsyncRecoveryPlugin { this: Actor ⇒
+  import context.dispatcher
+
+  final def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: (PersistentRepr) ⇒ Unit) =
+    doAsyncReplayMessages(processorId, fromSequenceNr, toSequenceNr, max, new Procedure[PersistentRepr] {
+      def apply(p: PersistentRepr) = replayCallback(p)
+    }).map(Unit.unbox)
+
+  final def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] =
+    doAsyncReadHighestSequenceNr(processorId, fromSequenceNr: Long).map(_.longValue)
+}

View file

@@ -1,26 +0,0 @@
-/**
- * Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
- */
-package akka.persistence.journal.japi
-
-import java.lang.{ Long ⇒ JLong }
-
-import scala.concurrent.Future
-
-import akka.actor.Actor
-import akka.japi.Procedure
-import akka.persistence.journal.{ AsyncReplay ⇒ SAsyncReplay }
-import akka.persistence.PersistentRepr
-
-/**
- * Java API: asynchronous message replay interface.
- */
-abstract class AsyncReplay extends SAsyncReplay with AsyncReplayPlugin { this: Actor ⇒
-  import context.dispatcher
-
-  final def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: (PersistentRepr) ⇒ Unit) =
-    doReplayAsync(processorId, fromSequenceNr, toSequenceNr, new Procedure[PersistentRepr] {
-      def apply(p: PersistentRepr) = replayCallback(p)
-    }).map(_.longValue)
-}

View file

@@ -7,21 +7,24 @@ package akka.persistence.journal.japi
 import scala.collection.immutable
 import scala.collection.JavaConverters._
 
+import akka.persistence._
 import akka.persistence.journal.{ AsyncWriteJournal ⇒ SAsyncWriteJournal }
-import akka.persistence.PersistentRepr
 
 /**
  * Java API: abstract journal, optimized for asynchronous, non-blocking writes.
  */
-abstract class AsyncWriteJournal extends AsyncReplay with SAsyncWriteJournal with AsyncWritePlugin {
+abstract class AsyncWriteJournal extends AsyncRecovery with SAsyncWriteJournal with AsyncWritePlugin {
   import context.dispatcher
 
-  final def writeAsync(persistentBatch: immutable.Seq[PersistentRepr]) =
-    doWriteAsync(persistentBatch.asJava).map(Unit.unbox)
+  final def asyncWriteMessages(messages: immutable.Seq[PersistentRepr]) =
+    doAsyncWriteMessages(messages.asJava).map(Unit.unbox)
 
-  final def deleteAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean) =
-    doDeleteAsync(processorId, fromSequenceNr, toSequenceNr, permanent).map(Unit.unbox)
+  final def asyncWriteConfirmations(confirmations: immutable.Seq[PersistentConfirmation]) =
+    doAsyncWriteConfirmations(confirmations.asJava).map(Unit.unbox)
 
-  final def confirmAsync(processorId: String, sequenceNr: Long, channelId: String) =
-    doConfirmAsync(processorId, sequenceNr, channelId).map(Unit.unbox)
+  final def asyncDeleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean) =
+    doAsyncDeleteMessages(messageIds.asJava, permanent).map(Unit.unbox)
+
+  final def asyncDeleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean) =
+    doAsyncDeleteMessagesTo(processorId, toSequenceNr, permanent).map(Unit.unbox)
 }

View file

@@ -7,19 +7,22 @@ package akka.persistence.journal.japi
 import scala.collection.immutable
 import scala.collection.JavaConverters._
 
+import akka.persistence._
 import akka.persistence.journal.{ SyncWriteJournal ⇒ SSyncWriteJournal }
-import akka.persistence.PersistentRepr
 
 /**
  * Java API: abstract journal, optimized for synchronous writes.
  */
-abstract class SyncWriteJournal extends AsyncReplay with SSyncWriteJournal with SyncWritePlugin {
-  final def write(persistentBatch: immutable.Seq[PersistentRepr]) =
-    doWrite(persistentBatch.asJava)
+abstract class SyncWriteJournal extends AsyncRecovery with SSyncWriteJournal with SyncWritePlugin {
+  final def writeMessages(messages: immutable.Seq[PersistentRepr]) =
+    doWriteMessages(messages.asJava)
 
-  final def delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean) =
-    doDelete(processorId, fromSequenceNr, toSequenceNr, permanent)
+  final def writeConfirmations(confirmations: immutable.Seq[PersistentConfirmation]) =
+    doWriteConfirmations(confirmations.asJava)
 
-  final def confirm(processorId: String, sequenceNr: Long, channelId: String) =
-    doConfirm(processorId, sequenceNr, channelId)
+  final def deleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean) =
+    doDeleteMessages(messageIds.asJava, permanent)
+
+  final def deleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean) =
+    doDeleteMessagesTo(processorId, toSequenceNr, permanent)
 }

View file

@@ -28,14 +28,9 @@ private[persistence] trait LeveldbIdMapping extends Actor { this: LeveldbStore ⇒
     case Some(v) ⇒ v
   }
 
-  private def readIdMap(): Map[String, Int] = {
-    val iter = leveldbIterator
-    try {
-      iter.seek(keyToBytes(idKey(idOffset)))
-      readIdMap(Map.empty, iter)
-    } finally {
-      iter.close()
-    }
+  private def readIdMap(): Map[String, Int] = withIterator { iter ⇒
+    iter.seek(keyToBytes(idKey(idOffset)))
+    readIdMap(Map.empty, iter)
   }
 
   private def readIdMap(pathMap: Map[String, Int], iter: DBIterator): Map[String, Int] = {

View file

@@ -8,27 +8,29 @@ package akka.persistence.journal.leveldb
 import scala.concurrent.Future
 
 import akka.persistence._
-import akka.persistence.journal.AsyncReplay
+import akka.persistence.journal.AsyncRecovery
+
+import org.iq80.leveldb.DBIterator
 
 /**
  * INTERNAL API.
  *
- * LevelDB backed message replay.
+ * LevelDB backed message replay and sequence number recovery.
  */
-private[persistence] trait LeveldbReplay extends AsyncReplay { this: LeveldbStore ⇒
+private[persistence] trait LeveldbRecovery extends AsyncRecovery { this: LeveldbStore ⇒
   import Key._
 
   private lazy val replayDispatcherId = config.getString("replay-dispatcher")
   private lazy val replayDispatcher = context.system.dispatchers.lookup(replayDispatcherId)
 
-  def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: PersistentRepr ⇒ Unit): Future[Long] =
-    Future(replay(numericId(processorId), fromSequenceNr: Long, toSequenceNr)(replayCallback))(replayDispatcher)
+  def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] =
+    Future(readHighestSequenceNr(numericId(processorId)))(replayDispatcher)
 
-  def replay(processorId: Int, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: PersistentRepr ⇒ Unit): Long = {
-    val iter = leveldbIterator
+  def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit): Future[Unit] =
+    Future(replayMessages(numericId(processorId), fromSequenceNr: Long, toSequenceNr, max: Long)(replayCallback))(replayDispatcher)
 
+  def replayMessages(processorId: Int, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit): Unit = {
     @scala.annotation.tailrec
-    def go(key: Key, replayCallback: PersistentRepr ⇒ Unit) {
+    def go(iter: DBIterator, key: Key, ctr: Long, replayCallback: PersistentRepr ⇒ Unit) {
       if (iter.hasNext) {
         val nextEntry = iter.next()
         val nextKey = keyFromBytes(nextEntry.getKey)
@@ -36,31 +38,33 @@ private[persistence] trait LeveldbReplay extends AsyncReplay { this: LeveldbStore ⇒
           // end iteration here
         } else if (nextKey.channelId != 0) {
           // phantom confirmation (just advance iterator)
-          go(nextKey, replayCallback)
+          go(iter, nextKey, ctr, replayCallback)
         } else if (key.processorId == nextKey.processorId) {
           val msg = persistentFromBytes(nextEntry.getValue)
-          val del = deletion(nextKey)
-          val cnf = confirms(nextKey, Nil)
-          replayCallback(msg.update(confirms = cnf, deleted = del))
-          go(nextKey, replayCallback)
+          val del = deletion(iter, nextKey)
+          val cnf = confirms(iter, nextKey, Nil)
+          if (ctr < max) {
+            replayCallback(msg.update(confirms = cnf, deleted = del))
+            go(iter, nextKey, ctr + 1L, replayCallback)
+          }
         }
       }
     }
 
     @scala.annotation.tailrec
-    def confirms(key: Key, channelIds: List[String]): List[String] = {
+    def confirms(iter: DBIterator, key: Key, channelIds: List[String]): List[String] = {
       if (iter.hasNext) {
         val nextEntry = iter.peekNext()
         val nextKey = keyFromBytes(nextEntry.getKey)
         if (key.processorId == nextKey.processorId && key.sequenceNr == nextKey.sequenceNr) {
           val nextValue = new String(nextEntry.getValue, "UTF-8")
           iter.next()
-          confirms(nextKey, nextValue :: channelIds)
+          confirms(iter, nextKey, nextValue :: channelIds)
         } else channelIds
       } else channelIds
     }
 
-    def deletion(key: Key): Boolean = {
+    def deletion(iter: DBIterator, key: Key): Boolean = {
       if (iter.hasNext) {
         val nextEntry = iter.peekNext()
         val nextKey = keyFromBytes(nextEntry.getKey)
@@ -71,17 +75,14 @@ private[persistence] trait LeveldbReplay extends AsyncReplay { this: LeveldbStore ⇒
       } else false
     }
 
-    try {
+    withIterator { iter ⇒
       val startKey = Key(processorId, if (fromSequenceNr < 1L) 1L else fromSequenceNr, 0)
       iter.seek(keyToBytes(startKey))
-      go(startKey, replayCallback)
-      maxSequenceNr(processorId)
-    } finally {
-      iter.close()
+      go(iter, startKey, 0L, replayCallback)
     }
   }
 
-  def maxSequenceNr(processorId: Int) = {
+  def readHighestSequenceNr(processorId: Int) = {
     leveldb.get(keyToBytes(counterKey(processorId)), leveldbSnapshot) match {
       case null  ⇒ 0L
      case bytes ⇒ counterFromBytes(bytes)
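The threaded `ctr` argument is what enforces the new `max` bound: `go` stops recursing once `ctr` reaches `max`, without materializing any intermediate collection. The same control shape in isolation (names here are illustrative, not from the commit):

// A tail-recursive scan that emits at most `max` elements, mirroring the
// ctr/max guard added to `go` above.
@scala.annotation.tailrec
def emitBounded(it: Iterator[Int], ctr: Long, max: Long)(callback: Int ⇒ Unit): Unit =
  if (it.hasNext && ctr < max) {
    callback(it.next())
    emitBounded(it, ctr + 1L, max)(callback)
  }

emitBounded(Iterator(1, 2, 3, 4), 0L, 2L)(println) // prints 1 and 2 only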

View file

@@ -20,7 +20,7 @@ import akka.serialization.SerializationExtension
 /**
  * INTERNAL API.
  */
-private[persistence] trait LeveldbStore extends Actor with LeveldbIdMapping with LeveldbReplay {
+private[persistence] trait LeveldbStore extends Actor with LeveldbIdMapping with LeveldbRecovery {
   val configPath: String
 
   val config = context.system.settings.config.getConfig(configPath)
@@ -44,36 +44,47 @@ private[persistence] trait LeveldbStore extends Actor with LeveldbIdMapping with LeveldbReplay {
   import Key._
 
-  def write(persistentBatch: immutable.Seq[PersistentRepr]) =
-    withBatch(batch ⇒ persistentBatch.foreach(persistent ⇒ addToBatch(persistent, batch)))
+  def writeMessages(messages: immutable.Seq[PersistentRepr]) =
+    withBatch(batch ⇒ messages.foreach(message ⇒ addToMessageBatch(message, batch)))
 
-  def delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean) = withBatch { batch ⇒
-    val nid = numericId(processorId)
-    if (permanent) fromSequenceNr to toSequenceNr foreach { sequenceNr ⇒
-      batch.delete(keyToBytes(Key(nid, sequenceNr, 0))) // TODO: delete confirmations and deletion markers, if any.
-    }
-    else fromSequenceNr to toSequenceNr foreach { sequenceNr ⇒
-      batch.put(keyToBytes(deletionKey(nid, sequenceNr)), Array.empty[Byte])
-    }
-  }
+  def writeConfirmations(confirmations: immutable.Seq[PersistentConfirmation]) =
+    withBatch(batch ⇒ confirmations.foreach(confirmation ⇒ addToConfirmationBatch(confirmation, batch)))
 
-  def confirm(processorId: String, sequenceNr: Long, channelId: String) {
-    leveldb.put(keyToBytes(Key(numericId(processorId), sequenceNr, numericId(channelId))), channelId.getBytes("UTF-8"))
+  def deleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean) = withBatch { batch ⇒
+    messageIds foreach { messageId ⇒
+      if (permanent) batch.delete(keyToBytes(Key(numericId(messageId.processorId), messageId.sequenceNr, 0)))
+      else batch.put(keyToBytes(deletionKey(numericId(messageId.processorId), messageId.sequenceNr)), Array.emptyByteArray)
+    }
+  }
+
+  def deleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean) = withBatch { batch ⇒
+    val nid = numericId(processorId)
+
+    // seek to first existing message
+    val fromSequenceNr = withIterator { iter ⇒
+      val startKey = Key(nid, 1L, 0)
+      iter.seek(keyToBytes(startKey))
+      if (iter.hasNext) keyFromBytes(iter.peekNext().getKey).sequenceNr else Long.MaxValue
+    }
+
+    fromSequenceNr to toSequenceNr foreach { sequenceNr ⇒
+      if (permanent) batch.delete(keyToBytes(Key(nid, sequenceNr, 0))) // TODO: delete confirmations and deletion markers, if any.
+      else batch.put(keyToBytes(deletionKey(nid, sequenceNr)), Array.emptyByteArray)
+    }
   }
 
   def leveldbSnapshot = leveldbReadOptions.snapshot(leveldb.getSnapshot)
-  def leveldbIterator = leveldb.iterator(leveldbSnapshot)
 
-  def persistentToBytes(p: PersistentRepr): Array[Byte] = serialization.serialize(p).get
-  def persistentFromBytes(a: Array[Byte]): PersistentRepr = serialization.deserialize(a, classOf[PersistentRepr]).get
-
-  private def addToBatch(persistent: PersistentRepr, batch: WriteBatch): Unit = {
-    val nid = numericId(persistent.processorId)
-    batch.put(keyToBytes(counterKey(nid)), counterToBytes(persistent.sequenceNr))
-    batch.put(keyToBytes(Key(nid, persistent.sequenceNr, 0)), persistentToBytes(persistent))
-  }
+  def withIterator[R](body: DBIterator ⇒ R): R = {
+    val iterator = leveldb.iterator(leveldbSnapshot)
+    try {
+      body(iterator)
+    } finally {
+      iterator.close()
+    }
+  }
 
-  private def withBatch[R](body: WriteBatch ⇒ R): R = {
+  def withBatch[R](body: WriteBatch ⇒ R): R = {
     val batch = leveldb.createWriteBatch()
     try {
       val r = body(batch)
@@ -84,6 +95,21 @@ private[persistence] trait LeveldbStore extends Actor with LeveldbIdMapping with LeveldbReplay {
     }
   }
 
+  def persistentToBytes(p: PersistentRepr): Array[Byte] = serialization.serialize(p).get
+  def persistentFromBytes(a: Array[Byte]): PersistentRepr = serialization.deserialize(a, classOf[PersistentRepr]).get
+
+  private def addToMessageBatch(persistent: PersistentRepr, batch: WriteBatch): Unit = {
+    val nid = numericId(persistent.processorId)
+    batch.put(keyToBytes(counterKey(nid)), counterToBytes(persistent.sequenceNr))
+    batch.put(keyToBytes(Key(nid, persistent.sequenceNr, 0)), persistentToBytes(persistent))
+  }
+
+  private def addToConfirmationBatch(confirmation: PersistentConfirmation, batch: WriteBatch): Unit = {
+    val npid = numericId(confirmation.processorId)
+    val ncid = numericId(confirmation.channelId)
+    batch.put(keyToBytes(Key(npid, confirmation.sequenceNr, ncid)), confirmation.channelId.getBytes("UTF-8"))
+  }
+
   override def preStart() {
     leveldb = leveldbFactory.open(leveldbDir, if (nativeLeveldb) leveldbOptions else leveldbOptions.compressionType(CompressionType.NONE))
     super.preStart()
@@ -104,17 +130,14 @@ class SharedLeveldbStore extends { val configPath = "akka.persistence.journal.le
   import AsyncWriteTarget._
 
   def receive = {
-    case WriteBatch(pb)                     ⇒ sender ! write(pb)
-    case Delete(pid, fsnr, tsnr, permanent) ⇒ sender ! delete(pid, fsnr, tsnr, permanent)
-    case Confirm(pid, snr, cid)             ⇒ sender ! confirm(pid, snr, cid)
-    case Replay(pid, fromSnr, toSnr) ⇒
-      val npid = numericId(pid)
-      val res = for {
-        _ ← Try(replay(npid, fromSnr, toSnr)(sender ! _))
-        max ← Try(maxSequenceNr(npid))
-      } yield max
-      res match {
-        case Success(max)   ⇒ sender ! ReplaySuccess(max)
+    case WriteMessages(msgs)                    ⇒ sender ! writeMessages(msgs)
+    case WriteConfirmations(cnfs)               ⇒ sender ! writeConfirmations(cnfs)
+    case DeleteMessages(messageIds, permanent)  ⇒ sender ! deleteMessages(messageIds, permanent)
+    case DeleteMessagesTo(pid, tsnr, permanent) ⇒ sender ! deleteMessagesTo(pid, tsnr, permanent)
+    case ReadHighestSequenceNr(pid, fromSequenceNr) ⇒ sender ! readHighestSequenceNr(numericId(pid))
+    case ReplayMessages(pid, fromSnr, toSnr, max) ⇒
+      Try(replayMessages(numericId(pid), fromSnr, toSnr, max)(sender ! _)) match {
+        case Success(max)   ⇒ sender ! ReplaySuccess
         case Failure(cause) ⇒ sender ! ReplayFailure(cause)
       }
   }
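`withIterator` generalizes the previous ad-hoc try/finally into the same loan pattern that `withBatch` already used: acquire the resource, lend it to the caller's function, and guarantee release on every exit path. The idiom in miniature (a generic sketch, not code from this commit):

// Loan pattern: the resource never escapes, and close() runs even if
// body throws.
def withResource[A <: AutoCloseable, R](acquire: ⇒ A)(body: A ⇒ R): R = {
  val resource = acquire
  try body(resource) finally resource.close()
}

// e.g. withResource(new java.io.FileInputStream("some-file"))(_.read())

Sharing one helper also means the snapshot-backed iterator is created and closed consistently by `LeveldbIdMapping`, `LeveldbRecovery` and `deleteMessagesTo` alike.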

View file

@@ -8,12 +8,10 @@ import scala.language.existentials
 import com.google.protobuf._
 
-import akka.actor.ExtendedActorSystem
+import akka.actor.{ ActorPath, ExtendedActorSystem }
 import akka.japi.Util.immutableSeq
 import akka.persistence._
-import akka.persistence.JournalProtocol.Confirm
 import akka.persistence.serialization.MessageFormats._
-import akka.persistence.serialization.MessageFormats.DeliverMessage.ResolveStrategy
 import akka.serialization._
 
 /**
@@ -31,7 +29,8 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer {
   val PersistentReprClass = classOf[PersistentRepr]
   val PersistentImplClass = classOf[PersistentImpl]
   val ConfirmablePersistentImplClass = classOf[ConfirmablePersistentImpl]
-  val ConfirmClass = classOf[Confirm]
+  val DeliveredByTransientChannelClass = classOf[DeliveredByChannel]
+  val DeliveredByPersistentChannelClass = classOf[DeliveredByPersistentChannel]
   val DeliverClass = classOf[Deliver]
 
   def identifier: Int = 7
@@ -42,11 +41,12 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer {
    * serialization of a persistent message's payload to a matching `akka.serialization.Serializer`.
    */
   def toBinary(o: AnyRef): Array[Byte] = o match {
     case b: PersistentBatch ⇒ persistentMessageBatchBuilder(b).build().toByteArray
     case p: PersistentRepr  ⇒ persistentMessageBuilder(p).build().toByteArray
-    case c: Confirm         ⇒ confirmMessageBuilder(c).build().toByteArray
+    case c: DeliveredByChannel           ⇒ deliveredMessageBuilder(c).build().toByteArray
+    case c: DeliveredByPersistentChannel ⇒ deliveredMessageBuilder(c).build().toByteArray
     case d: Deliver         ⇒ deliverMessageBuilder(d).build.toByteArray
     case _                  ⇒ throw new IllegalArgumentException(s"Can't serialize object of type ${o.getClass}")
   }
 
  /**
@@ -56,13 +56,14 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer {
   def fromBinary(bytes: Array[Byte], manifest: Option[Class[_]]): Message = manifest match {
     case None ⇒ persistent(PersistentMessage.parseFrom(bytes))
     case Some(c) ⇒ c match {
       case PersistentImplClass            ⇒ persistent(PersistentMessage.parseFrom(bytes))
       case ConfirmablePersistentImplClass ⇒ persistent(PersistentMessage.parseFrom(bytes))
       case PersistentReprClass            ⇒ persistent(PersistentMessage.parseFrom(bytes))
       case PersistentBatchClass           ⇒ persistentBatch(PersistentMessageBatch.parseFrom(bytes))
-      case ConfirmClass                   ⇒ confirm(ConfirmMessage.parseFrom(bytes))
+      case DeliveredByTransientChannelClass  ⇒ delivered(DeliveredMessage.parseFrom(bytes))
+      case DeliveredByPersistentChannelClass ⇒ delivered(DeliveredMessage.parseFrom(bytes))
       case DeliverClass                   ⇒ deliver(DeliverMessage.parseFrom(bytes))
       case _                              ⇒ throw new IllegalArgumentException(s"Can't deserialize object of type ${c}")
     }
   }
@@ -73,12 +74,8 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer {
   private def deliverMessageBuilder(deliver: Deliver) = {
     val builder = DeliverMessage.newBuilder
     builder.setPersistent(persistentMessageBuilder(deliver.persistent.asInstanceOf[PersistentRepr]))
-    builder.setDestination(Serialization.serializedActorPath(deliver.destination))
-    deliver.resolve match {
-      case Resolve.Off         ⇒ builder.setResolve(DeliverMessage.ResolveStrategy.Off)
-      case Resolve.Sender      ⇒ builder.setResolve(DeliverMessage.ResolveStrategy.Sender)
-      case Resolve.Destination ⇒ builder.setResolve(DeliverMessage.ResolveStrategy.Destination)
-    }
+    builder.setDestination(deliver.destination.toString)
+    builder
   }
 
   private def persistentMessageBatchBuilder(persistentBatch: PersistentBatch) = {
@@ -91,7 +88,7 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer {
     val builder = PersistentMessage.newBuilder
 
     if (persistent.processorId != Undefined) builder.setProcessorId(persistent.processorId)
-    if (persistent.confirmMessage != null) builder.setConfirmMessage(confirmMessageBuilder(persistent.confirmMessage))
+    if (persistent.confirmMessage != null) builder.setConfirmMessage(deliveredMessageBuilder(persistent.confirmMessage))
     if (persistent.confirmTarget != null) builder.setConfirmTarget(Serialization.serializedActorPath(persistent.confirmTarget))
     if (persistent.sender != null) builder.setSender(Serialization.serializedActorPath(persistent.sender))
 
@@ -100,7 +97,6 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer {
     builder.setPayload(persistentPayloadBuilder(persistent.payload.asInstanceOf[AnyRef]))
     builder.setSequenceNr(persistent.sequenceNr)
     builder.setDeleted(persistent.deleted)
-    builder.setResolved(persistent.resolved)
     builder.setRedeliveries(persistent.redeliveries)
     builder.setConfirmable(persistent.confirmable)
     builder
@@ -117,16 +113,19 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer {
     builder
   }
 
-  private def confirmMessageBuilder(confirm: Confirm) = {
-    val builder = ConfirmMessage.newBuilder
-    if (confirm.channelEndpoint != null) builder.setChannelEndpoint(Serialization.serializedActorPath(confirm.channelEndpoint))
-    builder.setProcessorId(confirm.processorId)
-    builder.setMessageSequenceNr(confirm.messageSequenceNr)
-    builder.setChannelId(confirm.channelId)
-    builder.setWrapperSequenceNr(confirm.wrapperSequenceNr)
-    builder
+  private def deliveredMessageBuilder(delivered: Delivered) = {
+    val builder = DeliveredMessage.newBuilder
+    if (delivered.channel != null) builder.setChannel(Serialization.serializedActorPath(delivered.channel))
+    builder.setChannelId(delivered.channelId)
+    builder.setPersistentSequenceNr(delivered.persistentSequenceNr)
+    builder.setDeliverySequenceNr(delivered.deliverySequenceNr)
+
+    delivered match {
+      case c: DeliveredByChannel ⇒ builder.setProcessorId(c.processorId)
+      case _                     ⇒ builder
+    }
   }
 
   //
@@ -136,12 +135,7 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer {
   private def deliver(deliverMessage: DeliverMessage): Deliver = {
     Deliver(
       persistent(deliverMessage.getPersistent),
-      system.provider.resolveActorRef(deliverMessage.getDestination),
-      deliverMessage.getResolve match {
-        case ResolveStrategy.Off         ⇒ Resolve.Off
-        case ResolveStrategy.Sender      ⇒ Resolve.Sender
-        case ResolveStrategy.Destination ⇒ Resolve.Destination
-      })
+      ActorPath.fromString(deliverMessage.getDestination))
   }
 
   private def persistentBatch(persistentMessageBatch: PersistentMessageBatch): PersistentBatch =
@@ -153,11 +147,10 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer {
       persistentMessage.getSequenceNr,
       if (persistentMessage.hasProcessorId) persistentMessage.getProcessorId else Undefined,
       persistentMessage.getDeleted,
-      persistentMessage.getResolved,
      persistentMessage.getRedeliveries,
       immutableSeq(persistentMessage.getConfirmsList),
       persistentMessage.getConfirmable,
-      if (persistentMessage.hasConfirmMessage) confirm(persistentMessage.getConfirmMessage) else null,
+      if (persistentMessage.hasConfirmMessage) delivered(persistentMessage.getConfirmMessage) else null,
       if (persistentMessage.hasConfirmTarget) system.provider.resolveActorRef(persistentMessage.getConfirmTarget) else null,
       if (persistentMessage.hasSender) system.provider.resolveActorRef(persistentMessage.getSender) else null)
   }
@@ -172,12 +165,22 @@ class MessageSerializer(val system: ExtendedActorSystem) extends Serializer {
       payloadClass).get
   }
 
-  private def confirm(confirmMessage: ConfirmMessage): Confirm = {
-    Confirm(
-      confirmMessage.getProcessorId,
-      confirmMessage.getMessageSequenceNr,
-      confirmMessage.getChannelId,
-      confirmMessage.getWrapperSequenceNr,
-      if (confirmMessage.hasChannelEndpoint) system.provider.resolveActorRef(confirmMessage.getChannelEndpoint) else null)
+  private def delivered(deliveredMessage: DeliveredMessage): Delivered = {
+    val channel = if (deliveredMessage.hasChannel) system.provider.resolveActorRef(deliveredMessage.getChannel) else null
+
+    if (deliveredMessage.hasProcessorId) {
+      DeliveredByChannel(
+        deliveredMessage.getProcessorId,
+        deliveredMessage.getChannelId,
+        deliveredMessage.getPersistentSequenceNr,
+        deliveredMessage.getDeliverySequenceNr,
+        channel)
+    } else {
+      DeliveredByPersistentChannel(
+        deliveredMessage.getChannelId,
+        deliveredMessage.getPersistentSequenceNr,
+        deliveredMessage.getDeliverySequenceNr,
+        channel)
+    }
   }
 }
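A side effect of switching `Deliver` destinations from resolved `ActorRef`s to `ActorPath`s is that deserialization no longer needs the actor ref provider for the destination: a path is just a string that survives a `toString`/`fromString` round trip. A quick sketch of the property the serializer now relies on (the example path is made up):

import akka.actor.ActorPath

// Paths round-trip through their string representation, so
// ActorPath.fromString(deliverMessage.getDestination) reconstructs
// exactly what builder.setDestination(deliver.destination.toString) wrote.
val path = ActorPath.fromString("akka://example-sys/user/destination")
assert(ActorPath.fromString(path.toString) == path)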

View file

@@ -19,6 +19,7 @@ trait SnapshotStore extends Actor {
   import context.dispatcher
 
   private val extension = Persistence(context.system)
+  private val publish = extension.settings.internal.publishPluginCommands
 
   final def receive = {
     case LoadSnapshot(processorId, criteria, toSequenceNr) ⇒
@@ -44,10 +45,10 @@ trait SnapshotStore extends Actor {
       sender ! evt // sender is processor
     case d @ DeleteSnapshot(metadata) ⇒
       delete(metadata)
-      if (extension.publishPluginCommands) context.system.eventStream.publish(d)
+      if (publish) context.system.eventStream.publish(d)
     case d @ DeleteSnapshots(processorId, criteria) ⇒
       delete(processorId, criteria)
-      if (extension.publishPluginCommands) context.system.eventStream.publish(d)
+      if (publish) context.system.eventStream.publish(d)
   }
 
   //#snapshot-store-plugin-api

View file

@ -12,8 +12,6 @@ import com.typesafe.config._
import akka.actor._ import akka.actor._
import akka.testkit._ import akka.testkit._
import akka.persistence.JournalProtocol.Confirm
object ChannelSpec { object ChannelSpec {
class TestDestination extends Actor { class TestDestination extends Actor {
def receive = { def receive = {
@ -36,6 +34,12 @@ object ChannelSpec {
cp.confirm() cp.confirm()
} }
} }
class TestListener(probe: ActorRef) extends Actor {
def receive = {
case RedeliverFailure(messages) messages.foreach(probe ! _.payload)
}
}
} }
abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with PersistenceSpec with ImplicitSender { abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with PersistenceSpec with ImplicitSender {
@ -56,52 +60,35 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist
super.afterEach() super.afterEach()
} }
def redeliverChannelSettings: ChannelSettings = private def redeliverChannelSettings(listener: Option[ActorRef]): ChannelSettings =
ChannelSettings(redeliverMax = 2, redeliverInterval = 100 milliseconds) ChannelSettings(redeliverMax = 2, redeliverInterval = 100 milliseconds, redeliverFailureListener = listener)
def createDefaultTestChannel(): ActorRef = def createDefaultTestChannel(): ActorRef =
system.actorOf(Channel.props(name, ChannelSettings())) system.actorOf(Channel.props(s"${name}-default", ChannelSettings()))
def createRedeliverTestChannel(): ActorRef = def createRedeliverTestChannel(): ActorRef =
system.actorOf(Channel.props(name, redeliverChannelSettings)) system.actorOf(Channel.props(s"${name}-redeliver", redeliverChannelSettings(None)))
def createRedeliverTestChannel(listener: Option[ActorRef]): ActorRef =
system.actorOf(Channel.props(s"${name}-redeliver-listener", redeliverChannelSettings(listener)))
def subscribeToConfirmation(probe: TestProbe): Unit = def subscribeToConfirmation(probe: TestProbe): Unit =
system.eventStream.subscribe(probe.ref, classOf[Confirm]) system.eventStream.subscribe(probe.ref, classOf[Delivered])
def awaitConfirmation(probe: TestProbe): Unit = def awaitConfirmation(probe: TestProbe): Unit =
probe.expectMsgType[Confirm] probe.expectMsgType[Delivered]
def actorRefFor(topLevelName: String) = def actorRefFor(topLevelName: String) =
extension.system.provider.resolveActorRef(RootActorPath(Address("akka", system.name)) / "user" / topLevelName) extension.system.provider.resolveActorRef(RootActorPath(Address("akka", system.name)) / "user" / topLevelName)
"A channel" must { "A channel" must {
"must resolve sender references and preserve message order" in {
val destination = system.actorOf(Props[TestDestination])
val empty = actorRefFor("testSender") // will be an EmptyLocalActorRef
val sender = system.actorOf(Props(classOf[TestReceiver], testActor), "testSender")
// replayed message (resolved = false) and invalid sender reference
defaultTestChannel tell (Deliver(PersistentRepr("a", resolved = false), destination, Resolve.Sender), empty)
// new messages (resolved = true) and valid sender references
defaultTestChannel tell (Deliver(Persistent("b"), destination), sender)
defaultTestChannel tell (Deliver(Persistent("c"), destination), sender)
expectMsg("a")
expectMsg("b")
expectMsg("c")
}
"must resolve destination references and preserve message order" in { "must resolve destination references and preserve message order" in {
val empty = actorRefFor("testDestination") // will be an EmptyLocalActorRef val empty = actorRefFor("testDestination") // will be an EmptyLocalActorRef
val destination = system.actorOf(Props(classOf[TestReceiver], testActor), "testDestination") val destination = system.actorOf(Props(classOf[TestReceiver], testActor), "testDestination")
// replayed message (resolved = false) and invalid destination reference defaultTestChannel ! Deliver(PersistentRepr("a"), empty.path)
defaultTestChannel ! Deliver(PersistentRepr("a", resolved = false), empty, Resolve.Destination) defaultTestChannel ! Deliver(Persistent("b"), destination.path)
defaultTestChannel ! Deliver(Persistent("c"), destination.path)
// new messages (resolved = true) and valid destination references
defaultTestChannel ! Deliver(Persistent("b"), destination)
defaultTestChannel ! Deliver(Persistent("c"), destination)
expectMsg("a") expectMsg("a")
expectMsg("b") expectMsg("b")
@ -113,7 +100,7 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist
subscribeToConfirmation(confirmProbe) subscribeToConfirmation(confirmProbe)
defaultTestChannel ! Deliver(Persistent("a"), destination) defaultTestChannel ! Deliver(Persistent("a"), destination.path)
awaitConfirmation(confirmProbe) awaitConfirmation(confirmProbe)
} }
@ -123,9 +110,9 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist
subscribeToConfirmation(confirmProbe) subscribeToConfirmation(confirmProbe)
defaultTestChannel ! Deliver(Persistent("a"), destination) defaultTestChannel ! Deliver(Persistent("a"), destination.path)
defaultTestChannel ! Deliver(Persistent("boom"), destination) defaultTestChannel ! Deliver(Persistent("boom"), destination.path)
defaultTestChannel ! Deliver(Persistent("b"), destination) defaultTestChannel ! Deliver(Persistent("b"), destination.path)
awaitConfirmation(confirmProbe) awaitConfirmation(confirmProbe)
awaitConfirmation(confirmProbe) awaitConfirmation(confirmProbe)
@ -136,7 +123,7 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist
subscribeToConfirmation(confirmProbe) subscribeToConfirmation(confirmProbe)
defaultTestChannel ! Deliver(PersistentRepr("a", confirmable = true), destination) defaultTestChannel ! Deliver(PersistentRepr("a", confirmable = true), destination.path)
expectMsgPF() { case m @ ConfirmablePersistent("a", _, _) ⇒ m.confirm() } expectMsgPF() { case m @ ConfirmablePersistent("a", _, _) ⇒ m.confirm() }
awaitConfirmation(confirmProbe) awaitConfirmation(confirmProbe)
@@ -144,21 +131,21 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist
"redeliver on missing confirmation" in { "redeliver on missing confirmation" in {
val probe = TestProbe() val probe = TestProbe()
redeliverTestChannel ! Deliver(Persistent("b"), probe.ref) redeliverTestChannel ! Deliver(Persistent("b"), probe.ref.path)
probe.expectMsgPF() { case m @ ConfirmablePersistent("b", _, redeliveries) ⇒ redeliveries should be(0) } probe.expectMsgPF() { case m @ ConfirmablePersistent("b", _, redeliveries) ⇒ redeliveries should be(0) }
probe.expectMsgPF() { case m @ ConfirmablePersistent("b", _, redeliveries) ⇒ redeliveries should be(1) } probe.expectMsgPF() { case m @ ConfirmablePersistent("b", _, redeliveries) ⇒ redeliveries should be(1) }
probe.expectMsgPF() { case m @ ConfirmablePersistent("b", _, redeliveries) ⇒ redeliveries should be(2); m.confirm() } probe.expectMsgPF() { case m @ ConfirmablePersistent("b", _, redeliveries) ⇒ redeliveries should be(2); m.confirm() }
} }
"redeliver in correct relative order" in { "redeliver in correct relative order" in {
val deliveries = redeliverChannelSettings.redeliverMax + 1 val deliveries = redeliverChannelSettings(None).redeliverMax + 1
val interval = redeliverChannelSettings.redeliverInterval.toMillis / 5 * 4 val interval = redeliverChannelSettings(None).redeliverInterval.toMillis / 5 * 4
val probe = TestProbe() val probe = TestProbe()
val cycles = 9 val cycles = 9
1 to cycles foreach { i ⇒ 1 to cycles foreach { i ⇒
redeliverTestChannel ! Deliver(Persistent(i), probe.ref) redeliverTestChannel ! Deliver(Persistent(i), probe.ref.path)
Thread.sleep(interval) Thread.sleep(interval)
} }
@@ -176,13 +163,35 @@ abstract class ChannelSpec(config: Config) extends AkkaSpec(config) with Persist
"redeliver not more than redeliverMax on missing confirmation" in { "redeliver not more than redeliverMax on missing confirmation" in {
val probe = TestProbe() val probe = TestProbe()
redeliverTestChannel ! Deliver(PersistentRepr("a"), probe.ref) redeliverTestChannel ! Deliver(PersistentRepr("a"), probe.ref.path)
probe.expectMsgPF() { case m @ ConfirmablePersistent("a", _, redeliveries) ⇒ redeliveries should be(0) } probe.expectMsgPF() { case m @ ConfirmablePersistent("a", _, redeliveries) ⇒ redeliveries should be(0) }
probe.expectMsgPF() { case m @ ConfirmablePersistent("a", _, redeliveries) ⇒ redeliveries should be(1) } probe.expectMsgPF() { case m @ ConfirmablePersistent("a", _, redeliveries) ⇒ redeliveries should be(1) }
probe.expectMsgPF() { case m @ ConfirmablePersistent("a", _, redeliveries) ⇒ redeliveries should be(2) } probe.expectMsgPF() { case m @ ConfirmablePersistent("a", _, redeliveries) ⇒ redeliveries should be(2) }
probe.expectNoMsg(300 milliseconds) probe.expectNoMsg(300 milliseconds)
} }
"preserve message order to the same destination" in {
val probe = TestProbe()
val destination = system.actorOf(Props(classOf[TestReceiver], probe.ref))
1 to 10 foreach { i ⇒
defaultTestChannel ! Deliver(PersistentRepr(s"test-${i}"), destination.path)
}
1 to 10 foreach { i ⇒
probe.expectMsg(s"test-${i}")
}
}
"notify redelivery failure listener" in {
val probe = TestProbe()
val listener = system.actorOf(Props(classOf[TestListener], probe.ref))
val channel = createRedeliverTestChannel(Some(listener))
1 to 3 foreach { i ⇒ channel ! Deliver(Persistent(i), system.deadLetters.path) }
probe.expectMsgAllOf(1, 2, 3)
system.stop(channel)
}
} }
} }
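The rewritten assertions above pin down the new channel contract: Deliver addresses destinations by ActorPath instead of ActorRef (the Resolve.Sender / Resolve.Destination machinery is gone), confirmations surface as Delivered events on the event stream when akka.persistence.publish-confirmations is on, and a redeliverFailureListener receives the messages that exhausted redeliverMax. A minimal sketch of the updated usage; actor names and the channel id are illustrative:

import akka.actor._
import akka.persistence._

class Forwarder extends Actor {
  val destination = context.actorOf(Props[Printer], "printer")
  val channel = context.actorOf(Channel.props("example-channel"))
  def receive = {
    case p: Persistent ⇒ channel ! Deliver(p, destination.path) // path, not ActorRef
  }
}

class Printer extends Actor {
  def receive = {
    case cp: ConfirmablePersistent ⇒
      println(cp.payload)
      cp.confirm() // published as a Delivered event when publish-confirmations is on
  }
}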

View file

@@ -21,8 +21,10 @@ object FailureSpec {
akka.persistence.destination.chaos.confirm-failure-rate = 0.3 akka.persistence.destination.chaos.confirm-failure-rate = 0.3
akka.persistence.journal.plugin = "akka.persistence.journal.chaos" akka.persistence.journal.plugin = "akka.persistence.journal.chaos"
akka.persistence.journal.chaos.write-failure-rate = 0.3 akka.persistence.journal.chaos.write-failure-rate = 0.3
akka.persistence.journal.chaos.confirm-failure-rate = 0.2
akka.persistence.journal.chaos.delete-failure-rate = 0.3 akka.persistence.journal.chaos.delete-failure-rate = 0.3
akka.persistence.journal.chaos.replay-failure-rate = 0.3 akka.persistence.journal.chaos.replay-failure-rate = 0.25
akka.persistence.journal.chaos.read-highest-failure-rate = 0.1
akka.persistence.journal.chaos.class = akka.persistence.journal.chaos.ChaosJournal akka.persistence.journal.chaos.class = akka.persistence.journal.chaos.ChaosJournal
akka.persistence.snapshot-store.local.dir = "target/snapshots-failure-spec/" akka.persistence.snapshot-store.local.dir = "target/snapshots-failure-spec/"
""") """)
@@ -70,7 +72,7 @@ object FailureSpec {
throw new TestException(debugMessage(s"rejected payload ${i}")) throw new TestException(debugMessage(s"rejected payload ${i}"))
} else { } else {
add(i) add(i)
channel forward Deliver(p, destination) channel forward Deliver(p, destination.path)
log.debug(debugMessage(s"processed payload ${i}")) log.debug(debugMessage(s"processed payload ${i}"))
} }
case PersistenceFailure(i: Int, _, _) ⇒ case PersistenceFailure(i: Int, _, _) ⇒

View file

@@ -15,6 +15,7 @@ object PerformanceSpec {
""" """
akka.persistence.performance.cycles.warmup = 300 akka.persistence.performance.cycles.warmup = 300
akka.persistence.performance.cycles.load = 1000 akka.persistence.performance.cycles.load = 1000
akka.persistence.publish-confirmations = on
""" """
case object StartMeasure case object StartMeasure
@@ -166,15 +167,21 @@ class PerformanceSpec extends AkkaSpec(PersistenceSpec.config("leveldb", "Perfor
def stressPersistentChannel(): Unit = { def stressPersistentChannel(): Unit = {
val channel = system.actorOf(PersistentChannel.props()) val channel = system.actorOf(PersistentChannel.props())
val destination = system.actorOf(Props[PerformanceTestDestination]) val destination = system.actorOf(Props[PerformanceTestDestination])
1 to warmupCycles foreach { i ⇒ channel ! Deliver(Persistent(s"msg${i}"), destination) } 1 to warmupCycles foreach { i ⇒ channel ! Deliver(PersistentRepr(s"msg${i}", processorId = "test"), destination.path) }
channel ! Deliver(Persistent(StartMeasure), destination) channel ! Deliver(Persistent(StartMeasure), destination.path)
1 to loadCycles foreach { i ⇒ channel ! Deliver(Persistent(s"msg${i}"), destination) } 1 to loadCycles foreach { i ⇒ channel ! Deliver(PersistentRepr(s"msg${i}", processorId = "test"), destination.path) }
channel ! Deliver(Persistent(StopMeasure), destination) channel ! Deliver(Persistent(StopMeasure), destination.path)
expectMsgPF(100 seconds) { expectMsgPF(100 seconds) {
case throughput: Double ⇒ println(f"\nthroughput = $throughput%.2f persistent commands per second") case throughput: Double ⇒ println(f"\nthroughput = $throughput%.2f persistent messages per second")
} }
} }
def subscribeToConfirmation(probe: TestProbe): Unit =
system.eventStream.subscribe(probe.ref, classOf[DeliveredByPersistentChannel])
def awaitConfirmation(probe: TestProbe): Unit =
probe.expectMsgType[DeliveredByPersistentChannel]
"A command sourced processor" should { "A command sourced processor" should {
"have some reasonable throughput" in { "have some reasonable throughput" in {
stressCommandsourcedProcessor(None) stressCommandsourcedProcessor(None)
@@ -198,7 +205,14 @@ class PerformanceSpec extends AkkaSpec(PersistenceSpec.config("leveldb", "Perfor
"A persistent channel" should { "A persistent channel" should {
"have some reasonable throughput" in { "have some reasonable throughput" in {
val probe = TestProbe()
subscribeToConfirmation(probe)
stressPersistentChannel() stressPersistentChannel()
probe.fishForMessage(100.seconds) {
case DeliveredByPersistentChannel(_, snr, _, _) ⇒ snr == warmupCycles + loadCycles + 2
}
} }
} }
} }
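A note on the expected sequence number: the fishForMessage target warmupCycles + loadCycles + 2 is the delivery sequence number of the final StopMeasure marker, since the StartMeasure and StopMeasure messages travel through the same persistent channel and occupy the two extra slots.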

View file

@@ -50,6 +50,7 @@ object PersistenceSpec {
s""" s"""
akka.actor.serialize-creators = ${serialization} akka.actor.serialize-creators = ${serialization}
akka.actor.serialize-messages = ${serialization} akka.actor.serialize-messages = ${serialization}
akka.persistence.publish-confirmations = on
akka.persistence.publish-plugin-commands = on akka.persistence.publish-plugin-commands = on
akka.persistence.journal.plugin = "akka.persistence.journal.${plugin}" akka.persistence.journal.plugin = "akka.persistence.journal.${plugin}"
akka.persistence.journal.leveldb.dir = "target/journal-${test}" akka.persistence.journal.leveldb.dir = "target/journal-${test}"

View file

@@ -12,88 +12,129 @@ import com.typesafe.config._
import akka.actor._ import akka.actor._
import akka.testkit._ import akka.testkit._
object PersistentChannelSpec {
class SlowDestination(probe: ActorRef, maxReceived: Long) extends Actor {
import context.dispatcher
val delay = 100.millis
var received = Vector.empty[ConfirmablePersistent]
def receive = {
case cp: ConfirmablePersistent ⇒
if (received.isEmpty) context.system.scheduler.scheduleOnce(delay, self, "confirm")
received :+= cp
case "confirm"
if (received.size > maxReceived) probe ! s"number of received messages to high: ${received.size}"
else probe ! received.head.payload
received.head.confirm()
received = received.tail
if (received.nonEmpty) context.system.scheduler.scheduleOnce(delay, self, "confirm")
}
}
}
abstract class PersistentChannelSpec(config: Config) extends ChannelSpec(config) { abstract class PersistentChannelSpec(config: Config) extends ChannelSpec(config) {
override def redeliverChannelSettings: PersistentChannelSettings = import PersistentChannelSpec._
PersistentChannelSettings(redeliverMax = 2, redeliverInterval = 100 milliseconds)
private def redeliverChannelSettings(listener: Option[ActorRef]): PersistentChannelSettings =
PersistentChannelSettings(redeliverMax = 2, redeliverInterval = 100 milliseconds, redeliverFailureListener = listener)
private def createDefaultTestChannel(name: String): ActorRef =
system.actorOf(PersistentChannel.props(s"${name}-default", PersistentChannelSettings()))
override def createDefaultTestChannel(): ActorRef = override def createDefaultTestChannel(): ActorRef =
system.actorOf(PersistentChannel.props(name, PersistentChannelSettings())) createDefaultTestChannel(name)
override def createRedeliverTestChannel(): ActorRef = override def createRedeliverTestChannel(): ActorRef =
system.actorOf(PersistentChannel.props(name, redeliverChannelSettings)) system.actorOf(PersistentChannel.props(s"${name}-redeliver", redeliverChannelSettings(None)))
override def createRedeliverTestChannel(listener: Option[ActorRef]): ActorRef =
system.actorOf(PersistentChannel.props(s"${name}-redeliver-listener", redeliverChannelSettings(listener)))
"A persistent channel" must { "A persistent channel" must {
"support disabling and re-enabling delivery" in {
val confirmProbe = TestProbe()
subscribeToConfirmation(confirmProbe)
defaultTestChannel ! Deliver(Persistent("a"), testActor)
expectMsgPF() { case m @ ConfirmablePersistent("a", _, _) ⇒ m.confirm() }
awaitConfirmation(confirmProbe)
defaultTestChannel ! DisableDelivery
defaultTestChannel ! Deliver(Persistent("b"), testActor)
defaultTestChannel ! EnableDelivery
defaultTestChannel ! Deliver(Persistent("c"), testActor)
expectMsgPF() { case m @ ConfirmablePersistent("b", _, _) ⇒ m.confirm() }
expectMsgPF() { case m @ ConfirmablePersistent("c", _, _) ⇒ m.confirm() }
}
"support Persistent replies to Deliver senders" in { "support Persistent replies to Deliver senders" in {
val channel1 = system.actorOf(PersistentChannel.props(s"${name}-with-reply", PersistentChannelSettings(replyPersistent = true))) val channel1 = system.actorOf(PersistentChannel.props(s"${name}-with-reply", PersistentChannelSettings(replyPersistent = true)))
channel1 ! Deliver(Persistent("a"), system.deadLetters) channel1 ! Deliver(Persistent("a"), system.deadLetters.path)
expectMsgPF() { case Persistent("a", 1) ⇒ } expectMsgPF() { case Persistent("a", _) ⇒ }
channel1 ! Deliver(PersistentRepr("b", sequenceNr = 13), system.deadLetters) channel1 ! Deliver(PersistentRepr("b", sequenceNr = 13), system.deadLetters.path)
expectMsgPF() { case Persistent("b", 13) ⇒ } expectMsgPF() { case Persistent("b", 13) ⇒ }
system.stop(channel1) system.stop(channel1)
} }
"must not modify certain persistent message field" in { "not modify certain persistent message fields" in {
val persistent1 = PersistentRepr(payload = "a", processorId = "p1", confirms = List("c1", "c2"), sender = defaultTestChannel, sequenceNr = 13) val persistent1 = PersistentRepr(payload = "a", processorId = "p1", confirms = List("c1", "c2"), sender = defaultTestChannel, sequenceNr = 13)
val persistent2 = PersistentRepr(payload = "b", processorId = "p1", confirms = List("c1", "c2"), sender = defaultTestChannel) val persistent2 = PersistentRepr(payload = "b", processorId = "p1", confirms = List("c1", "c2"), sender = defaultTestChannel)
defaultTestChannel ! Deliver(persistent1, testActor) defaultTestChannel ! Deliver(persistent1, testActor.path)
defaultTestChannel ! Deliver(persistent2, testActor) defaultTestChannel ! Deliver(persistent2, testActor.path)
expectMsgPF() { case cp @ ConfirmablePersistentImpl("a", 13, "p1", _, _, _, Seq("c1", "c2"), _, _, channel) ⇒ cp.confirm() } expectMsgPF() { case cp @ ConfirmablePersistentImpl("a", 13, "p1", _, _, Seq("c1", "c2"), _, _, channel) ⇒ cp.confirm() }
expectMsgPF() { case cp @ ConfirmablePersistentImpl("b", 2, "p1", _, _, _, Seq("c1", "c2"), _, _, channel) ⇒ cp.confirm() } expectMsgPF() { case cp @ ConfirmablePersistentImpl("b", 2, "p1", _, _, Seq("c1", "c2"), _, _, channel) ⇒ cp.confirm() }
} }
} "redeliver un-confirmed stored messages during recovery" in {
val confirmProbe = TestProbe()
val forwardProbe = TestProbe()
"A persistent channel" when { subscribeToConfirmation(confirmProbe)
"used standalone" must {
"redeliver un-confirmed stored messages during recovery" in {
val confirmProbe = TestProbe()
val forwardProbe = TestProbe()
subscribeToConfirmation(confirmProbe) val channel1 = createDefaultTestChannel("extra")
channel1 tell (Deliver(Persistent("a1"), forwardProbe.ref.path), null)
channel1 tell (Deliver(Persistent("a2"), forwardProbe.ref.path), null)
val channel1 = createDefaultTestChannel() forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a1", _, _) ⇒ /* no confirmation */ }
channel1 tell (Deliver(Persistent("a1"), forwardProbe.ref), null) forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a2", _, _) ⇒ m.confirm() }
channel1 tell (Deliver(Persistent("a2"), forwardProbe.ref), null)
forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a1", _, _) ⇒ /* no confirmation */ } awaitConfirmation(confirmProbe)
forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a2", _, _) ⇒ m.confirm() }
awaitConfirmation(confirmProbe) system.stop(channel1)
system.stop(channel1) val channel2 = createDefaultTestChannel("extra")
channel2 tell (Deliver(Persistent("a3"), forwardProbe.ref.path), null)
val channel2 = createDefaultTestChannel() forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a1", _, _) ⇒ m.confirm() }
channel2 tell (Deliver(Persistent("a3"), forwardProbe.ref), null) forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a3", _, _) ⇒ m.confirm() }
forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a1", _, _) ⇒ m.confirm() } // sender still valid, no need to resolve awaitConfirmation(confirmProbe)
forwardProbe.expectMsgPF() { case m @ ConfirmablePersistent("a3", _, _) ⇒ m.confirm() } awaitConfirmation(confirmProbe)
awaitConfirmation(confirmProbe) system.stop(channel2)
awaitConfirmation(confirmProbe) }
"not flood destinations" in {
val probe = TestProbe()
val settings = PersistentChannelSettings(
redeliverMax = 0,
redeliverInterval = 1.minute,
pendingConfirmationsMax = 4,
pendingConfirmationsMin = 2)
system.stop(channel2) val channel = system.actorOf(PersistentChannel.props(s"${name}-watermark", settings))
} val destination = system.actorOf(Props(classOf[SlowDestination], probe.ref, settings.pendingConfirmationsMax))
1 to 10 foreach { i ⇒ channel ! Deliver(Persistent(i), destination.path) }
1 to 10 foreach { i ⇒ probe.expectMsg(i) }
system.stop(channel)
}
"redeliver on reset" in {
val probe = TestProbe()
val settings = PersistentChannelSettings(
redeliverMax = 0,
redeliverInterval = 1.minute,
pendingConfirmationsMax = 4,
pendingConfirmationsMin = 2)
val channel = system.actorOf(PersistentChannel.props(s"${name}-reset", settings))
1 to 3 foreach { i ⇒ channel ! Deliver(Persistent(i), probe.ref.path) }
1 to 3 foreach { i ⇒ probe.expectMsgPF() { case ConfirmablePersistent(`i`, _, _) ⇒ } }
channel ! Reset
1 to 3 foreach { i ⇒ probe.expectMsgPF() { case ConfirmablePersistent(`i`, _, _) ⇒ } }
system.stop(channel)
} }
} }
} }
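The "not flood destinations" test exercises the watermark-based flow control introduced for persistent channels: delivery is suspended once pendingConfirmationsMax unconfirmed messages are outstanding and resumed when the backlog shrinks to pendingConfirmationsMin. A configuration sketch using only the settings shown above; the values and channel id are illustrative:

import scala.concurrent.duration._
import akka.actor._
import akka.persistence._

object FlowControlSketch extends App {
  val system = ActorSystem("example")
  val settings = PersistentChannelSettings(
    redeliverInterval = 30.seconds,
    redeliverMax = 15,
    pendingConfirmationsMax = 10000, // suspend delivery above this many unconfirmed messages
    pendingConfirmationsMin = 2000)  // resume delivery once the backlog falls to this
  val channel = system.actorOf(PersistentChannel.props("flow-controlled-channel", settings))
}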

View file

@@ -12,8 +12,6 @@ import com.typesafe.config._
import akka.actor._ import akka.actor._
import akka.testkit._ import akka.testkit._
import akka.persistence.JournalProtocol.Confirm
object ProcessorChannelSpec { object ProcessorChannelSpec {
class TestProcessor(name: String) extends NamedProcessor(name) { class TestProcessor(name: String) extends NamedProcessor(name) {
val destination = context.actorOf(Props[TestDestination]) val destination = context.actorOf(Props[TestDestination])
@@ -23,10 +21,10 @@ object ProcessorChannelSpec {
case m @ Persistent(s: String, _) if s.startsWith("a") ⇒ case m @ Persistent(s: String, _) if s.startsWith("a") ⇒
// forward to destination via channel, // forward to destination via channel,
// destination replies to initial sender // destination replies to initial sender
channel forward Deliver(m.withPayload(s"fw: ${s}"), destination) channel forward Deliver(m.withPayload(s"fw: ${s}"), destination.path)
case m @ Persistent(s: String, _) if s.startsWith("b") ⇒ case m @ Persistent(s: String, _) if s.startsWith("b") ⇒
// reply to sender via channel // reply to sender via channel
channel ! Deliver(m.withPayload(s"re: ${s}"), sender) channel ! Deliver(m.withPayload(s"re: ${s}"), sender.path)
} }
} }
@@ -40,7 +38,7 @@ object ProcessorChannelSpec {
val channel = context.actorOf(Channel.props("channel", ChannelSettings(redeliverMax = 1, redeliverInterval = 100 milliseconds))) val channel = context.actorOf(Channel.props("channel", ChannelSettings(redeliverMax = 1, redeliverInterval = 100 milliseconds)))
def receive = { def receive = {
case p: Persistent ⇒ channel ! Deliver(p, destination) case p: Persistent ⇒ channel ! Deliver(p, destination.path)
case "replay" ⇒ throw new TestException("replay requested") case "replay" ⇒ throw new TestException("replay requested")
} }
} }
@@ -52,7 +50,7 @@ object ProcessorChannelSpec {
def handleEvent(event: String) = { def handleEvent(event: String) = {
events = event :: events events = event :: events
channel ! Deliver(Persistent(event), destination) channel ! Deliver(Persistent(event), destination.path)
} }
def receiveReplay: Receive = { def receiveReplay: Receive = {
@@ -83,10 +81,10 @@ abstract class ProcessorChannelSpec(config: Config) extends AkkaSpec(config) wit
} }
def subscribeToConfirmation(probe: TestProbe): Unit = def subscribeToConfirmation(probe: TestProbe): Unit =
system.eventStream.subscribe(probe.ref, classOf[Confirm]) system.eventStream.subscribe(probe.ref, classOf[Delivered])
def awaitConfirmation(probe: TestProbe): Unit = def awaitConfirmation(probe: TestProbe): Unit =
probe.expectMsgType[Confirm] probe.expectMsgType[Delivered]
def createTestProcessor(): ActorRef = def createTestProcessor(): ActorRef =
system.actorOf(Props(classOf[TestProcessor], name)) system.actorOf(Props(classOf[TestProcessor], name))

View file

@@ -304,14 +304,14 @@ abstract class ProcessorSpec(config: Config) extends AkkaSpec(config) with Persi
"support single message deletions" in { "support single message deletions" in {
val deleteProbe = TestProbe() val deleteProbe = TestProbe()
system.eventStream.subscribe(deleteProbe.ref, classOf[Delete]) system.eventStream.subscribe(deleteProbe.ref, classOf[DeleteMessages])
val processor1 = namedProcessor[DeleteMessageTestProcessor] val processor1 = namedProcessor[DeleteMessageTestProcessor]
processor1 ! Persistent("c") processor1 ! Persistent("c")
processor1 ! Persistent("d") processor1 ! Persistent("d")
processor1 ! Persistent("e") processor1 ! Persistent("e")
processor1 ! Delete1(4) processor1 ! Delete1(4)
deleteProbe.expectMsgType[Delete] deleteProbe.expectMsgType[DeleteMessages]
val processor2 = namedProcessor[DeleteMessageTestProcessor] val processor2 = namedProcessor[DeleteMessageTestProcessor]
processor2 ! GetState processor2 ! GetState
@@ -321,19 +321,29 @@ abstract class ProcessorSpec(config: Config) extends AkkaSpec(config) with Persi
"support bulk message deletions" in { "support bulk message deletions" in {
val deleteProbe = TestProbe() val deleteProbe = TestProbe()
system.eventStream.subscribe(deleteProbe.ref, classOf[Delete]) system.eventStream.subscribe(deleteProbe.ref, classOf[DeleteMessagesTo])
val processor1 = namedProcessor[DeleteMessageTestProcessor] val processor1 = namedProcessor[DeleteMessageTestProcessor]
processor1 ! Persistent("c") processor1 ! Persistent("c")
processor1 ! Persistent("d") processor1 ! Persistent("d")
processor1 ! Persistent("e") processor1 ! Persistent("e")
processor1 ! DeleteN(4) processor1 ! DeleteN(4)
deleteProbe.expectMsgType[Delete] deleteProbe.expectMsgType[DeleteMessagesTo]
val processor2 = namedProcessor[DeleteMessageTestProcessor] val processor2 = namedProcessor[DeleteMessageTestProcessor]
processor2 ! GetState processor2 ! GetState
expectMsg(List("e-5")) expectMsg(List("e-5"))
processor2 ! Persistent("f")
processor2 ! Persistent("g")
processor2 ! DeleteN(6)
deleteProbe.expectMsgType[DeleteMessagesTo]
val processor3 = namedProcessor[DeleteMessageTestProcessor]
processor3 ! GetState
expectMsg(List("g-7"))
} }
} }
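The renamed journal protocol separates single deletions (DeleteMessages) from range deletions (DeleteMessagesTo). A sketch of how a processor like DeleteMessageTestProcessor presumably maps its test commands onto the deletion API; Delete1 and DeleteN mirror the spec's commands, and deleteMessage / deleteMessages are assumed to be the Processor methods behind the two protocol messages:

import akka.actor._
import akka.persistence._

case class Delete1(sequenceNr: Long)   // delete a single message
case class DeleteN(toSequenceNr: Long) // delete all messages up to a sequence number

class DeletingProcessor extends Processor {
  def receive = {
    case p: Persistent  ⇒ // handle payload
    case Delete1(snr)   ⇒ deleteMessage(snr)    // journal sees DeleteMessages
    case DeleteN(toSnr) ⇒ deleteMessages(toSnr) // journal sees DeleteMessagesTo
  }
}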

View file

@@ -0,0 +1,279 @@
/**
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.persistence
import scala.concurrent.duration._
import com.typesafe.config.Config
import akka.actor._
import akka.testkit._
object ViewSpec {
class TestProcessor(name: String, probe: ActorRef) extends NamedProcessor(name) {
def receive = {
case Persistent(payload, sequenceNr) ⇒
probe ! s"${payload}-${sequenceNr}"
}
}
class TestView(name: String, probe: ActorRef, interval: FiniteDuration, var failAt: Option[String]) extends View {
def this(name: String, probe: ActorRef, interval: FiniteDuration) =
this(name, probe, interval, None)
def this(name: String, probe: ActorRef) =
this(name, probe, 100.milliseconds)
override def autoUpdateInterval: FiniteDuration = interval.dilated(context.system)
override val processorId: String = name
var last: String = _
def receive = {
case "get"
probe ! last
case "boom"
throw new TestException("boom")
case Persistent(payload, _) if Some(payload) == failAt
throw new TestException("boom")
case Persistent(payload, sequenceNr)
last = s"replicated-${payload}-${sequenceNr}"
probe ! last
}
override def postRestart(reason: Throwable): Unit = {
super.postRestart(reason)
failAt = None
}
}
class PassiveTestView(name: String, probe: ActorRef, var failAt: Option[String]) extends View {
override val processorId: String = name
override def autoUpdate: Boolean = false
override def autoUpdateReplayMax: Long = 0L // no message replay during initial recovery
var last: String = _
def receive = {
case "get"
probe ! last
case Persistent(payload, _) if Some(payload) == failAt
throw new TestException("boom")
case Persistent(payload, sequenceNr)
last = s"replicated-${payload}-${sequenceNr}"
}
override def postRestart(reason: Throwable): Unit = {
super.postRestart(reason)
failAt = None
}
}
class TestDestination(probe: ActorRef) extends Actor {
def receive = {
case cp @ ConfirmablePersistent(payload, sequenceNr, _) ⇒
cp.confirm()
probe ! s"${payload}-${sequenceNr}"
}
}
class EmittingView(name: String, destination: ActorRef) extends View {
override def autoUpdateInterval: FiniteDuration = 100.milliseconds.dilated(context.system)
override val processorId: String = name
val channel = context.actorOf(Channel.props(s"${name}-channel"))
def receive = {
case "restart"
throw new TestException("restart requested")
case Persistent(payload, sequenceNr)
channel ! Deliver(Persistent(s"emitted-${payload}"), destination.path)
}
}
class SnapshottingView(name: String, probe: ActorRef) extends View {
override def autoUpdateInterval: FiniteDuration = 100.microseconds.dilated(context.system)
override val processorId: String = name
override val viewId: String = s"${name}-replicator"
var last: String = _
def receive = {
case "get"
probe ! last
case "snap"
saveSnapshot(last)
case "restart"
throw new TestException("restart requested")
case SaveSnapshotSuccess(_)
probe ! "snapped"
case SnapshotOffer(metadata, snapshot: String)
last = snapshot
probe ! last
case Persistent(payload, sequenceNr)
last = s"replicated-${payload}-${sequenceNr}"
probe ! last
}
}
}
abstract class ViewSpec(config: Config) extends AkkaSpec(config) with PersistenceSpec with ImplicitSender {
import ViewSpec._
var processor: ActorRef = _
var view: ActorRef = _
var processorProbe: TestProbe = _
var viewProbe: TestProbe = _
override protected def beforeEach(): Unit = {
super.beforeEach()
processorProbe = TestProbe()
viewProbe = TestProbe()
processor = system.actorOf(Props(classOf[TestProcessor], name, processorProbe.ref))
processor ! Persistent("a")
processor ! Persistent("b")
processorProbe.expectMsg("a-1")
processorProbe.expectMsg("b-2")
}
override protected def afterEach(): Unit = {
system.stop(processor)
system.stop(view)
super.afterEach()
}
def subscribeToConfirmation(probe: TestProbe): Unit =
system.eventStream.subscribe(probe.ref, classOf[Delivered])
def awaitConfirmation(probe: TestProbe): Unit =
probe.expectMsgType[Delivered]
"A view" must {
"receive past updates from a processor" in {
view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref))
viewProbe.expectMsg("replicated-a-1")
viewProbe.expectMsg("replicated-b-2")
}
"receive live updates from a processor" in {
view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref))
viewProbe.expectMsg("replicated-a-1")
viewProbe.expectMsg("replicated-b-2")
processor ! Persistent("c")
viewProbe.expectMsg("replicated-c-3")
}
"run updates at specified interval" in {
view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref, 2.seconds))
// initial update is done on start
viewProbe.expectMsg("replicated-a-1")
viewProbe.expectMsg("replicated-b-2")
// live updates take up to 2 seconds to replicate
processor ! Persistent("c")
viewProbe.expectNoMsg(1.second)
viewProbe.expectMsg("replicated-c-3")
}
"run updates on user request" in {
view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref, 5.seconds))
viewProbe.expectMsg("replicated-a-1")
viewProbe.expectMsg("replicated-b-2")
processor ! Persistent("c")
processorProbe.expectMsg("c-3")
view ! Update(await = false)
viewProbe.expectMsg("replicated-c-3")
}
"run updates on user request and await update" in {
view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref, 5.seconds))
viewProbe.expectMsg("replicated-a-1")
viewProbe.expectMsg("replicated-b-2")
processor ! Persistent("c")
processorProbe.expectMsg("c-3")
view ! Update(await = true)
view ! "get"
viewProbe.expectMsg("replicated-c-3")
}
"run updates again on failure outside an update cycle" in {
view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref, 5.seconds))
viewProbe.expectMsg("replicated-a-1")
viewProbe.expectMsg("replicated-b-2")
view ! "boom"
viewProbe.expectMsg("replicated-a-1")
viewProbe.expectMsg("replicated-b-2")
}
"run updates again on failure during an update cycle" in {
processor ! Persistent("c")
processorProbe.expectMsg("c-3")
view = system.actorOf(Props(classOf[TestView], name, viewProbe.ref, 5.seconds, Some("b")))
viewProbe.expectMsg("replicated-a-1")
viewProbe.expectMsg("replicated-a-1")
viewProbe.expectMsg("replicated-b-2")
viewProbe.expectMsg("replicated-c-3")
}
"run size-limited updates on user request" in {
processor ! Persistent("c")
processor ! Persistent("d")
processor ! Persistent("e")
processor ! Persistent("f")
processorProbe.expectMsg("c-3")
processorProbe.expectMsg("d-4")
processorProbe.expectMsg("e-5")
processorProbe.expectMsg("f-6")
view = system.actorOf(Props(classOf[PassiveTestView], name, viewProbe.ref, None))
view ! Update(await = true, replayMax = 2)
view ! "get"
viewProbe.expectMsg("replicated-b-2")
view ! Update(await = true, replayMax = 1)
view ! "get"
viewProbe.expectMsg("replicated-c-3")
view ! Update(await = true, replayMax = 4)
view ! "get"
viewProbe.expectMsg("replicated-f-6")
}
}
"A view" can {
"use channels" in {
val confirmProbe = TestProbe()
val destinationProbe = TestProbe()
val destination = system.actorOf(Props(classOf[TestDestination], destinationProbe.ref))
subscribeToConfirmation(confirmProbe)
view = system.actorOf(Props(classOf[EmittingView], name, destination))
destinationProbe.expectMsg("emitted-a-1")
destinationProbe.expectMsg("emitted-b-2")
awaitConfirmation(confirmProbe)
awaitConfirmation(confirmProbe)
view ! "restart"
processor ! Persistent("c")
destinationProbe.expectMsg("emitted-c-3")
awaitConfirmation(confirmProbe)
}
"take snapshots" in {
view = system.actorOf(Props(classOf[SnapshottingView], name, viewProbe.ref))
viewProbe.expectMsg("replicated-a-1")
viewProbe.expectMsg("replicated-b-2")
view ! "snap"
viewProbe.expectMsg("snapped")
view ! "restart"
processor ! Persistent("c")
viewProbe.expectMsg("replicated-b-2")
viewProbe.expectMsg("replicated-c-3")
}
}
}
class LeveldbViewSpec extends ViewSpec(PersistenceSpec.config("leveldb", "LeveldbViewSpec"))
class InmemViewSpec extends ViewSpec(PersistenceSpec.config("inmem", "InmemViewSpec"))
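PassiveTestView demonstrates the new update controls for views: autoUpdate = false switches off interval polling, autoUpdateReplayMax bounds replay during initial recovery, and Update(await, replayMax) drives replication explicitly. Condensed into a sketch; the class name and processor id are illustrative:

import akka.actor._
import akka.persistence._

class ManualView extends View {
  override val processorId = "processor-1" // assumed id of the processor stream to follow
  override def autoUpdate = false          // no scheduled update cycles
  override def autoUpdateReplayMax = 0L    // no replay during initial recovery

  var state: List[String] = Nil
  def receive = {
    case Persistent(payload, sequenceNr) ⇒ state = s"${payload}-${sequenceNr}" :: state
  }
}

// the owner then pulls updates in bounded chunks:
//   view ! Update(await = true, replayMax = 2)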

View file

@@ -4,7 +4,7 @@
package akka.persistence.journal.chaos package akka.persistence.journal.chaos
import scala.collection.immutable.Seq import scala.collection.immutable
import scala.concurrent.Future import scala.concurrent.Future
import scala.concurrent.forkjoin.ThreadLocalRandom import scala.concurrent.forkjoin.ThreadLocalRandom
@@ -15,11 +15,17 @@ import akka.persistence.journal.inmem.InmemMessages
class WriteFailedException(ps: Seq[PersistentRepr]) class WriteFailedException(ps: Seq[PersistentRepr])
extends TestException(s"write failed for payloads = [${ps.map(_.payload)}]") extends TestException(s"write failed for payloads = [${ps.map(_.payload)}]")
class ReplayFailedException(ps: Seq[PersistentRepr]) class ConfirmFailedException(cs: Seq[PersistentConfirmation])
extends TestException(s"replay failed after payloads = [${ps.map(_.payload)}]") extends TestException(s"write failed for confirmations = [${cs.map(c s"${c.processorId}-${c.sequenceNr}-${c.channelId}")}]")
class DeleteFailedException(processorId: String, fromSequenceNr: Long, toSequenceNr: Long) class ReplayFailedException(ps: Seq[PersistentRepr])
extends TestException(s"delete failed for processor id = [${processorId}], from sequence number = [${fromSequenceNr}], to sequence number = [${toSequenceNr}]") extends TestException(s"recovery failed after replaying payloads = [${ps.map(_.payload)}]")
class ReadHighestFailedException
extends TestException(s"recovery failed when reading highest sequence number")
class DeleteFailedException(messageIds: immutable.Seq[PersistentId])
extends TestException(s"delete failed for message ids = [${messageIds}]")
/** /**
* Keep [[ChaosJournal]] state in an external singleton so that it survives journal restarts. * Keep [[ChaosJournal]] state in an external singleton so that it survives journal restarts.
@@ -32,33 +38,44 @@ class ChaosJournal extends SyncWriteJournal {
val config = context.system.settings.config.getConfig("akka.persistence.journal.chaos") val config = context.system.settings.config.getConfig("akka.persistence.journal.chaos")
val writeFailureRate = config.getDouble("write-failure-rate") val writeFailureRate = config.getDouble("write-failure-rate")
val confirmFailureRate = config.getDouble("confirm-failure-rate")
val deleteFailureRate = config.getDouble("delete-failure-rate") val deleteFailureRate = config.getDouble("delete-failure-rate")
val replayFailureRate = config.getDouble("replay-failure-rate") val replayFailureRate = config.getDouble("replay-failure-rate")
val readHighestFailureRate = config.getDouble("read-highest-failure-rate")
def random = ThreadLocalRandom.current def random = ThreadLocalRandom.current
def write(persistentBatch: Seq[PersistentRepr]): Unit = def writeMessages(messages: immutable.Seq[PersistentRepr]): Unit =
if (shouldFail(writeFailureRate)) throw new WriteFailedException(persistentBatch) if (shouldFail(writeFailureRate)) throw new WriteFailedException(messages)
else persistentBatch.foreach(add) else messages.foreach(add)
def delete(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, permanent: Boolean): Unit = def writeConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Unit =
if (shouldFail(deleteFailureRate)) throw new DeleteFailedException(processorId, fromSequenceNr, toSequenceNr) if (shouldFail(confirmFailureRate)) throw new ConfirmFailedException(confirmations)
else fromSequenceNr to toSequenceNr foreach { snr ⇒ if (permanent) del(processorId, snr) else update(processorId, snr)(_.update(deleted = true)) } else confirmations.foreach(cnf ⇒ update(cnf.processorId, cnf.sequenceNr)(p ⇒ p.update(confirms = cnf.channelId +: p.confirms)))
def confirm(processorId: String, sequenceNr: Long, channelId: String): Unit = def deleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Unit =
update(processorId, sequenceNr)(p ⇒ p.update(confirms = channelId +: p.confirms)) if (shouldFail(deleteFailureRate)) throw new DeleteFailedException(messageIds)
else if (permanent) messageIds.foreach(mid ⇒ del(mid.processorId, mid.sequenceNr))
else messageIds.foreach(mid ⇒ update(mid.processorId, mid.sequenceNr)(_.update(deleted = true)))
def replayAsync(processorId: String, fromSequenceNr: Long, toSequenceNr: Long)(replayCallback: (PersistentRepr) ⇒ Unit): Future[Long] = def deleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Unit =
deleteMessages((1L to toSequenceNr).map(PersistentIdImpl(processorId, _)), permanent)
def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: (PersistentRepr) ⇒ Unit): Future[Unit] =
if (shouldFail(replayFailureRate)) { if (shouldFail(replayFailureRate)) {
val rm = read(processorId, fromSequenceNr, toSequenceNr) val rm = read(processorId, fromSequenceNr, toSequenceNr, max)
val sm = rm.take(random.nextInt(rm.length + 1)) val sm = rm.take(random.nextInt(rm.length + 1))
sm.foreach(replayCallback) sm.foreach(replayCallback)
Future.failed(new ReplayFailedException(sm)) Future.failed(new ReplayFailedException(sm))
} else { } else {
read(processorId, fromSequenceNr, toSequenceNr).foreach(replayCallback) read(processorId, fromSequenceNr, toSequenceNr, max).foreach(replayCallback)
Future.successful(maxSequenceNr(processorId)) Future.successful(())
} }
def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] =
if (shouldFail(readHighestFailureRate)) Future.failed(new ReadHighestFailedException)
else Future.successful(highestSequenceNr(processorId))
def shouldFail(rate: Double): Boolean = def shouldFail(rate: Double): Boolean =
random.nextDouble() < rate random.nextDouble() < rate
} }
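The chaos journal doubles as a map of the reshaped journal SPI: writes and confirmations are batched, deletions come in id-based and range-based forms, and recovery is split into asyncReplayMessages plus a separate asyncReadHighestSequenceNr. A no-op skeleton, assuming only the signatures visible above:

import scala.collection.immutable
import scala.concurrent.Future
import akka.persistence._
import akka.persistence.journal.SyncWriteJournal

class NoopJournal extends SyncWriteJournal {
  def writeMessages(messages: immutable.Seq[PersistentRepr]): Unit = ()
  def writeConfirmations(confirmations: immutable.Seq[PersistentConfirmation]): Unit = ()
  def deleteMessages(messageIds: immutable.Seq[PersistentId], permanent: Boolean): Unit = ()
  def deleteMessagesTo(processorId: String, toSequenceNr: Long, permanent: Boolean): Unit = ()
  def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit): Future[Unit] =
    Future.successful(()) // nothing stored, nothing to replay
  def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] =
    Future.successful(0L) // empty journal
}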

View file

@@ -5,17 +5,21 @@
package akka.persistence.serialization package akka.persistence.serialization
import scala.collection.immutable import scala.collection.immutable
import scala.concurrent._
import scala.concurrent.duration._
import scala.util._
import com.typesafe.config._ import com.typesafe.config._
import akka.actor._ import akka.actor._
import akka.pattern.ask
import akka.persistence._ import akka.persistence._
import akka.persistence.JournalProtocol.Confirm
import akka.serialization._ import akka.serialization._
import akka.testkit._ import akka.testkit._
import akka.util.Timeout
object SerializerSpecConfigs { object SerializerSpecConfigs {
val customSerializers = val customSerializers = ConfigFactory.parseString(
""" """
akka.actor { akka.actor {
serializers { serializers {
@@ -27,9 +31,9 @@ object SerializerSpecConfigs {
"akka.persistence.serialization.MySnapshot" = my-snapshot "akka.persistence.serialization.MySnapshot" = my-snapshot
} }
} }
""" """)
val remoteCommon = val remote = ConfigFactory.parseString(
""" """
akka { akka {
actor { actor {
@@ -37,16 +41,16 @@
} }
remote { remote {
enabled-transports = ["akka.remote.netty.tcp"] enabled-transports = ["akka.remote.netty.tcp"]
netty.tcp.hostname = "127.0.0.1" netty.tcp {
hostname = "127.0.0.1"
port = 0
}
} }
loglevel = ERROR loglevel = ERROR
log-dead-letters = 0 log-dead-letters = 0
log-dead-letters-during-shutdown = off log-dead-letters-during-shutdown = off
} }
""" """)
val systemA = "akka.remote.netty.tcp.port = 0"
val systemB = "akka.remote.netty.tcp.port = 0"
def config(configs: String*): Config = def config(configs: String*): Config =
configs.foldLeft(ConfigFactory.empty)((r, c) ⇒ r.withFallback(ConfigFactory.parseString(c))) configs.foldLeft(ConfigFactory.empty)((r, c) ⇒ r.withFallback(ConfigFactory.parseString(c)))
@@ -54,7 +58,7 @@
import SerializerSpecConfigs._ import SerializerSpecConfigs._
class SnapshotSerializerPersistenceSpec extends AkkaSpec(config(customSerializers)) { class SnapshotSerializerPersistenceSpec extends AkkaSpec(customSerializers) {
val serialization = SerializationExtension(system) val serialization = SerializationExtension(system)
"A snapshot serializer" must { "A snapshot serializer" must {
@@ -70,13 +74,13 @@ class SnapshotSerializerPersistenceSpec extends AkkaSpec(config(customSerializer
} }
} }
class MessageSerializerPersistenceSpec extends AkkaSpec(config(customSerializers)) { class MessageSerializerPersistenceSpec extends AkkaSpec(customSerializers) {
val serialization = SerializationExtension(system) val serialization = SerializationExtension(system)
"A message serializer" when { "A message serializer" when {
"not given a manifest" must { "not given a manifest" must {
"handle custom ConfirmablePersistent message serialization" in { "handle custom ConfirmablePersistent message serialization" in {
val persistent = PersistentRepr(MyPayload("a"), 13, "p1", true, true, 3, List("c1", "c2"), confirmable = true, Confirm("p2", 14, "c2"), testActor, testActor) val persistent = PersistentRepr(MyPayload("a"), 13, "p1", true, 3, List("c1", "c2"), confirmable = true, DeliveredByChannel("p2", "c2", 14), testActor, testActor)
val serializer = serialization.findSerializerFor(persistent) val serializer = serialization.findSerializerFor(persistent)
val bytes = serializer.toBinary(persistent) val bytes = serializer.toBinary(persistent)
@@ -85,7 +89,7 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(config(customSerializers
deserialized should be(persistent.withPayload(MyPayload(".a."))) deserialized should be(persistent.withPayload(MyPayload(".a.")))
} }
"handle custom Persistent message serialization" in { "handle custom Persistent message serialization" in {
val persistent = PersistentRepr(MyPayload("a"), 13, "p1", true, true, 0, List("c1", "c2"), confirmable = false, Confirm("p2", 14, "c2"), testActor, testActor) val persistent = PersistentRepr(MyPayload("a"), 13, "p1", true, 0, List("c1", "c2"), confirmable = false, DeliveredByChannel("p2", "c2", 14), testActor, testActor)
val serializer = serialization.findSerializerFor(persistent) val serializer = serialization.findSerializerFor(persistent)
val bytes = serializer.toBinary(persistent) val bytes = serializer.toBinary(persistent)
@@ -96,7 +100,7 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(config(customSerializers
} }
"given a PersistentRepr manifest" must { "given a PersistentRepr manifest" must {
"handle custom ConfirmablePersistent message serialization" in { "handle custom ConfirmablePersistent message serialization" in {
val persistent = PersistentRepr(MyPayload("b"), 13, "p1", true, true, 3, List("c1", "c2"), confirmable = true, Confirm("p2", 14, "c2"), testActor, testActor) val persistent = PersistentRepr(MyPayload("b"), 13, "p1", true, 3, List("c1", "c2"), confirmable = true, DeliveredByChannel("p2", "c2", 14), testActor, testActor)
val serializer = serialization.findSerializerFor(persistent) val serializer = serialization.findSerializerFor(persistent)
val bytes = serializer.toBinary(persistent) val bytes = serializer.toBinary(persistent)
@@ -105,7 +109,7 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(config(customSerializers
deserialized should be(persistent.withPayload(MyPayload(".b."))) deserialized should be(persistent.withPayload(MyPayload(".b.")))
} }
"handle custom Persistent message serialization" in { "handle custom Persistent message serialization" in {
val persistent = PersistentRepr(MyPayload("b"), 13, "p1", true, true, 3, List("c1", "c2"), confirmable = true, Confirm("p2", 14, "c2"), testActor, testActor) val persistent = PersistentRepr(MyPayload("b"), 13, "p1", true, 3, List("c1", "c2"), confirmable = true, DeliveredByChannel("p2", "c2", 14), testActor, testActor)
val serializer = serialization.findSerializerFor(persistent) val serializer = serialization.findSerializerFor(persistent)
val bytes = serializer.toBinary(persistent) val bytes = serializer.toBinary(persistent)
@@ -115,12 +119,21 @@ class MessageSerializerPersistenceSpec extends AkkaSpec(config(customSerializers
} }
} }
"given a Confirm manifest" must { "given a Confirm manifest" must {
"handle Confirm message serialization" in { "handle DeliveryByChannel message serialization" in {
val confirmation = Confirm("x", 2, "y") val confirmation = DeliveredByChannel("p2", "c2", 14)
val serializer = serialization.findSerializerFor(confirmation) val serializer = serialization.findSerializerFor(confirmation)
val bytes = serializer.toBinary(confirmation) val bytes = serializer.toBinary(confirmation)
val deserialized = serializer.fromBinary(bytes, Some(classOf[Confirm])) val deserialized = serializer.fromBinary(bytes, Some(classOf[DeliveredByChannel]))
deserialized should be(confirmation)
}
"handle DeliveredByPersistentChannel message serialization" in {
val confirmation = DeliveredByPersistentChannel("c2", 14)
val serializer = serialization.findSerializerFor(confirmation)
val bytes = serializer.toBinary(confirmation)
val deserialized = serializer.fromBinary(bytes, Some(classOf[DeliveredByPersistentChannel]))
deserialized should be(confirmation) deserialized should be(confirmation)
} }
@@ -140,19 +153,27 @@ object MessageSerializerRemotingSpec {
case PersistentBatch(Persistent(MyPayload(data), _) +: tail) ⇒ sender ! s"b${data}" case PersistentBatch(Persistent(MyPayload(data), _) +: tail) ⇒ sender ! s"b${data}"
case ConfirmablePersistent(MyPayload(data), _, _) ⇒ sender ! s"c${data}" case ConfirmablePersistent(MyPayload(data), _, _) ⇒ sender ! s"c${data}"
case Persistent(MyPayload(data), _) ⇒ sender ! s"p${data}" case Persistent(MyPayload(data), _) ⇒ sender ! s"p${data}"
case p @ Confirm(pid, msnr, cid, wsnr, ep) ⇒ sender ! s"${pid},${msnr},${cid},${wsnr},${ep.path.name.startsWith("testActor")}" case DeliveredByChannel(pid, cid, msnr, dsnr, ep) ⇒ sender ! s"${pid},${cid},${msnr},${dsnr},${ep.path.name.startsWith("testActor")}"
case DeliveredByPersistentChannel(cid, msnr, dsnr, ep) ⇒ sender ! s"${cid},${msnr},${dsnr},${ep.path.name.startsWith("testActor")}"
case Deliver(Persistent(payload, _), dp) ⇒ context.actorSelection(dp) ! payload
} }
} }
def port(system: ActorSystem) = def port(system: ActorSystem) =
system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress.port.get address(system).port.get
def address(system: ActorSystem) =
system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
} }
class MessageSerializerRemotingSpec extends AkkaSpec(config(systemA).withFallback(config(customSerializers, remoteCommon))) with ImplicitSender { class MessageSerializerRemotingSpec extends AkkaSpec(remote.withFallback(customSerializers)) with ImplicitSender {
import MessageSerializerRemotingSpec._ implicit val timeout = Timeout(5.seconds)
val remoteSystem = ActorSystem("remote", config(systemB).withFallback(config(customSerializers, remoteCommon))) import MessageSerializerRemotingSpec._
val localActor = system.actorOf(Props(classOf[LocalActor], port(remoteSystem))) import system.dispatcher
val remoteSystem = ActorSystem("remote", remote.withFallback(customSerializers))
val localActor = system.actorOf(Props(classOf[LocalActor], port(remoteSystem)), "local")
override protected def atStartup() { override protected def atStartup() {
remoteSystem.actorOf(Props[RemoteActor], "remote") remoteSystem.actorOf(Props[RemoteActor], "remote")
@@ -176,9 +197,17 @@ class MessageSerializerRemotingSpec extends AkkaSpec(config(systemA).withFallbac
localActor ! PersistentBatch(immutable.Seq(Persistent(MyPayload("a")))) localActor ! PersistentBatch(immutable.Seq(Persistent(MyPayload("a"))))
expectMsg("b.a.") expectMsg("b.a.")
} }
"serialize Confirm messages during remoting" in { "serialize DeliveredByChannel messages during remoting" in {
localActor ! Confirm("a", 2, "b", 3, testActor) localActor ! DeliveredByChannel("a", "b", 2, 3, testActor)
expectMsg("a,2,b,3,true") expectMsg("a,b,2,3,true")
}
"serialize DeliveredByPersistentChannel messages during remoting" in {
localActor ! DeliveredByPersistentChannel("c", 2, 3, testActor)
expectMsg("c,2,3,true")
}
"serialize Deliver messages during remoting" in {
localActor ! Deliver(Persistent("a"), ActorPath.fromString(testActor.path.toStringWithAddress(address(system))))
expectMsg("a")
} }
} }
} }
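Deliver is serializable now that it carries an ActorPath rather than an ActorRef; for a destination reachable over remoting the path string must include the sending system's address, which is what toStringWithAddress supplies in the test above. A small helper sketch built only from the calls shown in this spec:

import akka.actor._

// derive a remoting-capable ActorPath for a locally created actor
def remotablePath(system: ActorSystem, ref: ActorRef): ActorPath = {
  val defaultAddress =
    system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
  ActorPath.fromString(ref.path.toStringWithAddress(defaultAddress))
}

// usage (assuming a channel and destination are in scope):
//   channel ! Deliver(Persistent("hello"), remotablePath(system, destination))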

View file

@@ -22,7 +22,9 @@ public class ProcessorChannelExample {
if (message instanceof Persistent) { if (message instanceof Persistent) {
Persistent msg = (Persistent)message; Persistent msg = (Persistent)message;
System.out.println("processed " + msg.payload()); System.out.println("processed " + msg.payload());
channel.tell(Deliver.create(msg.withPayload("processed " + msg.payload()), destination), getSelf()); channel.tell(Deliver.create(msg.withPayload("processed " + msg.payload()), destination.path()), getSelf());
} else if (message instanceof String) {
System.out.println("reply = " + message);
} }
} }
} }
@@ -32,8 +34,9 @@ public class ProcessorChannelExample {
public void onReceive(Object message) throws Exception { public void onReceive(Object message) throws Exception {
if (message instanceof ConfirmablePersistent) { if (message instanceof ConfirmablePersistent) {
ConfirmablePersistent msg = (ConfirmablePersistent)message; ConfirmablePersistent msg = (ConfirmablePersistent)message;
msg.confirm();
System.out.println("received " + msg.payload()); System.out.println("received " + msg.payload());
getSender().tell(String.format("re: %s (%d)", msg.payload(), msg.sequenceNr()), null);
msg.confirm();
} }
} }
} }

View file

@@ -0,0 +1,90 @@
package sample.persistence.japi;
import java.util.Scanner;
import akka.actor.*;
import akka.persistence.*;
public class ViewExample {
public static class ExampleProcessor extends UntypedProcessor {
@Override
public String processorId() {
return "processor-5";
}
@Override
public void onReceive(Object message) throws Exception {
if (message instanceof Persistent) {
Persistent p = (Persistent)message;
System.out.println(String.format("processor received %s (sequence nr = %d)", p.payload(), p.sequenceNr()));
}
}
}
public static class ExampleView extends UntypedView {
private final ActorRef destination = getContext().actorOf(Props.create(ExampleDestination.class));
private final ActorRef channel = getContext().actorOf(Channel.props("channel"));
private int numReplicated = 0;
@Override
public String viewId() {
return "view-5";
}
@Override
public String processorId() {
return "processor-5";
}
@Override
public void onReceive(Object message) throws Exception {
if (message instanceof Persistent) {
Persistent p = (Persistent)message;
numReplicated += 1;
System.out.println(String.format("view received %s (sequence nr = %d, num replicated = %d)", p.payload(), p.sequenceNr(), numReplicated));
channel.tell(Deliver.create(p.withPayload("replicated-" + p.payload()), destination.path()), getSelf());
} else if (message instanceof SnapshotOffer) {
SnapshotOffer so = (SnapshotOffer)message;
numReplicated = (Integer)so.snapshot();
System.out.println(String.format("view received snapshot offer %s (metadata = %s)", numReplicated, so.metadata()));
} else if (message.equals("snap")) {
saveSnapshot(numReplicated);
}
}
}
public static class ExampleDestination extends UntypedActor {
@Override
public void onReceive(Object message) throws Exception {
if (message instanceof ConfirmablePersistent) {
ConfirmablePersistent cp = (ConfirmablePersistent)message;
System.out.println(String.format("destination received %s (sequence nr = %s)", cp.payload(), cp.sequenceNr()));
cp.confirm();
}
}
}
public static void main(String... args) throws Exception {
final ActorSystem system = ActorSystem.create("example");
final ActorRef processor = system.actorOf(Props.create(ExampleProcessor.class));
final ActorRef view = system.actorOf(Props.create(ExampleView.class));
Scanner scanner = new Scanner(System.in);
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
if (line.equals("exit")) {
break;
} else if (line.equals("sync")) {
view.tell(Update.create(false), null);
} else if (line.equals("snap")) {
view.tell("snap", null);
} else {
processor.tell(Persistent.create(line), null);
}
}
system.shutdown();
}
}

View file

@@ -21,8 +21,8 @@ object ConversationRecoveryExample extends App {
println(s"received ping ${counter} times ...") println(s"received ping ${counter} times ...")
m.confirm() m.confirm()
if (!recoveryRunning) Thread.sleep(1000) if (!recoveryRunning) Thread.sleep(1000)
pongChannel ! Deliver(m.withPayload(Pong), sender, Resolve.Destination) pongChannel ! Deliver(m.withPayload(Pong), sender.path)
case "init" => if (counter == 0) pongChannel ! Deliver(Persistent(Pong), sender) case "init" => if (counter == 0) pongChannel ! Deliver(Persistent(Pong), sender.path)
} }
override def preStart() = () override def preStart() = ()
@@ -38,7 +38,7 @@ object ConversationRecoveryExample extends App {
println(s"received pong ${counter} times ...") println(s"received pong ${counter} times ...")
m.confirm() m.confirm()
if (!recoveryRunning) Thread.sleep(1000) if (!recoveryRunning) Thread.sleep(1000)
pingChannel ! Deliver(m.withPayload(Ping), sender, Resolve.Destination) pingChannel ! Deliver(m.withPayload(Ping), sender.path)
} }
override def preStart() = () override def preStart() = ()

View file

@@ -5,20 +5,19 @@
package sample.persistence package sample.persistence
import akka.actor._ import akka.actor._
import akka.pattern.ask
import akka.persistence._ import akka.persistence._
import akka.util.Timeout
object ProcessorChannelExample extends App { object ProcessorChannelExample extends App {
class ExampleProcessor extends Processor { class ExampleProcessor extends Processor {
val channel = context.actorOf(Channel.props, "channel") val channel = context.actorOf(Channel.props, "channel")
val destination = context.actorOf(Props[ExampleDestination]) val destination = context.actorOf(Props[ExampleDestination])
var received: List[Persistent] = Nil
def receive = { def receive = {
case p @ Persistent(payload, _) => case p @ Persistent(payload, _) =>
println(s"processed ${payload}") println(s"processed ${payload}")
channel forward Deliver(p.withPayload(s"processed ${payload}"), destination) channel ! Deliver(p.withPayload(s"processed ${payload}"), destination.path)
case reply: String =>
println(s"reply = ${reply}")
} }
} }
@@ -34,11 +33,8 @@ object ProcessorChannelExample extends App {
val system = ActorSystem("example") val system = ActorSystem("example")
val processor = system.actorOf(Props(classOf[ExampleProcessor]), "processor-1") val processor = system.actorOf(Props(classOf[ExampleProcessor]), "processor-1")
implicit val timeout = Timeout(3000) processor ! Persistent("a")
import system.dispatcher processor ! Persistent("b")
processor ? Persistent("a") onSuccess { case reply => println(s"reply = ${reply}") }
processor ? Persistent("b") onSuccess { case reply => println(s"reply = ${reply}") }
Thread.sleep(1000) Thread.sleep(1000)
system.shutdown() system.shutdown()

View file

@@ -0,0 +1,105 @@
/**
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package sample.persistence
import scala.concurrent.duration._
import com.typesafe.config._
import akka.actor._
import akka.persistence._
object ProcessorChannelRemoteExample {
val config = ConfigFactory.parseString(
"""
akka {
actor {
provider = "akka.remote.RemoteActorRefProvider"
}
remote {
enabled-transports = ["akka.remote.netty.tcp"]
netty.tcp.hostname = "127.0.0.1"
}
persistence {
journal.leveldb.dir = "target/example/journal"
snapshot-store.local.dir = "target/example/snapshots"
}
loglevel = INFO
log-dead-letters = 0
log-dead-letters-during-shutdown = off
}
""")
}
object SenderApp extends App {
import ProcessorChannelRemoteExample._
class ExampleProcessor(destination: ActorPath) extends Processor {
val listener = context.actorOf(Props[ExampleListener])
val channel = context.actorOf(Channel.props(ChannelSettings(
redeliverMax = 5,
redeliverInterval = 1.second,
redeliverFailureListener = Some(listener))), "channel")
def receive = {
case p @ Persistent(payload, _) =>
println(s"[processor] received payload: ${payload} (replayed = ${recoveryRunning})")
channel ! Deliver(p.withPayload(s"processed ${payload}"), destination)
case "restart" =>
throw new Exception("restart requested")
case reply: String =>
println(s"[processor] received reply: ${reply}")
}
}
class ExampleListener extends Actor {
def receive = {
case RedeliverFailure(messages) =>
println(s"unable to deliver ${messages.length} messages, restarting processor to resend messages ...")
context.parent ! "restart"
}
}
val receiverPath = ActorPath.fromString("akka.tcp://receiver@127.0.0.1:44317/user/receiver")
val senderConfig = ConfigFactory.parseString("""
akka.persistence.journal.leveldb.dir = "target/example/journal"
akka.persistence.snapshot-store.local.dir = "target/example/snapshots"
akka.remote.netty.tcp.port = 44316
""")
val system = ActorSystem("sender", config.withFallback(senderConfig))
val sender = system.actorOf(Props(classOf[ExampleProcessor], receiverPath))
@annotation.tailrec
def read(line: String): Unit = line match {
case "exit" | null =>
case msg =>
sender ! Persistent(msg)
read(Console.readLine())
}
read(Console.readLine())
system.shutdown()
}
object ReceiverApp extends App {
import ProcessorChannelRemoteExample._
class ExampleDestination extends Actor {
def receive = {
case p @ ConfirmablePersistent(payload, snr, _) =>
println(s"[destination] received payload: ${payload}")
sender ! s"re: ${payload} (snr = ${snr})"
p.confirm()
}
}
val receiverConfig = ConfigFactory.parseString("akka.remote.netty.tcp.port = 44317")
val system = ActorSystem("receiver", config.withFallback(receiverConfig))
system.actorOf(Props[ExampleDestination], "receiver")
}

View file

@@ -0,0 +1,71 @@
/**
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package sample.persistence
import akka.actor._
import akka.persistence._
object ViewExample extends App {
class ExampleProcessor extends Processor {
override def processorId = "processor-5"
def receive = {
case Persistent(payload, sequenceNr) =>
println(s"processor received ${payload} (sequence nr = ${sequenceNr})")
}
}
class ExampleView extends View {
private var numReplicated = 0
override def processorId = "processor-5"
override def viewId = "view-5"
private val destination = context.actorOf(Props[ExampleDestination])
private val channel = context.actorOf(Channel.props("channel"))
def receive = {
case "snap" =>
saveSnapshot(numReplicated)
case SnapshotOffer(metadata, snapshot: Int) =>
numReplicated = snapshot
println(s"view received snapshot offer ${snapshot} (metadata = ${metadata})")
case Persistent(payload, sequenceNr) =>
numReplicated += 1
println(s"view received ${payload} (sequence nr = ${sequenceNr}, num replicated = ${numReplicated})")
channel ! Deliver(Persistent(s"replicated-${payload}"), destination.path)
}
}
class ExampleDestination extends Actor {
def receive = {
case cp @ ConfirmablePersistent(payload, sequenceNr, _) =>
println(s"destination received ${payload} (sequence nr = ${sequenceNr})")
cp.confirm()
}
}
val system = ActorSystem("example")
val processor = system.actorOf(Props(classOf[ExampleProcessor]))
val view = system.actorOf(Props(classOf[ExampleView]))
@annotation.tailrec
def read(line: String): Unit = line match {
case "exit" | null =>
case "sync" =>
view ! Update(await = false)
read(Console.readLine())
case "snap" =>
view ! "snap"
read(Console.readLine())
case msg =>
processor ! Persistent(msg)
read(Console.readLine())
}
read(Console.readLine())
system.shutdown()
}